path | concatenated_notebook
---|---|
other/CNN.ipynb | ###Markdown
Convolutional Neural Network (CNN)
###Code
import numpy as np
import sys, os
sys.path.append(os.pardir)
from common.util import im2col
x1 = np.random.rand(1, 3, 7, 7)
col1 = im2col(x1, 5, 5, stride=1, pad=0)
print(col1.shape) #(9, 75)
x2 = np.random.rand(10, 3, 7, 7)
col2 = im2col(x2, 5, 5, stride=1, pad=0)
print(col2.shape) #(90, 75)
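# Why these shapes (comment added for clarity): with a 7x7 input, a 5x5 window,
# stride 1 and no padding, there are (7 - 5)/1 + 1 = 3 window positions per axis,
# i.e. 3*3 = 9 windows per image, and each unrolled window holds 3*5*5 = 75 values.
# A batch of 10 images therefore gives 10*9 = 90 rows of 75 columns.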
class Convolution:
def __init__(self, W, b, stride=1, pad=0):
self.W = W
self.b = b
self.stride = stride
self.pad = pad
def forward(self, x):
FN, C, FH, FW = self.W.shape
N, C, H, W = x.shape
out_h = int(1 + (H + 2*self.pad - FH) / self.stride)
out_w = int(1 + (W + 2*self.pad - FW) / self.stride)
col = im2col(x, FH, FW, self.stride, self.pad)
        col_W = self.W.reshape(FN, -1).T  # unroll the filters into a 2D matrix
out = np.dot(col, col_W) + self.b
out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
return out
class Pooling:
def __init__(self, pool_h, pool_w, stride=1, pad=0):
self.pool_h = pool_h
self.pool_w = pool_w
self.stride = stride
self.pad = pad
def forward(self, x):
N, C, H, W = x.shape
out_h = int(1 + (H - self.pool_h) / self.stride)
out_w = int(1 + (W - self.pool_w) / self.stride)
col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
col = col.reshape(-1, self.pool_h*self.pool_w)
out = np.max(col, axis=1)
out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)
return out
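# Illustrative usage sketch (added; not part of the original notebook): forward
# shapes for the layers defined above, using hypothetical filter values.
W_demo = np.random.rand(16, 3, 5, 5)               # 16 filters of shape 3x5x5
b_demo = np.zeros(16)
conv_demo = Convolution(W_demo, b_demo, stride=1, pad=0)
x_demo = np.random.rand(10, 3, 28, 28)
out_demo = conv_demo.forward(x_demo)
print(out_demo.shape)                              # (10, 16, 24, 24): (28 - 5)/1 + 1 = 24
pool_demo = Pooling(pool_h=2, pool_w=2, stride=2)
print(pool_demo.forward(out_demo).shape)           # (10, 16, 12, 12)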
import pickle  # used by save_params / load_params below
from common.layers import *
from collections import OrderedDict
class SimpleConvNet:
def __init__(self, input_dim=(1, 28, 28), conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1}, hidden_size=100, output_size=10, weight_init_std=0.01):
filter_num = conv_param['filter_num']
filter_size = conv_param['filter_size']
filter_pad = conv_param['pad']
filter_stride = conv_param['stride']
input_size = input_dim[1]
conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))  # assumes 2x2 pooling that halves the spatial size
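        # Worked example (comment added for clarity): with the default 28x28 input and a
        # 5x5 filter (stride 1, pad 0), conv_output_size = (28 - 5)/1 + 1 = 24; the 2x2,
        # stride-2 pooling below halves that to 12, so pool_output_size = 30*12*12 = 4320.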
self.params = {}
self.params['W1'] = weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
self.params['b1'] = np.zeros(filter_num)
self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
self.params['b2'] = np.zeros(hidden_size)
self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
self.params['b3'] = np.zeros(output_size)
self.layers = OrderedDict()
self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'], conv_param['stride'], conv_param['pad'])
self.layers['Relu1'] = Relu()
self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
self.layers['Relu2'] = Relu()
self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
self.lastLayer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
y = self.predict(x)
return self.lastLayer.forward(y, t)
def gradient(self, x, t):
self.loss(x, t)
dout = 1
dout = self.lastLayer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
grads={}
grads['W1'] = self.layers['Conv1'].dW
grads['b1'] = self.layers['Conv1'].db
grads['W2'] = self.layers['Affine1'].dW
grads['b2'] = self.layers['Affine1'].db
grads['W3'] = self.layers['Affine2'].dW
grads['b3'] = self.layers['Affine2'].db
return grads
def accuracy(self, x, t, batch_size=100):
if t.ndim != 1 : t = np.argmax(t, axis=1)
acc = 0.0
for i in range(int(x.shape[0] / batch_size)):
tx = x[i*batch_size:(i+1)*batch_size]
tt = t[i*batch_size:(i+1)*batch_size]
y = self.predict(tx)
y = np.argmax(y, axis=1)
acc += np.sum(y == tt)
return acc / x.shape[0]
def save_params(self, file_name="params.pkl"):
params = {}
for key, val in self.params.items():
params[key] = val
with open(file_name, 'wb') as f:
pickle.dump(params, f)
def load_params(self, file_name="params.pkl"):
with open(file_name, 'rb') as f:
params = pickle.load(f)
for key, val in params.items():
self.params[key] = val
for i, key in enumerate(['Conv1', 'Affine1', 'Affine2']):
self.layers[key].W = self.params['W' + str(i+1)]
self.layers[key].b = self.params['b' + str(i+1)]
import sys, os
sys.path.append(os.pardir)  # so that files in the parent directory can be imported
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
#from simple_convnet import SimpleConvNet
from common.trainer import Trainer
# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
# Reduce the dataset if training takes too long
#x_train, t_train = x_train[:5000], t_train[:5000]
#x_test, t_test = x_test[:1000], t_test[:1000]
max_epochs = 20
network = SimpleConvNet(input_dim=(1,28,28),
conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=max_epochs, mini_batch_size=100,
optimizer='Adam', optimizer_param={'lr': 0.001},
evaluate_sample_num_per_epoch=1000)
trainer.train()
# Save the parameters
network.save_params("params.pkl")
print("Saved Network Parameters!")
# Draw the graph
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
###Output
train loss:2.300118257851044
=== epoch:1, train acc:0.087, test acc:0.089 ===
train loss:2.2988220359130755
train loss:2.295447318380627
train loss:2.290871043798905
train loss:2.2860536827346083
train loss:2.2719266760963865
train loss:2.2633038365566307
train loss:2.2341930137051484
train loss:2.2517179401726115
train loss:2.1877753944873755
train loss:2.2074775901568087
train loss:2.127452513113011
train loss:2.1284692716878677
train loss:2.12382919172393
train loss:2.040466116000816
train loss:2.0468897290111676
train loss:1.9269709746388517
train loss:1.8657272868753991
train loss:1.8902392271797641
train loss:1.7616097106923783
train loss:1.628726778311443
train loss:1.556419518632294
train loss:1.6172739758860528
train loss:1.478723617552702
train loss:1.3609264977468962
train loss:1.2585286741103408
train loss:1.1753985882579974
train loss:1.1694251853751318
train loss:1.1470417989608281
train loss:0.9489517689931622
train loss:1.1698178415872216
train loss:0.8926890268209867
train loss:0.8992193339649696
train loss:0.7255350807000492
train loss:0.8397271224817721
train loss:0.9089398663385613
train loss:0.876292954183065
train loss:0.8630776811711517
train loss:0.8204591663239568
train loss:0.6113670359726905
train loss:0.6312408785175907
train loss:0.5819422879134619
train loss:0.8181539330871572
train loss:0.6048047578666691
train loss:0.6228414642556238
train loss:0.3859350173588754
train loss:0.4928304739253133
train loss:0.6148574857909463
train loss:0.7915183159413336
train loss:0.55915595955625
train loss:0.6237747973992857
train loss:0.6820834141194154
train loss:0.4440103510610933
train loss:0.385214227112011
train loss:0.5325764712754514
train loss:0.48257897180371856
train loss:0.4762079018113136
train loss:0.6391208646987805
train loss:0.48293534831539736
train loss:0.5954841221996399
train loss:0.5020884232064248
train loss:0.4049172778436312
train loss:0.2538136712683605
train loss:0.649059062885035
train loss:0.29703576319316033
train loss:0.43613733267090593
train loss:0.46939422843132095
train loss:0.4332596082104586
train loss:0.2800314358949438
train loss:0.42547258644972963
train loss:0.5287102910997554
train loss:0.34577238913953123
train loss:0.5936576702275637
train loss:0.5303578420061962
train loss:0.4761000626203588
train loss:0.6607747911829931
train loss:0.4397405235849344
train loss:0.3980553240241347
train loss:0.6149770815713616
train loss:0.3497427518117686
train loss:0.30934918068301037
train loss:0.49383151332965347
train loss:0.45385037451827664
train loss:0.4619384590598544
train loss:0.43646300235584456
train loss:0.3517732646920053
train loss:0.3915397780314019
train loss:0.42032155964811324
train loss:0.3944291905663009
train loss:0.38825956969989833
train loss:0.3820451314571076
train loss:0.5151544922186684
train loss:0.5473289536819524
train loss:0.3456675820898656
train loss:0.3464398112589431
train loss:0.5268154003819261
train loss:0.40618943688970516
train loss:0.38481375974468324
train loss:0.46996165790946287
train loss:0.41261000326860287
train loss:0.48779532592719355
train loss:0.46136792690131967
train loss:0.3956371445291063
train loss:0.4843673753372014
train loss:0.43042676176911476
train loss:0.34331785150510674
train loss:0.4284104518981936
train loss:0.3142506275962229
train loss:0.41040141706979144
train loss:0.3866004236995092
train loss:0.2560622323448291
train loss:0.5082073195113893
train loss:0.42165870930869304
train loss:0.26161012990834237
train loss:0.4122427717894358
train loss:0.3889847266428063
train loss:0.33926280806509446
train loss:0.3202014682548693
train loss:0.3885365184077352
train loss:0.3757146467594932
train loss:0.27832384622089107
train loss:0.4159145996414542
train loss:0.2551904695673761
train loss:0.3020918258062075
train loss:0.28962238675670465
train loss:0.30243786921900473
train loss:0.20914300126355573
train loss:0.3324374448189754
train loss:0.36458709522806126
train loss:0.3139219122426887
train loss:0.348267700909378
train loss:0.3079964748347351
train loss:0.3572013021546947
train loss:0.32214733434391696
train loss:0.1991396684487296
train loss:0.31466229467923823
train loss:0.37464829091829555
train loss:0.39155925667942787
train loss:0.3249028325145988
train loss:0.3065997490275663
train loss:0.2357224085270734
train loss:0.33827622120124373
train loss:0.4779310941650929
train loss:0.34949838540033346
train loss:0.1442067349562653
train loss:0.39472533682902994
train loss:0.26640189659235075
train loss:0.3551001095991619
train loss:0.35583063870639015
train loss:0.34706152723745176
train loss:0.46918580696652773
train loss:0.3528511431820871
train loss:0.3055332456092758
train loss:0.50853145577962
train loss:0.3621998784657812
train loss:0.23329026227004884
train loss:0.38601134996427283
train loss:0.1963269928961011
train loss:0.3134083986929005
train loss:0.30827034555882177
train loss:0.20659925076149363
train loss:0.30230057390810045
train loss:0.3096477300756512
train loss:0.33395796121030535
train loss:0.28993469307154807
train loss:0.4911841469286341
train loss:0.1603859399165033
train loss:0.3481652683435775
train loss:0.2610161852700981
train loss:0.31335896160980714
train loss:0.45808033184168534
train loss:0.3282122526098118
train loss:0.24443299597700346
train loss:0.21298657288626413
train loss:0.37819177532642134
train loss:0.3806224813082182
train loss:0.366158563876603
train loss:0.35821428155808815
train loss:0.23107817841732678
train loss:0.43051976501776396
train loss:0.24948643590790393
train loss:0.4012424641385844
train loss:0.30011214597374614
train loss:0.15867004451460545
train loss:0.228759861788478
train loss:0.2616962782123859
train loss:0.21706123091829516
train loss:0.31392184928194466
train loss:0.2305889705197265
train loss:0.25131747505283525
train loss:0.2989127597317445
train loss:0.2449312029750666
train loss:0.2835122252415132
train loss:0.22016332782642897
train loss:0.24184797285011758
train loss:0.4832775567668682
train loss:0.3054862139314748
train loss:0.33500056199814254
train loss:0.2725710155205264
train loss:0.339668287254206
train loss:0.311462632226172
train loss:0.21616553162347113
train loss:0.25872489100030943
train loss:0.1791605276066444
train loss:0.40816368478771936
train loss:0.25077730144890614
train loss:0.31009154426998164
train loss:0.33713420365966557
train loss:0.27693185461888886
train loss:0.3259529081176156
train loss:0.47632403162233294
train loss:0.2185224667563763
train loss:0.3761107948115704
train loss:0.3295974391771378
train loss:0.3125875112330511
train loss:0.16387469376252134
train loss:0.3216110426313192
train loss:0.22472899031739996
train loss:0.3406296158170564
train loss:0.34693900389627
train loss:0.3077683600650194
train loss:0.3501026325166981
train loss:0.48514219376707923
train loss:0.2573197995010527
train loss:0.1522966785033454
train loss:0.24673654617156643
train loss:0.33426109401044307
train loss:0.31424181477323115
train loss:0.3445405764146362
train loss:0.19388399657089558
train loss:0.28719772082519895
train loss:0.22787598493297356
train loss:0.2728023282572549
train loss:0.40666680432927504
train loss:0.3551910437314775
train loss:0.20943923195554112
train loss:0.3000074962824635
train loss:0.22850867893362256
train loss:0.3576823091967201
train loss:0.20846057212439312
train loss:0.38751066805178835
train loss:0.28419989820177327
train loss:0.2128150536741247
train loss:0.26919893135427886
train loss:0.2110819427453971
train loss:0.3769715672292987
train loss:0.2832200197293484
train loss:0.19762571854509428
train loss:0.24215558481497218
train loss:0.1907360292268803
train loss:0.2348286000150339
train loss:0.30978430243709576
train loss:0.14749987461867206
train loss:0.2309831718318719
train loss:0.3257451244309817
train loss:0.1898924267971495
train loss:0.22362206874115262
train loss:0.2552650997543678
train loss:0.36243944458013017
train loss:0.32823754007896366
train loss:0.2360466109167669
train loss:0.21872763309613344
train loss:0.3673143924504231
train loss:0.22962534563659115
train loss:0.19769569063111658
train loss:0.3353933681433135
train loss:0.33107486987956064
train loss:0.2788177514346132
train loss:0.28471457385790405
train loss:0.17857537857516617
|
1_Train_Model.ipynb | ###Markdown
Image Captioning - Training NotebookMake sure to set up the project environment and data according to the README.
###Code
import torch
import torch.nn as nn
from torchvision import transforms
from pycocotools.coco import COCO
from imcaption.data_loader import get_transform, get_loader
from imcaption.model import EncoderCNN, DecoderRNN
import math
import torch.utils.data as data
import numpy as np
import os
import sys
import requests
import time
batch_size = 16 # batch size
vocab_threshold = 20 # minimum word count threshold
vocab_from_file = False # if True, load existing vocab file
embed_size = 256 # dimensionality of image and word embeddings
hidden_size = 512 # number of features in hidden state of the RNN decoder
num_epochs = 5 # number of training epochs
save_every = 1 # determines frequency of saving model weights
print_every = 2000 # determines window for printing average loss
learning_rate = 1e-3 # learning rate passed to the optimizer
saved_model_dir = "saved_models" # folder containing the saved model weights
# Setup nltk
import nltk
nltk.download('punkt')
# Define the Image Transform
transform_train = get_transform()
# Build data loader.
data_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=vocab_from_file)
# The size of the vocabulary.
vocab_size = len(data_loader.dataset.vocab)
# Initialize the encoder and decoder.
encoder = EncoderCNN(embed_size)
decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
# Move models to GPU if CUDA is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder.to(device)
decoder.to(device)
# Define the loss function.
criterion = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()
# Specify the learnable parameters of the model.
params = decoder.parameters()
# Define the optimizer.
optimizer = torch.optim.Adam(params, lr=learning_rate)
if not os.path.exists(saved_model_dir):
os.mkdir(saved_model_dir)
# Set the total number of training steps per epoch.
total_step = math.ceil(len(data_loader.dataset.caption_lengths) / data_loader.batch_sampler.batch_size)
for epoch in range(1, num_epochs+1):
for i_step in range(1, total_step+1):
# Randomly sample a caption length, and sample indices with that length.
indices = data_loader.dataset.get_train_indices()
# Create and assign a batch sampler to retrieve a batch with the sampled indices.
new_sampler = data.sampler.SubsetRandomSampler(indices=indices)
data_loader.batch_sampler.sampler = new_sampler
# Obtain the batch.
images, captions = next(iter(data_loader))
# Move batch of images and captions to GPU if CUDA is available.
images = images.to(device)
captions = captions.to(device)
# Zero the gradients.
decoder.zero_grad()
encoder.zero_grad()
# Pass the inputs through the CNN-RNN model.
features = encoder(images)
outputs = decoder(features, captions)
# Calculate the batch loss.
loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))
# Backward pass.
loss.backward()
# Update the parameters in the optimizer.
optimizer.step()
# Get training statistics.
stats = 'Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f' % (epoch, num_epochs, i_step, total_step, loss.item(), np.exp(loss.item()))
# Print training statistics (on same line).
print('\r' + stats, end="")
sys.stdout.flush()
# Print training statistics (on different line).
if i_step % print_every == 0:
print('\r' + stats)
# Save the weights.
if epoch % save_every == 0:
torch.save(decoder.state_dict(), os.path.join(saved_model_dir, 'decoder-%d.pkl' % epoch))
torch.save(encoder.state_dict(), os.path.join(saved_model_dir, 'encoder-%d.pkl' % epoch))
###Output
Epoch [1/5], Step [2000/25883], Loss: 3.6123, Perplexity: 37.0494
Epoch [1/5], Step [4000/25883], Loss: 2.7713, Perplexity: 15.9792
Epoch [1/5], Step [6000/25883], Loss: 2.2727, Perplexity: 9.70529
Epoch [1/5], Step [8000/25883], Loss: 2.2720, Perplexity: 9.69854
Epoch [1/5], Step [10000/25883], Loss: 2.4542, Perplexity: 11.6373
Epoch [1/5], Step [12000/25883], Loss: 2.0725, Perplexity: 7.94450
Epoch [1/5], Step [14000/25883], Loss: 2.4636, Perplexity: 11.7468
Epoch [1/5], Step [16000/25883], Loss: 2.3179, Perplexity: 10.1546
Epoch [1/5], Step [18000/25883], Loss: 2.5545, Perplexity: 12.8646
Epoch [1/5], Step [20000/25883], Loss: 2.2813, Perplexity: 9.78967
Epoch [1/5], Step [22000/25883], Loss: 2.0412, Perplexity: 7.70005
Epoch [1/5], Step [24000/25883], Loss: 2.0952, Perplexity: 8.12696
Epoch [2/5], Step [2000/25883], Loss: 2.0445, Perplexity: 7.725482
Epoch [2/5], Step [4000/25883], Loss: 2.1870, Perplexity: 8.90866
Epoch [2/5], Step [5137/25883], Loss: 2.3620, Perplexity: 10.6125 |
notebooks/functional/reduceimp.ipynb | ###Markdown
reduce() - applies a function to an iterable and reduces it to a single cumulative value - the function is applied to two arguments at a time ```obj = reduce(function, iterable)```
###Code
import functools
import operator
functools.reduce(operator.add, [1,4,5])
data = list(range(1, 6))
def sum(a, b):
return a + b
functools.reduce(sum, data)
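# Step-by-step (added for illustration): functools.reduce(sum, [1, 2, 3, 4, 5])
# evaluates as sum(sum(sum(sum(1, 2), 3), 4), 5) = 15.
# An optional initial value can be passed as a third argument:
functools.reduce(operator.add, data, 10)  # 10 + 1 + 2 + 3 + 4 + 5 = 25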
###Output
_____no_output_____ |
CIFAR10_without_dimensionality_reduction (1).ipynb | ###Markdown
**CIFAR10 image classification using different algorithms** 1. **Convolutional Neural Network (CNN)** Splitting the CIFAR10 dataset into training and testing sets
###Code
# Imports assumed by this notebook (its import cell is not included in this dump);
# the exact module paths are a best guess.
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print("shape of training data (X)" ,X_train.shape)
print("shape of training data(y)" ,y_train.shape)
print("shape of testing data(X)" ,X_test.shape)
print("shape of testing data(y)" ,y_test.shape)
###Output
shape of training data (X) (50000, 32, 32, 3)
shape of training data(y) (50000, 1)
shape of testing data(X) (10000, 32, 32, 3)
shape of testing data(y) (10000, 1)
###Markdown
Converting both X_train and X_test to grayscale
###Code
X_train = np.array([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in X_train])
X_test = np.array([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in X_test])
print("shape of training data (X)" ,X_train.shape)
print("shape of testing data(X)" ,X_test.shape)
###Output
shape of training data (X) (50000, 32, 32)
shape of testing data(X) (10000, 32, 32)
###Markdown
Normalizing the pixel values and one-hot encoding the output labels
###Code
X_train = X_train/255
X_test = X_test/255
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
ohe = OneHotEncoder(sparse=False)
ohe.fit(y_train)
y_train = ohe.transform(y_train)
y_test = ohe.transform(y_test)
###Output
_____no_output_____
###Markdown
Defining input shape for CNN
###Code
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)
input_shape = (X_train.shape[1], X_train.shape[2], 1)
###Output
_____no_output_____
###Markdown
Building the model architecture (this is a basic architecture commonly used for CNNs)
###Code
model = Sequential()
model.add(Conv2D(16, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=(32,32,1)))
model.add(Conv2D(32, (3, 3), activation='relu', strides=(1, 1), padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1), padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(16, (3, 3), activation='relu', strides=(1, 1), padding='same'))
model.add(Conv2D(32, (3, 3), activation='relu', strides=(1, 1), padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1), padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Flatten())
model.add(Dense(245, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['acc'])
stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
###Output
_____no_output_____
###Markdown
Fitting training data to the model
###Code
history = model.fit(X_train.reshape(50000, 32, 32,1), y_train, epochs=20,
batch_size=32, validation_data=(X_test.reshape(10000, 32, 32,1), y_test), callbacks=[stop])
###Output
Epoch 1/20
1563/1563 [==============================] - 310s 198ms/step - loss: 2.1121 - acc: 0.1942 - val_loss: 1.5588 - val_acc: 0.4280
Epoch 2/20
1563/1563 [==============================] - 309s 198ms/step - loss: 1.4621 - acc: 0.4739 - val_loss: 1.2545 - val_acc: 0.5550
Epoch 3/20
1563/1563 [==============================] - 309s 198ms/step - loss: 1.2332 - acc: 0.5627 - val_loss: 1.0998 - val_acc: 0.6242
Epoch 4/20
1563/1563 [==============================] - 308s 197ms/step - loss: 1.0963 - acc: 0.6140 - val_loss: 1.0262 - val_acc: 0.6404
Epoch 5/20
1563/1563 [==============================] - 308s 197ms/step - loss: 1.0004 - acc: 0.6504 - val_loss: 0.9736 - val_acc: 0.6634
Epoch 6/20
1563/1563 [==============================] - 310s 199ms/step - loss: 0.9250 - acc: 0.6768 - val_loss: 0.9660 - val_acc: 0.6638
Epoch 7/20
1563/1563 [==============================] - 308s 197ms/step - loss: 0.8610 - acc: 0.7000 - val_loss: 0.9270 - val_acc: 0.6842
Epoch 8/20
1563/1563 [==============================] - 307s 196ms/step - loss: 0.8081 - acc: 0.7223 - val_loss: 0.9017 - val_acc: 0.6920
Epoch 9/20
1563/1563 [==============================] - 311s 199ms/step - loss: 0.7522 - acc: 0.7380 - val_loss: 0.9244 - val_acc: 0.6868
Epoch 10/20
1563/1563 [==============================] - 310s 198ms/step - loss: 0.7163 - acc: 0.7519 - val_loss: 0.9431 - val_acc: 0.6828
Epoch 11/20
1563/1563 [==============================] - 307s 196ms/step - loss: 0.6717 - acc: 0.7692 - val_loss: 0.9110 - val_acc: 0.7013
Epoch 00011: early stopping
###Markdown
Predicting output on the testing data
###Code
cnn_pred = model.predict(X_test.reshape(10000,32,32,1))
cnn_pred = ohe.inverse_transform(cnn_pred)
y_test = ohe.inverse_transform(y_test)
###Output
_____no_output_____
###Markdown
Model evaluation
###Code
ac=accuracy_score(y_test , cnn_pred)
print("----------------")
print("")
print("Accuracy using CNN: ",ac*100)
print("")
print("----------------")
plt.figure(figsize=(9,9))
sns.heatmap(confusion_matrix(y_test, cnn_pred), fmt='d', annot=True, cmap=plt.cm.Purples)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
cm = confusion_matrix(y_test, cnn_pred)
plt.figure(figsize=(9,9))
sns.heatmap(cm, cbar=False, xticklabels=labels, yticklabels=labels, fmt='d', annot=True, cmap=plt.cm.Blues)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
arr=[]
for i in X_train:
i=i.ravel()
arr.append(i)
arr2=[]
for i in X_test:
i=i.ravel()
arr2.append(i)
df_test=pd.DataFrame(arr2)
df_test.shape
df=pd.DataFrame(arr)
df.shape
data=pd.concat([df , df_test],axis=0)
data.shape
y_train=ohe.inverse_transform(y_train)
###Output
_____no_output_____
###Markdown
2. **Support Vector Machine (SVM)**
###Code
svm=SVC(C=1.0, kernel='linear', gamma='scale',
cache_size=200, class_weight=None, verbose=True, max_iter=- 1,random_state=42)
svm.fit(df, y_train.ravel())
svm_pred=svm.predict(df_test)
print("----------------")
print("")
print("Accuracy using SVM classifier: ",accuracy_score(y_test , svm_pred)*100)
print("")
print("----------------")
###Output
_____no_output_____
###Markdown
3. **Linear Discriminant Analysis (LDA)**
###Code
lda=LinearDiscriminantAnalysis()
lda.fit(df, y_train.ravel())
lda_pred=lda.predict(df_test)
print("-----------------")
print("")
print("Accuracy using LDA: ",accuracy_score(y_test , lda_pred)*100)
print("")
print("-----------------")
###Output
-----------------
Accuracy using LDA: 28.299999999999997
-----------------
###Markdown
4. **XGBoost**
###Code
xgb_model = xgb.XGBClassifier(objective="multi:softprob", random_state=42)
xgb_model.fit(df, y_train.ravel())
xg_pred = xgb_model.predict(df_test)
print("---------------")
print("")
print("Accuracy using XGBoost: ", accuracy_score(y_test , xg_pred)*100)
print("")
print("---------------")
###Output
---------------
Accuracy using XGBoost: 38.1
---------------
###Markdown
**5. Random Forest Classifier**
###Code
rfc = RandomForestClassifier(n_estimators = 500, criterion = 'entropy', random_state = 42)
rfc.fit(df, y_train.ravel())
rfc_pred=rfc.predict(df_test)
print("---------------")
print("")
print("Accuracy using RF Classifier: ", accuracy_score(y_test , rfc_pred)*100)
print("")
print("---------------")
###Output
---------------
Accuracy using RF Classifier: 43.580000000000005
---------------
###Markdown
**6. Naive Bayes - Gaussian NB**
###Code
gnb = GaussianNB().fit(df, y_train.ravel())
gnb_pred = gnb.predict(df_test)
acc = accuracy_score(y_test, gnb_pred)
print("---------------")
print("")
print("Accuracy using Gaussian NB: ", acc*100)
print("")
print("---------------")
###Output
---------------
Accuracy using Gaussian NB: 5.5
---------------
|
B-ๆ็บฟๅพ/่ฟ้ถๆ็บฟๅพMA_B_03/MA_B_03.ipynb | ###Markdown
Matplotlib Gallery: Advanced Line Charts (WeChat official account: Visualization Gallery)
###Code
import matplotlib
print(matplotlib.__version__) # check the Matplotlib version
import pandas as pd
print(pd.__version__) # check the pandas version
import numpy as np
print(np.__version__) # check the numpy version
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['STHeiti'] # set a font that can display Chinese characters
###Output
3.3.3
1.2.0
1.19.4
###Markdown
Note: all of the code has been tested in the following environment: - Python 3.7.1 - Matplotlib == 3.3.3 - pandas == 1.2.0 - numpy == 1.19.4. Other versions may have minor syntax differences; if you get an error, first check your spelling and that the versions match! Advanced line chart
###Code
from pandas.plotting import andrews_curves
# via from https://www.machinelearningplus.com/plots/top-50-matplotlib-visualizations-the-master-plots-python/
# Import
df = pd.read_csv('mtcars.csv')
df.drop(['cars', 'carname'], axis=1, inplace=True)
# Plot
plt.figure(figsize=(9,8), dpi= 80)
ax = andrews_curves(df, 'cyl', colormap='Set1')
# Lighten borders
plt.gca().spines["top"].set_alpha(0)
plt.gca().spines["bottom"].set_alpha(.3)
plt.gca().spines["right"].set_alpha(0)
plt.gca().spines["left"].set_alpha(.3)
plt.title('Andrews Curves of mtcars', fontsize=22)
plt.xlim(-3,3)
plt.grid(alpha=0.3)
ax.set_xlabel('I am the x-axis', fontsize = 20) # Add an x-label to the axes.
ax.set_ylabel('I am the y-axis', fontsize = 20) # Add a y-label to the axes.
ax.set_title("I am the title", fontsize = 20) # Add a title to the axes.
plt.tick_params(labelsize=15)
plt.savefig("B_07.png")
plt.show()
###Output
_____no_output_____
###Markdown
Advanced line chart
###Code
from pandas.plotting import parallel_coordinates
# via from https://www.machinelearningplus.com/plots/top-50-matplotlib-visualizations-the-master-plots-python/
# Import Data
df_final = pd.read_csv("diamonds_filter.csv")
# Plot
plt.figure(figsize=(9,8), dpi= 80)
ax = parallel_coordinates(df_final, 'cut', colormap='Dark2')
# Lighten borders
plt.gca().spines["top"].set_alpha(0)
plt.gca().spines["bottom"].set_alpha(.3)
plt.gca().spines["right"].set_alpha(0)
plt.gca().spines["left"].set_alpha(.3)
plt.grid(alpha=0.3)
ax.set_xlabel('I am the x-axis', fontsize = 20) # Add an x-label to the axes.
ax.set_ylabel('I am the y-axis', fontsize = 20) # Add a y-label to the axes.
ax.set_title("I am the title", fontsize = 20) # Add a title to the axes.
plt.tick_params(labelsize=15)
plt.savefig("B_08.png")
plt.show()
###Output
_____no_output_____
###Markdown
Advanced line chart
###Code
import matplotlib as mpl
# via from https://www.machinelearningplus.com/plots/top-50-matplotlib-visualizations-the-master-plots-python/
# Import Data
df = pd.read_csv('AirPassengers.csv')
# Get the Peaks and Troughs
data = df['value'].values
doublediff = np.diff(np.sign(np.diff(data)))
peak_locations = np.where(doublediff == -2)[0] + 1
doublediff2 = np.diff(np.sign(np.diff(-1*data)))
trough_locations = np.where(doublediff2 == -2)[0] + 1
# Draw Plot
plt.figure(figsize=(9,8), dpi= 80)
plt.plot('date', 'value', data=df, color='tab:blue', label='Air Traffic')
plt.scatter(df.date[peak_locations], df.value[peak_locations], marker=mpl.markers.CARETUPBASE, color='tab:green', s=100, label='Peaks')
plt.scatter(df.date[trough_locations], df.value[trough_locations], marker=mpl.markers.CARETDOWNBASE, color='tab:red', s=100, label='Troughs')
# Annotate
for t, p in zip(trough_locations[1::5], peak_locations[::3]):
plt.text(df.date[p], df.value[p]+15, df.date[p], horizontalalignment='center', color='darkgreen')
plt.text(df.date[t], df.value[t]-35, df.date[t], horizontalalignment='center', color='darkred')
# Decoration
plt.ylim(50,750)
xtick_location = df.index.tolist()[::6]
xtick_labels = df.date.tolist()[::6]
plt.xticks(ticks=xtick_location, labels=xtick_labels, rotation=90, fontsize=12, alpha=.7)
plt.title("Air passenger peaks and troughs (1949 - 1969)", fontsize=22)
plt.yticks(fontsize=12, alpha=.7)
# Lighten borders
plt.gca().spines["top"].set_alpha(.0)
plt.gca().spines["bottom"].set_alpha(.3)
plt.gca().spines["right"].set_alpha(.0)
plt.gca().spines["left"].set_alpha(.3)
plt.legend(loc='upper left', prop={'size':15})
plt.grid(axis='y', alpha=.3)
plt.savefig("B_09.png")
plt.show()
###Output
_____no_output_____ |
notebooks/consistency_of_importance_over_different_data_amounts.ipynb | ###Markdown
Define paths for the model and data of interest
###Code
model_type = "binary"
# Shared paths/constants
reference_fasta = "/users/amtseng/genomes/hg38.fasta"
chrom_sizes = "/users/amtseng/genomes/hg38.canon.chrom.sizes"
data_base_path = "/users/amtseng/att_priors/data/processed/"
model_base_path = "/users/amtseng/att_priors/models/trained_models/%s/" % model_type
chrom_set = ["chr1"]
input_length = 1346 if model_type == "profile" else 1000
profile_length = 1000
# SPI1
condition_name = "SPI1"
files_spec_path = os.path.join(data_base_path, "ENCODE_TFChIP/%s/config/SPI1/SPI1_training_paths.json" % model_type)
num_tasks = 4
num_strands = 2
task_index = None
controls = "matched"
if model_type == "profile":
model_class = profile_models.ProfilePredictorWithMatchedControls
else:
model_class = binary_models.BinaryPredictor
noprior_all_model_base_path = os.path.join(model_base_path, "SPI1/")
noprior_less_model_base_path = os.path.join(model_base_path, "SPI1_keep1/")
prior_all_model_base_path = os.path.join(model_base_path, "SPI1_prior/")
prior_less_model_base_path = os.path.join(model_base_path, "SPI1_prior_keep1/")
# GATA2
condition_name = "GATA2"
files_spec_path = os.path.join(data_base_path, "ENCODE_TFChIP/%s/config/GATA2/GATA2_training_paths.json" % model_type)
num_tasks = 3
num_strands = 2
task_index = None
controls = "matched"
if model_type == "profile":
model_class = profile_models.ProfilePredictorWithMatchedControls
else:
model_class = binary_models.BinaryPredictor
noprior_all_model_base_path = os.path.join(model_base_path, "GATA2/")
noprior_less_model_base_path = os.path.join(model_base_path, "GATA2_keep1/")
prior_all_model_base_path = os.path.join(model_base_path, "GATA2_prior/")
prior_less_model_base_path = os.path.join(model_base_path, "GATA2_prior_keep1/")
# K562
condition_name = "K562"
files_spec_path = os.path.join(data_base_path, "ENCODE_DNase/%s/config/K562/K562_training_paths.json" % model_type)
num_tasks = 1
num_strands = 1
task_index = None
controls = "shared"
if model_type == "profile":
model_class = profile_models.ProfilePredictorWithSharedControls
else:
model_class = binary_models.BinaryPredictor
noprior_all_model_base_path = os.path.join(model_base_path, "K562/")
noprior_less_model_base_path = os.path.join(model_base_path, "K562_keep1/")
prior_all_model_base_path = os.path.join(model_base_path, "K562_prior/")
prior_less_model_base_path = os.path.join(model_base_path, "K562_prior_keep1/")
# BPNet
condition_name = "BPNet"
reference_fasta = "/users/amtseng/genomes/mm10.fasta"
chrom_sizes = "/users/amtseng/genomes/mm10.canon.chrom.sizes"
files_spec_path = os.path.join(data_base_path, "BPNet_ChIPseq/%s/config/BPNet_training_paths.json" % model_type)
num_tasks = 3
num_strands = 2
task_index = None
controls = "shared"
if model_type == "profile":
model_class = profile_models.ProfilePredictorWithSharedControls
else:
model_class = binary_models.BinaryPredictor
noprior_all_model_base_path = os.path.join(model_base_path, "BPNet/")
noprior_less_model_base_path = os.path.join(model_base_path, "BPNet_keep1/")
prior_all_model_base_path = os.path.join(model_base_path, "BPNet_prior/")
prior_less_model_base_path = os.path.join(model_base_path, "BPNet_prior_keep1/")
###Output
_____no_output_____
###Markdown
Get all runs/epochs with random initializations
###Code
def import_metrics_json(model_base_path, run_num):
"""
Looks in {model_base_path}/{run_num}/metrics.json and returns the contents as a
Python dictionary. Returns None if the path does not exist.
"""
path = os.path.join(model_base_path, str(run_num), "metrics.json")
if not os.path.exists(path):
return None
with open(path, "r") as f:
return json.load(f)
def get_model_paths(
model_base_path, metric_name="val_prof_corr_losses",
reduce_func=(lambda values: np.mean(values)), compare_func=(lambda x, y: x < y),
print_found_values=True
):
"""
Looks in `model_base_path` and for each run, returns the full path to
the best epoch. By default, the best epoch in a run is determined by
the lowest validation profile loss.
"""
# Get the metrics, ignoring empty or nonexistent metrics.json files
metrics = {run_num : import_metrics_json(model_base_path, run_num) for run_num in os.listdir(model_base_path)}
metrics = {key : val for key, val in metrics.items() if val} # Remove empties
model_paths, metric_vals = [], []
for run_num in sorted(metrics.keys(), key=lambda x: int(x)):
try:
# Find the best epoch within that run
best_epoch_in_run, best_val_in_run = None, None
for i, subarr in enumerate(metrics[run_num][metric_name]["values"]):
val = reduce_func(subarr)
if best_val_in_run is None or compare_func(val, best_val_in_run):
best_epoch_in_run, best_val_in_run = i + 1, val
model_path = os.path.join(model_base_path, run_num, "model_ckpt_epoch_%d.pt" % best_epoch_in_run)
model_paths.append(model_path)
metric_vals.append(best_val_in_run)
if print_found_values:
print("\tRun %s, epoch %d: %6.2f" % (run_num, best_epoch_in_run, best_val_in_run))
except Exception:
print("Warning: Was not able to compute values for run %s" % run_num)
continue
return np.array(model_paths), np.array(metric_vals)
metric_name = "val_prof_corr_losses" if model_type == "profile" else "val_corr_losses"
noprior_all_model_paths, noprior_all_metric_vals = get_model_paths(noprior_all_model_base_path, metric_name=metric_name)
noprior_less_model_paths, noprior_less_metric_vals = get_model_paths(noprior_less_model_base_path, metric_name=metric_name)
prior_all_model_paths, prior_all_metric_vals = get_model_paths(prior_all_model_base_path, metric_name=metric_name)
prior_less_model_paths, prior_less_metric_vals = get_model_paths(prior_less_model_base_path, metric_name=metric_name)
num_models = 5 # Maximum of 5 models each
noprior_all_model_paths = noprior_all_model_paths[np.argsort(noprior_all_metric_vals)[:num_models]]
noprior_less_model_paths = noprior_less_model_paths[np.argsort(noprior_less_metric_vals)[:num_models]]
prior_all_model_paths = prior_all_model_paths[np.argsort(prior_all_metric_vals)[:num_models]]
prior_less_model_paths = prior_less_model_paths[np.argsort(prior_less_metric_vals)[:num_models]]
torch.set_grad_enabled(True)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def restore_model(model_path):
model = model_util.restore_model(model_class, model_path)
model.eval()
model = model.to(device)
return model
###Output
_____no_output_____
###Markdown
Data preparationCreate an input data loader that maps coordinates or bin indices to the data needed for the model
###Code
if model_type == "profile":
input_func = data_loading.get_profile_input_func(
files_spec_path, input_length, profile_length, reference_fasta
)
pos_examples = data_loading.get_positive_profile_coords(
files_spec_path, chrom_set=chrom_set
)
else:
input_func = data_loading.get_binary_input_func(
files_spec_path, input_length, reference_fasta
)
pos_examples = data_loading.get_positive_binary_bins(
files_spec_path, chrom_set=chrom_set
)
###Output
_____no_output_____
###Markdown
Compute importances
###Code
# Pick a sample of 100 random coordinates/bins
num_samples = 100
rng = np.random.RandomState(20200318)
sample = pos_examples[rng.choice(len(pos_examples), size=num_samples, replace=False)]
# For profile models, add a random jitter to avoid center-bias
if model_type == "profile":
jitters = np.random.randint(-128, 128 + 1, size=len(sample))
sample[:, 1] = sample[:, 1] + jitters
sample[:, 2] = sample[:, 2] + jitters
def compute_gradients(model_paths, sample):
"""
Given a list of paths to M models and a list of N coordinates or bins, computes
the input gradients over all models, returning an M x N x I x 4 array of
gradient values and an N x I x 4 array of one-hot encoded sequence.
"""
num_models, num_samples = len(model_paths), len(sample)
all_input_grads = np.empty((num_models, num_samples, input_length, 4))
all_one_hot_seqs = np.empty((num_samples, input_length, 4))
for i in tqdm.notebook.trange(num_models):
model = restore_model(model_paths[i])
if model_type == "profile":
results = compute_predictions.get_profile_model_predictions(
model, sample, num_tasks, input_func, controls=controls,
return_losses=False, return_gradients=True, show_progress=False
)
else:
results = compute_predictions.get_binary_model_predictions(
model, sample, input_func,
return_losses=False, return_gradients=True, show_progress=False
)
all_input_grads[i] = results["input_grads"]
if i == 0:
all_one_hot_seqs = results["input_seqs"]
return all_input_grads, all_one_hot_seqs
def compute_shap_scores(model_paths, sample, batch_size=128):
"""
Given a list of paths to M models and a list of N coordinates or bins, computes
the SHAP scores over all models, returning an M x N x I x 4 array of
SHAP scores and an N x I x 4 array of one-hot encoded sequence.
"""
num_models, num_samples = len(model_paths), len(sample)
num_batches = int(np.ceil(num_samples / batch_size))
all_shap_scores = np.empty((num_models, num_samples, input_length, 4))
all_one_hot_seqs = np.empty((num_samples, input_length, 4))
for i in tqdm.notebook.trange(num_models):
model = restore_model(model_paths[i])
if model_type == "profile":
shap_explainer = compute_shap.create_profile_explainer(
model, input_length, profile_length, num_tasks, num_strands, controls,
task_index=task_index
)
else:
shap_explainer = compute_shap.create_binary_explainer(
model, input_length, task_index=task_index
)
for j in range(num_batches):
batch_slice = slice(j * batch_size, (j + 1) * batch_size)
batch = sample[batch_slice]
if model_type == "profile":
                input_seqs, profiles = input_func(batch)
shap_scores = shap_explainer(
input_seqs, cont_profs=profiles[:, num_tasks:], hide_shap_output=True
)
else:
                input_seqs, _, _ = input_func(batch)
shap_scores = shap_explainer(
input_seqs, hide_shap_output=True
)
all_shap_scores[i, batch_slice] = shap_scores
if i == 0:
all_one_hot_seqs[batch_slice] = input_seqs
return all_shap_scores, all_one_hot_seqs
# Compute the importance scores and 1-hot seqs
imp_type = ("DeepSHAP scores", "input gradients")[0]
imp_func = compute_shap_scores if imp_type == "DeepSHAP scores" else compute_gradients
noprior_all_scores, one_hot_seqs = imp_func(noprior_all_model_paths, sample)
noprior_less_scores, _ = imp_func(noprior_less_model_paths, sample)
prior_all_scores, _ = imp_func(prior_all_model_paths, sample)
prior_less_scores, _ = imp_func(prior_less_model_paths, sample)
###Output
_____no_output_____
###Markdown
Compute similarity
###Code
def cont_jaccard(seq_1, seq_2):
"""
Takes two gradient sequences (I x 4 arrays) and computes a similarity between
them, using a continuous Jaccard metric.
"""
# L1-normalize
norm_1 = np.sum(np.abs(seq_1), axis=1, keepdims=True)
norm_2 = np.sum(np.abs(seq_2), axis=1, keepdims=True)
norm_1[norm_1 == 0] = 1
norm_2[norm_2 == 0] = 1
seq_1 = seq_1 / norm_1
seq_2 = seq_2 / norm_2
ab_1, ab_2 = np.abs(seq_1), np.abs(seq_2)
inter = np.sum(np.minimum(ab_1, ab_2) * np.sign(seq_1) * np.sign(seq_2), axis=1)
union = np.sum(np.maximum(ab_1, ab_2), axis=1)
zero_mask = union == 0
inter[zero_mask] = 0
union[zero_mask] = 1
return np.sum(inter / union)
def cosine_sim(seq_1, seq_2):
"""
Takes two gradient sequences (I x 4 arrays) and computes a similarity between
them, using a cosine similarity.
"""
seq_1, seq_2 = np.ravel(seq_1), np.ravel(seq_2)
dot = np.sum(seq_1 * seq_2)
mag_1, mag_2 = np.sqrt(np.sum(seq_1 * seq_1)), np.sqrt(np.sum(seq_2 * seq_2))
return dot / (mag_1 * mag_2) if mag_1 * mag_2 else 0
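# Quick sanity check (illustrative; not part of the original analysis): cosine
# similarity of a score track with itself is 1, and is typically much lower
# against an unrelated (here, reversed) track.
_demo_scores = np.random.RandomState(0).randn(input_length, 4)
print(cosine_sim(_demo_scores, _demo_scores))                   # 1.0
print(cosine_sim(_demo_scores, np.flip(_demo_scores, axis=0)))  # usually near 0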
def compute_similarity_matrix(imp_scores_1, imp_scores_2, sim_func=cosine_sim):
"""
Given the M1 x N x I x 4 and M2 x N x I x 4 importance scores returned
by `compute_gradients` or `compute_shap_scores`, computes an N x M1 x M2
similarity matrix of similarity across models (i.e. each coordinate gets
a similarity matrix between the two conditions). By default uses cosine
similarity.
"""
    num_models_1, num_coords = imp_scores_1.shape[0], imp_scores_1.shape[1]
num_models_2, num_coords_2 = imp_scores_2.shape[0], imp_scores_2.shape[1]
assert num_coords == num_coords_2
sim_mats = np.empty((num_coords, num_models_1, num_models_2))
for i in tqdm.notebook.trange(num_coords):
for j in range(num_models_1):
for k in range(num_models_2):
sim_score = sim_func(imp_scores_1[j][i], imp_scores_2[k][i])
sim_mats[i, j, k] = sim_score
return sim_mats
sim_type = ("Cosine", "Continuous Jaccard")[1]
sim_func = cosine_sim if sim_type == "Cosine" else cont_jaccard
noprior_sim_matrix = compute_similarity_matrix(noprior_all_scores, noprior_less_scores, sim_func=sim_func)
prior_sim_matrix = compute_similarity_matrix(prior_all_scores, prior_less_scores, sim_func=sim_func)
# Plot some examples of poor consistency, particularly ones that showed an improvement
num_to_show = 0 # 15
center_view_length = 200
plot_zoom = False
midpoint = input_length // 2
start = midpoint - (center_view_length // 2)
end = start + center_view_length
center_slice = slice(start, end)
diffs = np.max(prior_sim_matrix, axis=(1, 2)) - np.min(noprior_sim_matrix, axis=(1, 2))
best_example_inds = np.flip(np.argsort(diffs))[:num_to_show]
for sample_index in best_example_inds:
noprior_model_ind_1, noprior_model_ind_2 = np.unravel_index(np.argmin(np.ravel(noprior_sim_matrix[sample_index])), noprior_sim_matrix[sample_index].shape)
prior_model_ind_1, prior_model_ind_2 = np.unravel_index(np.argmax(np.ravel(prior_sim_matrix[sample_index])), prior_sim_matrix[sample_index].shape)
print("Sample index: %d" % sample_index)
if model_type == "binary":
bin_index = sample[sample_index]
coord = input_func(np.array([bin_index]))[2][0]
print("Coordinate: %s (bin %d)" % (str(coord), bin_index))
else:
coord = sample[sample_index]
print("Coordinate: %s" % str(coord))
print("Model indices without prior: %d vs %d" % (noprior_model_ind_1, noprior_model_ind_2))
plt.figure(figsize=(20, 2))
plt.plot(np.sum(noprior_all_scores[noprior_model_ind_1, sample_index] * one_hot_seqs[sample_index], axis=1), color="coral")
plt.show()
if plot_zoom:
viz_sequence.plot_weights(noprior_all_scores[noprior_model_ind_1, sample_index, center_slice], subticks_frequency=1000)
viz_sequence.plot_weights(noprior_all_scores[noprior_model_ind_1, sample_index, center_slice] * one_hot_seqs[sample_index, center_slice], subticks_frequency=1000)
plt.figure(figsize=(20, 2))
plt.plot(np.sum(noprior_less_scores[noprior_model_ind_2, sample_index] * one_hot_seqs[sample_index], axis=1), color="coral")
plt.show()
if plot_zoom:
viz_sequence.plot_weights(noprior_less_scores[noprior_model_ind_2, sample_index, center_slice], subticks_frequency=1000)
viz_sequence.plot_weights(noprior_less_scores[noprior_model_ind_2, sample_index, center_slice] * one_hot_seqs[sample_index, center_slice], subticks_frequency=1000)
print("Model indices with prior: %d vs %d" % (prior_model_ind_1, prior_model_ind_2))
plt.figure(figsize=(20, 2))
plt.plot(np.sum(prior_all_scores[prior_model_ind_1, sample_index] * one_hot_seqs[sample_index], axis=1), color="slateblue")
plt.show()
if plot_zoom:
viz_sequence.plot_weights(prior_all_scores[prior_model_ind_1, sample_index, center_slice], subticks_frequency=1000)
viz_sequence.plot_weights(prior_all_scores[prior_model_ind_1, sample_index, center_slice] * one_hot_seqs[sample_index, center_slice], subticks_frequency=1000)
plt.figure(figsize=(20, 2))
plt.plot(np.sum(prior_less_scores[prior_model_ind_2, sample_index] * one_hot_seqs[sample_index], axis=1), color="slateblue")
plt.show()
if plot_zoom:
viz_sequence.plot_weights(prior_less_scores[prior_model_ind_2, sample_index, center_slice], subticks_frequency=1000)
viz_sequence.plot_weights(prior_less_scores[prior_model_ind_2, sample_index, center_slice] * one_hot_seqs[sample_index, center_slice], subticks_frequency=1000)
noprior_avg_sims = np.mean(noprior_sim_matrix, axis=(1, 2))
prior_avg_sims = np.mean(prior_sim_matrix, axis=(1, 2))
bin_num = 30
all_vals = np.concatenate([noprior_avg_sims, prior_avg_sims])
bins = np.linspace(np.min(all_vals), np.max(all_vals), bin_num)
fig, ax = plt.subplots(figsize=(16, 8))
ax.hist(noprior_avg_sims, bins=bins, color="coral", label="No prior", alpha=0.7)
ax.hist(prior_avg_sims, bins=bins, color="slateblue", label="With Fourier prior", alpha=0.7)
plt.legend()
plt.title(
("Mean pairwise similarities of %s between training on all vs 1%% of the peaks" % imp_type) +
("\n%s %s models" % (condition_name, model_type)) +
("\nComputed over top 5 models without/with Fourier prior on %d randomly drawn test peaks" % num_samples)
)
plt.xlabel("%s similarity" % sim_type)
print("Average similarity without priors: %f" % np.nanmean(noprior_avg_sims))
print("Average similarity with priors: %f" % np.nanmean(prior_avg_sims))
print("Standard error without priors: %f" % scipy.stats.sem(noprior_avg_sims, nan_policy="omit"))
print("Standard error with priors: %f" % scipy.stats.sem(prior_avg_sims, nan_policy="omit"))
w, p = scipy.stats.wilcoxon(noprior_avg_sims, prior_avg_sims, alternative="less")
print("One-sided Wilcoxon test: w = %f, p = %f" % (w, p))
avg_sim_diffs = prior_avg_sims - noprior_avg_sims
plt.figure(figsize=(16, 8))
plt.hist(avg_sim_diffs, bins=30, color="mediumorchid")
plt.title(
("Paired difference of %s similarity between training on all vs 1%% of the peaks" % imp_type) +
("\n%s %s models" % (condition_name, model_type)) +
("\nComputed over top 5 models without/with Fourier prior on %d randomly drawn test peaks" % num_samples)
)
plt.xlabel("Average similarity difference: with Fourier prior - no prior")
def get_bias(sim_matrix):
num_examples, num_models, _ = sim_matrix.shape
bias_vals = []
for i in range(num_models):
avg = np.sum(sim_matrix[:, i]) / (num_examples * (num_models - 1))
bias_vals.append(avg)
print("%d: %f" % (i + 1, avg))
return bias_vals
print("Model-specific bias without priors")
noprior_bias_vals = get_bias(noprior_sim_matrix)
print("Model-specific bias with priors")
prior_bias_vals = get_bias(prior_sim_matrix)
###Output
_____no_output_____ |
CompSciAITF115.ipynb | ###Markdown
Train a Simple Audio Recognition model for microcontroller use This notebook demonstrates how to train an approximately 20kb [Simple Audio Recognition](https://www.tensorflow.org/tutorials/sequences/audio_recognition) model for [TensorFlow Lite for Microcontrollers](https://tensorflow.org/lite/microcontrollers/overview). It will produce a model that can be used in the [micro_speech](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech) example application.The notebook has been adapted from the [example](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech/train_speech_model.ipynb) shipped with TensorFlow. The notebook runs Python scripts to train and freeze the model, and uses the TensorFlow Lite converter to convert it for use with TensorFlow Lite for Microcontrollers.**Training is much faster using GPU acceleration.** Before you proceed, ensure you are using a GPU runtime by going to **Runtime -> Change runtime type** and selecting **GPU**. Training 18,000 iterations will take 1.5-2 hours on a GPU runtime. Check that tensorflow is the right version, 1.15
###Code
import tensorflow as tf
tf.__version__
###Output
_____no_output_____
###Markdown
Configure trainingThe following `os.environ` lines can be customized to set the words that will be trained for, and the steps and learning rate of the training. The default values will result in the same model that is used in the micro_speech example. Run the cell to set the configuration:
###Code
import os
# A comma-delimited list of the words you want to train for.
# The options are: yes,no,up,down,left,right,on,off,stop,go
# All other words will be used to train an "unknown" category.
os.environ["WANTED_WORDS"] = "stop"
# The number of steps and learning rates can be specified as comma-separated
# lists to define the rate at each stage. For example,
# TRAINING_STEPS=15000,3000 and LEARNING_RATE=0.001,0.0001
# will run 18,000 training loops in total, with a rate of 0.001 for the first
# 15,000, and 0.0001 for the final 3,000.
os.environ["TRAINING_STEPS"]="15000,3000"
os.environ["LEARNING_RATE"]="0.001,0.0001"
# Calculate the total number of steps, which is used to identify the checkpoint
# file name.
total_steps = sum(map(lambda string: int(string),
os.environ["TRAINING_STEPS"].split(",")))
os.environ["TOTAL_STEPS"] = str(total_steps)
# Print the configuration to confirm it
!echo "Training these words: ${WANTED_WORDS}"
!echo "Training steps in each stage: ${TRAINING_STEPS}"
!echo "Learning rate in each stage: ${LEARNING_RATE}"
!echo "Total number of training steps: ${TOTAL_STEPS}"
###Output
_____no_output_____
###Markdown
DependenciesMake sure that `xxd` is installed```sudo apt install xxd```Assume that we set up the Anaconda environment for Tensorflow 1.15 with GPU support```conda create -n tf15-gpu tensorflow-gpu=1.15conda activate tg15-gpuconda install anaconda-navigator```We will want tensorflow in the same directory as this notebook, which could be `${HOME}/notebooks````cd ${HOME}/notebooksgit clone -q https://github.com/tensorflow/tensorflow```The `tensorflow` repo contains the scripts that train and freeze the model.Check out a commit that has been tested to work with the build of TensorFlow we are using.
###Code
!git -c advice.detachedHead=false -C tensorflow checkout 17ce384df70
###Output
_____no_output_____
###Markdown
Finally, make sure there is a `content` directory for the work below.```cd ${HOME}/notebooksmkdir content``` Load TensorBoardNow, set up TensorBoard so that we can graph our accuracy and loss as training proceeds.
###Code
# Delete any old logs from previous runs
!rm -rf ./content/retrain_logs
# Load TensorBoard
%load_ext tensorboard
%tensorboard --logdir ./content/retrain_logs
###Output
_____no_output_____
###Markdown
Begin trainingNext, run the following script to begin training. The script will check for, and if needed download, the training data:
###Code
!python tensorflow/tensorflow/examples/speech_commands/train.py \
--model_architecture=tiny_conv --window_stride=20 --preprocess=micro \
--wanted_words=${WANTED_WORDS} --silence_percentage=25 --unknown_percentage=25 \
--quantize=1 --verbosity=WARN --how_many_training_steps=${TRAINING_STEPS} \
--learning_rate=${LEARNING_RATE} --summaries_dir=./content/retrain_logs \
--data_dir=./content/speech_dataset --train_dir=./content/speech_commands_train \
###Output
_____no_output_____
###Markdown
Freeze the graphOnce training is complete, run the following cell to freeze the graph.
###Code
!python tensorflow/tensorflow/examples/speech_commands/freeze.py \
--model_architecture=tiny_conv --window_stride=20 --preprocess=micro \
--wanted_words=${WANTED_WORDS} --quantize=1 --output_file=./content/tiny_conv.pb \
--start_checkpoint=./content/speech_commands_train/tiny_conv.ckpt-${TOTAL_STEPS}
###Output
_____no_output_____
###Markdown
Convert the modelRun this cell to use the TensorFlow Lite converter to convert the frozen graph into the TensorFlow Lite format, fully quantized for use with embedded devices.
###Code
!toco \
--graph_def_file=./content/tiny_conv.pb --output_file=./content/tiny_conv.tflite \
--input_shapes=1,49,40,1 --input_arrays=Reshape_2 --output_arrays='labels_softmax' \
--inference_type=QUANTIZED_UINT8 --mean_values=0 --std_dev_values=9.8077
###Output
_____no_output_____
###Markdown
The following cell will print the model size, which will be under 20 kilobytes.
###Code
import os
model_size = os.path.getsize("./content/tiny_conv.tflite")
print("Model is %d bytes" % model_size)
###Output
_____no_output_____
###Markdown
Finally, we use xxd to transform the model into a source file that can be included in a C++ project and loaded by TensorFlow Lite for Microcontrollers.
###Code
# Save the file as a C source file
!xxd -i ./content/tiny_conv.tflite > ./content/tiny_conv.cc
# Print the source file
!cat ./content/tiny_conv.cc
###Output
_____no_output_____ |
notebooks/Olegs_RMS_thingy.ipynb | ###Markdown
olegs says: * make a plot s2n ratio vs rms for both wsclean and vacuum * image noise ~ SEFD*constant * So make an image without sources, measure its rms * then constant = rms/SEFD * (but 'constant' will be different with different telescopes, different observation time, etc.)
###Code
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
SNR_wsclean = []
SNR_vacuum = []
rms = []
sefd = 520 # specified in the spiel simulation pipeline
for i in range(1800, 1900):
f = f'/scratch/datasets/meerkat16_deep2like/rms/{i}-wsclean-dirty.fits'
noise_data = fits.open(f)[0].data
f = f'/scratch/datasets/meerkat16_deep2like/{i}-wsclean-residual.fits'
wsclean_data = fits.open(f)[0].data
f = f'/scratch/vacuum-cleaner/test/meerkat16_deep2like/fits/{i}-residuals.fits'
vacuum_data = fits.open(f)[0].data
SNR_wsclean.append(np.max(wsclean_data) / np.std(noise_data))
SNR_vacuum.append(np.max(vacuum_data) / np.std(noise_data))
rms.append(np.std(noise_data))
f, (a1, a2) = plt.subplots(1, 2, figsize=(16,8))
a1.plot(SNR_wsclean, rms, 'o')
a1.set_xlabel('SNR WSClean')
a1.set_ylabel('RMS noise')
a1.set_title('SNR WSClean vs RMS noise')
a2.plot(SNR_vacuum, rms, 'o')
a2.set_xlabel('SNR vacuum')
a2.set_ylabel('RMS noise')
_ = a2.set_title('SNR vacuum vs RMS noise')
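# Hedged follow-up sketch (added; not in the original cell): the notes above suggest
# estimating the proportionality constant between image noise and SEFD from the
# source-free images, i.e. constant = rms / SEFD for this particular setup.
constant = np.mean(rms) / sefd
print(f"rms ~= constant * SEFD, with constant ~ {constant:.3e}")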
###Output
_____no_output_____ |
using_open_source_model_packages/pytorch_eqa_model/using_extractive_question_answering_model.ipynb | ###Markdown
Deploy and perform inference on Model Package from AWS Marketplace This notebook provides you instructions on how to deploy and perform inference on model packages from AWS Marketplace Extractive Question Answering model.This notebook is compatible only with those Extractive Question Answering model packages which this notebook is linked to. Pre-requisites:1. **Note**: This notebook contains elements which render correctly in Jupyter interface. Open this notebook from an Amazon SageMaker Notebook Instance or Amazon SageMaker Studio.1. Ensure that IAM role used has **AmazonSageMakerFullAccess**1. To deploy this ML model successfully, ensure that: 1. Either your IAM role has these three permissions and you have authority to make AWS Marketplace subscriptions in the AWS account used: 1. **aws-marketplace:ViewSubscriptions** 1. **aws-marketplace:Unsubscribe** 1. **aws-marketplace:Subscribe** 2. or your AWS account has a subscription to this Extractive Question Answering model. If so, skip step: [Subscribe to the model package](1.-Subscribe-to-the-model-package) Contents:1. [Subscribe to the model package](1.-Subscribe-to-the-model-package)2. [Create an endpoint and perform real-time inference](2.-Create-an-endpoint-and-perform-real-time-inference) 1. [Create an endpoint](A.-Create-an-endpoint) 2. [Create input payload](B.-Create-input-payload) 3. [Perform real-time inference](C.-Perform-real-time-inference) 4. [Delete the endpoint](D.-Delete-the-endpoint)3. [Perform batch inference](3.-Perform-batch-inference) 4. [Clean-up](4.-Clean-up) 1. [Delete the model](A.-Delete-the-model) 2. [Unsubscribe to the listing (optional)](B.-Unsubscribe-to-the-listing-(optional)) Usage instructionsYou can run this notebook one cell at a time (By using Shift+Enter for running a cell).**Note** - This notebook requires you to follow instructions and specify values for parameters, as instructed. 1. Subscribe to the model package To subscribe to the model package:1. Open the model package listing page you opened this notebook for.1. On the AWS Marketplace listing, click on the **Continue to subscribe** button.1. On the **Subscribe to this software** page, review and click on **"Accept Offer"** if you and your organization agrees with EULA, pricing, and support terms. 1. Once you click on **Continue to configuration button** and then choose a **region**, you will see a **Product Arn** displayed. This is the model package ARN that you need to specify while creating a deployable model using Boto3. Copy the ARN corresponding to your region and specify the same in the following cell.
###Code
model_package_arn='<Customer to specify Model package ARN corresponding to their AWS region>'
import json
from sagemaker import ModelPackage
import sagemaker as sage
from sagemaker import get_execution_role
role = get_execution_role()
sagemaker_session = sage.Session()
boto3 = sagemaker_session.boto_session
bucket = sagemaker_session.default_bucket()
region = sagemaker_session.boto_region_name
s3 = boto3.client("s3")
runtime= boto3.client('runtime.sagemaker')
###Output
_____no_output_____
###Markdown
In next step, you would be deploying the model for real-time inference. For information on how real-time inference with Amazon SageMaker works, see [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-hosting.html). 2. Create an endpoint and perform real-time inference
###Code
model_name='extractive-question-answering-model'
#The Extractive Question Answering model packages that this notebook is compatible with support application/list-text as the
#content-type.
content_type='application/list-text'
###Output
_____no_output_____
###Markdown
Review and update the compatible instance type for the model package in the following cell.
###Code
real_time_inference_instance_type='ml.g4dn.xlarge'
batch_transform_inference_instance_type='ml.p2.xlarge'
###Output
_____no_output_____
###Markdown
A. Create an endpoint
###Code
#create a deployable model from the model package.
model = ModelPackage(role=role,
model_package_arn=model_package_arn,
sagemaker_session=sagemaker_session)
#Deploy the model
predictor = model.deploy(1, real_time_inference_instance_type, endpoint_name=model_name)
###Output
_____no_output_____
###Markdown
Once endpoint has been created, you would be able to perform real-time inference. B. Prepare input file for performing real-time inference Let's put in some example question-contexts pairs. You can put in any question-context pairs, the model will predict a part of context that contains the answer.These examples are taken from SQuAD2.0 dataset downloaded from [Dataset Homepage](https://rajpurkar.github.io/SQuAD-explorer/). [CC BY-SA 4.0 License](https://creativecommons.org/licenses/by-sa/4.0/legalcode). Citations:@article{rajpurkar2016squad, title={Squad: 100,000+ questions for machine comprehension of text}, author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy}, journal={arXiv preprint arXiv:1606.05250}, year={2016}}@inproceedings{wang2019glue, title={ {GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding}, author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.}, note={In the Proceedings of ICLR.}, year={2019}}
###Code
question_context1 = ["What is Southern California often abbreviated as?", "Southern California, often abbreviated SoCal, is a geographic and cultural region that generally comprises California's southernmost 10 counties. The region is traditionally described as ""eight counties"", based on demographics and economic ties: Imperial, Los Angeles, Orange, Riverside, San Bernardino, San Diego, Santa Barbara, and Ventura. The more extensive 10-county definition, including Kern and San Luis Obispo counties, is also used based on historical political divisions. Southern California is a major economic center for the state of California and the United States."]
question_context2 = ["Who directed Spectre?", "Spectre (2015) is the twenty-fourth James Bond film produced by Eon Productions. It features Daniel Craig in his fourth performance as James Bond, and Christoph Waltz as Ernst Stavro Blofeld, with the film marking the character's re-introduction into the series. It was directed by Sam Mendes as his second James Bond film following Skyfall, and was written by John Logan, Neal Purvis, Robert Wade and Jez Butterworth. It is distributed by Metro-Goldwyn-Mayer and Columbia Pictures. With a budget around $245 million, it is the most expensive Bond film and one of the most expensive films ever made."]
###Output
_____no_output_____
###Markdown
C. Query endpoint that you have created
###Code
#query_endpoint performs inference on the endpoint and returns the model predictions, which are printed below.
newline = '\n'
bold = '\033[1m'
unbold = '\033[0m'
def query_endpoint(encoded_text):
endpoint_name = model_name
response = runtime.invoke_endpoint(EndpointName=endpoint_name, ContentType=content_type, Body=encoded_text)
model_predictions = json.loads(response['Body'].read())
return model_predictions
for question_context in [question_context1, question_context2]:
model_predictions = query_endpoint(json.dumps(question_context).encode('utf-8'))
print (f"Inference:{newline}"
f"Question: {bold}{question_context[0]}{unbold}{newline}"
f"Context: {question_context[1]}{newline}"
f"model answer: {bold}{model_predictions}{unbold}{newline}")
###Output
_____no_output_____
###Markdown
D. Delete the endpoint Now that you have successfully performed a real-time inference, you do not need the endpoint any more. You can terminate the endpoint to avoid being charged.
###Code
model.sagemaker_session.delete_endpoint(model_name)
model.sagemaker_session.delete_endpoint_config(model_name)
###Output
_____no_output_____
###Markdown
3. Perform batch inference In this section, you will perform batch inference using multiple input payloads together. If you are not familiar with batch transform, and want to learn more, see [How to run a batch transform job](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html)
###Code
#upload the batch-transform job input files to S3
transform_input_key_prefix = 'extractive-question-answering-model-transform-input'
f = open("transform-input-data.txt", "w")
f.write("[\"Who directed Spectre?\", \"Spectre (2015) is the twenty-fourth James Bond film produced by Eon Productions. It features Daniel Craig in his fourth performance as James Bond, and Christoph Waltz as Ernst Stavro Blofeld, with the film marking the character's re-introduction into the series. It was directed by Sam Mendes as his second James Bond film following Skyfall, and was written by John Logan, Neal Purvis, Robert Wade and Jez Butterworth. It is distributed by Metro-Goldwyn-Mayer and Columbia Pictures. With a budget around $245 million, it is the most expensive Bond film and one of the most expensive films ever made.\"]")
f.close()
transform_input = sagemaker_session.upload_data("transform-input-data.txt", key_prefix=transform_input_key_prefix)
print("Transform input uploaded to " + transform_input)
#Run the batch-transform job
transformer = model.transformer(1, batch_transform_inference_instance_type)
transformer.transform(transform_input, content_type=content_type)
transformer.wait()
# output is available on following path
transformer.output_path
output_bucket_name, output_path = transformer.output_path.replace("s3://", "").split("/", 1)
obj = s3.get_object(Bucket=output_bucket_name, Key=output_path + '/transform-input-data.txt.out')
batch_prediction = obj['Body'].read().decode('utf-8')
# print out batch-transform job output
print(batch_prediction)
###Output
_____no_output_____
###Markdown
4. Clean-up A. Delete the model
###Code
model.delete_model()
###Output
_____no_output_____ |
examples/device_graphstates/train_GraphState_adjT_devicenoise.ipynb | ###Markdown
Load Data
###Code
data_path = '/data/MLQC/GraphState_nqbits9_DeviceNoise_040620'
traindata = QCIRCDataSet('%s_train.lmdb' % data_path, debug=False)
testdata = QCIRCDataSet('%s_test.lmdb' % data_path, debug=False)
print('Total # of samples in train set: {}, test set:{}'.format(len(traindata), len(testdata)))
trainloader = DataLoader(traindata, batch_size=64, shuffle=True, pin_memory=True, drop_last=True)
testloader = DataLoader(testdata, batch_size=64, shuffle=True, pin_memory=True, drop_last=True)
if not os.path.exists(data_path):
os.mkdir(data_path)
###Output
Total # of samples in train set: 7600, test set:380
###Markdown
Plot a sample
###Code
idx = np.random.randint(0, len(traindata)-1)
inputs, targets, encodings = traindata[idx]['input'], traindata[idx]['target'], traindata[idx]['encoding']
inputs_dim = inputs.shape[0]
targets_dim = targets.shape[0]
encodings_dim = encodings.shape
plt.figure(1)
plt.bar(np.arange(inputs_dim), inputs.numpy(), width=5, color='w', edgecolor='k')
plt.title("Prob vector")
plt.figure(2)
plt.imshow(encodings.numpy()[0], cmap='RdBu')
plt.title("Adjacency Tensor, plane=0")
###Output
_____no_output_____
###Markdown
Model
###Code
p_dropout=0.1
inputs_dim = inputs.shape[0]
targets_dim = targets.shape[0]
encodings_dim = encodings.shape
net_res = AdjTAsymModel(inputs_dim=inputs_dim, targets_dim=targets_dim, encodings_dim=encodings_dim,
combine_mode='Multiply', asym_mode='residual', p_dropout=p_dropout)
# print('AdjNet (Residual Units):\n', net_res)
net_dense = AdjTAsymModel(inputs_dim=inputs_dim, targets_dim=targets_dim, encodings_dim=encodings_dim,
combine_mode='Multiply', asym_mode='dense', p_dropout=p_dropout)
# print('AdjNet (Dense Units):\n',net_dense)
###Output
_____no_output_____
###Markdown
Train loss functions
###Code
def mse(outputs, targets):
MSE = torch.nn.MSELoss(reduction='sum')
outputs = F.softmax(outputs, dim=1)
return MSE(outputs, targets)
def kl(outputs, targets):
KL = torch.nn.KLDivLoss(reduction='sum')
outputs = F.log_softmax(outputs, dim=1)
return KL(outputs, targets)
###Output
_____no_output_____
###Markdown
Optimizer/Learning policy
###Code
def exp_scheduler(net, ilr=1e-3, lr_decay=0.9, weight_decay=1e-5):
optimizer = optim.Adam(net.parameters(), lr=ilr, weight_decay=weight_decay)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, lr_decay)
return scheduler, optimizer
###Output
_____no_output_____
###Markdown
Dense Block
###Code
model_path = os.path.join(data_path,'dense_model_0p1.pt')
loss_func = kl
scheduler, optimizer = exp_scheduler(net_dense, ilr=1e-3, lr_decay=0.95, weight_decay=2e-4)
test_func_args = (net_dense, testloader, loss_func)
dense_logs = train(net_dense, trainloader, loss_func, scheduler, optimizer, save_epoch=1,
step_log= 500, num_epochs=10, test_epoch=1, test_func_args=test_func_args, path=model_path)
logs = dense_logs
fig, ax = plt.subplots()
ax.plot(logs["step"], logs["loss"], label="Train", marker='o', linestyle='dashed')
ax.plot(logs["test_step"], logs["test_loss"], label="Test", marker='^', linestyle='dashed')
ax.set_xlabel("Iterations", fontsize=14)
ax.set_ylabel("Loss (KL Divergence)", fontsize=14)
ax.legend()
fig.savefig("training_drp0p25_no2d_wd1em4_dense.png", dpi=300)
logs["test_loss"][-10:]
###Output
_____no_output_____
###Markdown
Residual Block
###Code
model_path = os.path.join(data_path,'residual_model_0p1.pt')
loss_func = kl
scheduler, optimizer = exp_scheduler(net_res, ilr=1e-3, lr_decay=0.9, weight_decay=5e-4)
test_func_args = (net_res, testloader, loss_func)
res_logs = train(net_res, trainloader, loss_func, scheduler, optimizer, save_epoch=1, lr=1e-3,
step_log= 250, num_epochs=5, test_epoch=1, test_func_args=test_func_args, path=model_path)
logs = res_logs
fig, ax = plt.subplots()
ax.plot(logs["step"], logs["loss"], label="Train", marker='o', linestyle='dashed')
ax.plot(logs["test_step"], logs["test_loss"], label="Test", marker='^', linestyle='dashed')
ax.set_xlabel("Iterations", fontsize=14)
ax.set_ylabel("Loss (KL Divergence)", fontsize=14)
ax.legend()
fig.savefig("training_drp0p25_wd1em4_residual.png", dpi=300)
###Output
_____no_output_____
###Markdown
Load Trained Model from checkpoint
###Code
model_path = 'data/GraphState_nqbits9_UnitaryNoise_032920/residual_model_0.0002_0.1.pt'
net_res = AdjTAsymModel(inputs_dim=inputs_dim, targets_dim=targets_dim, encodings_dim=encodings_dim,
combine_mode='Multiply', asym_mode='residual')
net_res.load_state_dict(torch.load(model_path))
model_path = 'data/GraphState_nqbits9_UnitaryNoise_032920/dense_model_0p1.pt'
net_dense = AdjTAsymModel(inputs_dim=inputs_dim, targets_dim=targets_dim, encodings_dim=encodings_dim,
combine_mode='Multiply', asym_mode='dense')
net_dense.load_state_dict(torch.load(model_path))
13.994/64
###Output
_____no_output_____
###Markdown
Test
###Code
test(net_res, testloader, kl)
test(net_dense, testloader, kl)
idx = np.random.randint(0, len(testdata)-1)
print('sample=%d'%idx)
inputs, targets, encodings = testdata[idx]['input'], testdata[idx]['target'], testdata[idx]['encoding']
with torch.no_grad():
inputs = torch.unsqueeze(inputs,0)
encodings = torch.unsqueeze(encodings, 0)
net = net_res.to('cpu')
net.eval()
outputs_res = net(inputs, encodings)
    outputs_res = F.softmax(outputs_res, dim=1)
fig,axes = plt.subplots(1,2,figsize=(14,6), sharex=True, sharey=True)
axes[0].bar(np.arange(inputs_dim), np.squeeze(outputs_res.numpy()), width=4, label='ML Output')
axes[0].bar(np.arange(inputs_dim), np.squeeze(targets.numpy()), width=4, label='Target- Ideal Circuit', color='w', edgecolor='r', alpha=0.35)
axes[1].bar(np.arange(inputs_dim), np.squeeze(outputs_res.numpy()), width=4, label='ML Output')
axes[1].bar(np.arange(inputs_dim), np.squeeze(inputs.numpy()), width=4, label='Input- Unitary Noise', color='w', edgecolor='k', alpha=0.35)
axes[0].set_title('Test Sample #%d AdjTAsym Model\nTrained on 9-qubit Graph States w/ Phase-Amplitude Error.\n'\
'Tested: on different Graph States'%idx, fontsize=12)
axes[1].set_title('Test Sample #%d AdjTAsym Model\nAverage KL divergence per sample: 0.1 (In distribution), 0.2 (out of distribution) ' %idx, fontsize=12)
axes[0].set_xlabel("Computational Basis ($2^N$ Possible Outcomes)", fontsize=14)
axes[0].set_ylabel("Probability", fontsize=14)
axes[0].legend()
axes[1].legend()
fig.savefig("graphstate_nqubits9_032920_unitarynoise_residual.png", dpi=300)
idx = np.random.randint(0, len(testdata)-1)
print('sample=%d'%idx)
inputs, targets, encodings = testdata[idx]['input'], testdata[idx]['target'], testdata[idx]['encoding']
with torch.no_grad():
inputs = torch.unsqueeze(inputs,0)
encodings = torch.unsqueeze(encodings, 0)
net = net_dense.to('cpu')
net.eval()
outputs_dense = net(inputs, encodings)
    outputs_dense = F.softmax(outputs_dense, dim=1)
fig,axes = plt.subplots(1,2,figsize=(14,6), sharex=True, sharey=True)
axes[0].bar(np.arange(inputs_dim), np.squeeze(outputs_dense.numpy()), width=4, label='ML Output- dense')
axes[0].bar(np.arange(inputs_dim), np.squeeze(targets.numpy()), width=4, label='Target- Ideal Circuit', color='w', edgecolor='r', alpha=0.35)
axes[1].bar(np.arange(inputs_dim), np.squeeze(outputs_dense.numpy()), width=4, label='ML Output- dense')
axes[1].bar(np.arange(inputs_dim), np.squeeze(inputs.numpy()), width=4, label='Input- Unitary Noise', color='w', edgecolor='k', alpha=0.35)
axes[0].set_title('Test Sample #%d AdjTAsym Model\nTrained on 9-qubit Graph States w/ Phase-Amplitude Error.\n'\
'Tested: on different Graph States'%idx, fontsize=12)
axes[1].set_title('Test Sample #%d AdjTAsym Model' %idx, fontsize=12)
axes[0].set_xlabel("Computational Basis ($2^N$ Possible Outcomes)", fontsize=14)
axes[0].set_ylabel("Probability", fontsize=14)
axes[0].legend()
axes[1].legend()
###Output
sample=25382
|
_notebooks/2020-09-13-Exploring-Titanic-Dataset.ipynb | ###Markdown
Exploring Titanic Dataset  >The objective of this notebook is to explain each step and decision we take during the solution and development of the Titanic dataset in Kaggle Competitions.
###Code
#collapse-show
# data analysis
import pandas as pd
import numpy as np
import random as rnd
# data visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
#collapse-hide
pd.options.display.max_columns = 100
###Output
_____no_output_____
###Markdown
1.Reading data>The Python Pandas packages helps us work with our datasets. We start by acquiring the training and testing datasets into Pandas DataFrames. We also combine these datasets to run certain operations on both datasets together.
###Code
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
df = [train, test]
###Output
_____no_output_____
###Markdown
2.Exploratory Data Analysis>Here we have to analyze and investigate data sets and summarize their main characteristics.
###Code
print(train.columns.values)
train.dtypes
###Output
_____no_output_____
###Markdown
We can classify data into : 1.Categorical and Numerical* Categorical: Survived, Sex, and Embarked. Ordinal: Pclass. * Numerical: Age, Fare. Discrete: SibSp, Parch.
###Code
train.head()
train.tail()
###Output
_____no_output_____
###Markdown
* Name contains Titles (eg. Mr, Mrs, Miss etc.)* Ticket column contains alphanumeric data.* Cabin is also alphanumeric.
###Code
train.info()
test.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 418 entries, 0 to 417
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PassengerId 418 non-null int64
1 Pclass 418 non-null int64
2 Name 418 non-null object
3 Sex 418 non-null object
4 Age 332 non-null float64
5 SibSp 418 non-null int64
6 Parch 418 non-null int64
7 Ticket 418 non-null object
8 Fare 417 non-null float64
9 Cabin 91 non-null object
10 Embarked 418 non-null object
dtypes: float64(2), int64(4), object(5)
memory usage: 36.0+ KB
###Markdown
* In training dataset Cabin,Age,Embarked features contain a number of null values.* In testing dataset Cabin,Age contain a number of null values.
###Code
((train.isnull().sum()/len(train))*100)
((test.isnull().sum()/len(test))*100)
train.describe()
test.describe()
train.describe(include=['O'])
test.describe(include=['O'])
###Output
_____no_output_____
###Markdown
* Names are unique, total 891 unique names.* 65% of data are male.* Cabin values have several duplicates, meaning a lot of people shared a cabin.* Embarked takes three possible values. * S port used by most passengers. //Southampton* Ticket feature has 22% of duplicate values
###Code
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived',
ascending=False)
train[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
train[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
train[["Parch", "Survived"]].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived',
ascending=False)
###Output
_____no_output_____
###Markdown
* Pclass We observe significant correlation (>0.5) among Pclass=1 and Survived.* Sex We confirm the observation during problem definition that Sex=female had very high survival rate at 74%.* SibSp and Parch These features have zero correlation for certain values. 3.Data Visualization> Visualization of data can reveal many insights that can help us in determining features in modelling.
###Code
sns.set(palette = "flare")
grid = sns.FacetGrid(train, col='Survived',height = 10)
grid.map(plt.hist, 'Age', bins=20)
###Output
_____no_output_____
###Markdown
* Most passengers are in 15-35 age range.* Children aged <=4 had a high survival rate.* Oldest passengers aged above 80 survived.* Large number of 15-25 year olds did not survive.* Age is an important feature.
###Code
grid = sns.FacetGrid(train, col='Survived', row='Pclass', size=5)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend();
###Output
/home/jithin/.local/lib/python3.8/site-packages/seaborn/axisgrid.py:316: UserWarning: The `size` parameter has been renamed to `height`; please update your code.
warnings.warn(msg, UserWarning)
###Markdown
* Pclass=3 had most passengers, however most did not survive. * Child passengers in Pclass=2 and Pclass=3 mostly survived. * Most passengers in Pclass=1 survived. * Pclass varies in terms of Age distribution of passengers.
###Code
grid = sns.FacetGrid(train, row='Embarked', size=5)
grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex')
grid.add_legend()
###Output
/home/jithin/.local/lib/python3.8/site-packages/seaborn/axisgrid.py:316: UserWarning: The `size` parameter has been renamed to `height`; please update your code.
warnings.warn(msg, UserWarning)
/home/jithin/.local/lib/python3.8/site-packages/seaborn/axisgrid.py:643: UserWarning: Using the pointplot function without specifying `order` is likely to produce an incorrect plot.
warnings.warn(warning)
/home/jithin/.local/lib/python3.8/site-packages/seaborn/axisgrid.py:648: UserWarning: Using the pointplot function without specifying `hue_order` is likely to produce an incorrect plot.
warnings.warn(warning)
###Markdown
* Female passengers had a much better survival rate than males.* An exception is Embarked=C, where males had a higher survival rate.* Males had a better survival rate in Pclass=3 when compared with Pclass=2 for C and Q ports.* Ports of embarkation have varying survival rates for Pclass=3 and among male passengers.
###Code
grid = sns.FacetGrid(train, row='Embarked', col='Survived', size=5)
grid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None)
grid.add_legend()
###Output
/home/jithin/.local/lib/python3.8/site-packages/seaborn/axisgrid.py:316: UserWarning: The `size` parameter has been renamed to `height`; please update your code.
warnings.warn(msg, UserWarning)
/home/jithin/.local/lib/python3.8/site-packages/seaborn/axisgrid.py:643: UserWarning: Using the barplot function without specifying `order` is likely to produce an incorrect plot.
warnings.warn(warning)
###Markdown
* Higher fare paying passengers had better survival.* Port of embarkation correlates with survival rates. 4.Feature Engineering
###Code
for dataset in df:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
pd.crosstab(train['Title'], train['Sex'])
for dataset in df:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',
'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
#Converting the categorical titles to ordinal.
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Other": 5}
for dataset in df:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train.head()
train = train.drop(['Name', 'PassengerId'], axis=1)
test = test.drop(['Name'], axis=1)
df = [train, test]
train.shape, test.shape
#Dropping the Name and PassengerID feature from training and testing datasets
###Output
_____no_output_____
###Markdown
Converting Categorical Features
###Code
for dataset in df:
dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)
train.head()
grid = sns.FacetGrid(train, row='Pclass', col='Sex', size=5)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend()
for dataset in df:
dataset["Age"].fillna(dataset.groupby("Title")["Age"].transform("median"), inplace=True)
train.isnull().sum()
train['AgeBand'] = pd.cut(train['Age'], 5)
train[['AgeBand', 'Survived']].groupby(['AgeBand'],as_index=False).mean().sort_values(by='AgeBand', ascending=True)
###Output
_____no_output_____
###Markdown
Age values should be replaced as ordinals.
###Code
for dataset in df:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train.head()
train = train.drop(['AgeBand'], axis=1) #removing AgeBand
df = [train, test]
train.head()
for dataset in df:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived',
ascending=False)
for dataset in df:
dataset['Alone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'Alone'] = 1
train[['Alone', 'Survived']].groupby(['Alone'], as_index=False).mean()
train = train.drop(['Parch', 'SibSp', 'FamilySize','Ticket','Cabin'], axis=1)
test = test.drop(['Parch', 'SibSp', 'FamilySize','Ticket','Cabin'], axis=1)
df = [train, test]
train.head()
train.isnull().sum()
mode = train.Embarked.dropna().mode()[0]
for dataset in df:
dataset['Embarked'] = dataset['Embarked'].fillna(mode)
train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived',
ascending=False)
###Output
_____no_output_____
###Markdown
Converting categorical values to numeric values
###Code
for dataset in df:
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train.head()
train.isnull().sum()
test.isnull().sum()
test['Fare'].fillna(test['Fare'].dropna().median(), inplace=True)
test.head()
train['FareBand'] = pd.qcut(train['Fare'], 4)
train[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand',
ascending=True)
for dataset in df:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train = train.drop(['FareBand'], axis=1)
df = [train, test]
train.head()
test.head()
###Output
_____no_output_____
###Markdown
Modelling>Now we can train a model and predict the required solution. The best model should be selected from among these ML algorithms.* Logistic Regression* KNN or k-Nearest Neighbors* Support Vector Machines* Naive Bayes classifier* Decision Tree* Random Forest
###Code
X_train = train.drop("Survived", axis=1)
Y_train = train["Survived"]
X_test = test.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
acc_log
coeff_df = pd.DataFrame(train.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# Support Vector Machines
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train) * 100, 2)
acc_svc
#Knearestneighbours
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train) * 100, 2)
acc_knn
# Gaussian Naive Bayes
gaussian = GaussianNB()
gaussian.fit(X_train, Y_train)
Y_pred = gaussian.predict(X_test)
acc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)
acc_gaussian
# Decision Tree
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_pred = decision_tree.predict(X_test)
acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)
acc_decision_tree
# Random Forest
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
models = pd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest','Naive Bayes','Decision Tree'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_decision_tree]})
models.sort_values(by='Score', ascending=False)
submission = pd.DataFrame({
"PassengerId": test["PassengerId"],
"Survived": Y_pred
})
###Output
_____no_output_____ |
notebooks/1-scraping-reviews.ipynb | ###Markdown
Scraping Product Links Product Link Scraping FunctionA function for scraping links of products that match the search keyword
###Code
def scrapeLink(list_product, category):
chrome_options = Options()
# chrome_options.add_argument('--headless')
chrome_options.add_argument('log-level=2')
driver = webdriver.Chrome('chromedriver', options=chrome_options)
    # katakunci = input('Enter a search keyword: ')
def search(katakunci):
links = []
        print('searching for all products with keyword ' + katakunci)
url = 'https://shopee.co.id/search?keyword=' + katakunci
try:
driver.get(url)
time.sleep(5)
driver.execute_script('window.scrollTo(0, 1500);')
time.sleep(5)
driver.execute_script('window.scrollTo(0, 2500);')
time.sleep(5)
soup_a = BeautifulSoup(driver.page_source, 'html.parser')
products = soup_a.find('div', class_='row shopee-search-item-result__items')
for link in products.find_all('a'):
links.append('https://shopee.co.id/' + str(link.get('href')))
# print(link.get('href'))
except TimeoutException:
            print('failed to get links with query ' + katakunci)
return links
for katakunci in list_product:
print('Scrape link ' + katakunci)
product_urls = search(katakunci)
dict_urls = {'url' : product_urls}
df = pd.DataFrame(data = dict_urls)
PATH = '../data/raw/'+ category +'/link_product'
file_name = PATH + '/links_' + katakunci +'.csv'
df.to_csv(file_name, index=False)
###Output
_____no_output_____
###Markdown
Categories and KeywordsInitialize the lists of categories and keywords
###Code
CATEGORY = ['fashion', 'food', 'electronic']
fashion = ['hoodie', 'hijab', 'celana', 'kemeja', 'gamis', 'sepatu', 'kaos', 'sandal', 'tas', 'jeans', 'jaket', 'sweater']
food = ['snack', 'daging', 'selai', 'gula', 'susu', 'mie', 'roti', 'bumbu', 'camilan', 'bahan_kue', 'minuman', 'bahan_pokok']
electronic = ['laptop', 'mouse', 'handphone', 'earphone', 'tv', 'mixer', 'setrika', 'mesin_cuci', 'penyedot_debu', 'kamera', 'speaker', 'lampu']
###Output
_____no_output_____
###Markdown
Scraping Product LinksScrape product links based on the keywords
###Code
# for category in [fashion, food, electronic]:
# scrapeLink(category)
###Output
_____no_output_____
###Markdown
Scraping Product ReviewsScrape product reviews from the links that have already been collected
###Code
from os import walk
import re
import timeit
import requests
###Output
_____no_output_____
###Markdown
Review Scraping Function
###Code
def shopeeScraper (url):
url = url
r = re.search(r'i\.(\d+)\.(\d+)', url)
shop_id, item_id = r[1], r[2]
ratings_url = 'https://shopee.co.id/api/v2/item/get_ratings?filter=0&flag=1&itemid={item_id}&limit=20&offset={offset}&shopid={shop_id}&type=0'
data_scrape = []
start = timeit.default_timer()
runtime = 0
offset = 0
print('Scraping Process...')
while True:
data = requests.get(ratings_url.format(shop_id=shop_id, item_id=item_id, offset=offset)).json()
i = 1
try :
for i, rating in enumerate(data['data']['ratings'], 1):
if rating['comment'] == '':
pass
else:
data_scrape.append([rating['rating_star'], rating['comment']])
except :
pass
if i % 20:
break
stop = timeit.default_timer()
runtime+= (stop-start)
print(runtime)
offset += 20
print('Scraping Done.')
stop = timeit.default_timer()
print('Time: ', stop - start)
df = pd.DataFrame(data_scrape, columns=['rating', 'reviews'])
df = df.dropna(axis=0)
return df
###Output
_____no_output_____
###Markdown
Link File ListBuild a list of files from the link_product directory
###Code
LINK_PRODUCT = {'fashion' : [], 'food' : [], 'electronic' : []}
for ctg in CATEGORY:
for (dirpath, dirnames, filenames) in walk('../data/raw/link_product/' + ctg):
LINK_PRODUCT[ctg].extend(filenames)
break
###Output
_____no_output_____
###Markdown
Scraping Product ReviewsScrape product reviews from the list of product links in the link_product folder
###Code
dataReviews = pd.DataFrame({'rating' : [], 'reviews' : []})
ctg = 'food' # replace with the category/directory containing the link files
file = LINK_PRODUCT[ctg][0] # change the index to a link file that has not been scraped yet
links = pd.read_csv('../data/raw/link_product/' + ctg + '/' + file)
for i, link in enumerate(links['url']):
if i >= 0:
print('Scraping ' + file + ' Link ke-' + str(i))
reviews = shopeeScraper(link)
frames = [dataReviews, reviews]
result = pd.concat(frames).drop_duplicates().reset_index(drop=True)
dataReviews = result
if i % 2 == 0:
dataReviews.to_csv('../data/raw/reviews_product/' + ctg + '/' + file[6:-4] + '_v' + str(i/2) + '.csv', index=False)
LINK_MERGE = []
ctg = 'food' # Replace with the data category you want to merge
for (dirpath, dirnames, filenames) in walk('../data/raw/reviews_product/' + ctg):
    LINK_MERGE.extend(filenames) # Build a list of file names from the category directory
break
merge_list, count_data = [], 0
for i in LINK_MERGE:
data = pd.read_csv('../data/raw/reviews_product/' + ctg + '/' + i)
count_data += len(data)
merge_list.append(data)
merge_result = pd.concat(merge_list).drop_duplicates().reset_index(drop=True)
merge_result.to_csv('../data/raw/' + ctg + '_reviews.csv', index=False)
print(count_data)
print(merge_result)
###Output
1992261
rating reviews
0 5.0 Jenis:sp pengembang\nHarga:murah\nKualitas:bai...
1 5.0 Masya Alloh,, pengiriman cepet,, pengemasan ra...
2 5.0 Pengiriman cepat pengemasan cepat hrga murah e...
3 5.0 Harga:Terjangkau\nKualitas:Bagus\nJenis:Pengem...
4 5.0 Jenis:koepoe koepoe baking mix s p pengemulsi\...
... ... ...
215047 4.0 Harga:baik\nKualitas:baiik\nRasa:baik\n\nBrg S...
215048 5.0 Harga: Murah\nKualitas: Baik\nRasa: Manis\n\nP...
215049 5.0 paking rapi
215050 5.0 LalalalaLalalalaLalalalaLalalalaLalalalaLalala...
215051 5.0 Okeeeeee kaaaa\nTerima kasih\nSudah sampai\nAm...
[215052 rows x 2 columns]
|
EdX/IBM DL0110EN - Deep Learning with Python and PyTorch/1.3derivativesandGraphsinPytorch_v2 (1).ipynb | ###Markdown
Differentiation in PyTorch Table of ContentsIn this lab, you will learn the basics of differentiation. Derivatives Partial DerivativesEstimated Time Needed: 25 min Preparation The following are the libraries we are going to use for this lab.
###Code
# These are the libraries we will be using for this lab.
import torch
import matplotlib.pylab as plt
import torch.nn.functional as F
###Output
_____no_output_____
###Markdown
Derivatives Let us create the tensor x and set the parameter requires_grad to true because you are going to take the derivative of the tensor.
###Code
# Create a tensor x
x = torch.tensor(2.0, requires_grad = True)
print("The tensor x: ", x)
###Output
The tensor x: tensor(2., requires_grad=True)
###Markdown
Then let us create a tensor according to the equation $ y=x^2 $.
###Code
# Create a tensor y according to y = x^2
y = x ** 2
print("The result of y = x^2: ", y)
###Output
The result of y = x^2: tensor(4., grad_fn=<PowBackward0>)
###Markdown
Then let us take the derivative with respect x at x = 2
###Code
# Take the derivative. Try to print out the derivative at the value x = 2
y.backward()
print("The dervative at x = 2: ", x.grad)
###Output
The derivative at x = 2: tensor(4.)
###Markdown
The preceding lines perform the following operation: $\frac{\mathrm{dy(x)}}{\mathrm{dx}}=2x$ $\frac{\mathrm{dy(x=2)}}{\mathrm{dx}}=2(2)=4$ Let us try to calculate the derivative for a more complicated function.
###Code
# Calculate the y = x^2 + 2x + 1, then find the derivative
x = torch.tensor(2.0, requires_grad = True)
y = x ** 2 + 2 * x + 1
print("The result of y = x^2 + 2x + 1: ", y)
y.backward()
print("The dervative at x = 2: ", x.grad)
###Output
The result of y = x^2 + 2x + 1: tensor(9., grad_fn=<AddBackward>)
The derivative at x = 2: tensor(6.)
###Markdown
The function is in the following form:$y=x^{2}+2x+1$ The derivative is given by: $\frac{\mathrm{dy(x)}}{\mathrm{dx}}=2x+2$$\frac{\mathrm{dy(x=2)}}{\mathrm{dx}}=2(2)+2=6$ Practice Determine the derivative of $ y = 2x^3+x $ at $x=1$
###Code
# Practice: Calculate the derivative of y = 2x^3 + x at x = 1
x = torch.tensor(1.0, requires_grad = True)
y = 2*x ** 3 + x
print("The result of y = x^2 + 2x + 1: ", y)
y.backward()
print("The dervative at x = 2: ", x.grad)
# Type your code here
###Output
The result of y = 2x^3 + x: tensor(3., grad_fn=<ThAddBackward>)
The derivative at x = 1: tensor(7.)
###Markdown
Double-click here for the solution.<!-- x = torch.tensor(1.0, requires_grad=True)y = 2 * x ** 3 + xy.backward()print("The derivative result: ", x.grad) --> Partial Derivatives We can also calculate Partial Derivatives. Consider the function: $f(u,v)=vu+u^{2}$ Let us create u tensor, v tensor and f tensor
###Code
# Calculate f(u, v) = v * u + u^2 at u = 1, v = 2
u = torch.tensor(1.0,requires_grad=True)
v = torch.tensor(2.0,requires_grad=True)
f = u * v + u ** 2
print("The result of v * u + u^2: ", f)
###Output
The result of v * u + u^2: tensor(3., grad_fn=<ThAddBackward>)
###Markdown
This is equivalent to the following: $f(u=1,v=2)=(2)(1)+1^{2}=3$ Now let us take the derivative with respect to u:
###Code
# Calculate the derivative with respect to u
f.backward()
print("The partial derivative with respect to u: ", u.grad)
###Output
The partial derivative with respect to u: tensor(4.)
###Markdown
the expression is given by: $\frac{\mathrm{\partial f(u,v)}}{\partial {u}}=v+2u$$\frac{\mathrm{\partial f(u=1,v=2)}}{\partial {u}}=2+2(1)=4$ Now, take the derivative with respect to v:
###Code
# Calculate the derivative with respect to v
print("The partial derivative with respect to u: ", v.grad)
###Output
The partial derivative with respect to v: tensor(1.)
###Markdown
The equation is given by: $\frac{\mathrm{\partial f(u,v)}}{\partial {v}}=u$$\frac{\mathrm{\partial f(u=1,v=2)}}{\partial {v}}=1$ Calculate the derivative with respect to a function with multiple values as follows. You use the sum trick to produce a scalar valued function and then take the gradient:
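To spell out the sum trick: backward() needs a scalar, so instead of differentiating the vector $Y$ we differentiate $y=\sum_i x_i^{2}$; each component of its gradient is $\frac{\partial y}{\partial x_i}=2x_i$, exactly the elementwise derivative of $x^2$, which is why the derivative plotted below is the straight line $2x$.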
###Code
# Calculate the derivative with multiple values
x = torch.linspace(-10, 10, 10, requires_grad = True)
Y = x ** 2
y = torch.sum(x ** 2)
###Output
_____no_output_____
###Markdown
We can plot the function and its derivative
###Code
# Take the derivative with respect to multiple values. Plot out the function and its derivative
y.backward()
plt.plot(x.detach().numpy(), Y.detach().numpy(), label = 'function')
plt.plot(x.detach().numpy(), x.grad.numpy(), label = 'derivative')
plt.xlabel('x')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
The orange line is the slope of the blue line at the intersection point, which is the derivative of the blue line. The relu activation function is an essential function in neural networks. We can take the derivative as follows:
###Code
import torch.nn.functional as F
# Take the derivative of Relu with respect to multiple values. Plot out the function and its derivative
x = torch.linspace(-3, 3, 100, requires_grad = True)
Y = F.relu(x)
y = Y.sum()
y.backward()
plt.plot(x.detach().numpy(), Y.detach().numpy(), label = 'function')
plt.plot(x.detach().numpy(), x.grad.numpy(), label = 'derivative')
plt.xlabel('x')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Practice Try to determine partial derivative $u$ of the following function where $u=2$ and $v=1$: $ f=uv+(uv)^2$
###Code
# Practice: Calculate the derivative of f = u * v + (u * v) ** 2 at u = 2, v = 1
u = torch.tensor(2.0, requires_grad = True)
v = torch.tensor(1.0, requires_grad = True)
f = u * v + (u * v) ** 2
f.backward()
print("The result is ", u.grad)
# Type the code here
###Output
The result is tensor(5.)
|
Dibujar_Mapa.ipynb | ###Markdown
###Code
from random import randint
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____ |
Visualization_Assignments/A_06_VisualizationWithSeaborn_en_SerhanOner.ipynb | ###Markdown
In this assignment you will continue to make some plots on the [Coronavirus Source Data](https://ourworldindata.org/coronavirus-source-data). For plotting you will use the Seaborn library. **(1)** Plot a line plot with seaborn for total deaths for the four countries (Spain, France, Germany, Italy) after April 1, 2020.
###Code
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('owid-covid-data.csv', parse_dates=["date"], low_memory=False)
df2 = df[df['date'] > '2020-03-31']
df2
spain = df2[df2['location'] == 'Spain']
france = df2[df2['location'] == 'France']
germany = df2[df2['location'] == 'Germany']
italy = df2[df2['location'] == 'Italy']
spain
spain2 = df2[(df2['location'] == 'Spain') & (df2['new_deaths'] >= 0)]
france2 = df2[(df2['location'] == 'France') & (df2['new_deaths'] >= 0)]
germany2 = df2[(df2['location'] == 'Germany') & (df2['new_deaths'] >= 0)]
italy2 = df2[(df2['location'] == 'Italy') & (df2['new_deaths'] >= 0)]
spain2
plt.figure(figsize=(12, 5), dpi = 100)
sns.lineplot(x = df2.date, y = spain['total_deaths'], data = df2, lw = 3, color = 'blue')
sns.lineplot(x = df2.date, y = france['total_deaths'], data = df2, lw = 3, color = 'yellow')
sns.lineplot(x = df2.date, y = germany['total_deaths'], data = df2, lw = 3, color = 'green')
sns.lineplot(x = df2.date, y = italy['total_deaths'], data = df2, lw = 3, color = 'red')
plt.title('Total Deaths for 4 Countries')
plt.xlabel('Dates')
plt.ylabel('Count of Deaths')
plt.legend(['Spain', 'France', 'Germany', 'Italy'], title = 'location')
plt.xticks(rotation = 45, fontsize = 9)
plt.show()
###Output
_____no_output_____
###Markdown
**(2)** Plot a bar plot with seaborn for average death number that compares continents.
###Code
plt.figure(figsize=(12, 5), dpi = 100)
sns.barplot(x = 'continent', y = 'total_deaths', data = df2)
plt.title('Total Deaths', c = 'darkred')
plt.xlabel('Continents', c = 'blue')
plt.ylabel('Average Count of Deaths', c = 'blue')
plt.show()
###Output
_____no_output_____
###Markdown
**(3)** Plot a histogram for daily deaths for any country you choose. Make four subplots for different bin counts and `kde` arguments.
###Code
plt.figure(figsize=(12, 15), dpi = 300)
plt.subplot(2,2,1)
plt.title("New Deaths\n Bins = 50, KDE = False")
plt.xlabel('New Deaths')
plt.ylabel('Normalized Counts')
sns.distplot(spain.new_deaths.dropna(), kde = False, color = 'blue', bins = 50)
plt.subplot(2,2,2)
plt.title("New Deaths\n Bins = 20, KDE = False")
plt.xlabel('New Deaths')
plt.ylabel('Normalized Counts')
sns.distplot(spain.new_deaths.dropna(), kde = False, color = 'blue', bins = 20)
plt.subplot(2,2,3)
plt.title("New Deaths\n Bins = 50, KDE = True")
plt.xlabel('New Deaths')
plt.ylabel('Normalized Counts')
sns.distplot(spain.new_deaths.dropna(), kde = True, color = 'blue', bins = 50)
plt.subplot(2,2,4)
plt.title("New Deaths\n Bins = 20, KDE = True")
plt.xlabel('New Deaths')
plt.ylabel('Normalized Counts')
sns.distplot(spain.new_deaths.dropna(), kde = True, color = 'blue', bins = 20)
plt.show()
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
**(4)** Create a figure and three subplots containing boxplot, violin plot and swarm plot for daily deaths of two countries you choose.
###Code
plt.figure(figsize=(12, 15), dpi = 100)
plt.subplot(3,2,1)
plt.title("Boxplot")
sns.boxplot(x = italy2['location'], y = 'new_deaths', data = df2, palette = ['blue', '#22B573'])
plt.subplot(3,2,2)
plt.title("BoxPlot")
sns.boxplot(x = spain2['location'], y = 'new_deaths', data = df2, palette = ['orange', 'black'])
plt.subplot(3,2,3)
plt.title("Violin Plot")
sns.violinplot(x = italy2['location'], y = 'new_deaths', data = df2, palette = ['blue', '#FF914D'])
plt.subplot(3,2,4)
plt.title("Violin Plot")
sns.violinplot(x = spain2['location'], y = 'new_deaths', data = df2, palette = ['orange', 'black'])
plt.subplot(3,2,5)
plt.title("Swarm Plot")
sns.swarmplot(x = italy2['location'], y = 'new_deaths' , data = df2, palette = ['blue', '#22B573'])
plt.subplot(3,2,6)
plt.title("Swarm Plot")
sns.swarmplot(x = spain2['location'], y = 'new_deaths' , data = df2, palette = ['orange', 'black'])
plt.show()
###Output
_____no_output_____ |
week04/week04_part2.ipynb | ###Markdown
ISM Lecture 3 continued in week 04 Part 2This content is authored by Maria Boutchkova for use in the University of Edinbugh Business School Investment and Securities Markets course in Autumn 2020. Make sure to have watched the videos preceeding this Notebook and have covered the slides. Detailed explanations in the assigned textbook chapters.This lesson covers:* Portfolio risk and return of 2 assetsThe first computational cell below (with In \[ \] in front) contains the solution. Go over the command lines, make sure they make sense to you, click inside the cell, it should become surrounded by a green rectangle, press Esc - the rectangle will become blue, now press Shift+Enter - this will execute the cell and produce the results beneath it.To remove all output in the notebook and start again, go to the Kernel tab above, select Restart and Clear Output.In this notebook we use the functionality of the pandas library. If you want to explore its full documetation, see [here](https://pandas.pydata.org/pandas-docs/stable/index.html). Solved Problem 1: 2-asset portfolios average return and st. dev. under rho = 1We are given 2 stocks in Lecture 3 Part 2: Colonel Motors (C) and Separated Edison (S) with their corresponsing exp returns and st dev-s: C: E(R) = .14 sigma = .06; S: E(R) = .08 sigma = .03We are going to graph the possible portfolios consisting of these two assets under different value for the covariance between them. We start with the two assets being perfectly positively correlated.A portfolio expected return is the weighted average of the expected returns of the constituent assets.Let us define a list of weights for the first asset ranging for .1 to .9 with the remainder to 1 in the other asset.
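For reference, the calculations below implement the standard two-asset formulas, with $w_C$ the weight on C and $\rho$ the correlation between the two assets: $E(R_p) = w_C E(R_C) + (1-w_C) E(R_S)$ and $\sigma_p = \sqrt{w_C^2 \sigma_C^2 + (1-w_C)^2 \sigma_S^2 + 2 w_C (1-w_C) \rho \sigma_C \sigma_S}$. The three cases computed in the cells that follow simply set $\rho$ equal to 1, 0 and -1 in the second expression.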
###Code
import pandas as pd
lst1 = list(range(1,10,1))
lst2 = pd.DataFrame(lst1)
weights_c = lst2/10
weights_c
# declare the exp returns and sigmas of the two assets
exp_ret_c = .14
sig_c = .06
exp_ret_s = .08
sig_s = .03
# calculate the expected returns of 9 portfolios corresponding to each weight in weights_c,
# for example the first portfolio will have .1 in C and .9 in S
port_ex_ret = weights_c*exp_ret_c + (1-weights_c)*exp_ret_s
port_ex_ret
# calculate the st dev of 9 portfolios corresponding to each weight in weights_c when rho = 1
port_sig_1 = ((weights_c*sig_c)**2 + ((1-weights_c)*sig_s)**2 + 2*weights_c*(1-weights_c)*sig_c*sig_s)**(1/2)
port_sig_1
# now when rho = 0
port_sig_0 = ((weights_c*sig_c)**2 + ((1-weights_c)*sig_s)**2)**(1/2)
port_sig_0
# now when rho = -1 we have 2 strands of the portfolio variance function:
# one for 0 <= weight_c <= sig_s/(sig_s + sig_c)
# and another for sig_s/(sig_s + sig_c) <= weight_c <= 1
# define an auxiliary container for the first strand
aux = (1-weights_c)*sig_s - weights_c*sig_c
# define the 2nd strand but not simplified, keep the square and square root so that it produces NaN when the base is negative
port_sig_neg1 = ((weights_c*sig_c)**2 + ((1-weights_c)*sig_s)**2 - 2*weights_c*(1-weights_c)*sig_c*sig_s)**(1/2)
port_sig_neg1 = port_sig_neg1.fillna(aux)
port_sig_neg1
import matplotlib.pyplot as plt
%matplotlib inline
plt.scatter(port_sig_1, port_ex_ret, c='b', marker='x', label='Rho=1')
plt.scatter(port_sig_0, port_ex_ret, c='g', marker='x', label='Rho=0')
plt.scatter(port_sig_neg1, port_ex_ret, c='r', marker='x', label='Rho=-1')
plt.scatter(sig_c, exp_ret_c, c='k', marker='o', label='C')
plt.scatter(sig_s, exp_ret_s, c='k', marker='s', label='S')
plt.legend(loc='lower right')
# add axes labels
plt.xlabel('Sigma')
plt.ylabel('E(R)')
# add title
plt.title('Portfolios of 2 assets: C and S');
# control axes ranges
plt.xlim(0, .08)
plt.ylim(0, .16)
plt.show()
###Output
_____no_output_____
###Markdown
Practice Problem 1: 2-asset portfolios average return and st. dev. under different correlationsPractice performing all the steps above for another 2 stocks: A and B with their corresponding exp returns and st dev-s:A: E(R) = .04 sigma = .12; B: E(R) = .02 sigma = .06Use the same vector weights_c as above. Name all outputs _ab.
###Code
# a possible solution sketch for the practice, mirroring the solved problem above
# declare the exp returns and sigmas of the two assets
exp_ret_a = .04
sig_a = .12
exp_ret_b = .02
sig_b = .06
# calculate the expected returns of 9 portfolios corresponding to each weight in weights_c
port_ex_ret_ab = weights_c*exp_ret_a + (1-weights_c)*exp_ret_b
# calculate the st dev when rho = 1
port_sig_1_ab = ((weights_c*sig_a)**2 + ((1-weights_c)*sig_b)**2 + 2*weights_c*(1-weights_c)*sig_a*sig_b)**(1/2)
# now when rho = 0
port_sig_0_ab = ((weights_c*sig_a)**2 + ((1-weights_c)*sig_b)**2)**(1/2)
# now when rho = -1 we have 2 strands of the portfolio variance function:
# one for 0 <= weight in A <= sig_b/(sig_b + sig_a)
# and another for sig_b/(sig_b + sig_a) <= weight in A <= 1
# define an auxiliary container for the first strand
aux_ab = (1-weights_c)*sig_b - weights_c*sig_a
# define the 2nd strand but not simplified, keep the square and square root so that it produces NaN when the base is negative
port_sig_neg1_ab = ((weights_c*sig_a)**2 + ((1-weights_c)*sig_b)**2 - 2*weights_c*(1-weights_c)*sig_a*sig_b)**(1/2)
port_sig_neg1_ab = port_sig_neg1_ab.fillna(aux_ab)
# now produce the plot - import matplotlib.pyplot as ab instead of plt
# adjust all titles and container names accordingly
import matplotlib.pyplot as ab
%matplotlib inline
###Output
_____no_output_____ |
UnsupervisedSimCSE.ipynb | ###Markdown
Define the model
###Code
class SimCSE(nn.Module):
def __init__(self,
bert_model_path,
temperature=0.05,
is_distilbert=False,
device='cpu'):
super(SimCSE,self).__init__()
self.encoder=SentenceTransformer(model_name_or_path=bert_model_path,device=device)
self.temperature=temperature
        self.is_distilbert=is_distilbert # the distilled version of BERT does not support token_type_ids
def cal_cos_sim(self,embeddings1,embeddings2):
embeddings1_norm=torch.nn.functional.normalize(embeddings1,p=2,dim=1)
embeddings2_norm=torch.nn.functional.normalize(embeddings2,p=2,dim=1)
return torch.mm(embeddings1_norm,embeddings2_norm.transpose(0,1))#(batch_size,batch_size)
def forward(self,batch_inputs):
'''
        For compatibility, the last position of every model's batch_inputs must be labels, even if it is None.
        Get token_embeddings, cls_token_embeddings and sentence_embeddings.
        sentence_embeddings is the embedding concatenated after the Pooling layer; its dimension is 768*k, where k depends on the pooling strategy.
        In general only one pooling strategy is used (CLS directly, or the mean of the last layer / last two layers / first and last layers), so the sentence_embeddings dimension is 768.
'''
batch1_features,batch2_features,_=batch_inputs
if self.is_distilbert:
del batch1_features['token_type_ids']
del batch2_features['token_type_ids']
batch1_embeddings=self.encoder(batch1_features)['sentence_embedding']
batch2_embeddings=self.encoder(batch2_features)['sentence_embedding']
cos_sim=self.cal_cos_sim(batch1_embeddings,batch2_embeddings)/self.temperature#(batch_size,batch_size)
batch_size=cos_sim.size(0)
assert cos_sim.size()==(batch_size,batch_size)
labels=torch.arange(batch_size).to(cos_sim.device)
return nn.CrossEntropyLoss()(cos_sim,labels)
def encode(self, sentences,
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
device: str = None,
normalize_embeddings: bool = False):
'''
        The sentences passed in can only be a single batch.
'''
return self.encoder.encode(sentences=sentences,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
output_value=output_value,
convert_to_numpy=convert_to_numpy,
convert_to_tensor=convert_to_tensor,
device=device,
normalize_embeddings=normalize_embeddings)
def save(self,output_path):
os.makedirs(output_path,exist_ok=True)
with open(os.path.join(output_path, 'model_param_config.json'), 'w') as fOut:
json.dump(self.get_config_dict(output_path), fOut)
self.encoder.save(output_path)
def get_config_dict(self,output_path):
'''
        A config dict is required so that the Model can be re-initialized.
'''
return {'output_path':output_path,'temperature': self.temperature, 'is_distilbert': self.is_distilbert}
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'model_param_config.json')) as fIn:
config = json.load(fIn)
return SimCSE(**config)
device='cpu'
simcse=SimCSE(bert_model_path=model_path2,is_distilbert=True,device=device)
###Output
2021-10-12 18:28:42 - INFO - __init__ - 41 : Load pretrained SentenceTransformer: /data/nfs14/nfs/aisearch/asr/xhsun/bwbd_recall/distiluse-base-multilingual-cased-v1/
2021-10-12 18:28:42 - INFO - __init__ - 107 : Load SentenceTransformer from folder: /data/nfs14/nfs/aisearch/asr/xhsun/bwbd_recall/distiluse-base-multilingual-cased-v1/
###Markdown
Get the dataloaders Get the train_dataloader
###Code
train_sentences=[]
with open(train_file) as f:
lines=f.readlines()
for line in lines:
line_split=line.strip().split('\t')
if line_split[-1]=='1':
train_sentences.append([line_split[0],line_split[1]])
#train_sentences=list(train_sentences)
print(len(train_sentences))
train_sentences[:3]
print(train_sentences[:2])
random.shuffle(train_sentences)
train_sentences=train_sentences
print(len(train_sentences))
print(train_sentences[:2])
train_examples=[InputExample(text_list=sentence,label=1) for sentence in train_sentences]
train_dataloader=DataLoader(train_examples,shuffle=True,batch_size=batch_size)
def smart_batching_collate(batch):
features_of_a,features_of_b,labels=convert_examples_to_features(examples=batch,tokenizer=tokenizer,max_seq_len=max_seq_len)
return features_of_a,features_of_b,labels
train_dataloader.collate_fn=smart_batching_collate
print(train_examples[0])
convert_examples_to_features([train_examples[0]],tokenizer)
###Output
_____no_output_____
###Markdown
Get the dev_dataloader
###Code
dev_examples=getExamples(dev_file,label2id={"0":0,"1":1},filter_heads=True,mode='dev',isCL=False)
###Output
2021-10-12 18:29:47 - INFO - getExamples - 44 : Heads like : ้ฆ็ไฟๆคๆ ้ฅๅ่ง่ฒไธไผ ๅค้ฟๆถ้ดๆๆ 0
2021-10-12 18:29:47 - INFO - getExamples - 57 : *****************************Logging some dev examples*****************************
2021-10-12 18:29:47 - INFO - getExamples - 58 : Total dev nums is : 8946
2021-10-12 18:29:47 - INFO - getExamples - 61 : ๆจ่ๅฅๅคไน
ๅๆพ ๆฟไบง่ฏ้่ฆไธไผ ๅ ้กต 0
2021-10-12 18:29:47 - INFO - getExamples - 61 : ้จๅบๆข็ต่ ่ฎข่ดญ็ฉ่ต็ฝ็ซ็ฝๅๆฏไปไน๏ผ 1
2021-10-12 18:29:47 - INFO - getExamples - 61 : ๆไนๆพ้ฅๅ่ฎฐๅฝ ๆ็บฟไธ็ญพ็บฆ็งๆฟ็ๆต็จๅ 0
2021-10-12 18:29:47 - INFO - getExamples - 61 : ็ปๆต้็จๆฟไบคๆ่งๅฎๆฏไปไน ็ญพ็บฆ่ชๅฆ็ไธ็ปฉๆไนๆฅ็ 0
2021-10-12 18:29:47 - INFO - getExamples - 61 : ่ทจๅบไธไฟๆคๆฏไปไน๏ผ ๅญฆๅ้ช็ๆต็จ 0
###Markdown
Build the evaluator
###Code
dev_sentences=[example.text_list for example in dev_examples]
dev_labels=[example.label for example in dev_examples]
print(dev_sentences[0],dev_labels[0])
sentences1_list=[sen[0] for sen in dev_sentences]
sentences2_list=[sen[1] for sen in dev_sentences]
evaluator=stsEvaluator(sentences1=sentences1_list,sentences2=sentences2_list,batch_size=64,write_csv=False,scores=dev_labels)
for i in range(5):
print(evaluator.sentences1[i],evaluator.sentences2[i],evaluator.scores[i])
evaluator(model=simcse)
###Output
2021-10-12 18:29:49 - INFO - __call__ - 72 : EmbeddingSimilarityEvaluator: Evaluating the model on dataset:
###Markdown
train model
###Code
epochs=10
output_path='/data/nfs14/nfs/aisearch/asr/xhsun/bwbd_recall/tmp'
tensorboard_logdir=os.path.join(output_path,'log')
###Output
_____no_output_____
###Markdown
Get the optimizer
###Code
optimizer_type='AdamW'
scheduler='WarmupLinear'
warmup_proportion=0.1
optimizer_params={'lr': 2e-5}
weight_decay=0.01
num_train_steps = int(len(train_dataloader) * epochs)
warmup_steps = num_train_steps*warmup_proportion
optimizer = get_optimizer(model=simcse,optimizer_type=optimizer_type,weight_decay=weight_decay,optimizer_params=optimizer_params)
scheduler = get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
###Output
_____no_output_____
###Markdown
Get the Trainer
###Code
trainer=Trainer(epochs=epochs,output_path=output_path,tensorboard_logdir=tensorboard_logdir,early_stop_patience=20)
trainer.train(train_dataloader=train_dataloader,
model=simcse,
optimizer=optimizer,
scheduler=scheduler,
evaluator=evaluator,
)
evaluator(model=simcse)
###Output
2021-10-12 20:03:49 - INFO - __call__ - 72 : EmbeddingSimilarityEvaluator: Evaluating the model on dataset:
|
archive/2021-03-7/results/notebooks/advb_article/7_compare_preps.ipynb | ###Markdown
*best viewed in [nbviewer](https://nbviewer.jupyter.org/github/CambridgeSemiticsLab/BH_time_collocations/blob/master/results/notebooks/6_definite_modifier.ipynb)* Time Adverbial Distribution and Collocations Difference in Prepositions between Time and Loca Cody Kingham
###Code
! echo "last updated:"; date
###Output
last updated:
Mon 30 Nov 2020 11:52:26 GMT
###Markdown
IntroductionThis notebook will produce data for my in-progress article on time adverbial components. PythonNow we import the modules and data needed for the analysis.
###Code
# see .py's for variables
from config import *
from get_hmod_sample import hm_df
# load POS PCA placements
pca_comps = pd.read_csv('head_pos_PCA.csv')
pca_comps.set_index(['head_lexn', 'head', 'head_voc', 'head_pos'], inplace=True)
###Output
_____no_output_____
###Markdown
###Code
hm_df.head()
hm_df.function.value_counts()
hm_df.columns
hm_df.tokenized_prep.value_counts()
###Output
_____no_output_____
###Markdown
Compare prepositions
###Code
prep_ct = pd.pivot_table(
hm_df,
index='function',
columns='tokenized_prep',
aggfunc='size',
fill_value=0,
)
# sort on prep sums
prep_ct = prep_ct[prep_ct.sum().sort_values(ascending=False).index]
prep_pr = prep_ct.div(prep_ct.sum(1), 0)
prep_ct.iloc[:, :20]
prep_pr
prep_dp = mystats.apply_deltaP(prep_pr, 0, 1)
prep_dp
sns.heatmap(prep_dp.iloc[:, :4], robust=True, square=True, center=0)
###Output
_____no_output_____
###Markdown
Compare with PCA
###Code
advbs_pca = pca_comps[pca_comps['PC1'] < 0]
advbs_lexns = advbs_pca.index.get_level_values('head_lexn')
nouns_pca = pca_comps[pca_comps['PC1'] > 0]
nouns_lexns = nouns_pca.index.get_level_values('head_lexn')
nouns_pca
###Output
_____no_output_____
###Markdown
Within Time
###Code
tprep_ct = pd.pivot_table(
hm_df[hm_df.function == 'Time'],
index='head_etcbc',
columns='tokenized_prep',
aggfunc='size',
fill_value=0,
)
# sort twice
tprep_ct = tprep_ct[tprep_ct.sum().sort_values(ascending=False).index]
tprep_ct = tprep_ct.loc[tprep_ct.sum(1).sort_values(ascending=False).index]
# proportions
tprep_pr = tprep_ct.div(tprep_ct.sum(1), 0)
tprep_ct.head()
tprep_pr
###Output
_____no_output_____
###Markdown
Example of MXRT for article
###Code
tprep_ct.loc['MXRT/'].sort_values(ascending=False)
tprep_ct.loc['MXRT/'].sum()
tprep_pr.loc['MXRT/'].sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Nominalized uses of ืขืึนืึธื
###Code
hm_df[
(hm_df.head_etcbc == '<WLM/')
& (hm_df.has_nom)
]
olam_def = A.search('''
phrase function*
word lex=H
<: word lex=<WLM/
''')
A.show(olam_def)
for ph in olam_def:
print(ph[0], ph[0] in hm_df.index, T.sectionFromNode(ph[0]))
print(T.text(ph[0]))
print()
olam_pl = A.search('''
phrase
word lex=<WLM/ nu=pl
''')
A.show(olam_pl)
###Output
0.50s 12 results
|
Longo_BMI_Final.ipynb | ###Markdown
HPV vaccination rates in Young Adults. April 6, 2020. University of Utah, Department of Biomedical Informatics. Simone Longo. Group: Monika Baker, Betsy Campbell. Introduction: The human papillomavirus (HPV) is the most common sexually transmitted infection (STI) and affects 78 million Americans, primarily in their late teens and early twenties. While many HPV infections are benign, more severe cases can lead to lesions, warts, and a significantly increased risk of cancer. The WHO reports that nearly all cervical cancers, as well as large proportions of cancers of other reproductive regions, can be attributed to HPV infections. Fortunately, a vaccine exists to protect against the most virulent forms of HPV and is recommended for all people from as early as 9 up to 27 years old. If the immunization schedule is started early enough, the full course may be administered in two doses; however, most cases require three vaccination rounds. The CDC provides vaccination data as a proportion of adolescents aged 12-17 by state who have received each round of the HPV vaccination (link: https://www.cdc.gov/mmwr/volumes/65/wr/mm6533a4.htmT3_down). Reading and Processing Data
###Code
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = 25, 15
plt.rcParams['font.size'] = 18
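# Added note: the cells of this notebook were executed out of order; `data` (and the
# statsmodels alias `sm` used a few lines below) are defined in later cells. A minimal
# loading sketch, assuming the melted CDC table 'hpv_melt.csv' that is read again
# further down in the notebook:
import pandas as pd
data = pd.read_csv('hpv_melt.csv')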
data['Vaccine Round'] = data['vaccine'].apply(lambda x: x.split('_')[0])
data
fig, ax = plt.subplots(2, 3)
gender = ['fem', 'm']
for i in range(3):
for j, g in enumerate(gender):
key = 'gte' + str(i+1) + '_HPV_' + g
sm.qqplot(data[data['vaccine'] == key]['proportion'], ax=ax[j,i], line='r')
ax[j,i].set_title(key)
###Output
_____no_output_____
###Markdown
Get a quick overview of the data.
###Code
import pandas as pd
import seaborn as sns
data = pd.read_csv('hpv_melt.csv')
#sns.boxplot(x=data['Vaccine Round'], y=data['proportion'])
###Output
_____no_output_____
###Markdown
From this plot, we immediately see that the proportion of HPV vaccinations decreases from one round of shots to the next. We also see a large difference between male and female rates.
###Code
from statannot import add_stat_annotation
melt_hpv = data
melt_hpv['gender'] = melt_hpv.vaccine.apply(lambda x: x.split('_')[-1])
melt_hpv['HPV_round'] = melt_hpv.vaccine.apply(
lambda x: "".join(x.split('_')[:-1]))
order = list(set(melt_hpv.HPV_round))
boxpairs = [((order[0], 'fem'), (order[0], 'm')),
((order[1], 'fem'), (order[1], 'm')),
((order[2], 'fem'), (order[2], 'm'))]
ax = sns.boxplot(x="HPV_round", y="proportion", hue="gender", data=melt_hpv)
res = add_stat_annotation(ax,
data=melt_hpv,
x="HPV_round",
y="proportion",
hue="gender",
box_pairs=boxpairs,
test='Mann-Whitney',
loc='inside')
###Output
p-value annotation legend:
ns: 5.00e-02 < p <= 1.00e+00
*: 1.00e-02 < p <= 5.00e-02
**: 1.00e-03 < p <= 1.00e-02
***: 1.00e-04 < p <= 1.00e-03
****: p <= 1.00e-04
gte2HPV_fem v.s. gte2HPV_m: Mann-Whitney-Wilcoxon test two-sided with Bonferroni correction, P_val=2.565e-07 U_stat=2.330e+03
gte1HPV_fem v.s. gte1HPV_m: Mann-Whitney-Wilcoxon test two-sided with Bonferroni correction, P_val=9.262e-08 U_stat=2.360e+03
gte3HPV_fem v.s. gte3HPV_m: Mann-Whitney-Wilcoxon test two-sided with Bonferroni correction, P_val=1.372e-08 U_stat=2.412e+03
###Markdown
We can also see that differences between male and female proportions from one round to the next are also statistically significant. Comparing to Education DataWe first load the data from https://nces.ed.gov/programs/digest/d19/tables/dt19_203.40.asp?current=yes to obtain current enrollment information. This will be used to standardize spending and other statewide metrics on a per-pupil basis.Total expenditures per state can be found here https://nces.ed.gov/programs/digest/d19/tables/dt19_236.30.asp?current=yes. In the following cells, the data from these 2 sources will be combined to show how HPV vaccination rates correlates to per-pupil education spending.
###Code
# Get total enrollment across states and territories after a little data cleaning
enrollment = pd.read_csv('enrollment.csv', header=None)
# standardize names
enrollment[0] = [i.strip().split('..')[0].strip() for i in enrollment[0]]
expenditures = pd.read_csv('expenditures.csv', header=None, index_col=0)
expenditures.index = [
i.strip().split('..')[0].strip() for i in expenditures.index
]
expenditures.iloc[:, 0] = [
int(str(i).replace(',', '')) for i in expenditures.iloc[:, 0]
]
expenditures['enrollment'] = [
int(str(i).replace(',', '')) for i in enrollment.iloc[:, 1]
]
expenditures[
'CostPerStudent'] = expenditures.iloc[:, 0] / expenditures.iloc[:, 1]
expenditures.columns = ['expenditures', 'enrollment', 'CostPerStudent']
#expenditures = expenditures.sort_values(by='CostPerStudent').iloc[:-1,:]
expenditures = expenditures.sort_index()
expenditures
import statsmodels.api as sm
sm.qqplot(expenditures['CostPerStudent'], line='r')
plt.title("Distribution of total spent per pupil")
plt.rcParams['figure.figsize'] = 40, 15 # height, width
plt.rcParams['font.size'] = 25
#fig, ax = plt.subplots(1,2)
from matplotlib import gridspec
fig = plt.figure()
gs= gridspec.GridSpec(1, 2, width_ratios=[4, 1])
ax0 = plt.subplot(gs[0])
expenditures['CostPerStudent'].sort_values().plot.bar(ax=ax0)
ax0.set(ylabel='Spending per Pupil (USD)')
ax1 = plt.subplot(gs[1])
ax1= sns.boxplot(expenditures['CostPerStudent'], orient='v')
ax1.set(ylabel=None)
ax1.set(yticklabels=[])
df.columns
df = pd.read_csv('hpv_clean_w_err.csv', index_col=0)
df.columns = ['State', *df.columns[1:]]
df = df.set_index('State')
hpv = df.iloc[:, 3:9]
hpv['AverageHPV_Rate'] = df.mean(axis=1)
hpv['AdjustedAverage'] = (df['gte2_HPV_fem'] + df['gte3_HPV_fem'] + df['gte2_HPV_m'] + df['gte3_HPV_m'])/4
hpv = hpv.sort_index()
sns.scatterplot(y=hpv.AverageHPV_Rate, x=expenditures.CostPerStudent)
plot_trendline(y=hpv.AverageHPV_Rate, x=expenditures.CostPerStudent)
expend = pd.read_csv('total_expenditures.csv', index_col=0)
expend.index = [r.split('..')[0].strip() for r in expend.index]
#expend
import scipy
def ftest(X, Y, alpha=0.05):
F = np.var(X) / np.var(Y)
df1 = len(X) - 1
df2 = len(Y) - 1
return scipy.stats.f.cdf(F, df1, df2)
enrollment[2] = [expend.loc[s.strip(), 'Student support'] if s in expend.index else np.nan for s in enrollment[0] ]
enrollment = enrollment.dropna()
enrollment.columns = ['State', 'TotalEnrolled', 'StudentSupportSpending']
from IPython.display import display
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
plt.rcParams['figure.figsize'] = 30, 20 # height, width
plt.rcParams['font.size'] = 22
enrollment['HPV_round2'] = [hpv.loc[s, 'gte2_HPV_fem'] if s in hpv.index else np.nan for s in enrollment['State']]
enrollment['TotalSpending'] = [expend.loc[s.strip(), 'Total'] if s in expend.index else np.nan for s in enrollment['State'] ]
sns.scatterplot(y=enrollment['HPV_round2'], x=enrollment['StudentSupportSpending'] / enrollment['TotalEnrolled'])
plt.xlabel('Student Support Spending per Pupil (USD)')
plot_trendline(y=enrollment['HPV_round2'], x=enrollment['StudentSupportSpending'] / enrollment['TotalEnrolled'])
scipy.stats.pearsonr(enrollment['TotalSpending'] / enrollment['TotalEnrolled'], enrollment['HPV_round2'])
help(sns.distplot)
sns.distplot(cdm['AdjustedAverage'], label='All')
sns.distplot(cdm[cdm.Required==0]['AdjustedAverage'], label='Not Required')
sns.distplot(cdm[cdm.Required==1]['AdjustedAverage'], label='Required')
plt.legend()
###Output
_____no_output_____
###Markdown
We see some weak correlation between higher per-pupil spending and higher HPV vaccination rates. This evidence is further supported by examining sexual education requirements. The following sexual education data was taken from https://www.guttmacher.org/state-policy/explore/sex-and-hiv-education.
###Code
cdm = pd.read_csv('condoms.csv', header=None, index_col=0)
cdm[2] = [hpv.loc[x, 'AdjustedAverage'] for x in cdm.index]
#sns.boxplot(cdm[1], cdm[2])
cdm.columns = ['Required', 'AdjustedAverage']
mww_2g(cdm[cdm.Required == 0].AdjustedAverage,
cdm[cdm.Required == 1].AdjustedAverage,
names=['NotRequired', 'Required'],
col_names=['Average HPV Immunization Rate', 'Are condoms required in sex ed?'])
stats.ttest_ind(cdm[cdm.Required == 0].AdjustedAverage, cdm[cdm.Required == 1].AdjustedAverage)
from scipy import stats
print(stats.ttest_rel(hpv['gte2_HPV_fem'], hpv['gte2_HPV_m']))
print(stats.ttest_rel(hpv['gte3_HPV_fem'], hpv['gte3_HPV_m']))
# Some helper functions
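# Added note: these helpers (plot_trendline, mww_2g) are defined here but are called
# in earlier cells (e.g. the spending scatter plots and the condom-requirement
# comparison), so this cell needs to be executed before those cells.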
from statsmodels.formula.api import ols
import numpy as np
from scipy.stats import mannwhitneyu as mww
import itertools as it
def plot_trendline(x, y, c='r'):
data = {'x': x, 'y': y}
model = ols("y ~ x", data=data)
results = model.fit()
print(results.cov_type)
m = results.params[1]
b = results.params[0]
xax = np.linspace(x.min(), x.max(), 100)
yax = m * xax + b
plt.plot(xax,
yax,
c,
label='y = {} x + {}\nR^2 = {}'.format(m, b, results.rsquared))
plt.legend(fontsize=24)
plt.show()
def mww_2g(g1, g2, names=None, col_names=['Value', 'Variable']):
if names is None:
name1 = g1.name
name2 = g2.name
else:
name1 = names[0]
name2 = names[1]
order = [name1, name2]
boxpairs = [(name1, name2)]
stat, pvalue = mww(g1, g2)
df = pd.DataFrame(zip(g1, it.repeat(name1)))
df = df.append(pd.DataFrame(zip(g2, it.repeat(name2))))
df.columns = col_names
plt.figure()
ax = sns.boxplot(data=df, x=col_names[1], y=col_names[0], order=order)
res = add_stat_annotation(ax,
data=df,
x=col_names[1],
y=col_names[0],
box_pairs=boxpairs,
perform_stat_test=False,
pvalues=[pvalue],
test_short_name='Mann-Whitney-Wilcoxon',
text_format='star',
verbose=2,
loc='inside')
###Output
_____no_output_____ |
demo/8_EnergyPlus_using_epJSON/8_EnergyPlus_using_epJSON.ipynb | ###Markdown
8_EnergyPlus_using_epJSON: Running an EnergyPlus simulation
###Code
import os
output_directory=os.path.abspath('sim')
idf_arg=os.path.join(output_directory,
'1ZoneUncontrolled.epJson'
)
weather_arg=r'-w C:\EnergyPlusV8-9-0\WeatherData\USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw'
output_directory_arg='-d ' + output_directory
st=' '.join(['-r',
output_directory_arg,
weather_arg,
idf_arg])
print(st)
!C:\EnergyPlusV8-9-0\EnergyPlus $st
###Output
-r -d C:\Users\cvskf\git\stevenkfirth\energyplus_python_demo\demo\8_EnergyPlus_using_epJSON\sim -w C:\EnergyPlusV8-9-0\WeatherData\USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw C:\Users\cvskf\git\stevenkfirth\energyplus_python_demo\demo\8_EnergyPlus_using_epJSON\sim\1ZoneUncontrolled.epJson
EnergyPlus Starting
EnergyPlus, Version 8.9.0-40101eaafd, YMD=2018.05.23 18:04
Adjusting Air System Sizing
Adjusting Standard 62.1 Ventilation Sizing
Initializing Simulation
Reporting Surfaces
Beginning Primary Simulation
Initializing New Environment Parameters
Warming up {1}
Warming up {2}
Warming up {3}
Warming up {4}
Warming up {5}
Warming up {6}
Warming up {7}
Warming up {8}
Warming up {9}
Warming up {10}
Warming up {11}
Warming up {12}
Warming up {13}
Warming up {14}
Warming up {15}
Warming up {16}
Starting Simulation at 07/21 for DENVER CENTENNIAL GOLDEN N ANN CLG 1% CONDNS DB=>MWB
Initializing New Environment Parameters
Warming up {1}
Warming up {2}
Warming up {3}
Warming up {4}
Warming up {5}
Warming up {6}
Warming up {7}
Warming up {8}
Warming up {9}
Warming up {10}
Warming up {11}
Warming up {12}
Warming up {13}
Warming up {14}
Warming up {15}
Warming up {16}
Warming up {17}
Warming up {18}
Warming up {19}
Warming up {20}
Warming up {21}
Warming up {22}
Starting Simulation at 12/21 for DENVER CENTENNIAL GOLDEN N ANN HTG 99% CONDNS DB
Initializing New Environment Parameters
Warming up {1}
Warming up {2}
Warming up {3}
Warming up {4}
Warming up {5}
Warming up {6}
Warming up {7}
Warming up {8}
Warming up {9}
Warming up {10}
Warming up {11}
Warming up {12}
Warming up {13}
Warming up {14}
Warming up {15}
Warming up {16}
Warming up {17}
Warming up {18}
Starting Simulation at 01/01 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=01/21
Continuing Simulation at 01/21 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=02/10
Continuing Simulation at 02/10 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=03/02
Continuing Simulation at 03/02 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=03/22
Continuing Simulation at 03/22 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=04/11
Continuing Simulation at 04/11 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=05/01
Continuing Simulation at 05/01 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=05/21
Continuing Simulation at 05/21 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=06/10
Continuing Simulation at 06/10 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=06/30
Continuing Simulation at 06/30 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=07/20
Continuing Simulation at 07/20 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=08/09
Continuing Simulation at 08/09 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=08/29
Continuing Simulation at 08/29 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=09/18
Continuing Simulation at 09/18 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=10/08
Continuing Simulation at 10/08 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=10/28
Continuing Simulation at 10/28 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=11/17
Continuing Simulation at 11/17 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=12/07
Continuing Simulation at 12/07 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Updating Shadowing Calculations, Start Date=12/27
Continuing Simulation at 12/27 for San Francisco Intl Ap CA USA TMY3 WMO#=724940
Writing tabular output file results using comma format.
Writing tabular output file results using tab format.
Writing tabular output file results using text format.
Writing tabular output file results using HTML format.
Writing tabular output file results using XML format.
Writing final SQL reports
ReadVarsESO program starting.
ReadVars Run Time=00hr 00min 1.01sec
ReadVarsESO program completed successfully.
ReadVarsESO program starting.
ReadVars Run Time=00hr 00min 0.39sec
ReadVarsESO program completed successfully.
EnergyPlus Run Time=00hr 00min 4.08sec
|
NLP projects in Python and Spark/Yelp review classification (with Logistic Regression model).ipynb | ###Markdown
Data description & Problem statement: I will use the Yelp Review Data Set from Kaggle. Each observation in this dataset is a review of a particular business by a particular user. The "stars" column is the number of stars (1 through 5) assigned by the reviewer to the business (higher stars is better). In other words, it is the rating of the business by the person who wrote the review. The "cool" column is the number of "cool" votes this review received from other Yelp users. The "useful" and "funny" columns are similar to the "cool" column. The goal of this project is to predict whether the customer will rate the business as GOOD or BAD. Workflow: - Load the training and test datasets - Data cleaning (e.g. remove formats and punctuation) - Basic data exploration - Text vectorization, using the "Bag of Words" technique - Use the "tf-idf transformation", and find the "N-grams" to improve the model performance - Use a supervised classifier (e.g. Logistic Regression, Naive Bayes, etc.) for text classification: use Grid-Search along with the Cross-Validation technique to find the optimal hyper-parameters of the best classifier - Evaluate the performance of the best classifier on the test data set, by calculating: - f1, Precision, Recall scores - Confusion matrix - ROC curve - Finally, determine the most important words/features during semantic analysis for both GOOD and BAD reviews. * Note: I repeat the above-mentioned process with and without word normalization (i.e. using lemmatization/stemming) for the sake of comparison. For the word normalization I use the "spaCy" library.
###Code
import sklearn
import numpy as np
import scipy as sc
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
%matplotlib inline
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
# we install and import the spacy package for some advanced tokenization techniques:
import spacy
# we also install and import mglearn package (using !pip install mglearn) for some interesting visualization of results:
import mglearn
ls
###Output
Volume in drive C is OS
Volume Serial Number is 3EA9-93A4
Directory of C:\Users\rhash\Documents\Datasets\NLP projects (sklearn & Spark)
09/11/2018 01:41 PM <DIR> .
09/11/2018 01:41 PM <DIR> ..
09/11/2018 11:21 AM <DIR> .ipynb_checkpoints
09/10/2018 11:29 AM <DIR> aclImdb
09/10/2018 11:57 AM <DIR> cache
08/02/2018 06:00 PM 100,912 Dataset_Challenge_Dataset_Agreement.pdf
09/11/2018 12:54 AM 149,226 IMDb review (positive vs negative reviews, in sklearn).ipynb
09/11/2018 10:02 AM 8,797 IMDb review (topic modeling, sklearn).ipynb
04/18/2011 02:53 PM 5,868 readme
09/11/2018 10:53 AM 198,102 sms filteration (ham vs spam, sklearn).ipynb
09/11/2018 10:54 AM 27,708 sms filteration (topic modeling, sklearn)-Copy1.ipynb
03/15/2011 10:36 PM 477,907 SMSSpamCollection
09/11/2018 01:41 PM 97,890 Yelp review (1 vs 5 star, sklearn).ipynb
02/07/2018 01:02 AM 3,791,120,545 yelp_review.csv
09/11/2018 11:33 AM 1,602,777,975 yelp_review.csv.zip
09/11/2018 11:14 AM 3,149,412,274 yelp-dataset.zip
11 File(s) 8,544,377,204 bytes
5 Dir(s) 390,859,423,744 bytes free
###Markdown
load, prepare and explore the text data:
###Code
reviews = pd.read_csv('yelp_review.csv')
reviews.describe().T
reviews.head(3)
starsCount=pd.DataFrame(reviews['stars'].value_counts()).reset_index()
starsCount.columns=['stars', 'count']
starsCount.sort_values('stars')
sns.countplot(x='stars', data=reviews, palette='rainbow')
plt.show()
sns.boxplot(x=reviews['stars'][0:1000], y=reviews['cool'][0:1000])
plt.show()
sns.boxplot(x=reviews['stars'][0:1000], y=reviews['useful'][0:1000])
plt.show()
reviews['length']= reviews['text'].apply(len) #.map(lambda x: len(x))
sns.boxplot(x=reviews['stars'][1:10000], y=reviews['length'][1:10000], palette='rainbow')
plt.show()
sns.set_style('white')
g=sns.FacetGrid(reviews, col='stars')
g.map(plt.hist, 'length', bins=100)
plt.show()
reviews.groupby('stars').mean()
featureCorr=reviews.groupby('stars').mean().corr()
featureCorr
sns.heatmap(featureCorr, annot=True)
plt.show()
reviews['stars']=reviews['stars'].map({1:0, 2:0, 3:0, 4:1, 5:1})
from sklearn.model_selection import train_test_split
text_train, text_test, y_train, y_test = train_test_split(reviews['text'][0:100000], reviews['stars'][0:100000],
test_size=0.3, shuffle=True, random_state=42)
reviews['stars'].value_counts()
###Output
_____no_output_____
###Markdown
Semantic analysis with tf-idf and n-gram techniques using the LR model: * Approach 1: without word normalization (i.e. lemmatization or stemming)
###Code
# We find the best setting of n-gram range and logistic regression parameter using a grid search:
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, StratifiedKFold
pipe = make_pipeline(TfidfVectorizer(min_df=10, max_df=0.5),
LogisticRegression(random_state=42, class_weight='balanced'))
# running the grid search takes a long time because of the relatively large grid and the inclusion of trigrams
param_grid = {"logisticregression__C": [ 0.1, 1, 5, 10, 100],
"tfidfvectorizer__ngram_range": [(1, 1), (1, 2), (1, 3)]}
kfold=StratifiedKFold(n_splits=2, shuffle=True, random_state=42)
grid1 = GridSearchCV(pipe, param_grid, n_jobs=-1, cv=kfold, scoring='roc_auc')
grid1.fit(text_train, y_train)
G=pd.DataFrame(grid1.cv_results_)
G.sort_values("rank_test_score").head(3)
print("Best parameters:\n{}".format(grid1.best_params_), '\n')
print("Best cross-validation score: {:.2f}".format(grid1.best_score_))
print("Test score: {:.2f}".format(grid1.score(text_test, y_test)))
# extract scores from grid_search and visualize them for ranges of parameters:
plt.figure().set_size_inches(12, 3)
h=G[["param_logisticregression__C", "param_tfidfvectorizer__ngram_range", "mean_test_score"]] .pivot_table(index="param_tfidfvectorizer__ngram_range", columns="param_logisticregression__C", values="mean_test_score")
sns.heatmap(h, annot=True)
plt.show()
# Classification report:
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report
report=classification_report(y_test, grid1.predict(text_test), target_names=['1-3 stars', '4-5 stars'])
print(report)
# Plot a confusion matrix.
# cm is the confusion matrix, names are the names of the classes.
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(names))
plt.xticks(tick_marks, names, rotation=45)
plt.yticks(tick_marks, names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
class_names=["1-3 stars", "4-5 stars"]
# Compute confusion matrix
cm = confusion_matrix(y_test, grid1.predict(text_test))
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
# Normalize the confusion matrix by row (i.e by the number of samples in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, class_names, title='Normalized confusion matrix')
plt.show()
# ROC curve & auc:
from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, average_precision_score
fpr, tpr, thresholds=roc_curve(np.array(y_test),grid1.predict_proba(text_test)[:, 1] , pos_label=1)
roc_auc=roc_auc_score(np.array(y_test), grid1.predict_proba(text_test)[:, 1])
plt.figure()
plt.step(fpr, tpr, color='darkorange', lw=2, label='ROC curve (auc = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', alpha=0.4, lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.plot([cm_normalized[0,1]], [cm_normalized[1,1]], 'or')
plt.show()
# Most important features when using unigrams, bigrams:
vect = grid1.best_estimator_.named_steps['tfidfvectorizer']
feature_names = np.array(vect.get_feature_names())
coef = grid1.best_estimator_.named_steps['logisticregression'].coef_
mglearn.tools.visualize_coefficients(coef, feature_names, n_top_features=20)
print('Most important features when using unigrams, bigrams, and trigrams with tf-idf rescaling:')
###Output
Most important features when using unigrams, bigrams, and trigrams with tf-idf rescaling:
###Markdown
Semantic analysis with tf-idf and n-gram techniques using the LR model: * Approach 2: with word normalization (i.e. using lemmatization or stemming)
###Code
# run the following script in the command line with admin privilege to load the English package in spaCy:
# python -m spacy download en
# Technicality: we want to use the regexp-based tokenizer that is used by CountVectorizer
# and only use the lemmatization from spacy.
# We replace en_nlp.tokenizer (the spacy tokenizer) with the regexp-based tokenization:
from sklearn.feature_extraction.text import CountVectorizer
import re
# regexp used in CountVectorizer
regexp = re.compile('(?u)\\b\\w\\w+\\b')
# load spacy language model and save old tokenizer
en_nlp = spacy.load('en')
old_tokenizer = en_nlp.tokenizer
# replace the tokenizer with the preceding regexp
en_nlp.tokenizer = lambda string: old_tokenizer.tokens_from_list(regexp.findall(string))
# create a custom tokenizer using the spacy document processing pipeline (now using our own tokenizer)
def custom_tokenizer(document):
doc_spacy = en_nlp(document)
return [token.lemma_ for token in doc_spacy]
# define a count vectorizer with the custom tokenizer
lemma_vect = CountVectorizer(tokenizer=custom_tokenizer,
min_df=10,
max_df=0.6,
ngram_range=(1, 2)).fit(text_train)
# transform text_train using CountVectorizer with lemmatization
X_train_lemma = lemma_vect.transform(text_train)
X_test_lemma = lemma_vect.transform(text_test[0:1000])
print("X_train_lemma.shape: {}".format(X_train_lemma.shape))
# We find the best logistic regression parameter using a grid search:
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
pipe = make_pipeline(TfidfTransformer(),
LogisticRegression(random_state=42, class_weight='balanced'))
# running the grid search takes a long time because of the relatively large grid and the inclusion of trigrams
param_grid = {"logisticregression__C": [0.1, 1, 10, 100]}
kfold=StratifiedKFold(n_splits=2, shuffle=True, random_state=42)
grid2 = GridSearchCV(pipe, param_grid, n_jobs=-1, cv=kfold, scoring='roc_auc')
grid2.fit(X_train_lemma, y_train)
G=pd.DataFrame(grid2.cv_results_)
G.sort_values("rank_test_score").head(3)
print("Best parameters:\n{}".format(grid2.best_params_), '\n')
print("Best cross-validation score: {:.2f}".format(grid2.best_score_))
print("Test score: {:.2f}".format(grid2.score(X_test_lemma, y_test[0:1000])))
# Classification report:
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report
report=classification_report(y_test[0:1000], grid2.predict(X_test_lemma), target_names=['1-3 stars', '4-5 stars'])
print(report)
# Plot a confusion matrix.
# cm is the confusion matrix, names are the names of the classes.
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(names))
plt.xticks(tick_marks, names, rotation=45)
plt.yticks(tick_marks, names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
class_names=["1-3 stars", "4-5 stars"]
# Compute confusion matrix
cm = confusion_matrix(y_test[0:1000], grid2.predict(X_test_lemma))
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
# Normalize the confusion matrix by row (i.e by the number of samples in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, class_names, title='Normalized confusion matrix')
plt.show()
# ROC curve & auc:
from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, average_precision_score
fpr, tpr, thresholds=roc_curve(np.array(y_test[0:1000]),grid2.predict_proba(X_test_lemma)[:, 1] , pos_label=1)
roc_auc=roc_auc_score(np.array(y_test[0:1000]), grid2.predict_proba(X_test_lemma)[:, 1])
plt.figure()
plt.step(fpr, tpr, color='darkorange', lw=2, label='ROC curve (auc = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', alpha=0.4, lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.plot([cm_normalized[0,1]], [cm_normalized[1,1]], 'or')
plt.show()
# Most important features when using unigrams, bigrams:
feature_names = np.array(lemma_vect.get_feature_names())
coef = grid2.best_estimator_.named_steps['logisticregression'].coef_
mglearn.tools.visualize_coefficients(coef, feature_names, n_top_features=20)
print('Most important features when using unigrams, bigrams, and trigrams with tf-idf rescaling:')
###Output
Most important features when using unigrams, bigrams, and trigrams with tf-idf rescaling:
|
tutorial/04 - Data Sources and Transformations.ipynb | ###Markdown
<img src="assets/bokeh-transparent.png" style="width:50px" > Bokeh Tutorial 04. Data Sources and Transformations
###Code
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
output_notebook()
###Output
_____no_output_____
###Markdown
ๆฆ่ฆๆไปฌๅทฒ็ป็ๅฐBokehๆไน่ฝๅคๅพๅฅฝๅฐไธPythonๅ่กจใNumPyๆฐ็ปใPandasๅบๅไธ่ตทๅทฅไฝใๅจๅบๅฑ๏ผ่ฟไบ่พๅ
ฅๆฐๆฎ่ขซ่ฝฌๆขไธบBokeh็`ColumnDataSource`็ฑปๅใ่ฟไธชๆฐๆฎ็ฑปๅๆฏBokehไฝฟ็จ็ๆ ธๅฟๆฐๆฎๆบๅฏน่ฑกใ่ฝ็ถBokeh็ปๅธธ้่ฟ้ๆ็ๆนๅผๅๅปบ่ฏฅ็ฑปๅๅฏน่ฑก๏ผไฝๆไบๆถๅไน้่ฆ้่ฟๆพๅผ็ๆนๅผๅๅปบใไธไธ่๏ผๆไปฌไผ็ๅฐhover tooltips๏ผcomputed transforms ๅ CustomJS interactions ็ญไฝฟ็จ`ColumnDataSource`็ๅ่ฝ๏ผๆไปฅ๏ผ็ฐๅจๆไปฌๅฟซ้็็ไธ็ผใ ้่ฟ Python Dicts ๅๅปบไป `bokeh.models` ๅฏผๅ
ฅ `ColumnDataSource`๏ผ
###Code
from bokeh.models import ColumnDataSource
###Output
_____no_output_____
###Markdown
`ColumnDataSource`ๆฏไปๅๅ๏ผๅญ็ฌฆไธฒ๏ผๅฐๅผๅบๅ็ๆ ๅฐใไธ้ขๆฏไธไธช็ฎๅ็ไพๅญใๆ ๅฐๆฏ้่ฟไผ ๅ
ฅPython `dict`ๅปบ็ซ็๏ผๅ
ถๅญ็ฌฆไธฒ้ฎไฝไธบๅๅ๏ผๅฏนๅบ็Python listไฝไธบๅผๅบๅใ่ฏฅๅผๅบๅไนๅฏไปฅๆฏNumPyๆฐ็ป๏ผๆPandasๅบๅใ***ๆณจๆ๏ผ`ColumnDataSource` ไธญๆๆ็ๅๅฟ
้กป็ญ้ฟใ***
###Code
source = ColumnDataSource(data={
'x' : [1, 2, 3, 4, 5],
'y' : [3, 7, 8, 5, 1],
})
###Output
_____no_output_____
###Markdown
ๅฐ็ฎๅ๏ผๆไปฌๅทฒ็ป้่ฟ็ดๆฅไผ ๅ
ฅๆๆฌๅ่กจๆๆฐ็ปๆฐๆฎ็ๆนๅผ่ฐ็จๅฝๆฐ๏ผๅฆ`p.circle`ใๅฝๆไปฌ่ฟๆ ทๅๆถ๏ผBokehไผ่ชๅจๅธฎๆไปฌๅๅปบไธไธช `ColumnDataSource`ใไฝๆไปฌไนๅฏไปฅๆพๅผๅฐๆ `ColumnDataSource` ไฝไธบ `source` ๅๆฐไผ ๅ
ฅglyphๅฝๆฐใๅฝๆไปฌ่ฟๆ ทๅๆถ๏ผๅฆๆๆไปฌๆณ่ฆ็ปไธไธชๅฑๆง๏ผๅฆ`"x"` ๆ `"y"` ๆ `"fill_color"`๏ผๆๅฎไธไธชๅผๅบๅ๏ผๆไปฌไผ ๅ
ฅๅฏนๅบ็***ๅๅ***๏ผ
###Code
p = figure(plot_width=400, plot_height=400)
p.circle('x', 'y', size=20, source=source)
show(p)
# Exercise: create a column data source with NumPy arrays as column values and plot it
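# Added solution sketch for the exercise above (not part of the original notebook):
# build the columns from NumPy arrays instead of Python lists.
import numpy as np
x = np.linspace(0, 10, 50)
y = np.sin(x)
np_source = ColumnDataSource(data={'x': x, 'y': y})
p2 = figure(plot_width=400, plot_height=400)
p2.circle('x', 'y', size=8, source=np_source)
show(p2)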
###Output
_____no_output_____
###Markdown
้่ฟ Pandas DataFrames ๅๅปบ้่ฟPandas Dataframeๅๅปบ `ColumnDataSource` ๅฏน่ฑกไนๅพๅฎนๆใๅช้่ฆๅจๅๅปบ `ColumnDataSource` ็ๆถๅๆDataframeไผ ๅ
ฅๅฐฑ่ก๏ผ
###Code
from bokeh.sampledata.iris import flowers as df
source = ColumnDataSource(df)
###Output
_____no_output_____
###Markdown
็ฐๅจๆไปฌๅฐฑๅฏไปฅไฝฟ็จ่ฟไธชๅฏน่ฑก็ปๅถไธไพไธญ็ๅพไบ๏ผๆๅฏนๅบ็ๅๅไผ ๅ
ฅglyphๅฝๆฐ๏ผ
###Code
p = figure(plot_width=400, plot_height=400)
p.circle('petal_length', 'petal_width', source=source)
show(p)
# Exercise: create a column data source with the autompg sample data frame and plot it
from bokeh.sampledata.autompg import autompg_clean as df
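# Added solution sketch (not part of the original notebook), assuming the standard
# 'hp' and 'mpg' columns of the autompg sample data:
autompg_source = ColumnDataSource(df)
p3 = figure(plot_width=400, plot_height=400)
p3.circle('hp', 'mpg', source=autompg_source)
show(p3)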
###Output
_____no_output_____ |
notebooks/week2/Level 2.ipynb | ###Markdown
Testing Stuff
###Code
import pandas as pd
simple_better = pd.read_csv("/workspace/ltr_output/analysis/simple_better.csv")
simple_better
exp = pd.read_csv("/workspace/ltr_output/analysis/simple_ltr_explains.csv")
exp
###Output
_____no_output_____ |
notebooks/ruta-training/Chapter 2 - Specific tasks/Exercise 2 - Enumerations.ipynb | ###Markdown
Exercise 2: Enumerations. The goal of this exercise is to create a simple script for annotating enumerations. We declare a new annotation type `Enumeration` with one feature `elements` of the type `FSArray`. Broadly speaking, an `FSArray` is an array of `Annotation` instances (`Chunk`s in our case). We specify rules so that the annotations of the type `Enumeration` cover the complete span of the enumeration in the examples, while their features contain the single elements of the enumerations.
###Code
%%documentText
This is not an enumeration.
breast and ovarian cancer.
colorectal, endometrial, and ovarian cancers.
colorectal, endometrial and ovarian cancers.
vasculopathy of the heart and brain.
abnormalities of eyes, nervous system, and kidneys.
breast or ovarian cancer.
breast and/or ovarian cancer.
Have a nice day.
%displayMode DYNAMIC_HTML
%dynamicHtmlAllowedTypes Enumeration Chunk Conj
DECLARE Enumeration (FSArray elements);
DECLARE Conj, Chunk;
"and" {-> Conj};
"or" {-> Conj};
// For special cases like "and/or", we combine both Conj into a single one
Conj{-> SHIFT(Conj, 1, 3)} SPECIAL Conj{-> UNMARK(Conj)};
// All words outside Conj are considered Chunks
W+{-PARTOF(Conj) -> Chunk};
// Combine Chunks into Enumeration
(Chunk{-PARTOF(Enumeration)}
(COMMA Chunk)* COMMA? Conj Chunk)
{-> CREATE(Enumeration, "elements" = Chunk)};
###Output
_____no_output_____ |
Course 1 - Introduction to Data Science/assignment2.ipynb | ###Markdown
Assignment 2: For this assignment you'll be looking at 2017 data on immunizations from the CDC. Your datafile for this assignment is in [assets/NISPUF17.csv](assets/NISPUF17.csv). A data users guide for this, which you'll need to map the variables in the data to the questions being asked, is available at [assets/NIS-PUF17-DUG.pdf](assets/NIS-PUF17-DUG.pdf). **Note: you may have to go to your Jupyter tree (click on the Coursera image) and navigate to the assignment 2 assets folder to see this PDF file.** Question 1: Write a function called `proportion_of_education` which returns the proportion of children in the dataset who had a mother with an education level equal to less than high school (<12), high school (12), more than high school but not college (>12), or college degree. *This function should return a dictionary in the form of (use the correct numbers, do not round numbers):* ``` {"less than high school":0.2, "high school":0.4, "more than high school but not college":0.2, "college":0.2}```
###Code
import pandas as pd
import numpy as np
df = pd.read_csv('assets/NISPUF17.csv')
def proportion_of_education():
d = dict()
length = len(df)
d["less than high school"] = len(df[df['EDUC1'] == 1]) / length
d["high school"] = len(df[df['EDUC1'] == 2]) / length
d["more than high school but not college"] = len(df[df['EDUC1'] == 3]) / length
d["college"] = len(df[df['EDUC1'] == 4]) / length
return d
# proportion_of_education()
assert type(proportion_of_education())==type({}), "You must return a dictionary."
assert len(proportion_of_education()) == 4, "You have not returned a dictionary with four items in it."
assert "less than high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "more than high school but not college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
###Output
_____no_output_____
###Markdown
Question 2: Let's explore the relationship between being fed breastmilk as a child and getting a seasonal influenza vaccine from a healthcare provider. Return a tuple of the average number of influenza vaccines for those children we know received breastmilk as a child and those we know did not. *This function should return a tuple in the form (use the correct numbers):* ```(2.5, 0.1)```
###Code
def average_influenza_doses():
bf = df.loc[df['CBF_01'] == 1]
nbf = df.loc[df['CBF_01'] == 2]
av_inf_bf = np.mean(bf['P_NUMFLU'].dropna().values)
av_inf_nbf = np.mean(nbf['P_NUMFLU'].dropna().values)
return (av_inf_bf, av_inf_nbf)
assert len(average_influenza_doses())==2, "Return two values in a tuple, the first for yes and the second for no."
###Output
_____no_output_____
###Markdown
Question 3: It would be interesting to see if there is any evidence of a link between vaccine effectiveness and sex of the child. Calculate the ratio of the number of children who contracted chickenpox but were vaccinated against it (at least one varicella dose) versus those who were vaccinated but did not contract chickenpox. Return results by sex. *This function should return a dictionary in the form of (use the correct numbers):* ``` {"male":0.2, "female":0.4}``` Note: To aid in verification, the `chickenpox_by_sex()['female']` value the autograder is looking for starts with the digits `0.0077`.
###Code
import numpy as np
def chickenpox_by_sex():
ch_pox = df.loc[df['P_NUMVRC'].dropna().index]
ch_pox = ch_pox.loc[ch_pox['P_NUMVRC'] > 0]
males = ch_pox[ch_pox['SEX'] == 1]
females = ch_pox[ch_pox['SEX'] == 2]
male_had = len(males[males['HAD_CPOX'] == 1])
male_had_not = len(males[males['HAD_CPOX'] == 2])
female_had = len(females[females['HAD_CPOX'] == 1])
female_had_not = len(females[females['HAD_CPOX'] == 2])
d1 = {'male': male_had/male_had_not, 'female': female_had/female_had_not}
return d1
assert len(chickenpox_by_sex())==2, "Return a dictionary with two items, the first for males and the second for females."
###Output
{'male': 0.009675583380762664, 'female': 0.0077918259335489565}
###Markdown
Question 4: A correlation is a statistical relationship between two variables. If we wanted to know if vaccines work, we might look at the correlation between the use of the vaccine and whether it results in prevention of the infection or disease [1]. In this question, you are to see if there is a correlation between having had chickenpox and the number of chickenpox vaccine doses given (varicella). Some notes on interpreting the answer: the `had_chickenpox_column` is either `1` (for yes) or `2` (for no), and the `num_chickenpox_vaccine_column` is the number of doses a child has been given of the varicella vaccine. A positive correlation (e.g., `corr > 0`) means that an increase in `had_chickenpox_column` (which means more no's) would also increase the values of `num_chickenpox_vaccine_column` (which means more doses of vaccine). If there is a negative correlation (e.g., `corr < 0`), it indicates that having had chickenpox is related to an increase in the number of vaccine doses. Also, `pval` is the probability of observing, by chance, a correlation between `had_chickenpox_column` and `num_chickenpox_vaccine_column` at least as large as the one measured. A small `pval` means that the observed correlation is highly unlikely to have occurred by chance. In this case, `pval` should be very small (it will end in `e-18`, indicating a very small number). [1] This isn't really the full picture, since we are not looking at when the dose was given. It's possible that children had chickenpox and then their parents went to get them the vaccine. Does this dataset have the data we would need to investigate the timing of the dose?
###Code
def corr_chickenpox():
import scipy.stats as stats
import numpy as np
import pandas as pd
# this is just an example dataframe
# df = pd.DataFrame({"had_chickenpox_column":np.random.randint(1,3,size=(100)),
# "num_chickenpox_vaccine_column":np.random.randint(0,6,size=(100))})
df2 = df.loc[(df['HAD_CPOX'] == 1) | (df['HAD_CPOX'] == 2)]
df3 = df2.loc[df2['P_NUMVRC'].dropna().index]
corr, pval = stats.pearsonr(df3['HAD_CPOX'].reset_index(drop=True), df3['P_NUMVRC'].reset_index(drop=True))
# here is some stub code to actually run the correlation
# corr, pval=stats.pearsonr(df["had_chickenpox_column"],df["num_chickenpox_vaccine_column"])
# just return the correlation
return corr
assert -1<=corr_chickenpox()<=1, "You must return a float number between -1.0 and 1.0."
###Output
0.07044873460147986
|
Python/zzz_training_challenge/UdemyPythonPro/Chapter5_Functions/Functions/default_argument_problems.ipynb | ###Markdown
BAD
###Code
def grow_list(val, my_list=[]):
my_list.append(val)
return my_list
my_list = []
my_list = grow_list(42, my_list)
my_list = grow_list(42, my_list)
my_list = grow_list(42, my_list)
print(my_list)
my_list2 = grow_list(42)
print(my_list2)
# a fresh call using the default list - and yet the values from the earlier call are still in it
my_list3 = grow_list(43)
print(my_list3)
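# Added illustration (sketch): the default list is created once, at function definition
# time, and stored on the function object, so every call that relies on the default
# shares the very same list object.
print(grow_list.__defaults__)   # e.g. ([42, 43],) - the shared default keeps growing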
###Output
[42]
[42, 43]
###Markdown
**Warning**
In this case, my_list = [] was allocated once in memory, and that same list object is reused on every call that relies on the default.
This happens frequently with mutable default values! GOOD
When a function is created, its header (including default values) is evaluated once, at definition time.
If the list is created inside the function body instead, that code is not run at definition time but separately for each call.
###Code
# Best default value: None - never use a mutable type as a default
def grow_list(val, my_list=None):
if my_list:
my_list.append(val)
else:
my_list = [val]
return my_list
my_list2 = grow_list(42)
print(my_list2)
my_list3 = grow_list(43)
print(my_list3)
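# Added note: `if my_list:` also treats an explicitly passed empty list as falsy and
# silently replaces it instead of appending to it. The usual idiom compares to None:
def grow_list_v2(val, my_list=None):
    if my_list is None:
        my_list = [val]
    else:
        my_list.append(val)
    return my_list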
###Output
[42]
[43]
|
01_mnist_mlp.ipynb | ###Markdown
Training a simple neural network on the MNIST dataset
###Code
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
import warnings
warnings.filterwarnings('ignore')
###Output
Using TensorFlow backend.
###Markdown
Loading the data
###Code
(X_train, y_train), (X_test, y_test) = mnist.load_data()
###Output
_____no_output_____
###Markdown
Exploring the data
###Code
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
print('Liczba dabych treningowych:', X_train.shape[0])
print('Liczba danych testowych:', X_test.shape[0])
print('Rozmiar pojedynczego obrazka:', X_train[0].shape)
###Output
(60000, 28, 28)
(10000, 28, 28)
(60000,)
(10000,)
Liczba dabych treningowych: 60000
Liczba danych testowych: 10000
Rozmiar pojedynczego obrazka: (28, 28)
###Markdown
Displaying an image
###Code
import matplotlib.pyplot as plt
plt.imshow(X_train[0], cmap='Greys')
plt.axis('off')
###Output
_____no_output_____
###Markdown
Displaying several images
###Code
plt.figure(figsize=(13, 13))
for i in range(1, 11):
plt.subplot(1, 10, i)
plt.axis('off')
plt.imshow(X_train[i], cmap='Greys')
plt.show()
###Output
_____no_output_____
###Markdown
Displaying the data
###Code
print(X_train[0][10])
# lower half of the image
plt.imshow(X_train[0][14:], cmap='Greys')
# upper half of the image
plt.imshow(X_train[0][:14], cmap='Greys')
###Output
_____no_output_____
###Markdown
Cropping an image
###Code
plt.imshow(X_train[0][5:20, 5:20], cmap='Greys')
###Output
_____no_output_____
###Markdown
Displaying the labels
###Code
print('y_train:', y_train)
print('y_train shape:', y_train.shape)
###Output
y_train: [5 0 4 ... 5 6 8]
y_train shape: (60000,)
###Markdown
Preparing the data
###Code
X_train = X_train.reshape(60000, 28*28)
X_test = X_test.reshape(10000, 28*28)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape)
print(X_test.shape)
###Output
(60000, 784)
(10000, 784)
###Markdown
Preparing the labels
###Code
y_train = keras.utils.to_categorical(y_train, num_classes=10)
y_test = keras.utils.to_categorical(y_test, num_classes=10)
print(y_train.shape)
print(y_test.shape)
print(y_train[0])
###Output
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
###Markdown
Building the model
###Code
model = Sequential()
model.add(Dense(units=512, activation='relu', input_shape=(28*28,)))
model.add(Dense(units=512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
###Output
WARNING: Logging before flag parsing goes to stderr.
W0711 07:41:11.785608 140436952602496 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:74: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.
W0711 07:41:11.805384 140436952602496 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.
W0711 07:41:11.807734 140436952602496 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4138: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.
###Markdown
Compiling the model
###Code
model.compile(optimizer=RMSprop(),
loss='categorical_crossentropy',
metrics=['accuracy'])
###Output
W0711 07:41:11.868982 140436952602496 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
W0711 07:41:11.877096 140436952602496 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3295: The name tf.log is deprecated. Please use tf.math.log instead.
###Markdown
Training the model
###Code
history = model.fit(X_train, y_train,
batch_size=128,
epochs=20,
validation_data=(X_test, y_test))
###Output
W0711 07:41:11.973828 140436952602496 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
W0711 07:41:12.047617 140436952602496 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:986: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.
###Markdown
Evaluating the model
###Code
score = model.evaluate(X_test, y_test)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
###Output
10000/10000 [==============================] - 0s 37us/step
Test loss: 0.15132887862331407
Test accuracy: 0.9815
###Markdown
Accuracy plot
###Code
def make_accuracy_plot(history):
"""
    Returns a plot of the model's accuracy on the training and validation sets.
"""
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
acc, val_acc = history.history['acc'], history.history['val_acc']
epochs = range(1, len(acc) + 1)
plt.figure(figsize=(10, 8))
plt.plot(epochs, acc, label='Dokลadnoลฤ trenowania', marker='o')
plt.plot(epochs, val_acc, label='Dokลadnoลฤ walidacji', marker='o')
plt.legend()
plt.title('Dokลadnoลฤ trenowania i walidacji')
plt.xlabel('Epoki')
plt.ylabel('Dokลadnoลฤ')
plt.show()
def make_loss_plot(history):
"""
    Returns a plot of the model's loss on the training and validation sets.
"""
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
loss, val_loss = history.history['loss'], history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.figure(figsize=(10, 8))
plt.plot(epochs, loss, label='Strata trenowania', marker='o')
plt.plot(epochs, val_loss, label='Strata walidacji', marker='o')
plt.legend()
plt.title('Strata trenowania i walidacji')
plt.xlabel('Epoki')
plt.ylabel('Strata')
plt.show()
make_accuracy_plot(history)
make_loss_plot(history)
###Output
_____no_output_____
###Markdown
Building the model with Dropout layers
###Code
model = Sequential()
model.add(Dense(units=512, activation='relu', input_shape=(28*28,)))
model.add(Dropout(0.2))
model.add(Dense(units=512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=RMSprop(),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(X_train, y_train,
batch_size=128,
epochs=20,
validation_data=(X_test, y_test))
make_accuracy_plot(history)
make_loss_plot(history)
###Output
_____no_output_____ |
02.tf.operations/02-1.tf.Variable.ipynb | ###Markdown
`tf.Variable`
###Code
import tensorflow as tf
# Graph execution examples
#tf.enable_eager_execution()
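# Added sketch (not in the original notebook): a minimal tf.Variable example,
# assuming TF 1.x graph execution as implied by the commented-out line above.
w = tf.Variable(3.0, name='w')
assign_op = w.assign_add(1.0)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)            # variables must be explicitly initialized in graph mode
    print(sess.run(w))           # -> 3.0
    print(sess.run(assign_op))   # -> 4.0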
###Output
_____no_output_____ |
Ketton Pore-Scale 3D Conditioning Example.ipynb | ###Markdown
Conditioning of a 3D-DCGAN to 3 orthogonal cross-sections, pre-trained on the Ketton dataset. In this example we have pretrained a GAN on the Ketton limestone pore-scale training image. We will use the methodology of Yeh et al. to condition GAN simulations to three orthogonal cross-sections.
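Broadly, the conditioning follows an inpainting-style latent optimization in the spirit of Yeh et al. (a sketch of the idea only; the exact loss weighting is defined inside the `conditioner` package): $\hat{z} = \arg\min_z \, \lVert M \odot (G(z) - x_{cond}) \rVert^2 + \lambda\, \mathcal{L}_{perc}(z)$, where $M$ is the conditioning mask, $G$ the pretrained generator, and the perceptual term is derived from the discriminator score. These two terms correspond to the "Current MSE" and "Current Perceptual Loss" columns reported in the optimization log further below.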
###Code
%load_ext autoreload
%autoreload 2
import torch
from models import dcgan_ketton_3d
from conditioner.conditioner import Conditioner, Unconditional
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load Pre-trained Networks We have pretrained a DCGAN model with a latent vector size of 512 on the Ketton limestone training image. The model architecture for the generator and discriminator are defined in the models folder.
###Code
generator = dcgan_ketton_3d.DCGAN3D_G()
generator.load_state_dict(torch.load("./checkpoints/dcgan_ketton_3d/netG_epoch_170_cpu.pth"))
latent_size = 512
discriminator = dcgan_ketton_3d.DCGAN3D_D()
discriminator.load_state_dict(torch.load("./checkpoints/dcgan_ketton_3d/netD_epoch_170_cpu.pth"))
###Output
_____no_output_____
###Markdown
Determine Latent Size. We first determine the size of the latent vector required to simulate images the size of our conditioning data. We can choose to do this analytically if the network architecture is known, or else empirically by scaling the latent vector.
###Code
uncond = Unconditional(3, generator, latent_size, "normal", use_cuda=True)
for i in range(1, 10):
samples = uncond.create_unconditional_simulations(1, [i, i, i]) #You can choose other sizes of latent vectors
if samples is not None:
print "Imsize: ", i, "Uncoditional Simulation Size: ", samples.shape[2]
###Output
Imsize: 1 Uncoditional Simulation Size: 64
Imsize: 2 Uncoditional Simulation Size: 80
Imsize: 3 Uncoditional Simulation Size: 96
Imsize: 4 Uncoditional Simulation Size: 112
Imsize: 5 Uncoditional Simulation Size: 128
Imsize: 6 Uncoditional Simulation Size: 144
Imsize: 7 Uncoditional Simulation Size: 160
Imsize: 8 Uncoditional Simulation Size: 176
Imsize: 9 Uncoditional Simulation Size: 192
###Markdown
Determine Trim Size. Due to border artifacts resulting from transposed convolutions, it can be necessary to trim the outer regions of the simulations. We determine this by sampling unconditional simulations and plotting their mean and standard deviation. In our case we see only small artifacts at the edges and few repeated artifacts elsewhere.
###Code
samples = []
for i in tqdm_notebook(range(32)):
samples.append(uncond.create_unconditional_simulations(1, [5, 5, 5]).reshape(1, 128, 128, 128))
samples_arr = np.concatenate(samples, 0)
mean = np.mean(samples_arr, axis=0)
std = np.std(samples_arr, axis=0)
fig, ax = plt.subplots(1, 3, figsize=(12, 12))
ax[0].imshow(mean[64], cmap="gray", vmin=0.0, vmax=0.5)
ax[1].imshow(std[64], cmap="gray", vmin=0.0, vmax=1.0)
ax[2].imshow(samples_arr[0, 64], cmap="gray")
###Output
_____no_output_____
###Markdown
Load conditioning data and set the conditioning mask. In this case we condition to three orthogonal cross-sections through the origin.
###Code
data = np.load("./datasets/ketton_3d/Ketton.npy")/255.
data = (data-0.5)/0.5
print data.min(), data.max()
mask = np.zeros((128, 128, 128))
mask[mask.shape[0]/2, :, :] = 1
mask[:, mask.shape[1]/2, :] = 1
mask[:, :, mask.shape[2]/2] = 1
conditioning_data = data[0:128, 0:128, 0:128]
fig, ax = plt.subplots(1, 3, figsize=(12, 12))
ax[0].imshow(conditioning_data[64, :, :], cmap="gray")
ax[1].imshow(conditioning_data[:, 64, :], cmap="gray")
ax[2].imshow(conditioning_data[:, :, 64], cmap="gray")
###Output
_____no_output_____
###Markdown
Create Conditioned Images. We now instantiate the conditioner and provide a generator, a discriminator, the conditioning data, and a mask. To condition on the MSE we will use `Conditioner.condition()`. Our evaluation metric is the mean squared error of the mismatch on the conditioning data.
###Code
cond = Conditioner(3, generator, discriminator=discriminator, conditioning_data=conditioning_data, mask=mask, latent_size=latent_size, latent_dist="normal", verbose=True, use_cuda=True)
#We provide the latent vector size and the MSE criterion
cond.condition(1, [5, 5, 5], 1e-2*(128*128*3))
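# Added note: the stopping criterion 1e-2*(128*128*3) = 491.52 corresponds to an
# average squared mismatch of 0.01 per conditioned voxel over the three 128x128
# cross-sections (cf. "Target MSE: 491.52" in the log below).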
x_cond = generator(cond.zhat)
###Output
('Iteration: ', 0, 'Current MSE: 13919.9', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 13919.864', ' Target MSE: 491.52')
('Iteration: ', 1, 'Current MSE: 13914.5', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 13914.449', ' Target MSE: 491.52')
('Iteration: ', 2, 'Current MSE: 7068.1', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 7068.068', ' Target MSE: 491.52')
('Iteration: ', 3, 'Current MSE: 6877.4', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 6877.379', ' Target MSE: 491.52')
('Iteration: ', 4, 'Current MSE: 4841.3', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 4841.345', ' Target MSE: 491.52')
('Iteration: ', 5, 'Current MSE: 3768.8', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 3768.836', ' Target MSE: 491.52')
('Iteration: ', 6, 'Current MSE: 3227.1', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 3227.090', ' Target MSE: 491.52')
('Iteration: ', 7, 'Current MSE: 2766.0', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 2766.015', ' Target MSE: 491.52')
('Iteration: ', 8, 'Current MSE: 2349.3', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 2349.282', ' Target MSE: 491.52')
('Iteration: ', 9, 'Current MSE: 2072.7', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 2072.664', ' Target MSE: 491.52')
('Iteration: ', 10, 'Current MSE: 1843.1', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 1843.120', ' Target MSE: 491.52')
('Iteration: ', 11, 'Current MSE: 1627.3', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 1627.318', ' Target MSE: 491.52')
('Iteration: ', 12, 'Current MSE: 1522.1', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 1522.048', ' Target MSE: 491.52')
('Iteration: ', 13, 'Current MSE: 1386.2', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 1386.226', ' Target MSE: 491.52')
('Iteration: ', 14, 'Current MSE: 1321.4', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 1321.363', ' Target MSE: 491.52')
('Iteration: ', 15, 'Current MSE: 1204.6', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 1204.573', ' Target MSE: 491.52')
('Iteration: ', 16, 'Current MSE: 1127.9', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 1127.888', ' Target MSE: 491.52')
('Iteration: ', 17, 'Current MSE: 1050.2', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 1050.159', ' Target MSE: 491.52')
('Iteration: ', 18, 'Current MSE: 944.6', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 944.562', ' Target MSE: 491.52')
('Iteration: ', 19, 'Current MSE: 882.0', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 882.006', ' Target MSE: 491.52')
('Iteration: ', 20, 'Current MSE: 805.4', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 805.420', ' Target MSE: 491.52')
('Iteration: ', 21, 'Current MSE: 744.8', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 744.780', ' Target MSE: 491.52')
('Iteration: ', 22, 'Current MSE: 696.1', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 696.146', ' Target MSE: 491.52')
('Iteration: ', 23, 'Current MSE: 658.8', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 658.816', ' Target MSE: 491.52')
('Iteration: ', 24, 'Current MSE: 617.4', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 617.365', ' Target MSE: 491.52')
('Iteration: ', 25, 'Current MSE: 582.7', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 582.712', ' Target MSE: 491.52')
('Iteration: ', 26, 'Current MSE: 551.6', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 551.569', ' Target MSE: 491.52')
('Iteration: ', 27, 'Current MSE: 518.6', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 518.632', ' Target MSE: 491.52')
('Iteration: ', 28, 'Current MSE: 489.5', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 489.477', ' Target MSE: 491.52')
('Iteration: ', 29, 'Current MSE: 459.8', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 459.782', ' Target MSE: 491.52')
('Iteration: ', 30, 'Current MSE: 436.7', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 436.702', ' Target MSE: 491.52')
('Iteration: ', 31, 'Current MSE: 416.5', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 416.495', ' Target MSE: 491.52')
('Iteration: ', 32, 'Current MSE: 395.1', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 395.140', ' Target MSE: 491.52')
('Iteration: ', 33, 'Current MSE: 372.2', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 372.190', ' Target MSE: 491.52')
('Iteration: ', 34, 'Current MSE: 354.5', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 354.537', ' Target MSE: 491.52')
('Iteration: ', 35, 'Current MSE: 342.6', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 342.592', ' Target MSE: 491.52')
('Iteration: ', 36, 'Current MSE: 326.8', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 326.834', ' Target MSE: 491.52')
('Iteration: ', 37, 'Current MSE: 315.7', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 315.726', ' Target MSE: 491.52')
('Iteration: ', 38, 'Current MSE: 303.5', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 303.449', ' Target MSE: 491.52')
('Iteration: ', 39, 'Current MSE: 291.5', ' Current Perceptual Loss: -0.003', ' Current Total Loss: 291.540', ' Target MSE: 491.52')
('Step: ', 0, ' current loss: ', array([291.5433], dtype=float32), ' target value: ', 491.52)
###Markdown
Evaluation
###Code
x_cond = generator(cond.zhat)
x_hat_cond = x_cond.data.cpu().numpy()[0, 0]
###Output
_____no_output_____
###Markdown
The following figure compares the conditioned GAN realization (top) with the conditioning data (bottom). The resulting images are visually very close and the realization satisfies the required MSE.
###Code
fig, ax = plt.subplots(2, 3, figsize=(12, 12))
m=64
ax[0, 0].imshow(x_hat_cond[m, :, :], cmap="gray")
ax[0, 1].imshow(x_hat_cond[:, m, :], cmap="gray")
ax[0, 2].imshow(x_hat_cond[:, :, m], cmap="gray")
ax[1, 0].imshow(conditioning_data[m, :, :], cmap="gray")
ax[1, 1].imshow(conditioning_data[:, m, :], cmap="gray")
ax[1, 2].imshow(conditioning_data[:, :, m], cmap="gray")
###Output
_____no_output_____
###Markdown
We now compute the error by subtracting the conditioning data from the generated realization (in absolute value). The following plot shows cross-sections that do not pass through the origin. We expect to see zero error along the lines where these cross-sections intersect the conditioning planes, and noise everywhere else. This indicates that our realization matches the data at the conditioning locations and represents a valid realization everywhere else within the generated volume.
###Code
error = np.abs(x_hat_cond-conditioning_data)
fig, ax = plt.subplots(1, 3, figsize=(12, 12))
ax[0].imshow(error[32, :, :], cmap="gray", vmin=0)
ax[1].imshow(error[:, 32, :], cmap="gray", vmin=0)
ax[2].imshow(error[:, :, 32], cmap="gray", vmin=0)
###Output
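_____no_output_____
###Markdown
As an additional check (a minimal sketch, assuming `mask` can be converted to a boolean NumPy array of the same shape as `conditioning_data`, with ones marking the conditioned voxels), we can compare the mean squared error at the conditioning locations against the error elsewhere in the volume:
###Code
# Hypothetical sanity check -- assumes `mask` is a 0/1 array matching conditioning_data's shape
mask_np = np.array(mask, dtype=bool)
diff = x_hat_cond - conditioning_data
print("MSE at conditioning locations:", np.mean(diff[mask_np] ** 2))
print("MSE away from conditioning locations:", np.mean(diff[~mask_np] ** 2))
###Output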
_____no_output_____ |
how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/.ipynb_checkpoints/train-hyperparameter-tune-deploy-with-keras-checkpoint.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.  Training, hyperparameter tune, and deploy with Keras IntroductionThis tutorial shows how to train a simple deep neural network using the MNIST dataset and Keras on Azure Machine Learning. MNIST is a popular dataset consisting of 70,000 grayscale images. Each image is a handwritten digit of `28x28` pixels, representing a number from 0 to 9. The goal is to create a multi-class classifier to identify the digit each image represents, and deploy it as a web service in Azure.For more information about the MNIST dataset, please visit [Yann LeCun's website](http://yann.lecun.com/exdb/mnist/). Prerequisite:* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning* If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to: * install the AML SDK * create a workspace and its configuration file (`config.json`)* For the local scoring test, you will also need to have `tensorflow` and `keras` installed in the current Jupyter kernel. Let's get started. First let's import some Python libraries.
###Code
%matplotlib inline
import numpy as np
import os
import matplotlib.pyplot as plt
import azureml
from azureml.core import Workspace
# check core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize workspaceInitialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureworkspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
###Code
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep='\n')
###Output
_____no_output_____
###Markdown
Create an Azure ML experimentLet's create an experiment named "keras-mnist" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure.
###Code
from azureml.core import Experiment
script_folder = './keras-mnist'
os.makedirs(script_folder, exist_ok=True)
exp = Experiment(workspace=ws, name='keras-mnist')
###Output
_____no_output_____
###Markdown
Explore dataBefore you train a model, you need to understand the data that you are using to train it. In this section you learn how to:* Download the MNIST dataset* Display some sample images Download the MNIST datasetDownload the MNIST dataset and save the files into a `data` directory locally. Images and labels for both training and testing are downloaded.
###Code
import urllib.request
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',
filename=os.path.join(data_folder, 'train-images-idx3-ubyte.gz'))
urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',
filename=os.path.join(data_folder, 'train-labels-idx1-ubyte.gz'))
urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',
filename=os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'))
urllib.request.urlretrieve('https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz',
filename=os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'))
###Output
_____no_output_____
###Markdown
Display some sample imagesLoad the compressed files into `numpy` arrays. Then use `matplotlib` to plot 30 random images from the dataset with their labels above them. Note this step requires a `load_data` function that's included in an `utils.py` file. This file is included in the sample folder. Please make sure it is placed in the same folder as this notebook. The `load_data` function simply parses the compressed files into numpy arrays.
###Code
# make sure utils.py is in the same directory as this code
from utils import load_data, one_hot_encode
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the model converge faster.
X_train = load_data(os.path.join(data_folder, 'train-images-idx3-ubyte.gz'), False) / 255.0
X_test = load_data(os.path.join(data_folder, 't10k-images-idx3-ubyte.gz'), False) / 255.0
y_train = load_data(os.path.join(data_folder, 'train-labels-idx1-ubyte.gz'), True).reshape(-1)
y_test = load_data(os.path.join(data_folder, 't10k-labels-idx1-ubyte.gz'), True).reshape(-1)
# now let's show some randomly chosen images from the training set.
count = 0
sample_size = 30
plt.figure(figsize = (16, 6))
for i in np.random.permutation(X_train.shape[0])[:sample_size]:
count = count + 1
plt.subplot(1, sample_size, count)
plt.axhline('')
plt.axvline('')
plt.text(x=10, y=-10, s=y_train[i], fontsize=18)
plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)
plt.show()
###Output
_____no_output_____
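###Markdown
As an aside, the idea behind `load_data` can be sketched as follows (this is a hypothetical re-implementation for illustration only; the notebook uses the `load_data` shipped in `utils.py`). MNIST idx files are gzip-compressed, with a small fixed-size header followed by the raw bytes:
###Code
import gzip
import numpy as np

def load_data_sketch(path, is_labels):
    # Illustrative sketch: image files have a 16-byte header, label files an 8-byte header
    with gzip.open(path, 'rb') as f:
        raw = f.read()
    if is_labels:
        return np.frombuffer(raw, dtype=np.uint8, offset=8).reshape(-1, 1)
    return np.frombuffer(raw, dtype=np.uint8, offset=16).reshape(-1, 28 * 28)
###Output
_____no_output_____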
###Markdown
Now you have an idea of what these images look like and the expected prediction outcome. Create a FileDatasetA FileDataset references one or multiple files in your datastores or public urls. The files can be of any format. FileDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred. [Learn More](https://aka.ms/azureml/howto/createdatasets)
###Code
from azureml.core.dataset import Dataset
web_paths = ['https://azureopendatastorage.blob.core.windows.net/mnist/train-images-idx3-ubyte.gz',
'https://azureopendatastorage.blob.core.windows.net/mnist/train-labels-idx1-ubyte.gz',
'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-images-idx3-ubyte.gz',
'https://azureopendatastorage.blob.core.windows.net/mnist/t10k-labels-idx1-ubyte.gz'
]
dataset = Dataset.File.from_files(path = web_paths)
###Output
_____no_output_____
###Markdown
Use the `register()` method to register datasets to your workspace so they can be shared with others, reused across various experiments, and referred to by name in your training script.You can try get the dataset first to see if it's already registered.
###Code
dataset_registered = False
try:
temp = Dataset.get_by_name(workspace = ws, name = 'mnist-dataset')
dataset_registered = True
except:
print("The dataset mnist-dataset is not registered in workspace yet.")
if not dataset_registered:
dataset = dataset.register(workspace = ws,
name = 'mnist-dataset',
description='training and test dataset',
create_new_version=True)
###Output
_____no_output_____
###Markdown
Create or Attach existing AmlComputeYou will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecturecompute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource. If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:1. create the configuration (this step is local and only takes a second)2. create the cluster (this step will take about **20 seconds**)3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your cluster
cluster_name = "gpu-cluster"
try:
compute_target = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing compute target')
except ComputeTargetException:
print('Creating a new compute target...')
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
max_nodes=4)
# create the cluster
compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
# can poll for a minimum number of nodes and for a specific timeout.
# if no min node count is provided it uses the scale settings for the cluster
compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
# use get_status() to get a detailed status for the current cluster.
print(compute_target.get_status().serialize())
###Output
_____no_output_____
###Markdown
Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named "gpu-cluster" of type `AmlCompute`.
###Code
compute_targets = ws.compute_targets
for name, ct in compute_targets.items():
print(name, ct.type, ct.provisioning_state)
###Output
_____no_output_____
###Markdown
Copy the training files into the script folderThe Keras training script is already created for you. You can simply copy it into the script folder, together with the utility library used to load compressed data file into numpy array.
###Code
import shutil
# the training logic is in the keras_mnist.py file.
shutil.copy('./keras_mnist.py', script_folder)
# the utils.py just helps loading data from the downloaded MNIST dataset into numpy arrays.
shutil.copy('./utils.py', script_folder)
###Output
_____no_output_____
###Markdown
Construct neural network in KerasIn the training script `keras_mnist.py`, it creates a very simple DNN (deep neural network), with just 2 hidden layers. The input layer has 28 * 28 = 784 neurons, each representing a pixel in an image. The first hidden layer has 300 neurons, and the second hidden layer has 100 neurons. The output layer has 10 neurons, each representing a targeted label from 0 to 9. Azure ML concepts Please note the following three things in the code below:1. The script accepts arguments using the argparse package. In this case there is one argument `--data_folder` which specifies the FileDataset in which the script can find the MNIST data``` parser = argparse.ArgumentParser() parser.add_argument('--data_folder')```2. The script is accessing the Azure ML `Run` object by executing `run = Run.get_context()`. Further down the script is using the `run` to report the loss and accuracy at the end of each epoch via callback.``` run.log('Loss', log['val_loss']) run.log('Accuracy', log['val_accuracy'])```3. When running the script on Azure ML, you can write files out to a folder `./outputs` that is relative to the root directory. This folder is specially tracked by Azure ML in the sense that any files written to that folder during script execution on the remote target will be picked up by Run History; these files (known as artifacts) will be available as part of the run history record. The next cell will print out the training code for you to inspect.
###Code
with open(os.path.join(script_folder, './keras_mnist.py'), 'r') as f:
print(f.read())
###Output
_____no_output_____
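###Markdown
For reference, a network with the shape described above could be sketched in Keras as follows. This is only an illustration of the architecture (784 -> 300 -> 100 -> 10); it is not the actual contents of `keras_mnist.py`, and the activation functions shown here are assumptions.
###Code
# Illustrative only -- the real training logic lives in keras_mnist.py
from keras.models import Sequential
from keras.layers import Dense

sketch_model = Sequential([
    Dense(300, activation='relu', input_shape=(784,)),  # first hidden layer
    Dense(100, activation='relu'),                      # second hidden layer
    Dense(10, activation='softmax')                     # one output per digit 0-9
])
sketch_model.summary()
###Output
_____no_output_____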
###Markdown
Create TensorFlow estimator & add KerasNext, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the `gpu-cluster` as compute target, and pass the mount-point of the datastore to the training code as a parameter.The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed. In this case, we add `keras` package (for the Keras framework obviously), and `matplotlib` package for plotting a "Loss vs. Accuracy" chart and record it in run history.
###Code
dataset = Dataset.get_by_name(ws, 'mnist-dataset')
# list the files referenced by mnist dataset
dataset.to_path()
from azureml.train.dnn import TensorFlow
script_params = {
'--data-folder': dataset.as_named_input('mnist').as_mount(),
'--batch-size': 50,
'--first-layer-neurons': 300,
'--second-layer-neurons': 100,
'--learning-rate': 0.001
}
est = TensorFlow(source_directory=script_folder,
script_params=script_params,
compute_target=compute_target,
entry_script='keras_mnist.py',
framework_version='2.1',
pip_packages=['keras<=2.3.1','azureml-dataprep[pandas,fuse]','matplotlib','tensorflow==2'])
###Output
_____no_output_____
###Markdown
Submit job to runSubmit the estimator to the Azure ML experiment to kick off the execution.
###Code
run = exp.submit(est)
###Output
_____no_output_____
###Markdown
Monitor the RunAs the Run is executed, it will go through the following stages:1. Preparing: A docker image is created matching the Python environment specified by the TensorFlow estimator and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation.2. Scaling: If the compute needs to be scaled up (i.e. the AmlCompute cluster requires more nodes to execute the run than currently available), the cluster will attempt to scale up in order to make the required number of nodes available. Scaling typically takes about **5 minutes**.3. Running: All scripts in the script folder are uploaded to the compute target, data stores are mounted/copied and the `entry_script` is executed. While the job is running, stdout and the `./logs` folder are streamed to the run history and can be viewed to monitor the progress of the run.4. Post-Processing: The `./outputs` folder of the run is copied over to the run historyThere are multiple ways to check the progress of a running job. We can use a Jupyter notebook widget. **Note: The widget will automatically update every 10-15 seconds, always showing you the most up-to-date information about the run**
###Code
from azureml.widgets import RunDetails
RunDetails(run).show()
###Output
_____no_output_____
###Markdown
We can also periodically check the status of the run object, and navigate to Azure portal to monitor the run.
###Code
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
In the outputs of the training script, it prints out the Keras version number. Please make a note of it. The Run objectThe Run object provides the interface to the run history -- both to the job and to the control plane (this notebook), and both while the job is running and after it has completed. It provides a number of interesting features for instance:* `run.get_details()`: Provides a rich set of properties of the run* `run.get_metrics()`: Provides a dictionary with all the metrics that were reported for the Run* `run.get_file_names()`: List all the files that were uploaded to the run history for this Run. This will include the `outputs` and `logs` folder, azureml-logs and other logs, as well as files that were explicitly uploaded to the run using `run.upload_file()`Below are some examples -- please run through them and inspect their output.
###Code
run.get_details()
run.get_metrics()
run.get_file_names()
###Output
_____no_output_____
###Markdown
Download the saved model In the training script, the Keras model is saved into two files, `model.json` and `model.h5`, in the `outputs/model` folder on the gpu-cluster AmlCompute node. Azure ML automatically uploads anything written in the `./outputs` folder into the run history file store. Subsequently, we can use the `run` object to download the model files. They are under the `outputs/model` folder in the run history file store, and are downloaded into a local folder named `model`.
###Code
# create a model folder in the current directory
os.makedirs('./model', exist_ok=True)
for f in run.get_file_names():
if f.startswith('outputs/model'):
output_file_path = os.path.join('./model', f.split('/')[-1])
print('Downloading from {} to {} ...'.format(f, output_file_path))
run.download_file(name=f, output_file_path=output_file_path)
###Output
_____no_output_____
###Markdown
Predict on the test setLet's check the version of the local Keras. Make sure it matches with the version number printed out in the training script. Otherwise you might not be able to load the model properly.
###Code
import keras
import tensorflow as tf
print("Keras version:", keras.__version__)
print("Tensorflow version:", tf.__version__)
###Output
_____no_output_____
###Markdown
Now let's load the downloaded model.
###Code
from keras.models import model_from_json
# load json and create model
json_file = open('model/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model/model.h5")
print("Model loaded from disk.")
###Output
_____no_output_____
###Markdown
Feed test dataset to the persisted model to get predictions.
###Code
# evaluate loaded model on test data
loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
y_test_ohe = one_hot_encode(y_test, 10)
y_hat = np.argmax(loaded_model.predict(X_test), axis=1)
# print the first 30 labels and predictions
print('labels: \t', y_test[:30])
print('predictions:\t', y_hat[:30])
###Output
_____no_output_____
###Markdown
Calculate the overall accuracy by comparing the predicted value against the test set.
###Code
print("Accuracy on the test set:", np.average(y_hat == y_test))
###Output
_____no_output_____
###Markdown
Intelligent hyperparameter tuningWe have trained the model with one set of hyperparameters, now let's see how we can do hyperparameter tuning by launching multiple runs on the cluster. First let's define the parameter space using random sampling.
###Code
from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal
from azureml.train.hyperdrive import choice, loguniform
ps = RandomParameterSampling(
{
'--batch-size': choice(25, 50, 100),
'--first-layer-neurons': choice(10, 50, 200, 300, 500),
'--second-layer-neurons': choice(10, 50, 200, 500),
'--learning-rate': loguniform(-6, -1)
}
)
###Output
_____no_output_____
###Markdown
Next, we will create a new estimator without the above parameters since they will be passed in later by the Hyperdrive configuration. Note we still need to keep the `data-folder` parameter since that's not a hyperparameter we will sweep.
###Code
est = TensorFlow(source_directory=script_folder,
script_params={'--data-folder': dataset.as_named_input('mnist').as_mount()},
compute_target=compute_target,
entry_script='keras_mnist.py',
framework_version='2.0',
pip_packages=['keras<=2.3.1','azureml-dataprep[pandas,fuse]','matplotlib'])
###Output
_____no_output_____
###Markdown
Now we will define an early termination policy. The `BanditPolicy` basically states that the job should be checked every 2 iterations. If the primary metric (defined later) falls outside of the top 10% range, Azure ML terminates the job. This saves us from continuing to explore hyperparameters that don't show promise of helping reach our target metric.
###Code
policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1)
###Output
_____no_output_____
###Markdown
Now we are ready to configure a run configuration object, and specify the primary metric `Accuracy` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximize this value. We also set the number of samples to 20, and the maximum number of concurrent jobs to 4, which is the same as the number of nodes in our compute cluster.
###Code
hdc = HyperDriveConfig(estimator=est,
hyperparameter_sampling=ps,
policy=policy,
primary_metric_name='Accuracy',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=20,
max_concurrent_runs=4)
###Output
_____no_output_____
###Markdown
Finally, let's launch the hyperparameter tuning job.
###Code
hdr = exp.submit(config=hdc)
###Output
_____no_output_____
###Markdown
We can use a run history widget to show the progress. Be patient as this might take a while to complete.
###Code
RunDetails(hdr).show()
hdr.wait_for_completion(show_output=True)
assert(hdr.get_status() == "Completed")
###Output
_____no_output_____
###Markdown
Warm start a Hyperparameter Tuning experiment and resuming child runsOften times, finding the best hyperparameter values for your model can be an iterative process, needing multiple tuning runs that learn from previous hyperparameter tuning runs. Reusing knowledge from these previous runs will accelerate the hyperparameter tuning process, thereby reducing the cost of tuning the model and will potentially improve the primary metric of the resulting model. When warm starting a hyperparameter tuning experiment with Bayesian sampling, trials from the previous run will be used as prior knowledge to intelligently pick new samples, so as to improve the primary metric. Additionally, when using Random or Grid sampling, any early termination decisions will leverage metrics from the previous runs to determine poorly performing training runs. Azure Machine Learning allows you to warm start your hyperparameter tuning run by leveraging knowledge from up to 5 previously completed hyperparameter tuning parent runs. Additionally, there might be occasions when individual training runs of a hyperparameter tuning experiment are cancelled due to budget constraints or fail due to other reasons. It is now possible to resume such individual training runs from the last checkpoint (assuming your training script handles checkpoints). Resuming an individual training run will use the same hyperparameter configuration and mount the storage used for that run. The training script should accept the "--resume-from" argument, which contains the checkpoint or model files from which to resume the training run. You can also resume individual runs as part of an experiment that spends additional budget on hyperparameter tuning. Any additional budget, after resuming the specified training runs is used for exploring additional configurations.For more information on warm starting and resuming hyperparameter tuning runs, please refer to the [Hyperparameter Tuning for Azure Machine Learning documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters) Find and register best modelWhen all the jobs finish, we can find out the one that has the highest accuracy.
###Code
best_run = hdr.get_best_run_by_primary_metric()
print(best_run.get_details()['runDefinition']['arguments'])
###Output
_____no_output_____
###Markdown
Now let's list the model files uploaded during the run.
###Code
print(best_run.get_file_names())
###Output
_____no_output_____
###Markdown
We can then register the folder (and all files in it) as a model named `keras-dnn-mnist` under the workspace for deployment.
###Code
model = best_run.register_model(model_name='keras-mlp-mnist', model_path='outputs/model')
###Output
_____no_output_____
###Markdown
Deploy the model in ACINow we are ready to deploy the model as a web service running in Azure Container Instance [ACI](https://azure.microsoft.com/en-us/services/container-instances/). Azure Machine Learning accomplishes this by constructing a Docker image with the scoring logic and model baked in. Create score.pyFirst, we will create a scoring script that will be invoked by the web service call. * Note that the scoring script must have two required functions, `init()` and `run(input_data)`. * In `init()` function, you typically load the model into a global object. This function is executed only once when the Docker container is started. * In `run(input_data)` function, the model is used to predict a value based on the input data. The input and output to `run` typically use JSON as serialization and de-serialization format but you are not limited to that.
###Code
%%writefile score.py
import json
import numpy as np
import os
from keras.models import model_from_json
from azureml.core.model import Model
def init():
global model
model_root = Model.get_model_path('keras-mlp-mnist')
# load json and create model
json_file = open(os.path.join(model_root, 'model.json'), 'r')
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json)
# load weights into new model
model.load_weights(os.path.join(model_root, "model.h5"))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make prediction
y_hat = np.argmax(model.predict(data), axis=1)
return y_hat.tolist()
###Output
_____no_output_____
###Markdown
Create myenv.ymlWe also need to create an environment file so that Azure Machine Learning can install the necessary packages in the Docker image which are required by your scoring script. In this case, we need to specify conda packages `tensorflow` and `keras`.
###Code
from azureml.core.conda_dependencies import CondaDependencies
cd = CondaDependencies.create()
cd.add_tensorflow_conda_package()
cd.add_conda_package('keras<=2.3.1')
cd.add_pip_package("azureml-defaults")
cd.save_to_file(base_directory='./', conda_file_path='myenv.yml')
print(cd.serialize_to_string())
###Output
_____no_output_____
###Markdown
Deploy to ACIWe are almost ready to deploy. Create the inference configuration and deployment configuration and deploy to ACI. This cell will run for about 7-8 minutes.
###Code
from azureml.core.webservice import AciWebservice
from azureml.core.model import InferenceConfig
from azureml.core.model import Model
from azureml.core.environment import Environment
myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
auth_enabled=True, # this flag generates API keys to secure access
memory_gb=1,
tags={'name': 'mnist', 'framework': 'Keras'},
description='Keras MLP on MNIST')
service = Model.deploy(workspace=ws,
name='keras-mnist-svc',
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(True)
print(service.state)
###Output
_____no_output_____
###Markdown
**Tip: If something goes wrong with the deployment, the first thing to look at is the logs from the service by running the following command:** `print(service.get_logs())` This is the scoring web service endpoint:
###Code
print(service.scoring_uri)
###Output
_____no_output_____
###Markdown
Test the deployed modelLet's test the deployed model. Pick 30 random samples from the test set, and send them to the web service hosted in ACI. Note here we are using the `run` API in the SDK to invoke the service. You can also make raw HTTP calls using any HTTP tool such as curl.After the invocation, we print the returned predictions and plot them along with the input images. Use a red font color and an inverted image (white on black) to highlight the misclassified samples. Note since the model accuracy is pretty high, you might have to run the below cell a few times before you can see a misclassified sample.
###Code
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
plt.subplot(1, n, i + 1)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if y_test[s] != result[i] else 'black'
clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
plt.text(x=10, y=-10, s=y_test[s], fontsize=18, color=font_color)
plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
We can retrieve the API keys used for accessing the HTTP endpoint.
###Code
# Retrieve the API keys. Two keys were generated.
key1, key2 = service.get_keys()
print(key1)
###Output
_____no_output_____
###Markdown
We can now construct a raw HTTP request and send it to the service. Don't forget to add the key to the HTTP header.
###Code
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + key1}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
###Output
_____no_output_____
###Markdown
Let's look at the workspace after the web service was deployed. You should see * a registered model named 'keras-mlp-mnist' with the id 'keras-mlp-mnist:1' * a webservice called 'keras-mnist-svc' with some scoring URL
###Code
model = ws.models['keras-mlp-mnist']
print("Model: {}, ID: {}".format('keras-mlp-mnist', model.id))
webservice = ws.webservices['keras-mnist-svc']
print("Webservice: {}, scoring URI: {}".format('keras-mnist-svc', webservice.scoring_uri))
###Output
_____no_output_____
###Markdown
Clean upYou can delete the ACI deployment with a simple delete API call.
###Code
service.delete()
###Output
_____no_output_____ |
book/_build/jupyter_execute/notebooks/02-coordinate-reference-systems.ipynb | ###Markdown
Coordinate reference systems
###Code
%matplotlib inline
import pandas as pd
import geopandas
countries = geopandas.read_file("zip://./data/ne_110m_admin_0_countries.zip")
cities = geopandas.read_file("zip://./data/ne_110m_populated_places.zip")
rivers = geopandas.read_file("zip://./data/ne_50m_rivers_lake_centerlines.zip")
###Output
_____no_output_____
###Markdown
Coordinate reference systemsUp to now, we have used the geometry data with certain coordinates without further wondering what those coordinates mean or how they are expressed.> The **Coordinate Reference System (CRS)** relates the coordinates to a specific location on earth.For a nice in-depth explanation, see https://docs.qgis.org/2.8/en/docs/gentle_gis_introduction/coordinate_reference_systems.html Geographic coordinates> Degrees of latitude and longitude.>> E.g. 48°51′N, 2°17′E The most known type of coordinates are geographic coordinates: we define a position on the globe in degrees of latitude and longitude, relative to the equator and the prime meridian. With this system, we can easily specify any location on earth. It is used widely, for example in GPS. If you inspect the coordinates of a location in Google Maps, you will also see latitude and longitude.**Attention!** In Python we use (lon, lat) and not (lat, lon)- Longitude: [-180, 180]- Latitude: [-90, 90] Projected coordinates> `(x, y)` coordinates are usually in meters or feetAlthough the earth is a globe, in practice we usually represent it on a flat surface: think about a physical map, or the figures we have made with Python on our computer screen.Going from the globe to a flat map is what we call a *projection*.We project the surface of the earth onto a 2D plane so we can express locations in cartesian x and y coordinates, on a flat surface. In this plane, we then typically work with a length unit such as meters instead of degrees, which makes the analysis more convenient and effective.However, there is an important remark: the 3 dimensional earth can never be represented perfectly on a 2 dimensional map, so projections inevitably introduce distortions. To minimise such errors, there are different approaches to project, each with specific advantages and disadvantages.Some projection systems will try to preserve the area size of geometries, such as the Albers Equal Area projection. Other projection systems try to preserve angles, such as the Mercator projection, but will see big distortions in the area. Every projection system will always have some distortion of area, angle or distance. **Projected size vs actual size (Mercator projection)**: Coordinate Reference Systems in Python / GeoPandas A GeoDataFrame or GeoSeries has a `.crs` attribute which holds (optionally) a description of the coordinate reference system of the geometries:
###Code
countries.crs
###Output
_____no_output_____
###Markdown
For the `countries` dataframe, it indicates that it uses the EPSG 4326 / WGS84 lon/lat reference system, which is one of the most used for geographic coordinates.It uses coordinates as latitude and longitude in degrees, as can be seen from the x/y labels on the plot:
###Code
countries.plot()
###Output
_____no_output_____
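###Markdown
As a quick illustration of the (lon, lat) ordering mentioned above (a small sketch; the Paris coordinates are rounded), a point for Paris at 48°51′N, 2°17′E is constructed with the longitude first:
###Code
from shapely.geometry import Point

# note the order: x = longitude, y = latitude
paris = geopandas.GeoSeries([Point(2.2833, 48.85)], crs={'init': 'epsg:4326'})
paris
###Output
_____no_output_____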
###Markdown
The `.crs` attribute is given as a dictionary. In this case, it only indicates the EPSG code, but it can also contain the full "proj4" string (in dictionary form).Possible CRS representations:- **`proj4` string** Example: `+proj=longlat +datum=WGS84 +no_defs` Or its dict representation: `{'proj': 'longlat', 'datum': 'WGS84', 'no_defs': True}`- **EPSG code** Example: `EPSG:4326` = WGS84 geographic CRS (longitude, latitude) - Well-Known Text (WKT) representation (better support coming with PROJ6 in the next GeoPandas version)See e.g. https://epsg.io/4326Under the hood, GeoPandas uses the `pyproj` / `PROJ` libraries to deal with the re-projections.For more information, see also http://geopandas.readthedocs.io/en/latest/projections.html. Transforming to another CRSWe can convert a GeoDataFrame to another reference system using the `to_crs` function. For example, let's convert the countries to the World Mercator projection (http://epsg.io/3395):
###Code
# remove Antartica, as the Mercator projection cannot deal with the poles
countries = countries[(countries['name'] != "Antarctica")]
countries_mercator = countries.to_crs(epsg=3395) # or .to_crs({'init': 'epsg:3395'})
countries_mercator.plot()
###Output
_____no_output_____
###Markdown
Note the different scale of x and y. Why use a different CRS?There are sometimes good reasons you want to change the coordinate reference system of your dataset, for example:- Different sources with different CRS -> need to convert to the same crs ```python df1 = geopandas.read_file(...) df2 = geopandas.read_file(...) df2 = df2.to_crs(df1.crs) ```- Mapping (distortion of shape and distances)- Distance / area based calculations -> ensure you use an appropriate projected coordinate system expressed in a meaningful unit such as metres or feet (not degrees).**ATTENTION:**All the calculations that happen in geopandas and shapely assume that your data is in a 2D cartesian plane, and thus the result of those calculations will only be correct if your data is properly projected. Let's practice!Again, we will go back to the Paris datasets. Up to now, we provided the datasets in an appropriate projected CRS for the exercises. But the original data actually were in geographic coordinates. In the following exercises, we will start from there.--- Going back to the Paris districts dataset, this is now provided as a GeoJSON file (`"data/paris_districts.geojson"`) in geographic coordinates.For converting to projected coordinates, we will use the standard projected CRS for France, the RGF93 / Lambert-93 reference system, referenced by the `EPSG:2154` number (in Belgium this would be Lambert 72, EPSG:31370).**EXERCISE: Projecting a GeoDataFrame*** Read the districts dataset (`"data/paris_districts.geojson"`) into a GeoDataFrame called `districts`.* Look at the CRS attribute of the GeoDataFrame. Do you recognize the EPSG number?* Make a simple plot of the `districts` dataset.* Calculate the area of all districts.* Convert the `districts` to a projected CRS (using the `EPSG:2154` for France). Call the new dataset `districts_RGF93`.* Make a similar plot of `districts_RGF93`.* Calculate the area of all districts again with `districts_RGF93` (the result will now be expressed in m²). Hints* The CRS information is stored in the `crs` attribute of a GeoDataFrame.* Making a simple plot of a GeoDataFrame can be done with the `.plot()` method.* Converting to a different CRS can be done with the `to_crs()` method, and the CRS can be specified as an EPSG number using the `epsg` keyword.
###Code
# %load _solved/solutions/02-coordinate-reference-systems1.py
# %load _solved/solutions/02-coordinate-reference-systems2.py
# %load _solved/solutions/02-coordinate-reference-systems3.py
# %load _solved/solutions/02-coordinate-reference-systems4.py
# %load _solved/solutions/02-coordinate-reference-systems5.py
# %load _solved/solutions/02-coordinate-reference-systems6.py
# %load _solved/solutions/02-coordinate-reference-systems7.py
# %load _solved/solutions/02-coordinate-reference-systems8.py
###Output
_____no_output_____
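###Markdown
For reference, a possible solution could look like the following (a sketch only; the `%load` cells above pull in the official solutions):
###Code
# a possible solution sketch
districts = geopandas.read_file("data/paris_districts.geojson")
print(districts.crs)                  # geographic coordinates (EPSG:4326)
districts.plot()
print(districts.area.head())          # areas in "square degrees" - not meaningful
districts_RGF93 = districts.to_crs(epsg=2154)
districts_RGF93.plot()
print(districts_RGF93.area.head())    # areas now expressed in square metres
###Output
_____no_output_____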
###Markdown
**EXERCISE:**In the first notebook, we did an exercise on plotting the bike stations locations in Paris and adding a background map to it using the `contextily` package.Currently, `contextily` assumes that your data is in the Web Mercator projection, the system used by most web tile services. And in that first exercise, we provided the data in the appropriate CRS so you didn't need to care about this aspect.However, typically, your data will not come in Web Mercator (`EPSG:3857`) and you will have to align them with web tiles on your own. * Read the bike stations datasets (`"data/paris_bike_stations.geojson"`) into a GeoDataFrame called `stations`.* Convert the `stations` dataset to the Web Mercator projection (`EPSG:3857`). Call the result `stations_webmercator`, and inspect the result.* Make a plot of this projected dataset (specify the marker size to be 5) and add a background map using `contextily`. Hints* Making a simple plot of a GeoDataFrame can be done with the `.plot()` method. This returns a matplotlib axes object.* The marker size can be specified with the `markersize` keyword if the `plot()` method.* To add a background map, use the `contextily.add_basemap()` function. It takes the matplotlib `ax` to which to add a map as the first argument.
###Code
# %load _solved/solutions/02-coordinate-reference-systems9.py
# %load _solved/solutions/02-coordinate-reference-systems10.py
# %load _solved/solutions/02-coordinate-reference-systems11.py
###Output
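_____no_output_____
###Markdown
For reference, a possible solution could look like the following (a sketch only; the `%load` cells above pull in the official solutions):
###Code
# a possible solution sketch
import contextily

stations = geopandas.read_file("data/paris_bike_stations.geojson")
stations_webmercator = stations.to_crs(epsg=3857)
print(stations_webmercator.crs)
ax = stations_webmercator.plot(markersize=5)
contextily.add_basemap(ax)
###Output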
_____no_output_____ |
analyse_tags.ipynb | ###Markdown
Analyse public tags added to TroveThis notebook loads public tags that users have added to records in Trove from the CSV file [harvested by this notebook](harvest-tags.ipynb). It then attempts some analysis of the tags.The complete CSV is too large to store on GitHub. You can download it from [CloudStor](https://cloudstor.aarnet.edu.au/plus/s/YiWStNrhnTo18JI) or [Zenodo](https://doi.org/10.5281/zenodo.5094314).User content added to Trove, including tags, is available for reuse under a CC-BY-NC licence.
###Code
import pandas as pd
import altair as alt
from wordcloud import WordCloud
from IPython.display import display, Image
# You will need to download the CSV file first from CloudStor or Zenodo
df = pd.read_csv('trove_tags_20210710.csv')
###Output
_____no_output_____
###Markdown
Tags by zone
###Code
df['zone'].value_counts()
###Output
_____no_output_____
###Markdown
How many duplicates across zones?A single resource in Trove can appear in multiple zones; for example, a book that includes maps and illustrations might appear in the 'book', 'picture', and 'map' zones. This means that some of the tags will essentially be duplicates: harvested from different zones, but relating to the same resource. We can quantify this by finding out how many tags there are in the overlapping 'book', 'article', 'picture', 'music', 'map', and 'collection' zones, then dropping duplicates based on the `tag`, `date` and `record_id` fields.
###Code
# Total tags across overlapping zones
df.loc[df['zone'].isin(['book', 'article', 'picture', 'music', 'map', 'collection'])].shape
###Output
_____no_output_____
###Markdown
Now let's remove the duplicates and see how many are left.
###Code
df.loc[df['zone'].isin(['book', 'article', 'picture', 'music', 'map', 'collection'])].drop_duplicates(subset=['tag', 'date', 'record_id']).shape
###Output
_____no_output_____
###Markdown
So there's about 50,000 'duplicates'. This doesn't really matter if you want to examine tagging behaviour within zones, but if you're aggregating tags across zones you might want to remove them, as demonstrated below. Top tags!If we're going to look at the most common tags across all zones, then we should probably remove the duplicates mentioned above first.
###Code
# Dedupe overlapping zones
deduped_works = df.loc[df['zone'].isin(['book', 'article', 'picture', 'music', 'map', 'collection'])].drop_duplicates(subset=['tag', 'date', 'record_id'])
# Non overlapping zones
other_zones = df.loc[df['zone'].isin(['newspaper', 'gazette', 'list'])]
# Combine the two to create a new deduped df
deduped = pd.concat([deduped_works, other_zones])
deduped.shape
###Output
_____no_output_____
###Markdown
Now let's view the 50 most common tags.
###Code
deduped['tag'].value_counts()[:50]
###Output
_____no_output_____
###Markdown
Let's convert the complete tag counts into a new dataframe, and save it as a CSV file.
###Code
tag_counts = deduped['tag'].value_counts().to_frame().reset_index()
tag_counts.columns = ['tag', 'count']
tag_counts.to_csv('trove_tag_counts_20210710.csv', index=False)
###Output
_____no_output_____
###Markdown
Let's display the top 200 tags as a word cloud.
###Code
# Get the top 200 tags
top_200 = tag_counts[:200].to_dict(orient='records')
# Reshape into a tag:count dictionary.
top_200 = {tag['tag']: tag['count'] for tag in top_200}
WordCloud(width=800, height=500).fit_words(top_200).to_image()
###Output
_____no_output_____
###Markdown
Tags on picturesMost of the tags are on newspaper articles, but we can filter the results to look at the top tags in other zones.
###Code
df.loc[df['zone'] == 'picture']['tag'].value_counts()[:20]
###Output
_____no_output_____
###Markdown
View tags by yearWe can use the `date` field to examine when tags were added.
###Code
# Convert date to datetime data type
df['date'] = pd.to_datetime(df['date'])
# Create a new column with the year
df['year'] = df['date'].dt.year
# Get counts of tags by year
year_counts = df.value_counts(['year', 'zone']).to_frame().reset_index()
year_counts.columns = ['year', 'zone', 'count']
# Chart tags by year
alt.Chart(year_counts).mark_bar(size=18).encode(
x=alt.X('year:Q', axis=alt.Axis(format='c')),
y=alt.Y('count:Q', stack=True),
color='zone:N',
tooltip=['year:Q', 'count:Q', 'zone:N']
)
###Output
_____no_output_____
###Markdown
An obvious feature in the chart above is the large number of tags in zones other than 'newspaper' that were added in 2009. From memory I believe these 'tags' were automatically ingested from related Wikipedia pages. Unlike the bulk of the tags, these were not added by individual users, so if your interest is user activity you might want to exclude these by filtering on date or zone. View tags by month
###Code
# This creates a column with the date of the first day of the month in which the tag was added
# We can use this to aggregate by month
df['year_month'] = df['date'] + pd.offsets.MonthEnd(0) - pd.offsets.MonthBegin(normalize=True)
# Get tag counts by month
month_counts = df.value_counts(['year_month', 'zone']).to_frame().reset_index()
month_counts.columns = ['year_month', 'zone', 'count']
alt.Chart(month_counts).mark_bar().encode(
x='yearmonth(year_month):T',
y='count:Q',
color='zone:N',
tooltip=['yearmonth(year_month):T', 'count', 'zone']
).properties(width=700).interactive()
###Output
_____no_output_____
###Markdown
So we can see that the machine generated tags were added in November 2009. We can even zoom in further to see on which days most of the automatically generated tags were ingested.
###Code
df.loc[df['year_month'] == '2009-11-01']['date'].dt.floor('d').value_counts()
###Output
_____no_output_____
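###Markdown
If you want to exclude these machine-generated tags from an analysis of user activity, one option (a sketch based on the pattern identified above) is to drop tags outside the newspaper zone that were added during the November 2009 ingest:
###Code
# Keep everything except non-newspaper tags added in November 2009 (the Wikipedia ingest)
user_tags = df.loc[~((df['zone'] != 'newspaper') & (df['year_month'] == '2009-11-01'))]
user_tags.shape
###Output
_____no_output_____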
###Markdown
View tags by month in newspapers and gazettes
###Code
alt.Chart(month_counts.loc[month_counts['zone'].isin(['newspaper', 'gazette'])]).mark_bar().encode(
x='yearmonth(year_month):T',
y='count:Q',
color='zone:N',
tooltip=['yearmonth(year_month):T', 'count', 'zone']
).properties(width=700)
###Output
_____no_output_____
###Markdown
What's the trend in newspaper tagging? There seems to have been a drop since the Trove interface was changed, but the month-to-month differences are quite large, so there might be other factors at play.
###Code
base = alt.Chart(month_counts.loc[(month_counts['zone'].isin(['newspaper'])) & (month_counts['year_month'] < '2021-07-01')]).mark_point().encode(
x='yearmonth(year_month):T',
y='count:Q',
tooltip=['yearmonth(year_month):T', 'count', 'zone']
).properties(width=700)
polynomial_fit = base.transform_regression(
'year_month', 'count', method="poly", order=4
).mark_line(color="red")
alt.layer(base, polynomial_fit)
###Output
_____no_output_____ |
kaggle_30_days_ml.ipynb | ###Markdown
Machine LearningKaggle's 30 days of Machine Learning *AI Pilipinas**Prepared by:***Jude Michael Teves** Faculty, Software Technology Department College of Computer Studies - De La Salle University This notebook shows how to perform basic machine learning in Python using sklearn. Preliminaries Import libraries These 3 libraries are sometimes called the data science library trio or trifecta since almost all data analysis projects in Python use these.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
The following cell is for modifying the aesthetics of the graphs.
###Code
import seaborn as sns
sns.set_style('darkgrid')
sns.set_palette('Set2')
# sns.color_palette('Set2')
###Output
_____no_output_____
###Markdown
Load DatasetWe'll be using the **Iris dataset** for this exercise. The dataset contains 3 classes: - 0 - Iris-Setosa - 1 - Iris-Versicolour - 2 - Iris-VirginicaWe will also be splitting the dataset into train and test sets.
###Code
from sklearn import datasets
from sklearn.model_selection import train_test_split
X, y = datasets.load_iris(return_X_y=True)
###Output
_____no_output_____
###Markdown
Exploratory Data AnalysisIt is a good practice to explore our data first before we do modeling. Descriptive Statistics
###Code
pd.DataFrame(X).describe()
###Output
_____no_output_____
###Markdown
Visualize the dataTypically, in the EDA step, we visualize the data that we have to understand it better.
###Code
merged_X_y = np.concatenate((X, y.reshape(-1, 1)), axis=1)
df = pd.DataFrame(merged_X_y, columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'target'])
df['target'] = df['target'].astype(int)
plt.figure(figsize=(8,6), dpi=100)
sns.scatterplot(data=df, x='sepal_length', y='sepal_width', hue='target', style='target', palette="Set2")
plt.title('Whole Set Data Points')
plt.show()
fig, ax = plt.subplots(1,1, figsize=(5,3), dpi=100)
df['target'].value_counts().sort_index().plot(kind='bar')  # sort by class index so the labels line up
plt.xticks(ticks=range(3), labels=['Setosa', 'Versicolour', 'Virginica'], rotation=0)
plt.ylabel('Count')
plt.title('Flower Type distribution')
plt.show()
fig, ax = plt.subplots(1,1, figsize=(8,4), dpi=100)
df[df['target']==0].iloc[:,0].hist(alpha=0.5, label='Setosa')
df[df['target']==1].iloc[:,0].hist(alpha=0.5, label='Versicolour')
df[df['target']==2].iloc[:,0].hist(alpha=0.5, label='Virginica')
plt.title('Sepal Length Distribution')
plt.ylabel('Count')
plt.xlabel('Length (centimeters)')
plt.legend()
plt.show()
fig, ax = plt.subplots(1,1, figsize=(8,4), dpi=100)
df[df['target']==0].iloc[:,1].hist(alpha=0.5, label='Setosa')
df[df['target']==1].iloc[:,1].hist(alpha=0.5, label='Versicolour')
df[df['target']==2].iloc[:,1].hist(alpha=0.5, label='Virginica')
plt.title('Sepal Width Distribution')
plt.ylabel('Count')
plt.xlabel('Length (centimeters)')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Modeling using sklearn**What is sklearn?**> "Scikit-learn is an open source machine learning library that supports supervised and unsupervised learning. It also provides various tools for model fitting, data preprocessing, model selection and evaluation, and many other utilities." For more details, please refer to the sklearn website.sklearn has classification and regression implementations for many machine learning models. We'll be using them in the following examples. In practice, we often use the `sklearn` library to do machine learning. We can train and evaluate an ML model with the following lines of code: **TL;DR Code**```pythonmodel = ... use appropriate model and set necessary hyperparameters.model.fit(X_train, y_train) train the model using train set.model.score(X_test, y_test) predict the test set and evaluate the results.```This is applicable to almost all (if not all) ML algorithms in sklearn. Note that this is the simplest case and a naive way of doing things. This could work if you have a very easy problem/dataset. Normally, you will be doing other things such as preprocessing, normalization, feature engineering, and hyperparameter optimization in between. Preprocessing
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(f"Original Size | Input: {X.shape}, Output: {y.shape}")
print(f"Train Set Size | Input: {X_train.shape}, Output: {y_train.shape}")
print(f"Test Set Size | Input: {X_test.shape}, Output: {y_test.shape}")
train_set = np.concatenate((X_train, y_train.reshape(-1,1)), axis=1)
df_train = pd.DataFrame(train_set, columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'target'])
# df_train = pd.DataFrame(train_set, columns=columns)
df_train['target'] = df_train['target'].astype(int)
test_set = np.concatenate((X_test, y_test.reshape(-1,1)), axis=1)
df_test = pd.DataFrame(test_set, columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'target'])
# df_test = pd.DataFrame(test_set, columns=columns)
df_test['target'] = df_test['target'].astype(int)
df_train.head()
plt.figure(figsize=(8,6), dpi=100)
sns.scatterplot(data=df_train, x='sepal_length', y='sepal_width', hue='target', style='target', palette="Set2")
sns.scatterplot(data=df_test, x='sepal_length', y='sepal_width', color='black', label='unlabeled')
plt.title('Train + Unlabeled Set Data Points')
plt.show()
###Output
_____no_output_____
###Markdown
ClassificationLet's do classification using sklearn Model Training
###Code
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=3, p=2, n_jobs=-1) # instantiate model
clf.fit(X_train, y_train) # train the model
###Output
_____no_output_____
###Markdown
Prediction
###Code
y_pred = clf.predict(X_test) # predict the corresponding labels given the trained model and input matrix
y_pred[:5]
###Output
_____no_output_____
###Markdown
EvaluationBy default, this uses the accuracy metric.
###Code
score = clf.score(X_test, y_test) # predicts and evaluates in one line
print(f"Test Set Score: {score:.2%}")
###Output
Test Set Score: 100.00%
###Markdown
Since this is an easy dataset, we were able to get very high accuracies for both the train and test sets. Let's also compute other classification metrics.
###Code
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
0 1.00 1.00 1.00 10
1 1.00 1.00 1.00 9
2 1.00 1.00 1.00 11
accuracy 1.00 30
macro avg 1.00 1.00 1.00 30
weighted avg 1.00 1.00 1.00 30
###Markdown
Visualize our predictions
###Code
y_pred = clf.predict(X_test)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,6), dpi=100)
pred_set = np.concatenate((X_test, np.array(y_pred).reshape(-1,1)), axis=1)
df_pred = pd.DataFrame(pred_set, columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'target'])
df_pred['target'] = df_pred['target'].astype(int)
df_pred = pd.concat([df_train, df_pred])
sns.scatterplot(data=df_train, x='sepal_length', y='sepal_width', hue='target', style='target', palette="Set2", ax=ax1)
sns.scatterplot(data=df_test, x='sepal_length', y='sepal_width', color='black', label='unlabeled', palette="Set2", ax=ax1)
sns.scatterplot(data=df_pred, x='sepal_length', y='sepal_width', hue='target', style='target', palette="Set2", ax=ax2)
ax1.set_title('Train + Unlabeled Set Data Points')
ax2.set_title('Train + Predicted Set Data Points')
plt.show()
###Output
_____no_output_____
###Markdown
Spam ClassifierLet's try to identify which emails are spam based on the content.
###Code
df_spam = pd.read_csv('https://raw.githubusercontent.com/Cyntwikip/data-repository/main/spam_ham_dataset.csv')
df_spam = df_spam[['label', 'text']]
df_spam.head()
fig, ax = plt.subplots(1,1, figsize=(5,3), dpi=100)
df_spam['label'].value_counts().plot(kind='bar')
plt.title('Spam distribution')
plt.xticks(rotation=0)
plt.ylabel('Count')
plt.show()
###Output
_____no_output_____
###Markdown
PreprocessingWe will preprocess our text data first so that it can be understood by our ML model.
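Before applying it to the emails, here is a small, self-contained illustration of what `CountVectorizer` produces. The toy corpus is my own example, not part of the dataset, and on older scikit-learn versions `get_feature_names()` replaces `get_feature_names_out()`:

```python
# Each document becomes a row of word counts over the learned vocabulary.
from sklearn.feature_extraction.text import CountVectorizer

toy_corpus = ["free money now", "meeting at noon", "free lunch at noon"]
toy_vec = CountVectorizer()
toy_counts = toy_vec.fit_transform(toy_corpus)   # sparse document-term matrix
print(toy_vec.get_feature_names_out())           # learned vocabulary
print(toy_counts.toarray())                      # counts per document
```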
###Code
X = df_spam['text']
y = df_spam['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(f"Original Size | Input: {X.shape}, Output: {y.shape}")
print(f"Train Set Size | Input: {X_train.shape}, Output: {y_train.shape}")
print(f"Test Set Size | Input: {X_test.shape}, Output: {y_test.shape}")
from sklearn.feature_extraction.text import CountVectorizer
corpus = X_train
vec = CountVectorizer(stop_words='english', min_df=10)
X_train_transformed = vec.fit_transform(X_train).todense()
X_test_transformed = vec.transform(X_test).todense()
X_train_transformed.shape, X_test_transformed.shape
###Output
_____no_output_____
###Markdown
Modeling
###Code
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X_train_transformed, y_train)
score = clf.score(X_train_transformed, y_train) # predicts and evaluates in one line
print(f"Train Set Score: {score:.2%}")
score = clf.score(X_test_transformed, y_test) # predicts and evaluates in one line
print(f"Test Set Score: {score:.2%}")
###Output
Test Set Score: 97.68%
###Markdown
Not Spam (Ham) examples
###Code
hams = y_test[y_test=='ham'].iloc[:5]
hams
sample = X_test[hams.index]
sample
clf.predict(vec.transform(sample))
###Output
_____no_output_____
###Markdown
Spam examples
###Code
spams = y_test[y_test=='spam'].iloc[:5]
spams
sample = X_test[spams.index]
sample
clf.predict(vec.transform(sample))
###Output
_____no_output_____
###Markdown
RegressionRegression is very similar to classification in sklearn.We'll be using another dataset for the regression task since we need continuous values as labels / outputs / targets. One readily-available dataset for this is the **Boston dataset**. It contains the house pricing data in Boston as well as the following features: - CRIM per capita crime rate by town - ZN proportion of residential land zoned for lots over 25,000 sq.ft. - INDUS proportion of non-retail business acres per town - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) - NOX nitric oxides concentration (parts per 10 million) - RM average number of rooms per dwelling - AGE proportion of owner-occupied units built prior to 1940 - DIS weighted distances to five Boston employment centres - RAD index of accessibility to radial highways - TAX full-value property-tax rate per \$10,000 - PTRATIO pupil-teacher ratio by town - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town - LSTAT % lower status of the population - MEDV Median value of owner-occupied homes in $1000's For more info, please see this link.And, as usual, we will also be splitting the dataset into train and test sets.
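Note that `load_boston` was deprecated in scikit-learn 1.0 and removed in 1.2, so the next cell only works on older versions. A possible substitute, which is my assumption rather than part of the original notebook, is the OpenML copy of the same data; column dtypes may need a cast:

```python
# Hypothetical replacement for datasets.load_boston on newer scikit-learn versions.
from sklearn.datasets import fetch_openml

boston = fetch_openml(name="boston", version=1, as_frame=True)
X = boston.data.astype(float).values    # 13 features, shape (506, 13)
y = boston.target.astype(float).values  # median home value (MEDV)
```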
###Code
X, y = datasets.load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(f"Original Size | Input: {X.shape}, Output: {y.shape}")
print(f"Train Set Size | Input: {X_train.shape}, Output: {y_train.shape}")
print(f"Test Set Size | Input: {X_test.shape}, Output: {y_test.shape}")
###Output
Original Size | Input: (506, 13), Output: (506,)
Train Set Size | Input: (404, 13), Output: (404,)
Test Set Size | Input: (102, 13), Output: (102,)
###Markdown
Model Training
###Code
from sklearn.neighbors import KNeighborsRegressor
reg = KNeighborsRegressor(n_neighbors=3, p=2, n_jobs=-1) # instantiate model
reg.fit(X_train, y_train) # train the model
###Output
_____no_output_____
###Markdown
Prediction
###Code
y_pred = reg.predict(X_test) # predict the corresponding labels given the trained model and input matrix
y_pred[:5]
###Output
_____no_output_____
###Markdown
EvaluationBy default, sklearn uses R-squared when evaluating regression tasks.
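Beyond the default R-squared score, other common regression metrics are easy to compute; this sketch reuses `y_test` and `y_pred` from the cells above:

```python
# Mean absolute error and root mean squared error on the test set.
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

mae = mean_absolute_error(y_test, y_pred)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print(f"MAE: {mae:.2f}, RMSE: {rmse:.2f}")
```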
###Code
score = reg.score(X_test, y_test) # predicts and evaluates in one line
print(f"Test Set Score: {score:.2%}")
###Output
Test Set Score: 70.46%
|
0.15/_downloads/plot_find_ecg_artifacts.ipynb | ###Markdown
Find ECG artifactsLocate QRS component of ECG.
###Code
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###Output
_____no_output_____
###Markdown
Set parameters
###Code
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###Output
_____no_output_____
###Markdown
Plot ECG artifacts
###Code
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
###Output
_____no_output_____ |
nexon/Logistic regression.ipynb | ###Markdown
Building an ML Model Problem You want to create an ML model. For example, there is a dataset on which you want to build a classifier model using logistic regression. SolutionCreate a Model Block for LogisticRegression which will perform the logistic regression operationExample1. Create a generic classifier block to handle the training and inference of a model * `fit` - This function contains the logic needed to train the model * `evaluate` - This function contains the logic needed to evaluate the model using test datasets
###Code
from abc import abstractmethod
import pickle
from sklearn import metrics
class Classifier(ModelBlock):
@abstractmethod
def create_model(self, **kwargs):
"""
Should create and return the model object
"""
@abstractmethod
def attribute_func(self, *args, **kwargs):
"""
Should return the attribute values
"""
@abstractmethod
def operation_func(self, *args, **kwargs):
"""
        Should return the operation results
"""
def load_saved_model(self, path):
self.model = pickle.load(open(self.save_path(path=path), 'rb'))
def save_model(self, path):
pickle.dump(self.model, open(self.save_path(path=path), 'wb'))
def persist_train(self, x_data, y_data, path):
self.load_saved_model(path)
self.fit(x_data, y_data)
def transform(self, x_data, predictions):
predictions.put(self.model.predict(x_data))
def fit(self, x_data, y_data, test_x_data, test_y_data, metric_function, average, operation, predictions, attribute, model_attributes, path):
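        # Optionally warm-start from a previously saved model, run the requested operation,
        # then emit predictions, any requested model attribute, and evaluation metrics.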
if path is not None and len(path) != 0:
try:
self.load_saved_model(path)
except:
pass
pred = self.operation_func(x_data=x_data, y_data=y_data, test_x_data=test_x_data, test_y_data=test_y_data,
operation=operation, metric_function=metric_function, average=average)
predictions.put([pred] if isinstance(pred, (int, float, str)) else pred)
k = self.model
if attribute is not None and len(attribute) != 0:
attr_value = self.attribute_func(attribute)
print(f"{attribute}: ", attr_value)
model_attributes.put([attr_value] if isinstance(attr_value, (int, float, str)) else attr_value)
if all(v is not None for v in [x_data, y_data]) or all(v is not None for v in [test_x_data, test_y_data]) and operation != "evaluate":
if test_x_data is None:
test_x_data, test_y_data = x_data, y_data
self.evaluate(test_x_data, test_y_data, metric_function, average)
def evaluate(self, test_x_data, test_y_data, metric_function, average):
metric_func = {"accuracy_score": metrics.accuracy_score,
"precision_score": metrics.precision_score,
"recall_score": metrics.recall_score,
"f1_score": metrics.f1_score,
"confusion_matrix": metrics.confusion_matrix}
metric_function = [metric_function] if not isinstance(metric_function, list) else metric_function
final_metrics = []
for _metric_function in metric_function:
eval_metric = metric_func.get(_metric_function, None)
if _metric_function not in ["accuracy_score", "confusion_matrix"]:
_metric_value = eval_metric(test_y_data, self.model.predict(test_x_data), average=average)
else:
_metric_value = eval_metric(test_y_data, self.model.predict(test_x_data))
print(f'{_metric_function}: ', _metric_value)
final_metrics.append(_metric_value)
return (final_metrics if len(final_metrics) > 1 else final_metrics[0])
###Output
_____no_output_____
###Markdown
2. Create an instance of the `LogisticRegression` class with the following parameters and train the model by invoking execute(): * `penalty` - Specify the norm used in the penalization (default= 'l2'). * `tol` - Specify tolerance for stopping criteria (default=0.0001). * `C` - Specify regularization parameter (default=1.0). * `class_weight` - Weightage to be given to each class in the data (default=None). * `random_state` - Seed value of the random number generator (default=None). * `solver` - Specify optimization algorithm to use (default='lbfgs'). * `max_iter` - Maximum iterations for the solvers to converge (default=100). * `n_jobs` - Specify number of CPU cores to be used when parallelizing (default=None). * `x_data` - Data on which the model is to be trained. * `y_data` - Target values corresponding to each data point in the train data. * `test_x_data` - Test data on which the model is to be evaluated. * `test_y_data` - Test target values corresponding to each data point in `test_x_data`. * `operation` - Defines what operation to perform. * For training the model - `fit` * For predicting with the model - `predict` * For evaluating the model - `evaluate` * `attribute` - Specify a suitable attribute from the following, to be retrieved from the trained model: `classes_`, `coef_`, `intercept_`, `n_iter_` * `metric_function` - Specify a suitable evaluation metric from the following, whichever best suits the problem you are dealing with: `confusion_matrix`, `accuracy_score`, `precision_score`, `recall_score`, `f1_score`, `roc_curve` * `path` - Specify the path where the trained model is to be saved.
###Code
from sklearn.linear_model import LogisticRegression as Logrec
@inputs.atomic.generic('penalty', doc="{'l1', 'l2', 'elasticnet', 'none'}", default= 'l2')
@inputs.atomic.generic('dual', default=False)
@inputs.atomic.generic('tol', default=0.0001)
@inputs.atomic.generic('C', default=1.0)
@inputs.atomic.generic('fit_intercept', default=True)
@inputs.atomic.generic('intercept_scaling', default=1)
@inputs.atomic.generic('class_weight', default=None)
@inputs.atomic.generic('random_state', default=None)
@inputs.atomic.generic('solver', default='lbfgs')
@inputs.atomic.generic('max_iter', default=100)
@inputs.atomic.generic('multi_class', default='auto')
@inputs.atomic.generic('verbose', default=0)
@inputs.atomic.generic('warm_start', default=False)
@inputs.atomic.generic('n_jobs', default=None)
@inputs.atomic.generic('l1_ratio', default=None)
@inputs.atomic.generic('mode', default="FIT")
@inputs.atomic.generic('operation', default="fit",
doc="fit, decision_function, densify, predict, predict_proba, predict_log_proba, sparsify, evaluate, score",
required=True)
@inputs.atomic.generic('attribute', default=None,
doc="classes_, coef_, intercept_, n_iter_")
@inputs.atomic.generic("x_data")
@inputs.atomic.generic("y_data")
@inputs.atomic.generic("test_x_data", required=False)
@inputs.atomic.generic("test_y_data", required=False)
@inputs.atomic.generic("metric_function", default="accuracy_score")
@inputs.atomic.generic("average", default="binary")
@inputs.atomic.generic("path", default=None)
@outputs.atomic.generic("predictions")
@outputs.atomic.generic("model_attributes")
class LogisticRegression(Classifier):
"""
Logistic Regression from sklearn
"""
def create_model(self, penalty, dual, tol, C, fit_intercept, intercept_scaling, class_weight, random_state, solver,
max_iter, multi_class, verbose, warm_start, n_jobs, l1_ratio):
return Logrec(penalty=penalty, dual=dual, tol=tol, C=C, fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling, class_weight=class_weight,
random_state=random_state, solver=solver,
max_iter=max_iter, multi_class=multi_class, verbose=verbose,
warm_start=warm_start, n_jobs=n_jobs, l1_ratio=l1_ratio)
def operation_func(self, x_data, y_data, test_x_data, test_y_data, operation, metric_function, average):
func = {"fit": self.model.fit, "decision_function": self.model.decision_function,
"predict": self.model.predict, "densify": self.model.densify,
"predict_log_proba": self.model.predict_log_proba, "predict_proba": self.model.predict_proba,
"sparsify": self.model.sparsify, "score": self.model.score, "evaluate": self.evaluate}
method = func.get(operation, None)
if operation in ["fit", "score"]:
pred = method(x_data, y_data)
elif operation in ["decision_function", "predict", "predict_proba", "predict_log_proba"]:
pred = method(x_data)
elif operation in ["evaluate"]:
if test_x_data is None:
test_x_data, test_y_data = x_data, y_data
pred = self.evaluate(test_x_data, test_y_data, metric_function, average)
else:
pred = method()
return pred
def attribute_func(self, attribute):
k = self.model
attr = {"classes_": k.classes_, "coef_": k.coef_,
"intercept_": k.intercept_, "n_iter_": k.n_iter_}
attr_value = attr.get(attribute, None)
return attr_value
###Output
_____no_output_____
###Markdown
Defining a Train pipeline Problem As a user you want to use the logistic regression model in a train pipeline SolutionThe created model block can be used in the pipeline, through which data can be fed and the model can be trained. The following example illustrates a simple pipeline which preprocesses the data and then trains a model Block 1 - Reads a CSV in chunks and gives a series output
###Code
@inputs.atomic.generic("filepath_or_buffer", required=True)
@inputs.atomic.generic("delimiter", default = None)
@inputs.atomic.generic("chunk_size", default=10)
@inputs.atomic.generic("nrows", default=None)
@outputs.series.generic("output")
class CsvReader(Block):
def run(self, filepath_or_buffer, delimiter, chunk_size, nrows, output):
chunks = pd.read_csv(filepath_or_buffer, chunksize=chunk_size, nrows=nrows, delimiter = delimiter)
for df in chunks:
output.put(df)
###Output
_____no_output_____
###Markdown
Block 2 - Filters out the rows in which NaN values are present
###Code
@inputs.atomic.generic("columns")
@inputs.series.generic("df_chunks", required=True)
@outputs.atomic.generic("output")
class DfFilterNan(Block):
def run(self, df_chunks, columns, output):
concat_df = pd.DataFrame()
for df in df_chunks:
df.dropna(axis=0, inplace=True)
concat_df = pd.concat([concat_df, df])
output.put(concat_df)
###Output
_____no_output_____
###Markdown
Block 3 - Converts the categorical columns to numerical values
###Code
@inputs.atomic.generic("columns")
@inputs.atomic.generic("df", required=True)
@outputs.atomic.generic("output")
class DfCategorical(Block):
def run(self, df, columns, output):
for col in columns:
df[col] = df[col].astype('category')
df[col] = df[col].cat.codes
output.put(df)
###Output
_____no_output_____
###Markdown
Block 4 - Returns the Features and Labels from a dataframe
###Code
@inputs.atomic.generic("x_columns")
@inputs.atomic.generic("y_column", default=None)
@inputs.atomic.generic("df", required=True)
@outputs.atomic.generic("out_x")
@outputs.atomic.generic("out_y")
class Get_data(Block):
def run(self, df, x_columns, y_column, out_x, out_y):
if y_column is not None and len(y_column)!=0:
x = df[x_columns].values
y = np.squeeze(df[y_column].values)
out_x.put(x)
out_y.put(y)
else:
x = df[x_columns].values
out_x.put(x)
###Output
_____no_output_____
###Markdown
Defining the Training pipeline
###Code
csv_reader = CsvReader(filepath_or_buffer="/myspace/data/titanic/train.csv")
df_filter = (DfFilterNan()
.columns(["Age", "Cabin"])
.df_chunks(csv_reader.output))
df_cat = (DfCategorical()
.columns(["Sex", "Cabin", "Embarked"])
.df(df_filter.output))
train_data = (Get_data()
.x_columns(['PassengerId', 'Pclass', 'Sex', 'Age', 'SibSp', 'Fare', 'Cabin', 'Embarked'])
.y_column(['Survived'])
.df(df_cat.output))
lr_model_train = (LogisticRegression()
.random_state(111)
.operation("fit")
.x_data(train_data.out_x)
.y_data(train_data.out_y)
.test_x_data(train_data.out_x)
.test_y_data(train_data.out_y)
.attribute("n_iter_")
.path("lr_m1.sav"))
train_pipeline = Pipeline(targets=[lr_model_train])
train_pipeline.show()
###Output
_____no_output_____
###Markdown
Executing the training pipeline on local instance
###Code
train_pipeline.execute()
###Output
_____no_output_____
###Markdown
Executing the training pipeline on Engine
###Code
from razor.platform import engines
deployed_pipeline = engines(name='Engine-1').deploy(pipeline=train_pipeline)
running_pipeline = deployed_pipeline.execute()
running_pipeline.status().__dict__
running_pipeline.logs(pretty=True)
running_pipeline.metrics().plot()
###Output
_____no_output_____
###Markdown
Running inference Problem Load the saved model, predict on the test data, and save the results SolutionCreate an inference pipeline to predict on the test data1. Create an instance of the `LogisticRegression` class with the following parameters and run prediction by invoking execute(): * `operation` - `predict` * `x_data` - Test data on which the model is to make predictions. * `attribute` - Attribute to be retrieved from the trained model (e.g. `classes_`). * `path` - Specify the path where the trained model is saved.
###Code
lr_model_predict = (LogisticRegression()
.operation("predict")
.x_data(train_data.out_x)
.attribute("classes_")
.path("lr_m1.sav"))
@inputs.atomic.generic("output_path", required=True)
@inputs.atomic.generic("numpy_array", required=True)
class NumpyToCsv(Block):
def run(self, numpy_array, output_path):
pd.DataFrame(numpy_array,columns=['Predictions']).to_csv(output_path)
csv_writer = (NumpyToCsv()
.output_path("/myspace/data/lr_pred_1.csv")
.numpy_array(lr_model_predict.predictions))
predict_pipeline = Pipeline(targets=[csv_writer])
predict_pipeline.show()
###Output
_____no_output_____
###Markdown
Run the inference pipeline in local
###Code
predict_pipeline.execute()
###Output
_____no_output_____
###Markdown
Run the inference pipeline in Engine
###Code
from razor.platform import engines
deployed_predict_pipeline = engines(name='Engine-1').deploy(pipeline=predict_pipeline)
running_predict_pipeline = deployed_predict_pipeline.execute()
running_predict_pipeline.status().__dict__
running_predict_pipeline.logs(pretty=True)
running_predict_pipeline.metrics()
###Output
_____no_output_____
###Markdown
Evaluation of Logistic Regression model ProblemThe user wants to evaluate the logistic regression model that was trained and saved, using a given metric. SolutionThe evaluation of the classification model comprises the following steps:1. Create an instance of the `LogisticRegression` class with the following parameters and evaluate the model by running execute(): * `operation` - `evaluate` * `metric_function` - Specify a suitable evaluation metric from the following, whichever best suits the problem you are dealing with: `confusion_matrix`, `accuracy_score`, `precision_score`, `recall_score`, `f1_score`, `roc_curve` * `test_x_data` - Test data on which the model is to be evaluated. * `test_y_data` - Test target values corresponding to each data point in `test_x_data`. * `path` - Specify the path where the trained model is saved. For example:
###Code
lr_model_conf = (LogisticRegression()
.operation("evaluate")
.metric_function("confusion_matrix")
.test_x_data(train_data.out_x)
.test_y_data(train_data.out_y)
.path("/tmp/lr_m1.sav"))
evaluate_pipeline = Pipeline(targets=[lr_model_conf])
evaluate_pipeline.execute()
evaluate_pipeline.show()
###Output
_____no_output_____
###Markdown
2. One can compute multiple metrics by passing a list of metrics to the parameter `metric_function`. For example:
###Code
lr_model_metrc = (LogisticRegression()
.operation("evaluate")
.metric_function(["f1_score","precision_score","accuracy_score","recall_score"])
.test_x_data(train_data.out_x)
.test_y_data(train_data.out_y)
.path("/tmp/lr_m1.sav"))
evaluate_pipeline = Pipeline(targets=[lr_model_metrc])
evaluate_pipeline.execute()
evaluate_pipeline.show()
###Output
_____no_output_____ |
tests/test_execute/test_basic_failing_cache.ipynb | ###Markdown
a titlesome text
###Code
raise Exception('oopsie!')
###Output
_____no_output_____ |
docs/circuit-examples/qubit-couplers/TwoTransmonsWithMeander.ipynb | ###Markdown
Bus Resonator Coupler (transmon-transmon)
###Code
%load_ext autoreload
%autoreload 2
import qiskit_metal as metal
from qiskit_metal import designs, draw
from qiskit_metal import MetalGUI, Dict, Headings
import pyEPR as epr
###Output
_____no_output_____
###Markdown
Create the design in MetalSetup a design of a given dimension. Dimensions will be respected in the design rendering. Note that the design size extends from the origin into the first quadrant.
###Code
design = designs.DesignPlanar({}, True)
design.chips.main.size['size_x'] = '4mm'
design.chips.main.size['size_y'] = '6mm'
gui = MetalGUI(design)
###Output
_____no_output_____
###Markdown
Create two transmons with one meander resonator.
###Code
from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket
from qiskit_metal.qlibrary.interconnects.meandered import RouteMeander
TransmonPocket.get_template_options(design)
RouteMeander.get_template_options(design)
q1 = TransmonPocket(design, 'Q1', options = dict(
pad_width = '425 um',
pocket_height = '650um',
connection_pads=dict(
readout = dict(loc_W=+1,loc_H=+1, pad_width='200um')
)))
q2 = TransmonPocket(design, 'Q2', options = dict(
pos_x = '2.0 mm',
pad_width = '425 um',
pocket_height = '650um',
connection_pads=dict(
readout = dict(loc_W=-1,loc_H=+1, pad_width='200um')
)))
coupler_options = Dict(
pin_inputs=Dict(
start_pin=Dict(component='Q1', pin='readout'),
end_pin=Dict(
component='Q2', pin='readout')),
fillet='99.9um',
total_length = '5mm',
lead = Dict(start_straight = '200um'
))
bus = RouteMeander(design, 'coupler', options= coupler_options)
gui.rebuild()
gui.autoscale()
# Get a list of all the qcomponents in QDesign and then zoom on them.
all_component_names = design.components.keys()
gui.zoom_on_components(all_component_names)
#Save screenshot as a .png formatted file.
gui.screenshot()
# Screenshot the canvas only as a .png formatted file.
gui.figure.savefig('shot.png')
from IPython.display import Image, display
_disp_ops = dict(width=500)
display(Image('shot.png', **_disp_ops))
# Closing the Qiskit Metal GUI
gui.main_window.close()
###Output
_____no_output_____ |
Python Programs for YouTube/2_Data Structure/1_Lists.ipynb | ###Markdown
Lists Data Structure: A data structure is a collection of data elements (such as numbers or characters, or even other data structures) that is structured in some way, for example, by numbering the elements. The most basic data structure in Python is the "sequence". -> A list is one of the sequence data structures-> Lists are collections of items (strings, integers, or even other lists)-> Lists are enclosed in [ ]-> Each item in the list has an assigned index value.-> Each item in a list is separated by a comma-> Lists are mutable, which means they can be changed. List Creation
###Code
emptyList = []
lst = ['one', 'two', 'three', 'four'] # list of strings
lst2 = [1, 2, 3, 4] #list of integers
lst3 = [[1, 2], [3, 4]] # list of lists
lst4 = [1, 'ramu', 24, 1.24] # list of different datatypes
print(lst)
###Output
['one', 'two', 'three', 'four']
###Markdown
List Length
###Code
lst = ['one', 'two', 'three', 'four']
#find length of a list
print(len(lst))
###Output
4
###Markdown
List Append
###Code
lst = ['one', 'two', 'three', 'four']
lst.append('five') # append will add the item at the end
print(lst)
###Output
['one', 'two', 'three', 'four', 'five']
###Markdown
List Insert
###Code
#syntax: lst.insert(x, y)
lst = ['one', 'two', 'four']
lst.insert(0, "zero") # will add element y at location x
print(lst)
lst.insert(2, "three") # will add element y at location x
print(lst)
###Output
['zero', 'one', 'three', 'two', 'four']
###Markdown
List Remove
###Code
#syntax: lst.remove(x)
lst = ['one', 'two', 'three', 'four', 'two']
lst.remove('two') # it will remove the first occurrence of 'two' in the given list
print(lst)
###Output
['one', 'three', 'four', 'two']
###Markdown
List Append & Extend
###Code
lst = ['one', 'two', 'three', 'four']
lst2 = ['five', 'six']
#append
lst.append(lst2)
print(lst)
print(lst[4][0])
lst = ['one', 'two', 'three', 'four']
lst2 = ['five', 'six']
#extend will join the list with list1
lst.extend(lst2)
print(lst)
###Output
['one', 'two', 'three', 'four', 'five', 'six']
###Markdown
List Delete
###Code
#del to remove item based on index position
lst = ['one', 'two', 'three', 'four', 'five']
# del lst[1]
# print(lst)
# #or we can use pop() method
a = lst.pop(1)
print(a)
print(lst)
lst = ['one', 'two', 'three', 'four']
#remove an item from list
lst.remove('three')
print(lst)
###Output
['one', 'two', 'four']
###Markdown
List realted keywords in Python
###Code
#keyword 'in' is used to test if an item is in a list
lst = ['one', 'two', 'three', 'four']
if 'six' in lst:
print('AI')
#keyword 'not' can combined with 'in'
if 'two' not in lst:
print('ML')
###Output
_____no_output_____
###Markdown
List Reverse
###Code
#reverse is reverses the entire list
lst = ['one', 'two', 'three', 'four']
lst.reverse()
print(lst)
###Output
['four', 'three', 'two', 'one']
###Markdown
List Sorting The easiest way to sort a List is with the sorted(list) function. That takes a list and returns a new list with those elements in sorted order. The original list is not changed. The sorted() optional argument reverse=True, e.g. sorted(list, reverse=True), makes it sort backwards.
###Code
#create a list with numbers
numbers = [3, 1, 6, 2, 8]
sorted_lst = sorted(numbers)
print("Sorted list :", sorted_lst)
#original list remains unchanged
print("Original list: ", numbers)
#print a list in reverse sorted order
print("Reverse sorted list :", sorted(numbers, reverse=True))
#original list remains unchanged
print("Original list :", numbers)
lst = [1, 20, 5, 5, 4.2]
#sort the list in place (the sorted result is stored in lst itself)
lst.sort()
# adding a string element such as 'a' to this list would raise a TypeError when sorting
print("Sorted list: ", lst)
lst = ['a', 'z', 'b', 'c', 'a']
print(sorted(lst)) # sorting a list of strings
lst
###Output
_____no_output_____
###Markdown
List Having Multiple References
###Code
lst = [1, 2, 3, 4, 5]
abc = lst
# abc.append(6)
# #print original list
# print("Original list: ", lst)
print(lst)
print(abc)
abc.append(6)
lst.append(7)
###Output
_____no_output_____
###Markdown
String Split to create a list
###Code
#let's take a string
s = "one_two_three_four_five"
# slst = s.split(',')
# print(slst)
print(s)
spl = s.split('_') # split on '_' instead of the default whitespace
print(spl)
s = "This is Python Programming Course"
split_lst = s.split() # by default, split on whitespace (space or tab)
print(split_lst)
###Output
['This', 'is', 'Python', 'Programming', 'Course']
###Markdown
List Indexing Each item in the list has an assigned index value starting from 0.Accessing elements in a list is called indexing.
###Code
lst = [1, 2, 3, 4]
print(lst[1]) #print second element
#print last element using negative index
print(lst[-2])
#print last element using negative index
print(lst[-4])
###Output
1
###Markdown
List Slicing Accessing parts of a sequence is called slicing. The key point to remember is that the :end value represents the first value that is not in the selected slice.
###Code
numbers = [10, 20, 30, 40, 50,60,70,80]
#print all numbers
print(numbers[:])
#print from index 0 up to (but not including) index 5
print(numbers[0:5])
print(numbers)
#print every third element in the list
print(numbers[::3])
#print elements from index 2 to the end, in steps of 2
print(numbers[2::2])
###Output
[30, 50, 70]
###Markdown
List extend using "+"
###Code
lst1 = [1, 2, 3, 4]
lst2 = ['varma', 'naveen', 'murali', 'brahma', 45.6]
new_lst = lst1 + lst2
print(new_lst)
# print(lst1)
# print(lst2)
###Output
[1, 2, 3, 4, 'varma', 'naveen', 'murali', 'brahma', 45.6]
###Markdown
List Count
###Code
numbers = [1, 2, 3, 1, 3, 4, 2, 5]
# #frequency of 1 in a list
# print(numbers.count(1))
#frequency of 5 in the list
print(numbers.count(5))
###Output
1
###Markdown
List Looping
###Code
#loop through a list
lst = ['one', 'two', 'three', 'four']
for ele in lst:
print(ele)
#loop over the indices of a list
lst = ['one', 'two', 'three', 'four']
for ele in range(len(lst)):
print(ele)
###Output
0
1
2
3
###Markdown
List Comprehensions List comprehensions provide a concise way to create lists. Common applications are to make new lists where each element is the result of some operations applied to each member of another sequence or iterable, or to create a subsequence of those elements that satisfy a certain condition.
###Code
# without list comprehension
squares = []
for i in range(10):
squares.append(i**2) #list append
print(squares)
#using list comprehension
squares = [i**2 for i in range(10)]
print(squares)
#example
lst = [-10, -20, 10, 20, 50]
#create a new list with values doubled
new_lst = [i*2 for i in lst]
print(new_lst)
#filter the list to exclude negative numbers
new_lst = [i for i in lst if i >= 0]
print(new_lst)
#create a list of tuples like (number, square_of_number)
new_lst = [(i, i**2) for i in range(10)]
print(new_lst)
###Output
[-20, -40, 20, 40, 100]
[10, 20, 50]
[(0, 0), (1, 1), (2, 4), (3, 9), (4, 16), (5, 25), (6, 36), (7, 49), (8, 64), (9, 81)]
###Markdown
Nested List Comprehensions
###Code
#let's suppose we have a matrix
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]
]
#transpose of a matrix without list comprehension
transposed = []
for i in range(4):
lst = []
for row in matrix:
lst.append(row[i])
transposed.append(lst)
print(transposed)
#let's suppose we have a matrix
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]
]
#with list comprehension
transposed = [[row[i] for row in matrix] for i in range(4)]
print(transposed)
###Output
[[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]]
|
inspect_CHAOS_MMA.ipynb | ###Markdown
Compare 2F, 2C, CHAOS MMA model values for one day - 01/01/2016
###Code
from viresclient import SwarmRequest
import datetime as dt
import matplotlib.pyplot as plt
%matplotlib inline
request = SwarmRequest()
request.set_collection("SW_OPER_MAGA_LR_1B")
request.set_products(measurements=["B_NEC", "F"],
models=["MCO_SHA_2C", "MCO_SHA_2F",
"MMA_SHA_2C-Primary", "MMA_SHA_2C-Secondary",
"MMA_SHA_2F-Primary", "MMA_SHA_2F-Secondary",
"CHAOS-6-Core", "CHAOS-6-MMA-Primary", "CHAOS-6-MMA-Secondary",],
sampling_step="PT1S")
ds_20160101 = request.get_between(start_time=dt.datetime(2016,1,1),
end_time=dt.datetime(2016,1,2)).as_xarray()
ds = ds_20160101.copy()
ds
###Output
[1/1] Processing: 100%|โโโโโโโโโโ| [ Elapsed: 01:07, Remaining: 00:00 ]
Downloading: 100%|โโโโโโโโโโ| [ Elapsed: 00:02, Remaining: 00:00 ] (30.514MB)
###Markdown
From top panel down: X, Y, Z components2F in black, 2C in blue, CHAOS in redSolid lines: primary (external) component, dashed lines: secondary (internal)
###Code
fig, axes = plt.subplots(figsize=(20,10), nrows=4, ncols=1, sharex=True)
for i, ylabel in enumerate(['B_N', 'B_E', 'B_C']):
axes[i].plot(ds["Timestamp"], ds["B_NEC_MMA_SHA_2F-Primary"][:,i], c='k')
axes[i].plot(ds["Timestamp"], ds["B_NEC_MMA_SHA_2F-Secondary"][:,i], c='k', linestyle="--")
axes[i].plot(ds["Timestamp"], ds["B_NEC_MMA_SHA_2C-Primary"][:,i], c='b')
axes[i].plot(ds["Timestamp"], ds["B_NEC_MMA_SHA_2C-Secondary"][:,i], c='b', linestyle="--")
axes[i].plot(ds["Timestamp"], ds["B_NEC_CHAOS-6-MMA-Primary"][:,i], c='r')
axes[i].plot(ds["Timestamp"], ds["B_NEC_CHAOS-6-MMA-Secondary"][:,i], c='r', linestyle="--")
axes[i].set_ylabel(ylabel)
axes[i].grid()
axes[3].plot(ds["Timestamp"], ds["F_MMA_SHA_2F-Primary"], c='k', label="F-Primary")
axes[3].plot(ds["Timestamp"], ds["F_MMA_SHA_2F-Secondary"], c='k', linestyle="--", label="F-Secondary")
axes[3].plot(ds["Timestamp"], ds["F_MMA_SHA_2C-Primary"], c='b', label="C-Primary")
axes[3].plot(ds["Timestamp"], ds["F_MMA_SHA_2C-Secondary"], c='b', linestyle="--", label="C-Secondary")
axes[3].plot(ds["Timestamp"], ds["F_CHAOS-6-MMA-Primary"], c='r', label="CHAOS-Primary")
axes[3].plot(ds["Timestamp"], ds["F_CHAOS-6-MMA-Secondary"], c='r', linestyle="--", label="CHAOS-Secondary")
axes[3].set_ylabel("F")
axes[3].grid()
axes[3].legend();
axes[0].set_title("Magnetospheric model (MMA) values during one day\n"
"Black lines: F (fast-track)\n"
"Blue lines: C (comprehensive inversion)\n"
"Red lines: CHAOS\n"
" Solid lines: Primary, Dashed: Secondary (induced)", size=15, loc="left");
###Output
/home/ash/miniconda3/envs/py37/lib/python3.7/site-packages/pandas/plotting/_matplotlib/converter.py:102: FutureWarning: Using an implicitly registered datetime converter for a matplotlib plotting method. The converter was registered by pandas on import. Future versions of pandas will require you to explicitly register matplotlib converters.
To register the converters:
>>> from pandas.plotting import register_matplotlib_converters
>>> register_matplotlib_converters()
warnings.warn(msg, FutureWarning)
/home/ash/miniconda3/envs/py37/lib/python3.7/site-packages/IPython/core/events.py:88: UserWarning: Creating legend with loc="best" can be slow with large amounts of data.
func(*args, **kwargs)
/home/ash/miniconda3/envs/py37/lib/python3.7/site-packages/IPython/core/pylabtools.py:128: UserWarning: Creating legend with loc="best" can be slow with large amounts of data.
fig.canvas.print_figure(bytes_io, **kw)
###Markdown
Compare 2F, 2C, CHAOS data-(core+mma) residual values
###Code
ds["B_res_2F"] = ds["B_NEC"] - ds["B_NEC_MCO_SHA_2F"] \
- ds["B_NEC_MMA_SHA_2F-Primary"] \
- ds["B_NEC_MMA_SHA_2F-Secondary"]
ds["F_res_2F"] = ds["F"] - ds["F_MCO_SHA_2F"] \
- ds["F_MMA_SHA_2F-Primary"] \
- ds["F_MMA_SHA_2F-Secondary"]
ds["B_res_2C"] = ds["B_NEC"] - ds["B_NEC_MCO_SHA_2C"] \
- ds["B_NEC_MMA_SHA_2C-Primary"] \
- ds["B_NEC_MMA_SHA_2C-Secondary"]
ds["F_res_2C"] = ds["F"] - ds["F_MCO_SHA_2C"] \
- ds["F_MMA_SHA_2C-Primary"] \
- ds["F_MMA_SHA_2C-Secondary"]
ds["B_res_CHAOS"] = ds["B_NEC"] - ds["B_NEC_CHAOS-6-Core"] \
- ds["B_NEC_CHAOS-6-MMA-Primary"] \
- ds["B_NEC_CHAOS-6-MMA-Secondary"]
ds["F_res_CHAOS"] = ds["F"] - ds["F_CHAOS-6-Core"] \
- ds["F_CHAOS-6-MMA-Primary"] \
- ds["F_CHAOS-6-MMA-Secondary"]
fig, axes = plt.subplots(figsize=(20,10), nrows=4, ncols=1, sharex=True)
for i, ylabel in enumerate(['B_N_res', 'B_E_res', 'B_C_res']):
axes[i].plot(ds["Timestamp"], ds["B_res_2F"][:,i], c='k')
axes[i].plot(ds["Timestamp"], ds["B_res_2C"][:,i], c='b')
axes[i].plot(ds["Timestamp"], ds["B_res_CHAOS"][:,i], c='r')
axes[i].set_ylabel(ylabel)
axes[i].grid()
axes[0].legend();
axes[3].plot(ds["Timestamp"], ds["F_res_2F"], c='k')
axes[3].plot(ds["Timestamp"], ds["F_res_2C"], c='b')
axes[3].plot(ds["Timestamp"], ds["F_res_CHAOS"], c='r')
axes[3].set_ylabel("F_res")
axes[3].grid()
axes[0].set_title("data-model residuals for each of MMA-F (black), MMA-C (blue), MMA-CHAOS (red)", size=15);
###Output
_____no_output_____
###Markdown
--- Several years Swarm-A time series of CHAOS_MMA at 1-minute sampling
###Code
request = SwarmRequest()
request.set_collection("SW_OPER_MAGA_LR_1B")
request.set_products(measurements=["B_NEC", "F"],
models=[
'"CHAOS-6-Combined" = "CHAOS-6-Core" + "CHAOS-6-Static"',
'"CHAOS-6-MMA" = "CHAOS-6-MMA-Primary" + "CHAOS-6-MMA-Secondary"'
],
sampling_step="PT1M")
ds = request.get_between(start_time=dt.datetime(2013,1,1),
end_time=dt.datetime(2019,1,1)).as_xarray()
fig, axes = plt.subplots(figsize=(20,10), nrows=4, ncols=1, sharex=True)
for i, ylabel in enumerate(['B_N', 'B_E', 'B_C']):
axes[i].plot(ds["Timestamp"], ds["B_NEC_CHAOS-6-MMA"][:,i], c='k', linewidth=0.2)
axes[i].set_ylabel(ylabel)
axes[i].grid()
axes[0].legend();
axes[3].plot(ds["Timestamp"], ds["F_CHAOS-6-MMA"], c='k', linewidth=0.2)
axes[3].set_ylabel("F")
axes[3].grid()
fig.suptitle("CHAOS-MMA (Primary+Secondary) evaluated on VirES", fontsize=20);
###Output
_____no_output_____
###Markdown
full CHAOS residuals (data - CHAOS-core-static-mma)
###Code
ds["B_NEC_res_CHAOS-Full"] = ds["B_NEC"] - ds["B_NEC_CHAOS-6-Combined"] \
- ds["B_NEC_CHAOS-6-MMA"]
fig, axes = plt.subplots(figsize=(20,10), nrows=3, ncols=1, sharex=True)
for i, ylabel in enumerate(['B_N', 'B_E', 'B_C']):
axes[i].plot(ds["Timestamp"], ds["B_NEC_res_CHAOS-Full"][:,i], c='k')
axes[i].set_ylabel(ylabel)
axes[i].grid()
axes[0].legend();
###Output
_____no_output_____ |
Módulo12Aula08.ipynb
###Code
class A:
    def __init__(self, x):
        self.__x = x
    def fun(self):
        print('Public: Method A: fun, x = ', self.__x)
    def _fun(self):
        print('Protected: Method A: _fun, x = ', self.__x)
    def __fun(self):
        print('Private: Method A: __fun, x = ', self.__x)
    def mPrivate(self):
        self.__fun()
obj = A(1)
# Public method
obj.fun()
# Protected method
obj._fun()
# Private (name-mangled) method
obj.__fun() # cannot be called here... only a method inside the class can call it (this line raises AttributeError)
obj.mPrivate()
###Output
Private: Method A: __fun, x = 1
|
src/.ipynb_checkpoints/EXTRAPOLATION-checkpoint.ipynb | ###Markdown
EXTRAPOLATION SP-AIR Remember to select the `myenv` environment as the Python kernel ("source activate myenv")
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import matplotlib.pyplot as plt
import sys
print(sys.version)
import time
import pickle
import os
import copy
import numpy
import numpy as np
import tensorflow as tf
from scipy.misc import imresize
import config
import model
import rat_spn
from visualize import draw_images
###Output
_____no_output_____
###Markdown
Helper functions
###Code
def load_dataset_from_file(train_file, test_file, out_of_sample_test_file):
loaded_train = numpy.load(train_file)
loaded_test = numpy.load(test_file)
loaded_test_out = numpy.load(out_of_sample_test_file)
return (loaded_train['images'],
loaded_train['counts']), (loaded_test['images'],
loaded_test['counts']), (loaded_test_out['images'],
loaded_test_out['counts'])
class SpnReconstructor:
def __init__(self, spn):
self.spn = spn
self.input_ph = tf.placeholder(tf.float32, (1, spn.num_dims))
self.marginalized = tf.placeholder(tf.float32, (1, spn.num_dims))
self.spn_out = spn.forward(self.input_ph, self.marginalized)
self.max_idx_tensors = {}
for layer in spn.vector_list:
for vector in layer:
if isinstance(vector, rat_spn.SumVector):
self.max_idx_tensors[vector.name] = vector.max_child_idx
def reconstruct(self, image, marginalized, sess, sample=False):
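        # A forward pass records each sum node's argmax child; those choices are then
        # decoded back into pixel values, with marginalized pixels zeroed out.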
original_shape = image.shape
image = np.reshape(image, (1, -1))
marginalized = np.reshape(marginalized, (1, -1))
feed_dict = {self.input_ph: image, self.marginalized: marginalized}
max_idxs = sess.run(self.max_idx_tensors, feed_dict=feed_dict)
recon = self.spn.reconstruct(max_idxs, sess, sample)
recon = recon * (1 - marginalized)
recon = np.clip(recon, 0.0, 1.0)
return np.reshape(recon, original_shape)
class SupairTrainer:
def __init__(self, conf):
# load data and add make sure that conf has the appropriate variables describing the dataset
bboxes = None
if conf.dataset == 'MNIST':
(x, counts), (x_test, c_test), (x_test_out, c_test_out) = load_dataset_from_file(conf.train_file,
conf.test_file,
conf.out_test_file)
conf.scene_width = x.shape[-3]
conf.scene_height = x.shape[-2]
conf.channels = x.shape[-1]
else:
raise ValueError('unknown dataset', conf.dataset)
# shuffle the order
shuffle = numpy.random.permutation(x.shape[0])
self.x, self.counts = x[shuffle,...], counts[shuffle]
shuffle = numpy.random.permutation(x_test.shape[0])
self.x_test, self.c_test = x_test[shuffle,...], c_test[shuffle]
shuffle = numpy.random.permutation(x_test_out.shape[0])
self.x_test_out, self.c_test_out = x_test_out[shuffle,...], c_test_out[shuffle]
self.conf = conf
# determine and create result dir
i = 1
log_path = conf.result_path + 'run0'
while os.path.exists(log_path):
log_path = '{}run{}'.format(conf.result_path, i)
i += 1
os.makedirs(log_path)
self.log_path = log_path
if not os.path.exists(conf.checkpoint_dir):
os.makedirs(conf.checkpoint_dir)
input_shape = [conf.batch_size, conf.scene_width, conf.scene_height, conf.channels]
# build model
with tf.device(conf.device):
self.mdl = model.Supair(conf)
self.in_ph = tf.placeholder(tf.float32, input_shape)
self.elbo = self.mdl.elbo(self.in_ph)
self.mdl.num_parameters()
self.optimizer = tf.train.AdamOptimizer()
self.train_op = self.optimizer.minimize(-1 * self.elbo)
self.sess = tf.Session()
self.saver = tf.train.Saver()
if self.conf.load_params:
resume_ckpt = os.path.join(self.conf.path_to_ckpt)
self.saver.restore(self.sess, resume_ckpt)
else:
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
print('Built model')
self.obj_reconstructor = SpnReconstructor(self.mdl.obj_spn)
self.bg_reconstructor = SpnReconstructor(self.mdl.bg_spn)
tfgraph = tf.get_default_graph()
self.tensors_of_interest = {
'z_where': tfgraph.get_tensor_by_name('z_where:0'),
'z_pres': tfgraph.get_tensor_by_name('z_pres:0'),
'bg_score': tfgraph.get_tensor_by_name('bg_score:0'),
'y': tfgraph.get_tensor_by_name('y:0'),
'obj_vis': tfgraph.get_tensor_by_name('obj_vis:0'),
'bg_maps': tfgraph.get_tensor_by_name('bg_maps:0')
}
def log_and_print_progress(self, n_iter, acc, elbo, avg_obj, log_file, title):
print('{}, N_iter {}, Accuracy: {}, avg_obj: {}, elbo {}'.format(title, n_iter, acc, avg_obj, elbo))
log_file.write('{}, {}, {}, {}, {}\n'.format(title, n_iter, acc, avg_obj, elbo))
log_file.flush()
def reconstruct_scenes(self, images, cur_values, draw_boxes=True):
num_detected = np.sum(np.rint(cur_values['z_pres']), axis=1).astype(np.int32)
results = []
for i in range(images.shape[0]):
n = int(num_detected[i])
y = cur_values['y'][i]
z_where = cur_values['z_where'][i]
obj_vis = cur_values['obj_vis'][i]
objects = [self.obj_reconstructor.reconstruct(y[k], 1 - obj_vis[k], self.sess)
for k in range(n)]
bg_map = cur_values['bg_maps'][i, n]
bg = self.bg_reconstructor.reconstruct(images[i], 1 - bg_map, self.sess, sample=True)
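            # Paste the reconstructed object patches back onto the background, last detection
            # first, skipping boxes with degenerate sizes or positions.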
for j in range(n - 1, -1, -1):
col = int(z_where[j, 2])
row = int(z_where[j, 5])
w = int(z_where[j, 0] * self.conf.patch_width)
h = int(z_where[j, 4] * self.conf.patch_height)
# check for pathological object dimensions; treat as not present
if h <= 0 or w <= 0 or row < 0 or col < 0 or row + h > 50 or col + w > 50:
continue
obj = imresize(np.squeeze(objects[j]), (h, w)).astype(np.float32) / 255.0
bg[row:row + h, col:col + w, 0] = obj
results.append(bg)
results = np.stack(results, 0)
results = np.clip(results, 0.0, 1.0)
# Now Add the bounding boxes
if draw_boxes:
boxes = draw_images(results[...,0], cur_values['z_where'], cur_values['z_pres'], window_size=self.conf.patch_width, text=None)
boxes = numpy.moveaxis(boxes, -3, -1) / 255.0
return boxes + (boxes==0)*results # this should work b/c broadcasting
else:
return results
def run_training(self):
batch_size = self.conf.batch_size
batches_per_epoch = self.x.shape[0] // batch_size
sess = self.sess
perf_log = open(self.conf.log_file, 'a')
for n_iter in range(20000):
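            # n_iter counts mini-batches; i wraps around, so training cycles through the data repeatedly.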
i = n_iter % batches_per_epoch
batch = self.x[i * batch_size: (i + 1) * batch_size]
# print("DEBUG ->",n_iter, i, batch.shape)
_, cur_elbo, cur_values = sess.run([self.train_op, self.elbo, self.tensors_of_interest], feed_dict={self.in_ph: batch})
if (n_iter % 1000 ==0) and (n_iter > 0):
# save the model
ckpt_file = os.path.join(self.conf.checkpoint_dir, "model_"+str(n_iter)+".ckpt")
self.saver.save(sess, ckpt_file)
if (n_iter % 100 == 0):
# train accuracy
num_detected = np.sum(np.rint(cur_values['z_pres']), axis=1).astype(np.int32)
batch_counts = self.counts[i * batch_size: (i + 1) * batch_size]
train_acc = np.mean(num_detected == batch_counts)
avg_obj = np.average(num_detected)
self.log_and_print_progress(n_iter, train_acc, cur_elbo, avg_obj, perf_log, title="train")
if (n_iter % 100 == 0):
print("computing test acc")
test_elbo = 0
test_acc, test_avg_obj = self.compute_test_acc(kind="in")
self.log_and_print_progress(n_iter, test_acc, test_elbo, test_avg_obj, perf_log, title="test_in")
test_acc, test_avg_obj = self.compute_test_acc(kind="out")
self.log_and_print_progress(n_iter, test_acc, test_elbo, test_avg_obj, perf_log, title="test_out")
perf_log.close()
def compute_test_acc(self, kind):
batch_size = self.conf.batch_size
if kind == "in":
num_batches = self.x_test.shape[0] // batch_size
elif kind == "out":
num_batches = self.x_test_out.shape[0] // batch_size
else:
raise ValueError('unknown kind', kind)
z_pres = self.tensors_of_interest['z_pres']
correct, num_detected_tot = 0, 0
for i in range(num_batches):
if kind == "in":
x_batch = self.x_test[i * batch_size: (i + 1) * batch_size]
c_batch = self.c_test[i * batch_size: (i + 1) * batch_size]
elif kind == "out":
x_batch = self.x_test_out[i * batch_size: (i + 1) * batch_size]
c_batch = self.c_test_out[i * batch_size: (i + 1) * batch_size]
else:
raise ValueError('unknown kind', kind)
cur_pres = self.sess.run(z_pres, feed_dict={self.in_ph: x_batch})
num_detected = np.sum(np.rint(cur_pres), axis=1)
correct += np.sum(num_detected == c_batch)
num_detected_tot += np.sum(num_detected)
test_acc = correct / (num_batches * batch_size)
avg_obj = num_detected_tot / (num_batches * batch_size)
return test_acc, avg_obj
def grid_images(images, ncol=8, figsize=(12, 8)):
nrow = int(numpy.ceil(float(images.shape[0]) / ncol))
RGB = (3 == images.shape[-1])
if nrow > 1:
fig, ax = plt.subplots(ncols=ncol, nrows=nrow, figsize=figsize)
for i in range(images.shape[0]):
c,r = i % ncol, i // ncol
if RGB:
ax[r,c].imshow(images[i,...], vmin=0, vmax=1)
else:
ax[r,c].imshow(images[i,...,0], cmap='gray', vmin=0, vmax=1)
ax[r,c].set_axis_off()
else:
fig, ax = plt.subplots(ncols=images.shape[0], nrows=1, figsize=figsize)
for i in range(images.shape[0]):
if RGB:
ax[i].imshow(images[i,...], vmin=0, vmax=1)
else:
ax[i].imshow(images[i,...,0], cmap='gray', vmin=0, vmax=1)
ax[i].set_axis_off()
fig.tight_layout()
plt.close(fig)
return fig
def sub_select_inference(mask, cur_values):
return {'z_where' : cur_values['z_where'][mask,...],
'z_pres' : cur_values['z_pres'][mask,...],
'bg_score' : cur_values['bg_score'][mask,...],
'y' : cur_values['y'][mask,...],
'obj_vis' : cur_values['obj_vis'][mask,...],
'bg_maps' : cur_values['bg_maps'][mask,...],
}
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
conf = config.SupairConfig()
conf.visual = False
# data config
conf.dataset = 'MNIST' # select dataset from 'MNIST', 'sprites', 'omniglot'
conf.patch_width = conf.patch_height = 28
conf.noise = False # add Gaussian noise
conf.structured_noise = True # add background grid
conf.background_model = True # use learned background model
conf.num_steps = 6 # maximum number of digits
conf.dataset = 'MNIST' # select MNIST
# learning config
conf.load_params = False
conf.save_params = True
conf.batch_size = 64 # default is 256
# ORIGINAL
# conf.train_file = '../processed_dataset/their_mnist_train.npz'
# conf.test_file = '../processed_dataset/their_mnist_test.npz'
# conf.out_test_file = '../processed_dataset/their_mnist_test.npz'
# conf.checkpoint_dir = 'checkpoints_original'
# conf.log_file = 'reproduce.csv'
# conf.dir_result = 'REPRODUCE'
# conf.min_obj_scale = 0.3 # bounds for width of bounding box relative to native width=28
# conf.max_obj_scale = 0.9 # bounds for width of bounding box relative to native width=28
## EXTRAPOLATION NO GRID
# conf.train_file = '../processed_dataset/mnist_train_80x80_n0_3_no_grid.npz'
# conf.test_file = '../processed_dataset/mnist_test_80x80_n0_3_no_grid.npz'
# conf.out_test_file = '../processed_dataset/mnist_test_80x80_n4_6_no_grid.npz'
# conf.checkpoint_dir = 'checkpoints_no_grid'
# conf.log_file = 'no_grid.csv'
# conf.dir_result = 'EXTRAPOLATION_NO_GRID'
# conf.min_obj_scale = 0.3 # bounds for width of bounding box relative to native width=28
# conf.max_obj_scale = 1.5 # bounds for width of bounding box relative to native width=28
## EXTRAPOLATION NO GRID V2
# conf.train_file = '../processed_dataset/mnist_train_80x80_n0_3_no_grid.npz'
# conf.test_file = '../processed_dataset/mnist_test_80x80_n0_3_no_grid.npz'
# conf.out_test_file = '../processed_dataset/mnist_test_80x80_n4_6_no_grid.npz'
# conf.checkpoint_dir = 'checkpoints_no_grid_v2'
# conf.log_file = 'no_grid_v2.csv'
# conf.dir_result = 'EXTRAPOLATION_NO_GRID_V2'
# conf.min_obj_scale = 0.5 # bounds for width of bounding box relative to native width=28
# conf.max_obj_scale = 1.5 # bounds for width of bounding box relative to native width=28
# EXTRAPOLATION WITH GRID
conf.train_file = '../processed_dataset/mnist_train_80x80_n0_3_with_grid.npz'
conf.test_file = '../processed_dataset/mnist_test_80x80_n0_3_with_grid.npz'
conf.out_test_file = '../processed_dataset/mnist_test_80x80_n4_6_with_grid.npz'
conf.checkpoint_dir = 'checkpoints_with_grid'
conf.log_file = 'with_grid.csv'
conf.dir_result = 'EXTRAPOLATION_WITH_GRID'
conf.min_obj_scale = 0.5 # bounds for width of bounding box relative to native width=28
conf.max_obj_scale = 1.5 # bounds for width of bounding box relative to native width=28
###Output
_____no_output_____
###Markdown
Convert their dataset to our format
###Code
#(x, counts), (x_test, c_test) = datasets.load_mnist(conf.scene_width, max_digits=2, path=conf.data_path)
#
#numpy.savez_compressed("their_mnist_train", images=x, counts=counts)
#numpy.savez_compressed("their_mnist_test", images=x_test, counts=c_test)
###Output
_____no_output_____
###Markdown
Check the dataset
###Code
(x_train, c_train), (x_test, c_test), (x_test_out, c_test_out)= load_dataset_from_file(conf.train_file,
conf.test_file,
conf.out_test_file)
print(x_train.shape, x_test.shape, x_test_out.shape)
print(c_train[0:16])
grid_images(x_train[0:16], figsize=(12,3))
###Output
[3 3 3 3 2 3 3 3 3 3 3 2 3 3 3 3]
###Markdown
Do the training
###Code
tf.reset_default_graph()
trainer = SupairTrainer(conf)
trainer.run_training()
#tf.reset_default_graph()
###Output
Number of trainable parameters:
rnn 6830077
obj-spn 286560
bg-spn 230508
TOTAL 7347145
Built model
train, N_iter 0, Accuracy: 0.0, avg_obj: 6.0, elbo -3509.302001953125
computing test acc
test_in, N_iter 0, Accuracy: 0.2109375, avg_obj: 1.338961693548387, elbo 0
test_out, N_iter 0, Accuracy: 0.0033967391304347825, avg_obj: 0.8872282608695652, elbo 0
train, N_iter 100, Accuracy: 0.234375, avg_obj: 2.796875, elbo -1912.3363037109375
computing test acc
test_in, N_iter 100, Accuracy: 0.23941532258064516, avg_obj: 2.7265625, elbo 0
test_out, N_iter 100, Accuracy: 0.19735054347826086, avg_obj: 4.0665760869565215, elbo 0
train, N_iter 200, Accuracy: 0.375, avg_obj: 2.15625, elbo -798.5372314453125
computing test acc
test_in, N_iter 200, Accuracy: 0.2913306451612903, avg_obj: 2.241431451612903, elbo 0
test_out, N_iter 200, Accuracy: 0.18817934782608695, avg_obj: 3.993546195652174, elbo 0
train, N_iter 300, Accuracy: 0.4375, avg_obj: 1.625, elbo -65.51655578613281
computing test acc
test_in, N_iter 300, Accuracy: 0.4357358870967742, avg_obj: 1.6912802419354838, elbo 0
test_out, N_iter 300, Accuracy: 0.10020380434782608, avg_obj: 3.28125, elbo 0
train, N_iter 400, Accuracy: 0.65625, avg_obj: 1.421875, elbo 317.781982421875
computing test acc
test_in, N_iter 400, Accuracy: 0.5688004032258065, avg_obj: 1.40625, elbo 0
test_out, N_iter 400, Accuracy: 0.05366847826086957, avg_obj: 2.919497282608696, elbo 0
train, N_iter 500, Accuracy: 0.546875, avg_obj: 1.8125, elbo 408.61895751953125
computing test acc
test_in, N_iter 500, Accuracy: 0.5735887096774194, avg_obj: 1.444304435483871, elbo 0
test_out, N_iter 500, Accuracy: 0.08016304347826086, avg_obj: 3.1752717391304346, elbo 0
train, N_iter 600, Accuracy: 0.5625, avg_obj: 1.484375, elbo 890.8296508789062
computing test acc
test_in, N_iter 600, Accuracy: 0.625, avg_obj: 1.4770665322580645, elbo 0
test_out, N_iter 600, Accuracy: 0.08865489130434782, avg_obj: 3.266304347826087, elbo 0
train, N_iter 700, Accuracy: 0.59375, avg_obj: 1.4375, elbo 1101.2481689453125
computing test acc
test_in, N_iter 700, Accuracy: 0.6108870967741935, avg_obj: 1.3971774193548387, elbo 0
test_out, N_iter 700, Accuracy: 0.08084239130434782, avg_obj: 3.227921195652174, elbo 0
train, N_iter 800, Accuracy: 0.734375, avg_obj: 1.203125, elbo 1439.7093505859375
computing test acc
test_in, N_iter 800, Accuracy: 0.6360887096774194, avg_obj: 1.3868447580645162, elbo 0
test_out, N_iter 800, Accuracy: 0.07201086956521739, avg_obj: 3.1311141304347827, elbo 0
train, N_iter 900, Accuracy: 0.65625, avg_obj: 1.125, elbo 1690.60107421875
computing test acc
test_in, N_iter 900, Accuracy: 0.6383568548387096, avg_obj: 1.3923891129032258, elbo 0
test_out, N_iter 900, Accuracy: 0.06895380434782608, avg_obj: 3.1222826086956523, elbo 0
train, N_iter 1000, Accuracy: 0.625, avg_obj: 1.484375, elbo 1658.3543701171875
computing test acc
test_in, N_iter 1000, Accuracy: 0.6285282258064516, avg_obj: 1.3855846774193548, elbo 0
test_out, N_iter 1000, Accuracy: 0.08355978260869565, avg_obj: 3.2516983695652173, elbo 0
train, N_iter 1100, Accuracy: 0.671875, avg_obj: 1.203125, elbo 1970.20068359375
computing test acc
test_in, N_iter 1100, Accuracy: 0.6307963709677419, avg_obj: 1.413054435483871, elbo 0
test_out, N_iter 1100, Accuracy: 0.08491847826086957, avg_obj: 3.2561141304347827, elbo 0
train, N_iter 1200, Accuracy: 0.671875, avg_obj: 1.328125, elbo 2162.904296875
computing test acc
test_in, N_iter 1200, Accuracy: 0.6302923387096774, avg_obj: 1.438508064516129, elbo 0
test_out, N_iter 1200, Accuracy: 0.08457880434782608, avg_obj: 3.251358695652174, elbo 0
train, N_iter 1300, Accuracy: 0.625, avg_obj: 1.609375, elbo 2150.49462890625
computing test acc
test_in, N_iter 1300, Accuracy: 0.6423891129032258, avg_obj: 1.4402721774193548, elbo 0
test_out, N_iter 1300, Accuracy: 0.0703125, avg_obj: 3.1895380434782608, elbo 0
train, N_iter 1400, Accuracy: 0.671875, avg_obj: 1.546875, elbo 2467.62255859375
computing test acc
test_in, N_iter 1400, Accuracy: 0.6479334677419355, avg_obj: 1.5017641129032258, elbo 0
test_out, N_iter 1400, Accuracy: 0.09103260869565218, avg_obj: 3.3063858695652173, elbo 0
train, N_iter 1500, Accuracy: 0.71875, avg_obj: 1.3125, elbo 2616.33349609375
computing test acc
test_in, N_iter 1500, Accuracy: 0.6464213709677419, avg_obj: 1.4329637096774193, elbo 0
test_out, N_iter 1500, Accuracy: 0.08389945652173914, avg_obj: 3.2496603260869565, elbo 0
train, N_iter 1600, Accuracy: 0.546875, avg_obj: 1.375, elbo 2622.511962890625
computing test acc
test_in, N_iter 1600, Accuracy: 0.6368447580645161, avg_obj: 1.4889112903225807, elbo 0
test_out, N_iter 1600, Accuracy: 0.09544836956521739, avg_obj: 3.3733016304347827, elbo 0
train, N_iter 1700, Accuracy: 0.59375, avg_obj: 1.46875, elbo 2767.562255859375
computing test acc
test_in, N_iter 1700, Accuracy: 0.6307963709677419, avg_obj: 1.4430443548387097, elbo 0
test_out, N_iter 1700, Accuracy: 0.08491847826086957, avg_obj: 3.257133152173913, elbo 0
train, N_iter 1800, Accuracy: 0.65625, avg_obj: 1.484375, elbo 2874.06494140625
computing test acc
test_in, N_iter 1800, Accuracy: 0.639616935483871, avg_obj: 1.4906754032258065, elbo 0
test_out, N_iter 1800, Accuracy: 0.08118206521739131, avg_obj: 3.243546195652174, elbo 0
train, N_iter 1900, Accuracy: 0.65625, avg_obj: 1.421875, elbo 3048.802978515625
computing test acc
test_in, N_iter 1900, Accuracy: 0.6323084677419355, avg_obj: 1.618195564516129, elbo 0
test_out, N_iter 1900, Accuracy: 0.12194293478260869, avg_obj: 3.535326086956522, elbo 0
train, N_iter 2000, Accuracy: 0.5, avg_obj: 1.65625, elbo 3098.987060546875
computing test acc
test_in, N_iter 2000, Accuracy: 0.6433971774193549, avg_obj: 1.5662802419354838, elbo 0
test_out, N_iter 2000, Accuracy: 0.11243206521739131, avg_obj: 3.495923913043478, elbo 0
train, N_iter 2100, Accuracy: 0.640625, avg_obj: 1.359375, elbo 3305.924560546875
computing test acc
test_in, N_iter 2100, Accuracy: 0.6433971774193549, avg_obj: 1.5304939516129032, elbo 0
test_out, N_iter 2100, Accuracy: 0.09850543478260869, avg_obj: 3.421195652173913, elbo 0
train, N_iter 2200, Accuracy: 0.609375, avg_obj: 1.421875, elbo 3376.988037109375
computing test acc
test_in, N_iter 2200, Accuracy: 0.6328125, avg_obj: 1.4125504032258065, elbo 0
test_out, N_iter 2200, Accuracy: 0.07608695652173914, avg_obj: 3.193953804347826, elbo 0
train, N_iter 2300, Accuracy: 0.625, avg_obj: 1.390625, elbo 3605.28955078125
computing test acc
test_in, N_iter 2300, Accuracy: 0.6368447580645161, avg_obj: 1.5378024193548387, elbo 0
test_out, N_iter 2300, Accuracy: 0.10971467391304347, avg_obj: 3.4643342391304346, elbo 0
train, N_iter 2400, Accuracy: 0.5625, avg_obj: 1.5, elbo 3787.35693359375
computing test acc
test_in, N_iter 2400, Accuracy: 0.6426411290322581, avg_obj: 1.6076108870967742, elbo 0
test_out, N_iter 2400, Accuracy: 0.10461956521739131, avg_obj: 3.434442934782609, elbo 0
train, N_iter 2500, Accuracy: 0.6875, avg_obj: 1.359375, elbo 3679.219970703125
computing test acc
test_in, N_iter 2500, Accuracy: 0.6391129032258065, avg_obj: 1.6134072580645162, elbo 0
test_out, N_iter 2500, Accuracy: 0.12126358695652174, avg_obj: 3.539741847826087, elbo 0
train, N_iter 2600, Accuracy: 0.671875, avg_obj: 1.515625, elbo 3760.82421875
computing test acc
test_in, N_iter 2600, Accuracy: 0.6456653225806451, avg_obj: 1.4624495967741935, elbo 0
test_out, N_iter 2600, Accuracy: 0.07880434782608696, avg_obj: 3.2401494565217392, elbo 0
train, N_iter 2700, Accuracy: 0.65625, avg_obj: 1.53125, elbo 3701.72802734375
computing test acc
test_in, N_iter 2700, Accuracy: 0.6333165322580645, avg_obj: 1.442288306451613, elbo 0
test_out, N_iter 2700, Accuracy: 0.08016304347826086, avg_obj: 3.2578125, elbo 0
train, N_iter 2800, Accuracy: 0.71875, avg_obj: 1.421875, elbo 4045.912353515625
computing test acc
test_in, N_iter 2800, Accuracy: 0.6348286290322581, avg_obj: 1.590725806451613, elbo 0
test_out, N_iter 2800, Accuracy: 0.11786684782608696, avg_obj: 3.472826086956522, elbo 0
train, N_iter 2900, Accuracy: 0.59375, avg_obj: 1.5, elbo 4161.00341796875
computing test acc
test_in, N_iter 2900, Accuracy: 0.6365927419354839, avg_obj: 1.5831653225806452, elbo 0
test_out, N_iter 2900, Accuracy: 0.1015625, avg_obj: 3.436141304347826, elbo 0
train, N_iter 3000, Accuracy: 0.734375, avg_obj: 1.515625, elbo 4144.3359375
computing test acc
test_in, N_iter 3000, Accuracy: 0.6386088709677419, avg_obj: 1.5864415322580645, elbo 0
test_out, N_iter 3000, Accuracy: 0.10767663043478261, avg_obj: 3.468070652173913, elbo 0
train, N_iter 3100, Accuracy: 0.6875, avg_obj: 1.390625, elbo 4585.88037109375
computing test acc
test_in, N_iter 3100, Accuracy: 0.640625, avg_obj: 1.5335181451612903, elbo 0
test_out, N_iter 3100, Accuracy: 0.08220108695652174, avg_obj: 3.3067255434782608, elbo 0
train, N_iter 3200, Accuracy: 0.640625, avg_obj: 1.484375, elbo 4340.27734375
computing test acc
test_in, N_iter 3200, Accuracy: 0.6373487903225806, avg_obj: 1.5897177419354838, elbo 0
test_out, N_iter 3200, Accuracy: 0.11514945652173914, avg_obj: 3.4864130434782608, elbo 0
train, N_iter 3300, Accuracy: 0.671875, avg_obj: 1.609375, elbo 4379.78955078125
computing test acc
test_in, N_iter 3300, Accuracy: 0.6300403225806451, avg_obj: 1.6373487903225807, elbo 0
test_out, N_iter 3300, Accuracy: 0.13688858695652173, avg_obj: 3.6025815217391304, elbo 0
train, N_iter 3400, Accuracy: 0.703125, avg_obj: 1.40625, elbo 4493.3271484375
computing test acc
test_in, N_iter 3400, Accuracy: 0.6302923387096774, avg_obj: 1.6451612903225807, elbo 0
test_out, N_iter 3400, Accuracy: 0.13485054347826086, avg_obj: 3.621263586956522, elbo 0
train, N_iter 3500, Accuracy: 0.671875, avg_obj: 1.46875, elbo 4807.9541015625
computing test acc
test_in, N_iter 3500, Accuracy: 0.6388608870967742, avg_obj: 1.6020665322580645, elbo 0
test_out, N_iter 3500, Accuracy: 0.10224184782608696, avg_obj: 3.4629755434782608, elbo 0
train, N_iter 3600, Accuracy: 0.640625, avg_obj: 1.859375, elbo 4346.23681640625
computing test acc
test_in, N_iter 3600, Accuracy: 0.6355846774193549, avg_obj: 1.5148689516129032, elbo 0
test_out, N_iter 3600, Accuracy: 0.09001358695652174, avg_obj: 3.3175951086956523, elbo 0
train, N_iter 3700, Accuracy: 0.640625, avg_obj: 1.5625, elbo 4666.9833984375
computing test acc
test_in, N_iter 3700, Accuracy: 0.6444052419354839, avg_obj: 1.5703125, elbo 0
test_out, N_iter 3700, Accuracy: 0.10529891304347826, avg_obj: 3.406929347826087, elbo 0
train, N_iter 3800, Accuracy: 0.609375, avg_obj: 1.625, elbo 4626.7314453125
computing test acc
test_in, N_iter 3800, Accuracy: 0.6368447580645161, avg_obj: 1.6257560483870968, elbo 0
test_out, N_iter 3800, Accuracy: 0.14470108695652173, avg_obj: 3.6762907608695654, elbo 0
train, N_iter 3900, Accuracy: 0.6875, avg_obj: 1.390625, elbo 5124.1982421875
computing test acc
test_in, N_iter 3900, Accuracy: 0.6348286290322581, avg_obj: 1.6370967741935485, elbo 0
test_out, N_iter 3900, Accuracy: 0.1453804347826087, avg_obj: 3.6266983695652173, elbo 0
train, N_iter 4000, Accuracy: 0.6875, avg_obj: 1.375, elbo 5173.12548828125
computing test acc
test_in, N_iter 4000, Accuracy: 0.6373487903225806, avg_obj: 1.6484375, elbo 0
test_out, N_iter 4000, Accuracy: 0.13077445652173914, avg_obj: 3.571671195652174, elbo 0
train, N_iter 4100, Accuracy: 0.6875, avg_obj: 1.625, elbo 4746.5537109375
computing test acc
test_in, N_iter 4100, Accuracy: 0.6370967741935484, avg_obj: 1.6018145161290323, elbo 0
test_out, N_iter 4100, Accuracy: 0.10869565217391304, avg_obj: 3.4514266304347827, elbo 0
train, N_iter 4200, Accuracy: 0.734375, avg_obj: 1.40625, elbo 5159.9140625
computing test acc
test_in, N_iter 4200, Accuracy: 0.5874495967741935, avg_obj: 1.7482358870967742, elbo 0
test_out, N_iter 4200, Accuracy: 0.15115489130434784, avg_obj: 3.688858695652174, elbo 0
train, N_iter 4300, Accuracy: 0.65625, avg_obj: 1.4375, elbo 5299.1923828125
computing test acc
test_in, N_iter 4300, Accuracy: 0.6272681451612904, avg_obj: 1.6615423387096775, elbo 0
test_out, N_iter 4300, Accuracy: 0.14605978260869565, avg_obj: 3.624320652173913, elbo 0
train, N_iter 4400, Accuracy: 0.640625, avg_obj: 1.796875, elbo 5085.4306640625
computing test acc
test_in, N_iter 4400, Accuracy: 0.6413810483870968, avg_obj: 1.612399193548387, elbo 0
test_out, N_iter 4400, Accuracy: 0.11684782608695653, avg_obj: 3.485733695652174, elbo 0
train, N_iter 4500, Accuracy: 0.671875, avg_obj: 1.671875, elbo 5513.91552734375
computing test acc
test_in, N_iter 4500, Accuracy: 0.6350806451612904, avg_obj: 1.6665826612903225, elbo 0
test_out, N_iter 4500, Accuracy: 0.13688858695652173, avg_obj: 3.6042798913043477, elbo 0
train, N_iter 4600, Accuracy: 0.703125, avg_obj: 1.421875, elbo 5534.6650390625
computing test acc
test_in, N_iter 4600, Accuracy: 0.6383568548387096, avg_obj: 1.6393649193548387, elbo 0
test_out, N_iter 4600, Accuracy: 0.1402853260869565, avg_obj: 3.5754076086956523, elbo 0
train, N_iter 4700, Accuracy: 0.578125, avg_obj: 1.453125, elbo 5265.904296875
computing test acc
test_in, N_iter 4700, Accuracy: 0.6423891129032258, avg_obj: 1.5932459677419355, elbo 0
test_out, N_iter 4700, Accuracy: 0.10733695652173914, avg_obj: 3.4599184782608696, elbo 0
train, N_iter 4800, Accuracy: 0.65625, avg_obj: 1.453125, elbo 5502.2412109375
computing test acc
test_in, N_iter 4800, Accuracy: 0.639616935483871, avg_obj: 1.5559475806451613, elbo 0
test_out, N_iter 4800, Accuracy: 0.09137228260869565, avg_obj: 3.338994565217391, elbo 0
train, N_iter 4900, Accuracy: 0.671875, avg_obj: 1.5625, elbo 5497.7666015625
computing test acc
test_in, N_iter 4900, Accuracy: 0.6461693548387096, avg_obj: 1.5955141129032258, elbo 0
test_out, N_iter 4900, Accuracy: 0.11141304347826086, avg_obj: 3.492866847826087, elbo 0
train, N_iter 5000, Accuracy: 0.671875, avg_obj: 1.4375, elbo 5481.40234375
computing test acc
test_in, N_iter 5000, Accuracy: 0.6355846774193549, avg_obj: 1.6423891129032258, elbo 0
test_out, N_iter 5000, Accuracy: 0.13417119565217392, avg_obj: 3.6019021739130435, elbo 0
train, N_iter 5100, Accuracy: 0.5625, avg_obj: 1.6875, elbo 5533.8046875
computing test acc
test_in, N_iter 5100, Accuracy: 0.6370967741935484, avg_obj: 1.611391129032258, elbo 0
test_out, N_iter 5100, Accuracy: 0.11073369565217392, avg_obj: 3.472826086956522, elbo 0
train, N_iter 5200, Accuracy: 0.671875, avg_obj: 1.453125, elbo 5647.3701171875
computing test acc
test_in, N_iter 5200, Accuracy: 0.647429435483871, avg_obj: 1.578125, elbo 0
test_out, N_iter 5200, Accuracy: 0.09646739130434782, avg_obj: 3.3923233695652173, elbo 0
train, N_iter 5300, Accuracy: 0.640625, avg_obj: 1.5625, elbo 5551.23681640625
computing test acc
test_in, N_iter 5300, Accuracy: 0.6345766129032258, avg_obj: 1.6315524193548387, elbo 0
test_out, N_iter 5300, Accuracy: 0.1409646739130435, avg_obj: 3.608695652173913, elbo 0
train, N_iter 5400, Accuracy: 0.640625, avg_obj: 1.546875, elbo 5827.232421875
computing test acc
test_in, N_iter 5400, Accuracy: 0.6444052419354839, avg_obj: 1.5955141129032258, elbo 0
test_out, N_iter 5400, Accuracy: 0.11548913043478261, avg_obj: 3.5095108695652173, elbo 0
train, N_iter 5500, Accuracy: 0.65625, avg_obj: 1.484375, elbo 6144.92041015625
computing test acc
test_in, N_iter 5500, Accuracy: 0.6388608870967742, avg_obj: 1.6189516129032258, elbo 0
test_out, N_iter 5500, Accuracy: 0.11854619565217392, avg_obj: 3.5448369565217392, elbo 0
train, N_iter 5600, Accuracy: 0.671875, avg_obj: 1.40625, elbo 5497.4375
computing test acc
test_in, N_iter 5600, Accuracy: 0.6297883064516129, avg_obj: 1.6370967741935485, elbo 0
test_out, N_iter 5600, Accuracy: 0.11345108695652174, avg_obj: 3.4853940217391304, elbo 0
train, N_iter 5700, Accuracy: 0.65625, avg_obj: 1.609375, elbo 5785.82177734375
computing test acc
test_in, N_iter 5700, Accuracy: 0.6330645161290323, avg_obj: 1.6554939516129032, elbo 0
test_out, N_iter 5700, Accuracy: 0.140625, avg_obj: 3.6110733695652173, elbo 0
train, N_iter 5800, Accuracy: 0.59375, avg_obj: 1.8125, elbo 5549.8115234375
computing test acc
test_in, N_iter 5800, Accuracy: 0.6355846774193549, avg_obj: 1.6496975806451613, elbo 0
test_out, N_iter 5800, Accuracy: 0.15013586956521738, avg_obj: 3.629076086956522, elbo 0
train, N_iter 5900, Accuracy: 0.6875, avg_obj: 1.453125, elbo 5954.6943359375
computing test acc
test_in, N_iter 5900, Accuracy: 0.6421370967741935, avg_obj: 1.6325604838709677, elbo 0
test_out, N_iter 5900, Accuracy: 0.12839673913043478, avg_obj: 3.5699728260869565, elbo 0
train, N_iter 6000, Accuracy: 0.546875, avg_obj: 1.53125, elbo 6016.3935546875
computing test acc
test_in, N_iter 6000, Accuracy: 0.6255040322580645, avg_obj: 1.6869959677419355, elbo 0
test_out, N_iter 6000, Accuracy: 0.15421195652173914, avg_obj: 3.71875, elbo 0
train, N_iter 6100, Accuracy: 0.734375, avg_obj: 1.484375, elbo 5857.5078125
computing test acc
test_in, N_iter 6100, Accuracy: 0.6365927419354839, avg_obj: 1.6325604838709677, elbo 0
test_out, N_iter 6100, Accuracy: 0.12160326086956522, avg_obj: 3.5478940217391304, elbo 0
train, N_iter 6200, Accuracy: 0.703125, avg_obj: 1.4375, elbo 6575.70703125
computing test acc
test_in, N_iter 6200, Accuracy: 0.6461693548387096, avg_obj: 1.580141129032258, elbo 0
test_out, N_iter 6200, Accuracy: 0.11039402173913043, avg_obj: 3.46875, elbo 0
train, N_iter 6300, Accuracy: 0.625, avg_obj: 1.5, elbo 5946.0009765625
computing test acc
test_in, N_iter 6300, Accuracy: 0.6358366935483871, avg_obj: 1.6078629032258065, elbo 0
test_out, N_iter 6300, Accuracy: 0.10733695652173914, avg_obj: 3.436141304347826, elbo 0
train, N_iter 6400, Accuracy: 0.640625, avg_obj: 1.640625, elbo 5920.8134765625
computing test acc
test_in, N_iter 6400, Accuracy: 0.6252520161290323, avg_obj: 1.6759072580645162, elbo 0
test_out, N_iter 6400, Accuracy: 0.12771739130434784, avg_obj: 3.602241847826087, elbo 0
train, N_iter 6500, Accuracy: 0.75, avg_obj: 1.453125, elbo 6054.58203125
computing test acc
test_in, N_iter 6500, Accuracy: 0.6451612903225806, avg_obj: 1.619203629032258, elbo 0
test_out, N_iter 6500, Accuracy: 0.11548913043478261, avg_obj: 3.503736413043478, elbo 0
train, N_iter 6600, Accuracy: 0.6875, avg_obj: 1.5, elbo 6387.99609375
computing test acc
test_in, N_iter 6600, Accuracy: 0.6391129032258065, avg_obj: 1.658266129032258, elbo 0
test_out, N_iter 6600, Accuracy: 0.12058423913043478, avg_obj: 3.555366847826087, elbo 0
train, N_iter 6700, Accuracy: 0.625, avg_obj: 1.859375, elbo 5678.662109375
computing test acc
test_in, N_iter 6700, Accuracy: 0.6461693548387096, avg_obj: 1.6174395161290323, elbo 0
test_out, N_iter 6700, Accuracy: 0.11480978260869565, avg_obj: 3.5061141304347827, elbo 0
train, N_iter 6800, Accuracy: 0.734375, avg_obj: 1.59375, elbo 6315.30859375
computing test acc
test_in, N_iter 6800, Accuracy: 0.6481854838709677, avg_obj: 1.6060987903225807, elbo 0
test_out, N_iter 6800, Accuracy: 0.11175271739130435, avg_obj: 3.498641304347826, elbo 0
train, N_iter 6900, Accuracy: 0.65625, avg_obj: 1.625, elbo 6063.3310546875
computing test acc
test_in, N_iter 6900, Accuracy: 0.6358366935483871, avg_obj: 1.6852318548387097, elbo 0
test_out, N_iter 6900, Accuracy: 0.14266304347826086, avg_obj: 3.6871603260869565, elbo 0
train, N_iter 7000, Accuracy: 0.703125, avg_obj: 1.375, elbo 6570.7822265625
computing test acc
test_in, N_iter 7000, Accuracy: 0.6469254032258065, avg_obj: 1.6136592741935485, elbo 0
test_out, N_iter 7000, Accuracy: 0.10461956521739131, avg_obj: 3.4514266304347827, elbo 0
train, N_iter 7100, Accuracy: 0.6875, avg_obj: 1.34375, elbo 6637.845703125
computing test acc
test_in, N_iter 7100, Accuracy: 0.6476814516129032, avg_obj: 1.637600806451613, elbo 0
test_out, N_iter 7100, Accuracy: 0.13077445652173914, avg_obj: 3.598505434782609, elbo 0
train, N_iter 7200, Accuracy: 0.640625, avg_obj: 1.734375, elbo 6141.11279296875
computing test acc
test_in, N_iter 7200, Accuracy: 0.633820564516129, avg_obj: 1.6769153225806452, elbo 0
test_out, N_iter 7200, Accuracy: 0.14707880434782608, avg_obj: 3.686141304347826, elbo 0
train, N_iter 7300, Accuracy: 0.765625, avg_obj: 1.34375, elbo 6710.3251953125
computing test acc
test_in, N_iter 7300, Accuracy: 0.6333165322580645, avg_obj: 1.6751512096774193, elbo 0
test_out, N_iter 7300, Accuracy: 0.14911684782608695, avg_obj: 3.6854619565217392, elbo 0
train, N_iter 7400, Accuracy: 0.671875, avg_obj: 1.453125, elbo 6736.2998046875
computing test acc
test_in, N_iter 7400, Accuracy: 0.6370967741935484, avg_obj: 1.6547379032258065, elbo 0
test_out, N_iter 7400, Accuracy: 0.14300271739130435, avg_obj: 3.6657608695652173, elbo 0
train, N_iter 7500, Accuracy: 0.59375, avg_obj: 1.8125, elbo 6497.1513671875
computing test acc
test_in, N_iter 7500, Accuracy: 0.6433971774193549, avg_obj: 1.6244959677419355, elbo 0
test_out, N_iter 7500, Accuracy: 0.10835597826086957, avg_obj: 3.5003396739130435, elbo 0
train, N_iter 7600, Accuracy: 0.6875, avg_obj: 1.640625, elbo 6889.0732421875
computing test acc
test_in, N_iter 7600, Accuracy: 0.6401209677419355, avg_obj: 1.6320564516129032, elbo 0
test_out, N_iter 7600, Accuracy: 0.12907608695652173, avg_obj: 3.597486413043478, elbo 0
train, N_iter 7700, Accuracy: 0.671875, avg_obj: 1.484375, elbo 6657.8232421875
computing test acc
test_in, N_iter 7700, Accuracy: 0.6378528225806451, avg_obj: 1.6577620967741935, elbo 0
test_out, N_iter 7700, Accuracy: 0.13824728260869565, avg_obj: 3.644701086956522, elbo 0
train, N_iter 7800, Accuracy: 0.59375, avg_obj: 1.46875, elbo 6532.13671875
computing test acc
test_in, N_iter 7800, Accuracy: 0.647429435483871, avg_obj: 1.6285282258064515, elbo 0
test_out, N_iter 7800, Accuracy: 0.10971467391304347, avg_obj: 3.509171195652174, elbo 0
train, N_iter 7900, Accuracy: 0.640625, avg_obj: 1.5625, elbo 6531.091796875
computing test acc
test_in, N_iter 7900, Accuracy: 0.6454133064516129, avg_obj: 1.6207157258064515, elbo 0
test_out, N_iter 7900, Accuracy: 0.12669836956521738, avg_obj: 3.5703125, elbo 0
train, N_iter 8000, Accuracy: 0.703125, avg_obj: 1.625, elbo 6545.72265625
computing test acc
test_in, N_iter 8000, Accuracy: 0.6524697580645161, avg_obj: 1.5866935483870968, elbo 0
test_out, N_iter 8000, Accuracy: 0.10394021739130435, avg_obj: 3.439877717391304, elbo 0
train, N_iter 8100, Accuracy: 0.671875, avg_obj: 1.421875, elbo 6702.7236328125
computing test acc
test_in, N_iter 8100, Accuracy: 0.6464213709677419, avg_obj: 1.6373487903225807, elbo 0
test_out, N_iter 8100, Accuracy: 0.13213315217391305, avg_obj: 3.5608016304347827, elbo 0
train, N_iter 8200, Accuracy: 0.609375, avg_obj: 1.859375, elbo 6601.9052734375
computing test acc
test_in, N_iter 8200, Accuracy: 0.633820564516129, avg_obj: 1.7048891129032258, elbo 0
test_out, N_iter 8200, Accuracy: 0.1640625, avg_obj: 3.7550951086956523, elbo 0
train, N_iter 8300, Accuracy: 0.640625, avg_obj: 1.4375, elbo 6726.62939453125
computing test acc
test_in, N_iter 8300, Accuracy: 0.6464213709677419, avg_obj: 1.5859375, elbo 0
test_out, N_iter 8300, Accuracy: 0.10461956521739131, avg_obj: 3.4514266304347827, elbo 0
train, N_iter 8400, Accuracy: 0.65625, avg_obj: 1.578125, elbo 6688.2568359375
computing test acc
test_in, N_iter 8400, Accuracy: 0.6451612903225806, avg_obj: 1.6343245967741935, elbo 0
test_out, N_iter 8400, Accuracy: 0.13451086956521738, avg_obj: 3.6049592391304346, elbo 0
train, N_iter 8500, Accuracy: 0.625, avg_obj: 1.546875, elbo 6949.71484375
computing test acc
test_in, N_iter 8500, Accuracy: 0.6486895161290323, avg_obj: 1.6207157258064515, elbo 0
test_out, N_iter 8500, Accuracy: 0.11888586956521739, avg_obj: 3.535326086956522, elbo 0
train, N_iter 8600, Accuracy: 0.6875, avg_obj: 1.484375, elbo 7299.4599609375
computing test acc
test_in, N_iter 8600, Accuracy: 0.6436491935483871, avg_obj: 1.622983870967742, elbo 0
test_out, N_iter 8600, Accuracy: 0.11175271739130435, avg_obj: 3.496263586956522, elbo 0
train, N_iter 8700, Accuracy: 0.734375, avg_obj: 1.40625, elbo 6598.7470703125
computing test acc
test_in, N_iter 8700, Accuracy: 0.6444052419354839, avg_obj: 1.6305443548387097, elbo 0
test_out, N_iter 8700, Accuracy: 0.11277173913043478, avg_obj: 3.5458559782608696, elbo 0
train, N_iter 8800, Accuracy: 0.625, avg_obj: 1.65625, elbo 6794.54443359375
computing test acc
test_in, N_iter 8800, Accuracy: 0.6431451612903226, avg_obj: 1.6486895161290323, elbo 0
test_out, N_iter 8800, Accuracy: 0.11990489130434782, avg_obj: 3.555366847826087, elbo 0
train, N_iter 8900, Accuracy: 0.625, avg_obj: 1.8125, elbo 6701.75390625
computing test acc
test_in, N_iter 8900, Accuracy: 0.640625, avg_obj: 1.6804435483870968, elbo 0
test_out, N_iter 8900, Accuracy: 0.15081521739130435, avg_obj: 3.703125, elbo 0
train, N_iter 9000, Accuracy: 0.609375, avg_obj: 1.515625, elbo 6847.18994140625
computing test acc
test_in, N_iter 9000, Accuracy: 0.6413810483870968, avg_obj: 1.6509576612903225, elbo 0
test_out, N_iter 9000, Accuracy: 0.140625, avg_obj: 3.668817934782609, elbo 0
train, N_iter 9100, Accuracy: 0.578125, avg_obj: 1.546875, elbo 6986.703125
computing test acc
test_in, N_iter 9100, Accuracy: 0.6408770161290323, avg_obj: 1.668850806451613, elbo 0
test_out, N_iter 9100, Accuracy: 0.13145380434782608, avg_obj: 3.608016304347826, elbo 0
train, N_iter 9200, Accuracy: 0.75, avg_obj: 1.515625, elbo 6735.5810546875
computing test acc
test_in, N_iter 9200, Accuracy: 0.6381048387096774, avg_obj: 1.6912802419354838, elbo 0
test_out, N_iter 9200, Accuracy: 0.1484375, avg_obj: 3.6830842391304346, elbo 0
train, N_iter 9300, Accuracy: 0.703125, avg_obj: 1.453125, elbo 7429.443359375
computing test acc
test_in, N_iter 9300, Accuracy: 0.6507056451612904, avg_obj: 1.6131552419354838, elbo 0
test_out, N_iter 9300, Accuracy: 0.11311141304347826, avg_obj: 3.513247282608696, elbo 0
train, N_iter 9400, Accuracy: 0.65625, avg_obj: 1.5625, elbo 6933.6552734375
computing test acc
test_in, N_iter 9400, Accuracy: 0.657258064516129, avg_obj: 1.559475806451613, elbo 0
test_out, N_iter 9400, Accuracy: 0.10020380434782608, avg_obj: 3.4252717391304346, elbo 0
train, N_iter 9500, Accuracy: 0.65625, avg_obj: 1.671875, elbo 6837.1162109375
computing test acc
test_in, N_iter 9500, Accuracy: 0.6358366935483871, avg_obj: 1.684475806451613, elbo 0
test_out, N_iter 9500, Accuracy: 0.1484375, avg_obj: 3.6932744565217392, elbo 0
train, N_iter 9600, Accuracy: 0.71875, avg_obj: 1.421875, elbo 6994.140625
computing test acc
test_in, N_iter 9600, Accuracy: 0.6539818548387096, avg_obj: 1.616179435483871, elbo 0
test_out, N_iter 9600, Accuracy: 0.11956521739130435, avg_obj: 3.5322690217391304, elbo 0
train, N_iter 9700, Accuracy: 0.65625, avg_obj: 1.5, elbo 7044.685546875
computing test acc
test_in, N_iter 9700, Accuracy: 0.6466733870967742, avg_obj: 1.6207157258064515, elbo 0
test_out, N_iter 9700, Accuracy: 0.11073369565217392, avg_obj: 3.501358695652174, elbo 0
train, N_iter 9800, Accuracy: 0.671875, avg_obj: 1.875, elbo 6486.7451171875
computing test acc
test_in, N_iter 9800, Accuracy: 0.6476814516129032, avg_obj: 1.6013104838709677, elbo 0
test_out, N_iter 9800, Accuracy: 0.10903532608695653, avg_obj: 3.5122282608695654, elbo 0
train, N_iter 9900, Accuracy: 0.703125, avg_obj: 1.640625, elbo 6927.41796875
computing test acc
test_in, N_iter 9900, Accuracy: 0.6393649193548387, avg_obj: 1.6799395161290323, elbo 0
test_out, N_iter 9900, Accuracy: 0.15183423913043478, avg_obj: 3.7021059782608696, elbo 0
train, N_iter 10000, Accuracy: 0.640625, avg_obj: 1.65625, elbo 6614.599609375
computing test acc
test_in, N_iter 10000, Accuracy: 0.6368447580645161, avg_obj: 1.6915322580645162, elbo 0
test_out, N_iter 10000, Accuracy: 0.15625, avg_obj: 3.717391304347826, elbo 0
train, N_iter 10100, Accuracy: 0.6875, avg_obj: 1.421875, elbo 7127.7666015625
computing test acc
test_in, N_iter 10100, Accuracy: 0.6391129032258065, avg_obj: 1.6539818548387097, elbo 0
test_out, N_iter 10100, Accuracy: 0.14198369565217392, avg_obj: 3.682744565217391, elbo 0
train, N_iter 10200, Accuracy: 0.71875, avg_obj: 1.40625, elbo 7177.9912109375
computing test acc
test_in, N_iter 10200, Accuracy: 0.6413810483870968, avg_obj: 1.6567540322580645, elbo 0
test_out, N_iter 10200, Accuracy: 0.14605978260869565, avg_obj: 3.671875, elbo 0
train, N_iter 10300, Accuracy: 0.640625, avg_obj: 1.734375, elbo 6710.0517578125
computing test acc
test_in, N_iter 10300, Accuracy: 0.6484375, avg_obj: 1.6179435483870968, elbo 0
test_out, N_iter 10300, Accuracy: 0.11379076086956522, avg_obj: 3.5207201086956523, elbo 0
train, N_iter 10400, Accuracy: 0.78125, avg_obj: 1.390625, elbo 7404.05859375
computing test acc
test_in, N_iter 10400, Accuracy: 0.6449092741935484, avg_obj: 1.6507056451612903, elbo 0
test_out, N_iter 10400, Accuracy: 0.1324728260869565, avg_obj: 3.616508152173913, elbo 0
train, N_iter 10500, Accuracy: 0.65625, avg_obj: 1.46875, elbo 7288.82421875
computing test acc
test_in, N_iter 10500, Accuracy: 0.6212197580645161, avg_obj: 1.7318548387096775, elbo 0
test_out, N_iter 10500, Accuracy: 0.15455163043478262, avg_obj: 3.7177309782608696, elbo 0
train, N_iter 10600, Accuracy: 0.625, avg_obj: 1.78125, elbo 7096.3623046875
computing test acc
test_in, N_iter 10600, Accuracy: 0.6431451612903226, avg_obj: 1.6554939516129032, elbo 0
test_out, N_iter 10600, Accuracy: 0.1297554347826087, avg_obj: 3.5869565217391304, elbo 0
train, N_iter 10700, Accuracy: 0.734375, avg_obj: 1.625, elbo 7526.7451171875
computing test acc
test_in, N_iter 10700, Accuracy: 0.6509576612903226, avg_obj: 1.606350806451613, elbo 0
test_out, N_iter 10700, Accuracy: 0.11447010869565218, avg_obj: 3.5285326086956523, elbo 0
train, N_iter 10800, Accuracy: 0.671875, avg_obj: 1.484375, elbo 7240.9306640625
computing test acc
test_in, N_iter 10800, Accuracy: 0.6451612903225806, avg_obj: 1.628024193548387, elbo 0
test_out, N_iter 10800, Accuracy: 0.13043478260869565, avg_obj: 3.581861413043478, elbo 0
train, N_iter 10900, Accuracy: 0.65625, avg_obj: 1.484375, elbo 6999.43603515625
computing test acc
test_in, N_iter 10900, Accuracy: 0.6353326612903226, avg_obj: 1.6953125, elbo 0
test_out, N_iter 10900, Accuracy: 0.15353260869565216, avg_obj: 3.700067934782609, elbo 0
train, N_iter 11000, Accuracy: 0.671875, avg_obj: 1.515625, elbo 7052.21044921875
computing test acc
test_in, N_iter 11000, Accuracy: 0.6504536290322581, avg_obj: 1.6300403225806452, elbo 0
test_out, N_iter 11000, Accuracy: 0.12296195652173914, avg_obj: 3.5448369565217392, elbo 0
train, N_iter 11100, Accuracy: 0.71875, avg_obj: 1.5625, elbo 7237.453125
computing test acc
test_in, N_iter 11100, Accuracy: 0.6376008064516129, avg_obj: 1.6549899193548387, elbo 0
test_out, N_iter 11100, Accuracy: 0.12669836956521738, avg_obj: 3.5954483695652173, elbo 0
train, N_iter 11200, Accuracy: 0.6875, avg_obj: 1.4375, elbo 7229.001953125
computing test acc
test_in, N_iter 11200, Accuracy: 0.6469254032258065, avg_obj: 1.599546370967742, elbo 0
test_out, N_iter 11200, Accuracy: 0.10631793478260869, avg_obj: 3.4769021739130435, elbo 0
train, N_iter 11300, Accuracy: 0.625, avg_obj: 1.8125, elbo 6920.943359375
computing test acc
test_in, N_iter 11300, Accuracy: 0.6423891129032258, avg_obj: 1.6446572580645162, elbo 0
test_out, N_iter 11300, Accuracy: 0.12601902173913043, avg_obj: 3.6283967391304346, elbo 0
train, N_iter 11400, Accuracy: 0.671875, avg_obj: 1.5, elbo 7084.68017578125
computing test acc
test_in, N_iter 11400, Accuracy: 0.6370967741935484, avg_obj: 1.6809475806451613, elbo 0
test_out, N_iter 11400, Accuracy: 0.13654891304347827, avg_obj: 3.6535326086956523, elbo 0
train, N_iter 11500, Accuracy: 0.65625, avg_obj: 1.578125, elbo 7009.826171875
computing test acc
test_in, N_iter 11500, Accuracy: 0.6469254032258065, avg_obj: 1.6292842741935485, elbo 0
test_out, N_iter 11500, Accuracy: 0.1280570652173913, avg_obj: 3.5737092391304346, elbo 0
train, N_iter 11600, Accuracy: 0.640625, avg_obj: 1.546875, elbo 7287.25439453125
computing test acc
test_in, N_iter 11600, Accuracy: 0.641633064516129, avg_obj: 1.6292842741935485, elbo 0
test_out, N_iter 11600, Accuracy: 0.12092391304347826, avg_obj: 3.5862771739130435, elbo 0
train, N_iter 11700, Accuracy: 0.6875, avg_obj: 1.515625, elbo 7644.5126953125
computing test acc
test_in, N_iter 11700, Accuracy: 0.6413810483870968, avg_obj: 1.6549899193548387, elbo 0
test_out, N_iter 11700, Accuracy: 0.14266304347826086, avg_obj: 3.6623641304347827, elbo 0
train, N_iter 11800, Accuracy: 0.75, avg_obj: 1.421875, elbo 6745.142578125
computing test acc
test_in, N_iter 11800, Accuracy: 0.6398689516129032, avg_obj: 1.666078629032258, elbo 0
test_out, N_iter 11800, Accuracy: 0.14300271739130435, avg_obj: 3.663383152173913, elbo 0
train, N_iter 11900, Accuracy: 0.625, avg_obj: 1.6875, elbo 7032.9345703125
computing test acc
test_in, N_iter 11900, Accuracy: 0.6325604838709677, avg_obj: 1.7222782258064515, elbo 0
test_out, N_iter 11900, Accuracy: 0.16949728260869565, avg_obj: 3.8012907608695654, elbo 0
train, N_iter 12000, Accuracy: 0.59375, avg_obj: 1.8125, elbo 6983.994140625
computing test acc
test_in, N_iter 12000, Accuracy: 0.6418850806451613, avg_obj: 1.6539818548387097, elbo 0
test_out, N_iter 12000, Accuracy: 0.14300271739130435, avg_obj: 3.699048913043478, elbo 0
train, N_iter 12100, Accuracy: 0.640625, avg_obj: 1.453125, elbo 7272.92138671875
computing test acc
test_in, N_iter 12100, Accuracy: 0.6486895161290323, avg_obj: 1.6287802419354838, elbo 0
test_out, N_iter 12100, Accuracy: 0.12296195652173914, avg_obj: 3.5910326086956523, elbo 0
train, N_iter 12200, Accuracy: 0.59375, avg_obj: 1.484375, elbo 7331.61962890625
computing test acc
test_in, N_iter 12200, Accuracy: 0.647429435483871, avg_obj: 1.6343245967741935, elbo 0
test_out, N_iter 12200, Accuracy: 0.1280570652173913, avg_obj: 3.5886548913043477, elbo 0
train, N_iter 12300, Accuracy: 0.765625, avg_obj: 1.515625, elbo 7223.56591796875
computing test acc
test_in, N_iter 12300, Accuracy: 0.6388608870967742, avg_obj: 1.6743951612903225, elbo 0
test_out, N_iter 12300, Accuracy: 0.13417119565217392, avg_obj: 3.664741847826087, elbo 0
train, N_iter 12400, Accuracy: 0.703125, avg_obj: 1.4375, elbo 7744.64599609375
computing test acc
test_in, N_iter 12400, Accuracy: 0.6436491935483871, avg_obj: 1.6449092741935485, elbo 0
test_out, N_iter 12400, Accuracy: 0.12703804347826086, avg_obj: 3.5876358695652173, elbo 0
train, N_iter 12500, Accuracy: 0.65625, avg_obj: 1.5625, elbo 7261.81787109375
computing test acc
test_in, N_iter 12500, Accuracy: 0.6502016129032258, avg_obj: 1.6098790322580645, elbo 0
test_out, N_iter 12500, Accuracy: 0.11379076086956522, avg_obj: 3.5574048913043477, elbo 0
train, N_iter 12600, Accuracy: 0.65625, avg_obj: 1.640625, elbo 7403.734375
computing test acc
test_in, N_iter 12600, Accuracy: 0.6524697580645161, avg_obj: 1.610383064516129, elbo 0
test_out, N_iter 12600, Accuracy: 0.11345108695652174, avg_obj: 3.510190217391304, elbo 0
train, N_iter 12700, Accuracy: 0.75, avg_obj: 1.421875, elbo 7400.921875
computing test acc
test_in, N_iter 12700, Accuracy: 0.6466733870967742, avg_obj: 1.6439012096774193, elbo 0
test_out, N_iter 12700, Accuracy: 0.1324728260869565, avg_obj: 3.6402853260869565, elbo 0
train, N_iter 12800, Accuracy: 0.671875, avg_obj: 1.484375, elbo 7563.529296875
computing test acc
test_in, N_iter 12800, Accuracy: 0.6403729838709677, avg_obj: 1.6759072580645162, elbo 0
test_out, N_iter 12800, Accuracy: 0.14436141304347827, avg_obj: 3.694633152173913, elbo 0
train, N_iter 12900, Accuracy: 0.65625, avg_obj: 1.90625, elbo 6807.2412109375
computing test acc
test_in, N_iter 12900, Accuracy: 0.6484375, avg_obj: 1.6481854838709677, elbo 0
test_out, N_iter 12900, Accuracy: 0.12873641304347827, avg_obj: 3.5981657608695654, elbo 0
train, N_iter 13000, Accuracy: 0.75, avg_obj: 1.59375, elbo 7393.8271484375
computing test acc
test_in, N_iter 13000, Accuracy: 0.6499495967741935, avg_obj: 1.6257560483870968, elbo 0
test_out, N_iter 13000, Accuracy: 0.12330163043478261, avg_obj: 3.5563858695652173, elbo 0
train, N_iter 13100, Accuracy: 0.703125, avg_obj: 1.640625, elbo 6964.80615234375
computing test acc
test_in, N_iter 13100, Accuracy: 0.641633064516129, avg_obj: 1.6655745967741935, elbo 0
test_out, N_iter 13100, Accuracy: 0.1375679347826087, avg_obj: 3.635190217391304, elbo 0
train, N_iter 13200, Accuracy: 0.6875, avg_obj: 1.421875, elbo 7464.0751953125
computing test acc
test_in, N_iter 13200, Accuracy: 0.6285282258064516, avg_obj: 1.711945564516129, elbo 0
test_out, N_iter 13200, Accuracy: 0.14164402173913043, avg_obj: 3.6776494565217392, elbo 0
train, N_iter 13300, Accuracy: 0.734375, avg_obj: 1.421875, elbo 7383.40771484375
computing test acc
test_in, N_iter 13300, Accuracy: 0.649445564516129, avg_obj: 1.655241935483871, elbo 0
test_out, N_iter 13300, Accuracy: 0.13688858695652173, avg_obj: 3.6423233695652173, elbo 0
train, N_iter 13400, Accuracy: 0.609375, avg_obj: 1.765625, elbo 7136.3134765625
computing test acc
test_in, N_iter 13400, Accuracy: 0.6469254032258065, avg_obj: 1.6393649193548387, elbo 0
test_out, N_iter 13400, Accuracy: 0.14741847826086957, avg_obj: 3.6752717391304346, elbo 0
train, N_iter 13500, Accuracy: 0.765625, avg_obj: 1.375, elbo 7823.6318359375
computing test acc
test_in, N_iter 13500, Accuracy: 0.6514616935483871, avg_obj: 1.6096270161290323, elbo 0
test_out, N_iter 13500, Accuracy: 0.11243206521739131, avg_obj: 3.4989809782608696, elbo 0
train, N_iter 13600, Accuracy: 0.71875, avg_obj: 1.4375, elbo 7547.31005859375
computing test acc
test_in, N_iter 13600, Accuracy: 0.6517137096774194, avg_obj: 1.6580141129032258, elbo 0
test_out, N_iter 13600, Accuracy: 0.12364130434782608, avg_obj: 3.5825407608695654, elbo 0
train, N_iter 13700, Accuracy: 0.625, avg_obj: 1.828125, elbo 7345.2998046875
computing test acc
test_in, N_iter 13700, Accuracy: 0.6408770161290323, avg_obj: 1.6867439516129032, elbo 0
test_out, N_iter 13700, Accuracy: 0.15387228260869565, avg_obj: 3.7353940217391304, elbo 0
train, N_iter 13800, Accuracy: 0.734375, avg_obj: 1.671875, elbo 7540.9345703125
computing test acc
test_in, N_iter 13800, Accuracy: 0.6426411290322581, avg_obj: 1.6685987903225807, elbo 0
test_out, N_iter 13800, Accuracy: 0.1331521739130435, avg_obj: 3.6396059782608696, elbo 0
train, N_iter 13900, Accuracy: 0.734375, avg_obj: 1.453125, elbo 7496.4326171875
computing test acc
test_in, N_iter 13900, Accuracy: 0.6544858870967742, avg_obj: 1.6136592741935485, elbo 0
test_out, N_iter 13900, Accuracy: 0.1171875, avg_obj: 3.522078804347826, elbo 0
train, N_iter 14000, Accuracy: 0.609375, avg_obj: 1.46875, elbo 7180.009765625
computing test acc
test_in, N_iter 14000, Accuracy: 0.6512096774193549, avg_obj: 1.6088709677419355, elbo 0
test_out, N_iter 14000, Accuracy: 0.12194293478260869, avg_obj: 3.5363451086956523, elbo 0
train, N_iter 14100, Accuracy: 0.625, avg_obj: 1.59375, elbo 7160.17919921875
computing test acc
test_in, N_iter 14100, Accuracy: 0.6386088709677419, avg_obj: 1.6811995967741935, elbo 0
test_out, N_iter 14100, Accuracy: 0.14741847826086957, avg_obj: 3.7014266304347827, elbo 0
train, N_iter 14200, Accuracy: 0.71875, avg_obj: 1.59375, elbo 7400.4951171875
computing test acc
test_in, N_iter 14200, Accuracy: 0.6489415322580645, avg_obj: 1.6451612903225807, elbo 0
test_out, N_iter 14200, Accuracy: 0.11956521739130435, avg_obj: 3.569633152173913, elbo 0
train, N_iter 14300, Accuracy: 0.6875, avg_obj: 1.421875, elbo 7412.21875
computing test acc
test_in, N_iter 14300, Accuracy: 0.6444052419354839, avg_obj: 1.6537298387096775, elbo 0
test_out, N_iter 14300, Accuracy: 0.12466032608695653, avg_obj: 3.5998641304347827, elbo 0
train, N_iter 14400, Accuracy: 0.59375, avg_obj: 1.78125, elbo 7199.34912109375
computing test acc
test_in, N_iter 14400, Accuracy: 0.6423891129032258, avg_obj: 1.6675907258064515, elbo 0
test_out, N_iter 14400, Accuracy: 0.12228260869565218, avg_obj: 3.582880434782609, elbo 0
train, N_iter 14500, Accuracy: 0.625, avg_obj: 1.515625, elbo 7384.53076171875
computing test acc
test_in, N_iter 14500, Accuracy: 0.6464213709677419, avg_obj: 1.6711189516129032, elbo 0
test_out, N_iter 14500, Accuracy: 0.13179347826086957, avg_obj: 3.6331521739130435, elbo 0
train, N_iter 14600, Accuracy: 0.6875, avg_obj: 1.578125, elbo 7409.865234375
computing test acc
test_in, N_iter 14600, Accuracy: 0.6509576612903226, avg_obj: 1.6360887096774193, elbo 0
test_out, N_iter 14600, Accuracy: 0.11514945652173914, avg_obj: 3.5448369565217392, elbo 0
train, N_iter 14700, Accuracy: 0.65625, avg_obj: 1.53125, elbo 7525.92578125
computing test acc
test_in, N_iter 14700, Accuracy: 0.6502016129032258, avg_obj: 1.6484375, elbo 0
test_out, N_iter 14700, Accuracy: 0.11786684782608696, avg_obj: 3.5635190217391304, elbo 0
train, N_iter 14800, Accuracy: 0.71875, avg_obj: 1.515625, elbo 7965.45703125
computing test acc
test_in, N_iter 14800, Accuracy: 0.6554939516129032, avg_obj: 1.610383064516129, elbo 0
test_out, N_iter 14800, Accuracy: 0.10495923913043478, avg_obj: 3.459578804347826, elbo 0
train, N_iter 14900, Accuracy: 0.71875, avg_obj: 1.421875, elbo 7227.63916015625
computing test acc
test_in, N_iter 14900, Accuracy: 0.6587701612903226, avg_obj: 1.6121471774193548, elbo 0
test_out, N_iter 14900, Accuracy: 0.10326086956521739, avg_obj: 3.4704483695652173, elbo 0
train, N_iter 15000, Accuracy: 0.671875, avg_obj: 1.6875, elbo 7464.9716796875
computing test acc
test_in, N_iter 15000, Accuracy: 0.647429435483871, avg_obj: 1.6784274193548387, elbo 0
test_out, N_iter 15000, Accuracy: 0.13960597826086957, avg_obj: 3.6161684782608696, elbo 0
train, N_iter 15100, Accuracy: 0.609375, avg_obj: 1.796875, elbo 7246.58740234375
computing test acc
test_in, N_iter 15100, Accuracy: 0.6507056451612904, avg_obj: 1.6449092741935485, elbo 0
test_out, N_iter 15100, Accuracy: 0.12567934782608695, avg_obj: 3.5682744565217392, elbo 0
train, N_iter 15200, Accuracy: 0.625, avg_obj: 1.46875, elbo 7543.07275390625
computing test acc
test_in, N_iter 15200, Accuracy: 0.6640625, avg_obj: 1.598538306451613, elbo 0
test_out, N_iter 15200, Accuracy: 0.09612771739130435, avg_obj: 3.4364809782608696, elbo 0
train, N_iter 15300, Accuracy: 0.59375, avg_obj: 1.46875, elbo 7557.34423828125
computing test acc
test_in, N_iter 15300, Accuracy: 0.6489415322580645, avg_obj: 1.642641129032258, elbo 0
test_out, N_iter 15300, Accuracy: 0.12771739130434784, avg_obj: 3.5669157608695654, elbo 0
train, N_iter 15400, Accuracy: 0.765625, avg_obj: 1.484375, elbo 7467.517578125
computing test acc
test_in, N_iter 15400, Accuracy: 0.6549899193548387, avg_obj: 1.6267641129032258, elbo 0
test_out, N_iter 15400, Accuracy: 0.11480978260869565, avg_obj: 3.5, elbo 0
train, N_iter 15500, Accuracy: 0.6875, avg_obj: 1.453125, elbo 7959.595703125
computing test acc
test_in, N_iter 15500, Accuracy: 0.6484375, avg_obj: 1.65625, elbo 0
test_out, N_iter 15500, Accuracy: 0.13620923913043478, avg_obj: 3.597486413043478, elbo 0
train, N_iter 15600, Accuracy: 0.65625, avg_obj: 1.5625, elbo 7454.08984375
computing test acc
test_in, N_iter 15600, Accuracy: 0.6496975806451613, avg_obj: 1.6330645161290323, elbo 0
test_out, N_iter 15600, Accuracy: 0.10903532608695653, avg_obj: 3.501358695652174, elbo 0
train, N_iter 15700, Accuracy: 0.65625, avg_obj: 1.6875, elbo 7548.15625
computing test acc
test_in, N_iter 15700, Accuracy: 0.6512096774193549, avg_obj: 1.6272681451612903, elbo 0
test_out, N_iter 15700, Accuracy: 0.10461956521739131, avg_obj: 3.464673913043478, elbo 0
train, N_iter 15800, Accuracy: 0.765625, avg_obj: 1.453125, elbo 7637.9814453125
computing test acc
test_in, N_iter 15800, Accuracy: 0.6502016129032258, avg_obj: 1.6502016129032258, elbo 0
test_out, N_iter 15800, Accuracy: 0.12771739130434784, avg_obj: 3.594429347826087, elbo 0
train, N_iter 15900, Accuracy: 0.671875, avg_obj: 1.484375, elbo 7768.455078125
computing test acc
test_in, N_iter 15900, Accuracy: 0.6524697580645161, avg_obj: 1.615171370967742, elbo 0
test_out, N_iter 15900, Accuracy: 0.11005434782608696, avg_obj: 3.4942255434782608, elbo 0
train, N_iter 16000, Accuracy: 0.65625, avg_obj: 1.890625, elbo 6919.31640625
computing test acc
test_in, N_iter 16000, Accuracy: 0.6456653225806451, avg_obj: 1.6315524193548387, elbo 0
test_out, N_iter 16000, Accuracy: 0.11311141304347826, avg_obj: 3.498641304347826, elbo 0
train, N_iter 16100, Accuracy: 0.734375, avg_obj: 1.59375, elbo 7602.4833984375
computing test acc
test_in, N_iter 16100, Accuracy: 0.6486895161290323, avg_obj: 1.6507056451612903, elbo 0
test_out, N_iter 16100, Accuracy: 0.12601902173913043, avg_obj: 3.5451766304347827, elbo 0
train, N_iter 16200, Accuracy: 0.6875, avg_obj: 1.640625, elbo 7362.81591796875
computing test acc
test_in, N_iter 16200, Accuracy: 0.6577620967741935, avg_obj: 1.6176915322580645, elbo 0
test_out, N_iter 16200, Accuracy: 0.10665760869565218, avg_obj: 3.471127717391304, elbo 0
train, N_iter 16300, Accuracy: 0.671875, avg_obj: 1.453125, elbo 7642.8095703125
computing test acc
test_in, N_iter 16300, Accuracy: 0.6179435483870968, avg_obj: 1.7288306451612903, elbo 0
test_out, N_iter 16300, Accuracy: 0.14436141304347827, avg_obj: 3.639266304347826, elbo 0
train, N_iter 16400, Accuracy: 0.734375, avg_obj: 1.390625, elbo 7659.943359375
computing test acc
test_in, N_iter 16400, Accuracy: 0.6582661290322581, avg_obj: 1.6154233870967742, elbo 0
test_out, N_iter 16400, Accuracy: 0.11582880434782608, avg_obj: 3.5190217391304346, elbo 0
train, N_iter 16500, Accuracy: 0.65625, avg_obj: 1.734375, elbo 7196.1220703125
computing test acc
test_in, N_iter 16500, Accuracy: 0.6486895161290323, avg_obj: 1.6413810483870968, elbo 0
test_out, N_iter 16500, Accuracy: 0.11447010869565218, avg_obj: 3.550611413043478, elbo 0
train, N_iter 16600, Accuracy: 0.765625, avg_obj: 1.375, elbo 7979.95458984375
computing test acc
test_in, N_iter 16600, Accuracy: 0.6539818548387096, avg_obj: 1.6305443548387097, elbo 0
test_out, N_iter 16600, Accuracy: 0.12771739130434784, avg_obj: 3.5940896739130435, elbo 0
train, N_iter 16700, Accuracy: 0.71875, avg_obj: 1.453125, elbo 7790.1689453125
computing test acc
test_in, N_iter 16700, Accuracy: 0.6577620967741935, avg_obj: 1.6290322580645162, elbo 0
test_out, N_iter 16700, Accuracy: 0.11956521739130435, avg_obj: 3.564877717391304, elbo 0
train, N_iter 16800, Accuracy: 0.625, avg_obj: 1.796875, elbo 7598.50146484375
computing test acc
test_in, N_iter 16800, Accuracy: 0.6441532258064516, avg_obj: 1.662046370967742, elbo 0
test_out, N_iter 16800, Accuracy: 0.13451086956521738, avg_obj: 3.6161684782608696, elbo 0
train, N_iter 16900, Accuracy: 0.734375, avg_obj: 1.625, elbo 7486.0498046875
computing test acc
test_in, N_iter 16900, Accuracy: 0.6454133064516129, avg_obj: 1.651461693548387, elbo 0
test_out, N_iter 16900, Accuracy: 0.1324728260869565, avg_obj: 3.614130434782609, elbo 0
train, N_iter 17000, Accuracy: 0.671875, avg_obj: 1.53125, elbo 7652.72998046875
computing test acc
test_in, N_iter 17000, Accuracy: 0.6431451612903226, avg_obj: 1.6648185483870968, elbo 0
test_out, N_iter 17000, Accuracy: 0.15251358695652173, avg_obj: 3.6953125, elbo 0
train, N_iter 17100, Accuracy: 0.609375, avg_obj: 1.46875, elbo 7321.1181640625
computing test acc
test_in, N_iter 17100, Accuracy: 0.6436491935483871, avg_obj: 1.6678427419354838, elbo 0
test_out, N_iter 17100, Accuracy: 0.13179347826086957, avg_obj: 3.626358695652174, elbo 0
train, N_iter 17200, Accuracy: 0.65625, avg_obj: 1.5625, elbo 7426.7685546875
computing test acc
test_in, N_iter 17200, Accuracy: 0.6486895161290323, avg_obj: 1.6537298387096775, elbo 0
test_out, N_iter 17200, Accuracy: 0.13009510869565216, avg_obj: 3.590692934782609, elbo 0
train, N_iter 17300, Accuracy: 0.71875, avg_obj: 1.609375, elbo 7706.6572265625
computing test acc
test_in, N_iter 17300, Accuracy: 0.6486895161290323, avg_obj: 1.6388608870967742, elbo 0
test_out, N_iter 17300, Accuracy: 0.11786684782608696, avg_obj: 3.543138586956522, elbo 0
train, N_iter 17400, Accuracy: 0.6875, avg_obj: 1.40625, elbo 7730.5322265625
computing test acc
test_in, N_iter 17400, Accuracy: 0.6466733870967742, avg_obj: 1.6310483870967742, elbo 0
test_out, N_iter 17400, Accuracy: 0.11073369565217392, avg_obj: 3.4775815217391304, elbo 0
train, N_iter 17500, Accuracy: 0.625, avg_obj: 1.75, elbo 7481.93017578125
computing test acc
test_in, N_iter 17500, Accuracy: 0.6507056451612904, avg_obj: 1.611391129032258, elbo 0
test_out, N_iter 17500, Accuracy: 0.11311141304347826, avg_obj: 3.4966032608695654, elbo 0
train, N_iter 17600, Accuracy: 0.671875, avg_obj: 1.46875, elbo 7687.2587890625
computing test acc
test_in, N_iter 17600, Accuracy: 0.6549899193548387, avg_obj: 1.611391129032258, elbo 0
test_out, N_iter 17600, Accuracy: 0.10597826086956522, avg_obj: 3.4690896739130435, elbo 0
train, N_iter 17700, Accuracy: 0.6875, avg_obj: 1.578125, elbo 7615.126953125
computing test acc
test_in, N_iter 17700, Accuracy: 0.6522177419354839, avg_obj: 1.6330645161290323, elbo 0
test_out, N_iter 17700, Accuracy: 0.11311141304347826, avg_obj: 3.513247282608696, elbo 0
train, N_iter 17800, Accuracy: 0.640625, avg_obj: 1.53125, elbo 7755.91748046875
computing test acc
test_in, N_iter 17800, Accuracy: 0.6534778225806451, avg_obj: 1.6247479838709677, elbo 0
test_out, N_iter 17800, Accuracy: 0.10869565217391304, avg_obj: 3.5, elbo 0
train, N_iter 17900, Accuracy: 0.71875, avg_obj: 1.515625, elbo 8077.80712890625
computing test acc
test_in, N_iter 17900, Accuracy: 0.6496975806451613, avg_obj: 1.6265120967741935, elbo 0
test_out, N_iter 17900, Accuracy: 0.11345108695652174, avg_obj: 3.5394021739130435, elbo 0
train, N_iter 18000, Accuracy: 0.71875, avg_obj: 1.453125, elbo 7420.6337890625
computing test acc
test_in, N_iter 18000, Accuracy: 0.6456653225806451, avg_obj: 1.628024193548387, elbo 0
test_out, N_iter 18000, Accuracy: 0.11073369565217392, avg_obj: 3.5061141304347827, elbo 0
train, N_iter 18100, Accuracy: 0.65625, avg_obj: 1.671875, elbo 7442.10791015625
computing test acc
test_in, N_iter 18100, Accuracy: 0.6544858870967742, avg_obj: 1.5962701612903225, elbo 0
test_out, N_iter 18100, Accuracy: 0.10054347826086957, avg_obj: 3.4588994565217392, elbo 0
train, N_iter 18200, Accuracy: 0.625, avg_obj: 1.8125, elbo 7306.3056640625
computing test acc
test_in, N_iter 18200, Accuracy: 0.6433971774193549, avg_obj: 1.6559979838709677, elbo 0
test_out, N_iter 18200, Accuracy: 0.12907608695652173, avg_obj: 3.589673913043478, elbo 0
train, N_iter 18300, Accuracy: 0.671875, avg_obj: 1.484375, elbo 7554.189453125
computing test acc
test_in, N_iter 18300, Accuracy: 0.6446572580645161, avg_obj: 1.6645665322580645, elbo 0
test_out, N_iter 18300, Accuracy: 0.12907608695652173, avg_obj: 3.596127717391304, elbo 0
train, N_iter 18400, Accuracy: 0.5625, avg_obj: 1.46875, elbo 7747.86181640625
computing test acc
test_in, N_iter 18400, Accuracy: 0.6565020161290323, avg_obj: 1.6126512096774193, elbo 0
test_out, N_iter 18400, Accuracy: 0.10563858695652174, avg_obj: 3.492866847826087, elbo 0
train, N_iter 18500, Accuracy: 0.734375, avg_obj: 1.515625, elbo 7574.14404296875
computing test acc
test_in, N_iter 18500, Accuracy: 0.6489415322580645, avg_obj: 1.6391129032258065, elbo 0
test_out, N_iter 18500, Accuracy: 0.13043478260869565, avg_obj: 3.5981657608695654, elbo 0
train, N_iter 18600, Accuracy: 0.6875, avg_obj: 1.453125, elbo 8164.7998046875
computing test acc
test_in, N_iter 18600, Accuracy: 0.6464213709677419, avg_obj: 1.653225806451613, elbo 0
test_out, N_iter 18600, Accuracy: 0.12024456521739131, avg_obj: 3.5703125, elbo 0
train, N_iter 18700, Accuracy: 0.65625, avg_obj: 1.5625, elbo 7587.79345703125
computing test acc
test_in, N_iter 18700, Accuracy: 0.6469254032258065, avg_obj: 1.6600302419354838, elbo 0
test_out, N_iter 18700, Accuracy: 0.125, avg_obj: 3.596127717391304, elbo 0
train, N_iter 18800, Accuracy: 0.640625, avg_obj: 1.6875, elbo 7619.0283203125
computing test acc
test_in, N_iter 18800, Accuracy: 0.6514616935483871, avg_obj: 1.6476814516129032, elbo 0
test_out, N_iter 18800, Accuracy: 0.1297554347826087, avg_obj: 3.6015625, elbo 0
train, N_iter 18900, Accuracy: 0.75, avg_obj: 1.453125, elbo 7828.6640625
computing test acc
test_in, N_iter 18900, Accuracy: 0.6587701612903226, avg_obj: 1.6043346774193548, elbo 0
test_out, N_iter 18900, Accuracy: 0.10461956521739131, avg_obj: 3.4775815217391304, elbo 0
train, N_iter 19000, Accuracy: 0.703125, avg_obj: 1.453125, elbo 7920.3154296875
computing test acc
test_in, N_iter 19000, Accuracy: 0.6441532258064516, avg_obj: 1.6340725806451613, elbo 0
test_out, N_iter 19000, Accuracy: 0.11786684782608696, avg_obj: 3.5254755434782608, elbo 0
train, N_iter 19100, Accuracy: 0.65625, avg_obj: 1.90625, elbo 7197.0537109375
computing test acc
test_in, N_iter 19100, Accuracy: 0.6547379032258065, avg_obj: 1.6353326612903225, elbo 0
test_out, N_iter 19100, Accuracy: 0.11175271739130435, avg_obj: 3.5380434782608696, elbo 0
train, N_iter 19200, Accuracy: 0.75, avg_obj: 1.578125, elbo 7591.7431640625
computing test acc
test_in, N_iter 19200, Accuracy: 0.6544858870967742, avg_obj: 1.627016129032258, elbo 0
test_out, N_iter 19200, Accuracy: 0.12058423913043478, avg_obj: 3.5237771739130435, elbo 0
train, N_iter 19300, Accuracy: 0.6875, avg_obj: 1.65625, elbo 7657.939453125
computing test acc
test_in, N_iter 19300, Accuracy: 0.6444052419354839, avg_obj: 1.6625504032258065, elbo 0
test_out, N_iter 19300, Accuracy: 0.12907608695652173, avg_obj: 3.6110733695652173, elbo 0
train, N_iter 19400, Accuracy: 0.6875, avg_obj: 1.40625, elbo 7885.58642578125
computing test acc
test_in, N_iter 19400, Accuracy: 0.6587701612903226, avg_obj: 1.6154233870967742, elbo 0
test_out, N_iter 19400, Accuracy: 0.10835597826086957, avg_obj: 3.4921875, elbo 0
train, N_iter 19500, Accuracy: 0.734375, avg_obj: 1.390625, elbo 7970.1875
computing test acc
test_in, N_iter 19500, Accuracy: 0.6544858870967742, avg_obj: 1.6222278225806452, elbo 0
test_out, N_iter 19500, Accuracy: 0.11379076086956522, avg_obj: 3.5234375, elbo 0
train, N_iter 19600, Accuracy: 0.65625, avg_obj: 1.765625, elbo 7595.234375
computing test acc
test_in, N_iter 19600, Accuracy: 0.6401209677419355, avg_obj: 1.6814516129032258, elbo 0
test_out, N_iter 19600, Accuracy: 0.13417119565217392, avg_obj: 3.6396059782608696, elbo 0
train, N_iter 19700, Accuracy: 0.765625, avg_obj: 1.375, elbo 8216.3193359375
computing test acc
test_in, N_iter 19700, Accuracy: 0.65625, avg_obj: 1.6343245967741935, elbo 0
test_out, N_iter 19700, Accuracy: 0.11175271739130435, avg_obj: 3.493546195652174, elbo 0
train, N_iter 19800, Accuracy: 0.71875, avg_obj: 1.4375, elbo 8186.69384765625
computing test acc
test_in, N_iter 19800, Accuracy: 0.6620463709677419, avg_obj: 1.59375, elbo 0
test_out, N_iter 19800, Accuracy: 0.11548913043478261, avg_obj: 3.488111413043478, elbo 0
train, N_iter 19900, Accuracy: 0.65625, avg_obj: 1.765625, elbo 7842.7431640625
computing test acc
test_in, N_iter 19900, Accuracy: 0.6517137096774194, avg_obj: 1.6265120967741935, elbo 0
test_out, N_iter 19900, Accuracy: 0.11684782608695653, avg_obj: 3.546875, elbo 0
###Markdown
Look at the model after training
###Code
# Select a handful of examples (indices 25-29) from the batch to inspect.
mask = numpy.zeros(conf.batch_size, dtype=bool)  # numpy.bool is deprecated; the built-in bool works
mask[25:30] = 1
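# Run inference on a training batch, an in-distribution test batch, and an
# extrapolation ("test_out") batch, keeping only the masked examples.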
cur_values_train = sub_select_inference(mask, trainer.sess.run(trainer.tensors_of_interest, feed_dict={trainer.in_ph: x_train[:conf.batch_size]}))
cur_values_test = sub_select_inference(mask, trainer.sess.run(trainer.tensors_of_interest, feed_dict={trainer.in_ph: x_test[:conf.batch_size]}))
cur_values_test_out = sub_select_inference(mask, trainer.sess.run(trainer.tensors_of_interest, feed_dict={trainer.in_ph: x_test_out[:conf.batch_size]}))
img_train = x_train[:conf.batch_size][mask]
img_test = x_test[:conf.batch_size][mask]
img_test_out = x_test_out[:conf.batch_size][mask]
label_train = c_train[:conf.batch_size][mask]
label_test = c_test[:conf.batch_size][mask]
label_test_out = c_test_out[:conf.batch_size][mask]
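# Count the objects the model inferred per image: a z_pres value above 0.5 is
# treated here as "object present" (an assumption about this model's presence variable).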
n_obj_train = np.sum((cur_values_train['z_pres'] > 0.5), axis=-1)
n_obj_test = np.sum((cur_values_test['z_pres'] > 0.5), axis=-1)
n_obj_test_out = np.sum((cur_values_test_out['z_pres'] > 0.5), axis=-1)
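# Reconstruct each selected scene from the inferred latents; draw_boxes=True
# presumably overlays the predicted bounding boxes on the reconstructions.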
results_train = trainer.reconstruct_scenes(img_train, cur_values_train, draw_boxes=True)
results_test = trainer.reconstruct_scenes(img_test, cur_values_test, draw_boxes=True)
results_test_out = trainer.reconstruct_scenes(img_test_out, cur_values_test_out, draw_boxes=True)
# Persist the inference results and the corresponding input images for later analysis.
save_obj(cur_values_train, conf.dir_result+"/cur_values_train")
save_obj(cur_values_test, conf.dir_result+"/cur_values_test")
save_obj(cur_values_test_out, conf.dir_result+"/cur_values_test_out")
save_obj(img_train[:conf.batch_size], conf.dir_result+"/imgs_train")
save_obj(img_test[:conf.batch_size], conf.dir_result+"/imgs_test")
save_obj(img_test_out[:conf.batch_size], conf.dir_result+"/imgs_test_out")
###Output
_____no_output_____
###Markdown
Train
###Code
print(label_train)
b = grid_images(img_train, figsize=(12,3))
b.savefig(conf.dir_result+"/img_in_train.pdf")
b
print(n_obj_train)
b = grid_images(results_train, figsize=(12,3))
b.savefig(conf.dir_result+"/img_out_train.pdf")
b
###Output
[2 1 3 2 2]
###Markdown
Test
###Code
print(label_test)
b = grid_images(img_test, figsize=(12,3))
b.savefig(conf.dir_result+"/img_in_test.pdf")
b
print(n_obj_test)
b = grid_images(results_test, figsize=(12,3))
b.savefig(conf.dir_result+"/img_out_test.pdf")
b
###Output
[2 2 3 2 2]
###Markdown
Test_out
###Code
print(label_test_out)
b = grid_images(img_test_out, figsize=(12,3))
b.savefig(conf.dir_result+"/img_in_extrapolation.pdf")
b
# cur_values_test_out["z_pres"]
print(n_obj_test_out)
b = grid_images(results_test_out, figsize=(12,3))
b.savefig(conf.dir_result+"/img_out_extrapolation.pdf")
b
###Output
[6 3 3 5 3]
|
tutorials/Bayes/BayesDay_Tutorial_1.ipynb | ###Markdown
Neuromatch Academy 2020 -- Bayes Day (dry run)

Tutorial 1 - Bayes rule with Gaussians

Please execute the cell below to initialize the notebook environment.
###Code
# @title
import time # import time
import numpy as np # import numpy
import scipy as sp # import scipy
import math # import basic math functions
import random # import basic random number generator functions
import matplotlib.pyplot as plt # import matplotlib
from IPython import display
fig_w, fig_h = (6, 4)
plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})
plt.style.use('ggplot')
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
###Output
_____no_output_____
###Markdown
---

Tutorial objectives

In this notebook we'll look at using *Bayes rule* with *Gaussian distributions*. That is, given a prior probability distribution and a likelihood distribution, we will compute the posterior using Bayes rule, and play with different likelihoods and priors to get a good intuition of how each affects the posterior distribution.

* Given Bayes rule, a Gaussian likelihood and prior, calculate the posterior distribution.
* Change the likelihood mean and variance and observe how the posterior changes.
* Advanced: observe what happens if the prior is a mixture of two Gaussians.

---

__Insert cover story here with figures, potentially with a GIF__

Bayes rule

\begin{eqnarray}
Posterior = \frac{ Likelihood \quad \times \quad Prior}{ Normalization \quad constant}
\end{eqnarray}

Mathematically, if both the likelihood and the prior are Gaussian, this translates into:

\begin{eqnarray}
Likelihood = \mathcal{N}(\mu_{likelihood},\sigma_{likelihood}^2) = \frac{1}{\sqrt{2\pi\sigma^2_{likelihood}}}\exp\left(\frac{-(x-\mu_{likelihood})^2}{2\sigma^2_{likelihood}}\right)
\end{eqnarray}

\begin{eqnarray}
Prior = \mathcal{N}(\mu_{prior},\sigma_{prior}^2) = \frac{1}{\sqrt{2\pi\sigma^2_{prior}}}\exp\left(\frac{-(x-\mu_{prior})^2}{2\sigma^2_{prior}}\right)
\end{eqnarray}

\begin{eqnarray}
Posterior \propto \mathcal{N}(\mu_{likelihood},\sigma_{likelihood}^2) \times \mathcal{N}(\mu_{prior},\sigma_{prior}^2) \propto \mathcal{N}\left( \frac{\sigma^2_{likelihood}\mu_{prior}+\sigma^2_{prior}\mu_{likelihood}}{\sigma^2_{likelihood}+\sigma^2_{prior}}, \frac{\sigma^2_{likelihood}\sigma^2_{prior}}{\sigma^2_{likelihood}+\sigma^2_{prior}} \right)
\end{eqnarray}

where $\mathcal{N}(\mu,\sigma^2)$ denotes a Gaussian distribution with mean $\mu$ and variance $\sigma^2$.

---

EXERCISE 1: Bayes rule with Gaussians: Computation

We have a Gaussian auditory likelihood (in red) and a Gaussian visual likelihood (in blue), and we want to combine the two to generate our posterior using Bayes rule. We provide you with a ready-to-use plotting function and a code skeleton. A hedged reference sketch is included at the end of the code cell below.

**Suggestions**
* Fill in the equation for the un-normalised (without the constant) Gaussian in the skeleton function `my_gaussian`.
* Generate an auditory likelihood with parameters $\mu$ = 3 and $\sigma$ = 1.5.
* Generate a visual likelihood with parameters $\mu$ = -1 and $\sigma$ = 1.5.
* Calculate the posterior using pointwise multiplication of the auditory and visual likelihoods (don't forget to normalize).
* Plot the likelihoods and posterior using the predefined function `my_plot`.
* Now change the variance of the visual likelihood to 0.5. See how a visual likelihood that is more precise (tighter) than the auditory one results in a posterior weighted more heavily towards the most precise source of information.
###Code
def my_plot(x, auditory=None, visual=None, posterior_pointwise=None):
"""
Plots normalized Gaussian distributions and posterior
DO NOT EDIT THIS FUNCTION !!!
Args:
x (numpy array of floats): points at which the likelihood has been evaluated
auditory (numpy array of floats): normalized probabilities for auditory likelihood evaluated at each `x`
visual (numpy array of floats): normalized probabilities for visual likelihood evaluated at each `x`
posterior (numpy array of floats): normalized probabilities for the posterior evaluated at each `x`
Returns:
Nothing.
"""
if auditory is None:
auditory = np.zeros_like(x)
if visual is None:
visual = np.zeros_like(x)
if posterior_pointwise is None:
posterior_pointwise = np.zeros_like(x)
    plt.plot(x, auditory, '-r', linewidth=2, label='Auditory')
    plt.plot(x, visual, '-b', linewidth=2, label='Visual')
    plt.plot(x, posterior_pointwise, '-g', linewidth=2, label='Posterior')
plt.legend()
plt.ylabel('Probability')
plt.xlabel('Orientation (Degrees)')
def my_gaussian(x_points, mu, sigma):
"""
Returns un-normalized Gaussian estimated at points `x_points`, with parameters: mean `mu` and std `sigma`
Args:
x_points (numpy array of floats): points at which the gaussian is evaluated
mu (scalar): mean of the Gaussian
sigma (scalar): std of the gaussian
Returns:
(numpy array of floats) : un-normalized Gaussian (i.e. without constant) evaluated at `x`
"""
###################################################################
## Insert your code here to fill with the equation for the gaussian
## Function Hints: exp -> np.exp()
## power -> z**2
###################################################################
# Calculate the gaussian as a function of mu and sigma, for each x (incl. hints )
raise NotImplementedError("You need to implement the Gaussian function!")
x = np.arange(-8,9,0.1)
################################################################################
## Insert your code here to:
## create a gaussian called 'auditory' with mean 3, and std 1.5
## create a gaussian called 'visual' with mean -1, and std 1.5
## calculate the posterior by multiplying (pointwise) the 'auditory' and 'visual' gaussians
## (Hint: Do not forget to normalise the gaussians before plotting them)
## plot the distributions using the function `my_plot`
################################################################################
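# ---------------------------------------------------------------------------
# Hedged reference sketch: one possible way to complete the steps above, shown
# only as an illustration. It uses its own helper `_reference_gaussian` so the
# `my_gaussian` skeleton above is left for you to fill in yourself.
def _reference_gaussian(x_points, mu, sigma):
    # un-normalized Gaussian: exp(-(x - mu)^2 / (2 * sigma^2))
    return np.exp(-(x_points - mu) ** 2 / (2 * sigma ** 2))

_auditory = _reference_gaussian(x, 3, 1.5)
_auditory = _auditory / _auditory.sum()      # normalize so the values sum to 1
_visual = _reference_gaussian(x, -1, 1.5)
_visual = _visual / _visual.sum()
_posterior = _auditory * _visual             # Bayes rule: pointwise product
_posterior = _posterior / _posterior.sum()   # re-normalize the product
my_plot(x, _auditory, _visual, _posterior)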
###Output
_____no_output_____
###Markdown
---

EXERCISE 2: Bayes rule with Gaussians: Intuition

Now that we can compute *Bayes rule* with two *Gaussians*, let's keep the auditory likelihood fixed straight ahead (mean = 0) and play around with the visual stimulus position (mean) to see how that affects the posterior. Observe how the posterior changes as a function of both the position of the likelihood with respect to the prior and the relative weight of the likelihood with respect to the prior. A hedged reference sketch is included at the end of the code cell below.

**Suggestions**
* Keep the prior constant, with mean 0 and standard deviation 1.5.
* Keep the standard deviation of the visual stimulus at 1.5 (matching the code comments below), and allow the mean of the likelihood to vary from -8 to 8 in steps of 0.2 degrees.
* In a loop, calculate the posterior for each new visual stimulus and call the `my_dynamic_plot` function to plot it.
* Calculate the mean of the posterior and plot it against the visual stimulus mean. What do you observe?
###Code
def my_dynamic_plot(x, auditory, visual, posterior_pointwise):
"""
DO NOT EDIT THIS FUNCTION !!!
Plots the auditory, visual and posterior distributions and update the figure every .2 seconds
Args:
x (numpy array of floats): points at which the likelihood has been evaluated
auditory (numpy array of floats): normalized probabilities for auditory likelihood evaluated at each `x`
visual (numpy array of floats): normalized probabilities for visual likelihood evaluated at each `x`
posterior (numpy array of floats): normalized probabilities for the posterior evaluated at each `x`
Returns:
Nothing
"""
plt.clf()
    plt.plot(x, auditory, '-r', linewidth=2, label='Auditory')
    plt.plot(x, visual, '-b', linewidth=2, label='Visual')
    plt.plot(x, posterior_pointwise, '-g', linewidth=2, label='Posterior')
plt.ylabel('Probability')
plt.xlabel('Orientation (Degrees)')
plt.legend()
display.clear_output(wait=True)
display.display(plt.gcf())
time.sleep(0.2)
x = np.arange(-10,11,0.1)
###############################################################################
## Insert your code here to:
## create a gaussian called 'auditory' with mean 0, and std 1.5
## create a gaussian called 'visual' with std 1.5, and mean varying from -8 to 9 in increments of 0.2 Degrees
## calculate the posterior by multiplying (pointwise) the 'auditory' and 'visual' gaussians
## (Hint: Do not forget to normalise the gaussians before plotting them)
## plot the distributions using the function `my_dynamic_plot`
## plot the posterior mean as a function of visual's mean
###############################################################################
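# --- Illustrative aside (a sketch, not the full exercise) ---
# With two Gaussians, the posterior mean is a precision-weighted average of the two means,
# so it varies linearly with the visual (likelihood) mean. This uses the analytic formula
# from the top of the notebook with equal standard deviations of 1.5; the names are local.
import numpy as np
_mu_aud, _sig_aud, _sig_vis = 0.0, 1.5, 1.5
_mu_vis = np.arange(-8, 8.2, 0.2)
_mu_post = (_sig_vis ** 2 * _mu_aud + _sig_aud ** 2 * _mu_vis) / (_sig_aud ** 2 + _sig_vis ** 2)
print(_mu_post[:5])  # with equal variances the posterior mean sits halfway between 0 and each visual mean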
###Output
_____no_output_____
###Markdown
--- ADVANCED Exercise: Multimodal priorsBayes rule works the same way for cue combination (auditory + visual) as it does with a prior and likelihood.What do you think is going to happen to the posterior if we were to use a multimodal prior instead of a single Gaussian (i.e. a prior with multiple peaks)?**Suggestions*** Create a bi-modal prior by summing two Gaussians centered on -3 and 3 respectively, with $\sigma_{prior}$ = 1* Similarly to the previous exercise, allow the mean of the likelihood to vary and plot the prior, likelihood and posterior over time using the function `my_dynamic_plot`. - Observe what happens to the posterior as the likelihood gets closer to the different peaks of the prior. - Notice what happens to the posterior when the likelihood is exactly in between the two modes of the prior (i.e. $\mu_{Likelihood}$ = 0)* Plot the mode of the posterior as a function of the visual stimulus mean. - What do you observe? How does it compare to the previous exercise?
###Code
x = np.arange(-10,11,0.1)
################################################################################
## Insert your code here
## Reuse your code from Exercise 2, but replace the prior with a bimodal prior
## by summing two Gaussians with variance = 1, and means [-3, 3] respectively
################################################################################
###Output
_____no_output_____ |
curriculum/unit-1-statistics-fundamentals/sprint-3-linear-algebra/module4-clustering/module-3.ipynb | ###Markdown
Vertical Line Test
###Code
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
1.1 Create two graphs, one that passes the vertical line test and one that does not.
###Code
plt.axhline(y=2)
plt.title("passes the vertical line test")
plt.show()
plt.axvline(x=2)
plt.title("fails the vertical line test")
plt.show()
###Output
_____no_output_____
###Markdown
1.2 Why are graphs that don't pass the vertical line test not considered "functions?" A function cannot have the same input (x value/domain value) mapped to multiple outputs (y value/co-domain value). Functions as Relations 2.1 Which of the following relations are functions? Why?\begin{align}\text{Relation 1: } \{(1, 2), (3, 2), (1, 3)\}\\\text{Relation 2: } \{(1, 3), (2, 3), (6, 7)\}\\\text{Relation 3: } \{(9, 4), (2, 1), (9, 6)\}\\\text{Relation 4: } \{(6, 2), (8, 3), (6, 4)\}\\\text{Relation 5: } \{(2, 6), (2, 7), (2, 4)\}\end{align} Relations 2 is the only one here that is a function because there are not repeated x values. In other words, there are no two different y values that are mapped to by the same x value. Functions as a mapping between dimensions 3.1 for the following functions what is the dimensionality of the domain (input) and codomain (range/output)?\begin{align}m(๐ฅ_1,๐ฅ_2,๐ฅ_3)=(x_1+x_2, x_1+x_3, x_2+x_3)\\n(๐ฅ_1,๐ฅ_2,๐ฅ_3,๐ฅ_4)=(x_2^2 + x_3, x_2x_4)\end{align} function m: 3D -> 3Dfunction n: 4D -> 2D 3.2 Do you think it's possible to create a function that maps from a lower dimensional space to a higher dimensional space? If so, provide an example.Yes this is possible. For more information on functions from lower dimensions of space that map to higher dimensions of space, google the terms injective and surjective in relation to Linear Transformations:Example:$f(x) = (x, x+1)$ Vector Transformations 4.1 Plug the corresponding unit vectors into each function. Use the output vectors to create a transformation matrix.\begin{align}p(\begin{bmatrix}x_1 \\ x_2 \end{bmatrix}) = \begin{bmatrix} x_1 + 3x_2 \\2 x_2 - x_1 \\ \end{bmatrix}\\\\q(\begin{bmatrix}x_1 \\ x_2 \\ x_3\end{bmatrix}) = \begin{bmatrix} 4x_1 + x_2 + 2x_3 \\2 x_2 - x_1 + 3x_3 \\ 5x_1 - 2x_3 + x_2 \end{bmatrix}\end{align} ---\begin{align}p(\begin{bmatrix} 1 \\ 0 \end{bmatrix}) = \begin{bmatrix} x_1 + 3x_2 \\ -x_1+ 2x_2 \\ \end{bmatrix} = \begin{bmatrix} 1 \\ -1 \end{bmatrix}\end{align}\begin{align}p(\begin{bmatrix} 0 \\ 1 \end{bmatrix}) = \begin{bmatrix} x_1 + 3x_2 \\ -x_1+ 2x_2 \\ \end{bmatrix} = \begin{bmatrix} 3 \\ 2 \end{bmatrix}\end{align}\begin{align}T = \begin{bmatrix} 1 & 3 \\ -1 & 2 \end{bmatrix}\end{align}---\begin{align}q(\begin{bmatrix}1 \\ 0 \\ 0 \end{bmatrix}) = \begin{bmatrix} 4x_1 + x_2 + 2x_3 \\ -x_1 + 2x_2 + 3x_3 \\ 5x_1 + x_2 - 2x_3 \end{bmatrix} = \begin{bmatrix} 4 \\ -1 \\ 5\end{bmatrix}\end{align}\begin{align}q(\begin{bmatrix}0 \\ 1 \\ 0 \end{bmatrix}) = \begin{bmatrix} 4x_1 + x_2 + 2x_3 \\ -x_1 + 2x_2 + 3x_3 \\ 5x_1 + x_2 - 2x_3 \end{bmatrix}= \begin{bmatrix} 1 \\ 2 \\ 1 \end{bmatrix}\end{align}\begin{align}q(\begin{bmatrix}0 \\ 0 \\ 1 \end{bmatrix}) = \begin{bmatrix} 4x_1 + x_2 + 2x_3 \\ -x_1 + 2x_2 + 3x_3 \\ 5x_1 + x_2 - 2x_3 \end{bmatrix}= \begin{bmatrix} 2 \\ 3 \\ -2\end{bmatrix}\end{align}\begin{align}T = \begin{bmatrix} 4 & 1 & 2 \\ -1 & 2 & 3 \\ 5 & 1 & -2 \end{bmatrix}\end{align}--- 4.2 Verify that your transformation matrices are correct by choosing an input matrix and calculating the result both via the traditional functions above and also via vector-matrix multiplication.
###Code
# Transformation Matrix
T1 = np.array([[1,3],[-1,2]])
# Input Vector
v1 = np.array([2,3])
# Product found by hand-calculating
product_by_hand_1 = [11, 4]
# Product found with NumPy
np.matmul(T1, v1)
# Transformation Matrix
T2 = np.array([[4,1,2],[-1,2,3],[5,1,-2]])
# Input Vector
v2 = np.array([1,2,3])
# Product found by hand-calculating
product_by_hand_2 = [12, 12, 1]
# Product found with NumPy
np.matmul(T2, v2)
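# Optional cross-check (a sketch): confirm the hand-computed products match NumPy's results.
print(np.allclose(product_by_hand_1, np.matmul(T1, v1)))  # expected: True
print(np.allclose(product_by_hand_2, np.matmul(T2, v2)))  # expected: True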
###Output
_____no_output_____
###Markdown
Eigenvalues and Eigenvectors 5.1 In your own words, give an explanation for the intuition behind eigenvalues and eigenvectors. An eigenvector is a vector that does not change orientation (direction in which it is pointing) during a linear transformation. An eigenvector always comes paired with a corresponding eigenvalue. Every linear transformation has at least one eigenvector-eigenvalue pair. The eigenvalue describes how much the eigenvector gets scaled by (stretched or squished) during the transformation. The Curse of Dimensionality 6.1 What are some of the challenges of working with high dimensional spaces? - Increased computational inefficiency (searching)- Increased data redundancy with increasing number of columns.- More difficult to visualize/explore the data.- Measures of euclidean distance breaks down in high-dimensional spaces- Increased sparcity of data.- As the number of parameters in a model grows while number of available observations remains fixed, overfitting a predictive model becomes more certain. 6.2 What is the rule of thumb for how many observations you should have compared to parameters in your model? You should have 5x as many observations (rows) as you do parameters (columns) in a machine learning model. Principal Component Analysis 7.1 Code for loading and cleaning the 2013 national dataset from the [Housing Affordability Data System (HADS)](https://www.huduser.gov/portal/datasets/hads/hads.html) --housing data, can be found below. Perform PCA on the processed dataset `national_processed` (Make sure you standardize your data!) and then make a scatterplot of PC1 against PC2. Some of our discussion and work around PCA with this dataset will continue during tomorrow's lecture and assignment. The code below will read in the dataset and perform categorical encoding of the categorical variables to ensure that we're only working with numeric columns from our dataset. Start adding your PCA code at the bottom of the provided code.
###Code
from urllib.request import urlopen
from zipfile import ZipFile
from io import BytesIO
import os.path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Read National Data
national_url = 'https://www.huduser.gov/portal/datasets/hads/hads2013n_ASCII.zip'
national_file = 'thads2013n.txt'
if os.path.exists(national_file):
national = pd.read_csv(national_file)
else:
z_national = urlopen(national_url)
zip_national = ZipFile(BytesIO(z_national.read())).extract(national_file)
national = pd.read_csv(zip_national)
print(national.shape)
national.head()
# Look at datatypes
# a lot of object datatypes even though they seem to be strings of numbers.
national.dtypes
# check for null values
national.isnull().sum().any()
# check for number of categorical vs numeric columns
cat_cols = national.columns[national.dtypes=='object']
num_cols = national.columns[national.dtypes!='object']
print(f'{len(cat_cols)} categorical columns')
print(f'{len(num_cols)} numerical columns')
# We're making a copy of our data in case we mess something up.
national_processed = national.copy()
# Categorically Encode our Variables:
# They need to all be numeric before we do PCA.
# https://pbpython.com/categorical-encoding.html
# Cast categorical columns to "category" data type
national_processed[cat_cols] = national_processed[cat_cols].astype('category')
national_processed.dtypes
# Replace all category cell values with their numeric category codes
for col in cat_cols:
national_processed[col] = national_processed[col].cat.codes
print(national_processed.shape)
national_processed.head()
# Now we only have numeric columns (ints and floats)
national_processed.dtypes
###Output
_____no_output_____
###Markdown
---
###Code
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# There isn't a super clear Y variable in this dataset, so we'll just
# pretend that our whole dataset is our X matrix for now.
# Make a copy to save our work at this checkpoint
df = national_processed.copy()
# Turn our dataframe into a numpy array
X = df.values
# instantiate our standard scaler object
scaler = StandardScaler()
# Standardize our data
Z = scaler.fit_transform(X)
# Instantiate our PCA object
pca = PCA(2)
transformed_data = pca.fit_transform(Z)
transformed_data
plt.scatter(transformed_data[:,0], transformed_data[:,1])
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.title("Principal Component 1 vs Principal Component 2")
plt.show()
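# Tie-in with the eigenvalue discussion above (a quick, optional look): the variance captured by
# each principal component is an eigenvalue of the covariance matrix of the standardized data,
# and explained_variance_ratio_ expresses each one as a fraction of the total variance.
print(pca.explained_variance_)        # the two largest eigenvalues
print(pca.explained_variance_ratio_)  # fraction of total variance explained by PC1 and PC2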
###Output
_____no_output_____ |
ch10/ch10-exercise-solutions.ipynb | ###Markdown
[Learn Quantum Computing with Python and Q#](https://www.manning.com/books/learn-quantum-computing-with-python-and-q-sharp?a_aid=learn-qc-granade&a_bid=ee23f338)Chapter 10 Exercise Solutions----> Copyright (c) Sarah Kaiser and Chris Granade.> Code sample from the book "Learn Quantum Computing with Python and Q#" by> Sarah Kaiser and Chris Granade, published by Manning Publications Co.> Book ISBN 9781617296130.> Code licensed under the MIT License. Exercise 10.1 **Use QuTiP to verify that the two operations `ApplyCNOT` and `ApplyCNOTTheOtherWay` can be simulated by the same unitary matrix, and thus do the exact same thing.** This first snippet shows the unitary matrix for `ApplyCNOT`, which is equivalent to the QuTiP function `cnot`.
###Code
from qutip.qip.operations import cnot
cnot()
###Output
_____no_output_____
###Markdown
The matrix above is the same as the one below which represents surrounding a `CNOT` operation with `H` on all qubits, and flipping the control and target qubits.
###Code
from qutip.tensor import tensor
from qutip.qip.operations import hadamard_transform
(
tensor([hadamard_transform(), hadamard_transform()])
* cnot(None,1,0)
* tensor([hadamard_transform(), hadamard_transform()])
)
###Output
_____no_output_____
###Markdown
---- Exercise 10.2 **Just as you can use three classical `XOR` instructions to implement an in-place classical swap, you can use three `CNOT` operations to do the same thing as a single `SWAP` operation.In fact, the following Q# snippet does the same thing as `SWAP(left, right)`:**```CNOT(left, right);CNOT(right, left);CNOT(left, right);```**Double-check that this is the same as `SWAP(left, right)`, both by using `AssertOperationsEqualReferenced`, and by using QuTiP.**
###Code
import qsharp
qsharp.compile("""
operation SwapWithCnot(pair : Qubit[]) : Unit {
CNOT(pair[0], pair[1]);
CNOT(pair[1], pair[0]);
CNOT(pair[0], pair[1]);
}
operation SwapDirectly(pair : Qubit[]) : Unit is Adj {
SWAP(pair[0], pair[1]);
}
""")
assert_swap = qsharp.compile("""
open Microsoft.Quantum.Diagnostics;
operation AssertSwapEqualsThreeCnot() : Unit {
AssertOperationsEqualReferenced(2, SwapWithCnot, SwapDirectly);
}
""")
assert_swap.simulate()
###Output
_____no_output_____
###Markdown
> **Extra credit**: `SWAP(left, right)` is the same as `SWAP(right, left)`, so the snippet above should work even if you start with `CNOT(right, left)` instead. Double-check that!
###Code
qsharp.compile("""
operation ReverseSwapWithCnot(pair : Qubit[]) : Unit{
CNOT(pair[1], pair[0]);
CNOT(pair[0], pair[1]);
CNOT(pair[1], pair[0]);
}
operation ReverseSwapDirectly(pair : Qubit[]) : Unit is Adj {
SWAP(pair[1], pair[0]);
}
""")
assert_swap_reverse = qsharp.compile("""
open Microsoft.Quantum.Diagnostics;
operation AssertSwapEqualsThreeCnot() : Unit {
AssertOperationsEqualReferenced(2, ReverseSwapWithCnot, ReverseSwapDirectly);
}
""")
assert_swap_reverse.simulate()
###Output
_____no_output_____
###Markdown
---- Exercise 10.3 **Using QuTiP, check that when run on two-qubit registers, the two programs from the listing below can be simulated by the same unitary matrix and thus do the same thing to their input registers.**```open Microsoft.Quantum.Diagnostics;operation ApplyXUsingCNOTs(register : Qubit[]): Unit is Adj + Ctl { within { ApplyToEachCA( CNOT(register[0], _), register[1...] ); } apply { X(register[0]); }}operation CheckThatThisWorks() : Unit { AssertOperationsEqualReferenced(2, ApplyXUsingCNOTs, ApplyToEachCA(X, _) ); Message("Woohoo!");}```
###Code
from qutip import sigmax, qeye
from qutip.tensor import tensor
from qutip.qip.operations import cnot
from functools import reduce
def apply_x_using_cnot(n : int):
within = reduce((lambda x, y: y * x), [cnot(n, 0, i) for i in range(1, n)])
return within.dag() * tensor([sigmax()] + [qeye(2)] * (n - 1)) * within
def apply_to_each_x(n : int):
return tensor([sigmax()] * n)
print(apply_x_using_cnot(3))
print(apply_to_each_x(3))
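# Optional sanity check (a sketch, assuming NumPy is available in this environment):
# compare the two operators entry-by-entry instead of eyeballing the printed matrices.
import numpy as np
print(np.allclose(apply_x_using_cnot(3).full(), apply_to_each_x(3).full()))  # expected: True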
###Output
_____no_output_____
###Markdown
You can see that the above two matrices are the same and thus represent the same operation. ---- Exercise 10.4 **Try modifying the listing from exercise 10.3 to see if both programs are equivalent when applied to more than two qubits.**> **NOTE:** It can be pretty expensive to use `AssertOperationsEqualReferenced` for more than a few qubits.
###Code
[_, check_three_qubit, check_eight_qubit] = qsharp.compile("""
open Microsoft.Quantum.Diagnostics;
operation ApplyXUsingCNOTs(register : Qubit[]) : Unit is Adj + Ctl {
within {
ApplyToEachCA(
CNOT(register[0], _),
register[1...]
);
} apply {
X(register[0]);
}
}
operation CheckThatThisWorks3() : Unit {
AssertOperationsEqualReferenced(3,
ApplyXUsingCNOTs,
ApplyToEachCA(X, _)
);
Message("Woohoo!");
}
operation CheckThatThisWorks8() : Unit {
AssertOperationsEqualReferenced(8,
ApplyXUsingCNOTs,
ApplyToEachCA(X, _)
);
Message("Woohoo!");
}
""")
check_three_qubit.simulate()
check_eight_qubit.simulate()
###Output
_____no_output_____
###Markdown
For at least a small sample, the assert succeeds and so you know the two programs are the same. It turns out that they will always be the same, no matter the number of qubits used. ---- Exercise 10.5 **Try preparing your register in states other than $\left|00\right\rangle$ before calling `ApplyRotationAboutXX`.Does your operation do what you expected?**> **HINT:** Recall from Part I that you can prepare a copy of the $\left|1\right\rangle$ state by applying an `X` operation, and that you can prepare $\left|+\right\rangle$ by applying an `H` operation.
###Code
qsharp.compile("""
operation ApplyRotationAboutXX(
angle : Double, register : Qubit[]
) : Unit is Adj + Ctl {
within {
CNOT(register[0], register[1]);
} apply {
Rx(angle, register[0]);
}
}
""")
rotate_zeros_about_xx = qsharp.compile("""
open Microsoft.Quantum.Diagnostics;
operation RotateZeroAboutXX(angle : Double) : Unit {
use register = Qubit[2];
ApplyRotationAboutXX(angle, register);
DumpMachine();
Message("\n");
ResetAll(register);
}
""")
rotate_plus_about_xx = qsharp.compile("""
open Microsoft.Quantum.Diagnostics;
operation RotatePlusAboutXX(angle : Double) : Unit {
use register = Qubit[2];
ApplyToEachCA(H, register);
ApplyRotationAboutXX(angle, register);
DumpMachine();
Message("\n");
ResetAll(register);
}
""")
rotate_ones_about_xx = qsharp.compile("""
open Microsoft.Quantum.Diagnostics;
operation RotateOnesAboutXX(angle : Double) : Unit {
use register = Qubit[2];
ApplyToEachCA(X, register);
ApplyRotationAboutXX(angle, register);
DumpMachine();
Message("\n");
ResetAll(register);
}
""")
###Output
_____no_output_____
###Markdown
With that Q# code compiled, make a table of the resulting states for a range of angles from 0 to $2\pi$. Here we started with the $|00\rangle$ state for comparison.
###Code
import numpy as np
[rotate_zeros_about_xx.simulate(angle=a * np.pi / 4) for a in range(8)]
###Output
_____no_output_____
###Markdown
Now repeat the rotation angles but start with the state $|++\rangle$.
###Code
import numpy as np
[rotate_plus_about_xx.simulate(angle=a * np.pi / 4) for a in range(0, 8)]
###Output
_____no_output_____
###Markdown
Note that these are all equivalent up to a global phase; rotating the $|++\rangle$ state around the $XX$-axis doesn't do anything. One more time, repeat the angles with the initial state of $|11\rangle$.
###Code
import numpy as np
[rotate_ones_about_xx.simulate(angle=a * np.pi / 4) for a in range(0, 8)]
###Output
_____no_output_____
###Markdown
---- Exercise 10.6 **Try using `DumpMachine` to explore how the `Rx` operation acts on a single qubit, and compare to the two-qubit rotation about the $X \otimes X$ axis that you implemented in the snippet below.How are the two rotation operations similar, and how do they differ?**```open Microsoft.Quantum.Diagnostics;open Microsoft.Quantum.Math;operation ApplyRotationAboutXX(angle : Double, register : Qubit[]): Unit is Adj + Ctl { within { CNOT(register[0], register[1]); } apply { Rx(angle, register[0]); }}operation DumpXXRotation() : Unit { let angle = PI() / 2.0; use register = Qubit[2]; ApplyRotationAboutXX(angle, register); DumpMachine(); ResetAll(register);}```
###Code
[_, dump_rx_rotation, dump_xx_rotation] = qsharp.compile("""
open Microsoft.Quantum.Diagnostics;
open Microsoft.Quantum.Math;
operation ApplyRotationAboutXX(
angle : Double, register : Qubit[]
) : Unit is Adj + Ctl {
within {
CNOT(register[0], register[1]);
} apply {
Rx(angle, register[0]);
}
}
operation DumpRxRotation(angle : Double) : Unit {
use q = Qubit();
Rx(angle, q);
DumpMachine();
Message("\n");
Reset(q);
}
operation DumpXXRotation(angle : Double) : Unit {
use register = Qubit[2];
ApplyRotationAboutXX(angle, register);
DumpMachine();
Message("\n");
ResetAll(register);
}
""")
import numpy as np
[dump_rx_rotation.simulate(angle=a * np.pi / 4) for a in range(0, 8)]
[dump_xx_rotation.simulate(angle=a * np.pi / 4) for a in range(0, 8)]
###Output
_____no_output_____
###Markdown
The rotations are similar in that the first state ($|0\rangle$ or $|00\rangle$) and the last state ($|1\rangle$ or $|11\rangle$) end up with the same amplitudes. They are obviously different in that they operate on different numbers of qubits. **Compare rotating about the $X \otimes X$ axis with applying an `Rx` operation to each qubit in a two-qubit register.**
###Code
dump_rxrx_rotation = qsharp.compile("""
open Microsoft.Quantum.Diagnostics;
open Microsoft.Quantum.Math;
operation DumpRxRxRotation(angle : Double) : Unit {
use register = Qubit[2];
ApplyToEach(Rx(angle, _), register);
DumpMachine();
Message("\n");
ResetAll(register);
}
""")
[dump_rxrx_rotation.simulate(angle=a * np.pi / 4) for a in range(0, 8)]
###Output
_____no_output_____
###Markdown
You can see here that applying the `Rx` operation to each of the two qubits in a register is emphatically _not_ the same as rotating about the $XX$-axis. ---- Epilogue_The following cell logs what version of the components this was last tested with._
###Code
qsharp.component_versions()
###Output
Preparing Q# environment...
|
notebooks/10_experiments/3DOF_manoeuvre.ipynb | ###Markdown
3 Degrees Of Freedom (DOF)
###Code
%matplotlib inline
import sympy as sym
import sympy.physics.mechanics as me
from sympy import Abs,sin,cos,tan
from sympy import init_printing
init_printing(use_latex='mathjax')
from pydy.system import System
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
###Code
x0,y0,psi,x,y,beta,x,y,u,v,r,V = me.dynamicsymbols(r'x0 y0 \psi x y \beta x y u v r V')
m,Izz,t,x_cg = sym.symbols(r'm Izz t x_{cg}')
earth_frame = me.ReferenceFrame(name = 'E')
ship_frame = me.ReferenceFrame(name = 'S')
ship_frame.orient(parent = earth_frame,rot_type='Axis',amounts = (psi,earth_frame.z))
#ship_frame.set_ang_vel(otherframe=earth_frame,value = r*earth_frame.z)
ship_frame.dcm(otherframe=earth_frame)
O0 = me.Point('O0')
O0.set_vel(frame = earth_frame,value = 0)
O = me.Point('O')
#O.set_pos(otherpoint=O0,value = x0*earth_frame.x + y0*earth_frame.y)
O.set_pos(otherpoint=O0,value = x*ship_frame.x + y*ship_frame.y)
O.set_vel(frame = ship_frame,value = u*ship_frame.x + v*ship_frame.y)
O.v1pt_theory(otherpoint=O0,outframe=earth_frame,interframe=ship_frame)
O.pos_from(O0)
O.vel(earth_frame).express(earth_frame)
O.acc(earth_frame).express(earth_frame)
CG = me.Point('CG')
CG.set_pos(O,value = x_cg*ship_frame.x)
CG.v2pt_theory(otherpoint=O,outframe=earth_frame,fixedframe=ship_frame)
CG.acc(earth_frame).express(earth_frame)
kinematical_differential_equations = [u - x.diff(),
v - y.diff(),
r - psi.diff()]
coordinates = [x,y,psi,]
speeds = [u,v,r,]
kane = me.KanesMethod(earth_frame,coordinates, speeds,kinematical_differential_equations)
###Output
_____no_output_____
###Markdown
Manoeuvre derivatives
###Code
delta_R = sym.symbols('\delta_{R}')
eta = sym.symbols('\eta')
Xvv = sym.symbols('X_{vv}')
Xvr = sym.symbols('X_{vr}')
Xvveta = sym.symbols('X_{vv\eta}')
Xdd = sym.symbols('X_{\delta\delta}')
Xddetaeta = sym.symbols('X_{\delta\delta\eta\eta}')
# X
Xudot = sym.symbols('X_{\dot{u}}')
a_i = sym.symbols('a_{i}')
b_i = sym.symbols('b_{i}')
c_i = sym.symbols('c_{i}')
a1 = sym.symbols('a_{1}')
b1 = sym.symbols('b_{1}')
c1 = sym.symbols('c_{1}')
a2 = sym.symbols('a_{2}')
b2 = sym.symbols('b_{2}')
c2 = sym.symbols('c_{2}')
a3 = sym.symbols('a_{3}')
b3 = sym.symbols('b_{3}')
c3 = sym.symbols('c_{3}')
a4 = sym.symbols('a_{4}')
b4 = sym.symbols('b_{4}')
c4 = sym.symbols('c_{4}')
Xrr = sym.symbols('X_{rr}')
Xvv = sym.symbols('X_{vv}')
Xvr = sym.symbols('X_{vr}')
Xvveta = sym.symbols('X_{vv\eta}')
Xdeltadelta = sym.symbols('X_{\delta\delta}')
Xddetaeta = sym.symbols('X_{\delta\delta\eta\eta}')
# Y
Yvdot = sym.symbols('Y_{\dot{v}}')
Yrdot = sym.symbols('Y_{\dot{r}}')
Yv = sym.symbols('Y_{v}')
Yr = sym.symbols('Y_{r}')
Yvav = sym.symbols('Y_{v\abs{v}}')
Yvar = sym.symbols('Y_{v\abs{r}}')
Yveta = sym.symbols('Y_{v\eta}')
Yreta = sym.symbols('Y_{r\eta}')
Yvaveta = sym.symbols('Y_{v\abs{v}\eta}')
Ystar = sym.symbols('Y_{*}')
Ystareta = sym.symbols('Y_{*\eta}')
Ydelta = sym.symbols('Y_{\delta}')
Ydeltaar = sym.symbols('Y_{\delta\abs{r}}')
Ydeltaeta = sym.symbols('Y_{\delta\eta}')
# N
Nrdot = sym.symbols('N_{\dot{r}}')
Nvdot = sym.symbols('N_{\dot{v}}')
Nr = sym.symbols('N_{r}')
Nv = sym.symbols('N_{v}')
Nrar = sym.symbols('N_{r\abs{r}}')
Nvav = sym.symbols('N_{v\abs{v}}')
Nvar = sym.symbols('N_{v\abs{r}}')
Nreta = sym.symbols('N_{r\eta}')
Nveta = sym.symbols('N_{v\eta}')
Nvaveta = sym.symbols('N_{v\abs{v}\eta}')
Ndelta = sym.symbols('N_{\delta}')
Ndeltaar = sym.symbols('N_{\delta\abs{r}}')
Ndeltaeta = sym.symbols('N_{\delta\eta}')
Nstar = sym.symbols('N_{*}')
Nstareta = sym.symbols('N_{*\eta}')
###Output
_____no_output_____
###Markdown
Fx
###Code
equation_Fx = (
#Xudot*u.diff()
#+ (a_i + b_i*eta + c_i*eta**2)
+ (a1 + b1*eta + c1*eta**2)
+ Xrr*r**2 + Xvv*v**2 + Xvr*v*r
+ Xvveta*v**2*(eta-1)
+ Xdeltadelta*delta_R**2
+ Xddetaeta*(delta_R**2*eta**2)
)
a_i = a1 # Assume this now ...
b_i = b1
c_i = c1
equation_Fx
###Output
_____no_output_____
###Markdown
Fy
###Code
equation_Fy = (
#Yvdot*v.diff()
#+ Yrdot*r.diff()
+ Yv*v + Yr*r + Yvav*v*Abs(v) + Yvar*v*Abs(r)
+ (Yveta*v + Yreta*r + Yvaveta*v*Abs(v))*(eta - 1)
+ Ydelta*delta_R + Ydeltaar*delta_R*Abs(r) + Ydeltaeta*delta_R*(eta - 1)
+ Ystar + Ystareta*(eta - 1)
)
equation_Fy
###Output
_____no_output_____
###Markdown
Mz
###Code
equation_Mz = (
#Nrdot*r.diff() + Nvdot*v.diff()
+ Nr*r + Nv*v
+ Nrar*r*Abs(r) + Nvav*v*Abs(v) + Nvar*v*Abs(r)
+ (Nreta*r + Nveta*v + Nvaveta*v*Abs(v))*(eta - 1)
+ Ndelta*delta_R + Ndeltaar*delta_R*Abs(r) + Ndeltaeta*(eta - 1)
+ Nstar + Nstareta*(eta - 1)
)
equation_Mz
Fx = (O,equation_Fx*ship_frame.x)
Fy = (O,equation_Fy*ship_frame.y)
Mz = (ship_frame, equation_Mz * ship_frame.z)
forces = [Fx,Fy,Mz]
#mass = me.Particle('m', CG, m)
inertia_dyadic = me.inertia(ship_frame, ixx = 0, iyy = 0, izz = Izz)
inertia_dyadic
inertia = (inertia_dyadic, CG)
ship = me.RigidBody('ship',masscenter=CG,frame = ship_frame,mass = m,inertia=inertia)
fr, frstar = kane.kanes_equations(bodies = [ship],loads = forces)
M = kane.mass_matrix_full
M
f = kane.forcing_full
f
(M.inv() * f)
###Output
_____no_output_____
###Markdown
Create a PyDy system
###Code
derivatives = {
#Xudot: -0.00086,
Ydeltaar:0.0,
Nvav:0.00754,
a1: -0.001331,
Xvr:0.01095,
Yvar:-0.0152,
Nr:-0.00294,
b1: -0.001011,
Xvv:0.00287,
#Yrdot:-0.00025,
Nrar:0.0,
c1:0.001827,
Xdeltadelta:-0.001,
Ydelta:0.00416,
Nreta: 0.0,
#a2:-0.000894,
Xrr:0.0,
Ydeltaeta:0.00416,
Nvar:-0.00495,
#b2:-0.000649,
Xvveta:0.0,
Yreta:0.00138,
#Nvdot:-0.00005,
#c2:0.001543,
Xddetaeta:-0.00135,
Yveta:-0.00266,
Ndelta:-0.00216,
#a3:-0.000894,
#Yvdot:-0.0146,
Yvaveta:0.0,
Ndeltaeta:-0.00216,
#b3:0.001016,
Ystar:0.000063,
Ystareta:0.000063,
Nveta:0.00138,
#c3:0.0000004,
Yv:-0.011,
#Nrdot:-0.000964,
Nvaveta:0.0,
#a4:-0.001722,
Yvav:-0.0398,
Nstar:-0.000033,
Nreta:-0.00072,
#b4:-0.000619,
Yr:0.00394,
Nv:-0.00798,
Nstareta:-0.000033,
#c4: -0.000813,
}
derivatives
displacement = 355600*10**3 # [kg]
lpp = 349.8 # [m]
k_zz = 0.3 # Guess
r_zz = k_zz*lpp
Izz_ = displacement*r_zz**2
V = 7
rho = 1025
m_prime = displacement / (1/2*rho*lpp**3)  # non-dimensional mass (prime system)
Izz_prime = Izz_ / (1/2*rho*lpp**5)        # non-dimensional yaw inertia (prime system)
properties = {
m:m_prime,
Izz:Izz_prime,
}
controls = {
delta_R:0.4,
eta:1,
}
constants = {}
constants.update(properties)
constants.update(controls)
constants.update(derivatives)
initial_conditions = {
x:0,
y:0,
psi:0,
u:1,
v:0,
r:0,
}
times = np.linspace(0,1000000,1000)
times_prime = V*times/lpp
sys = System(kane,constants=constants,initial_conditions=initial_conditions,times = times_prime)
result = sys.integrate()
fig,ax = plt.subplots()
ax.plot(times,result[:,0])
ax.set_xlabel('time [s]')
ax.set_ylabel('x [m]')
p = O.pos_from(O0).express(earth_frame)
x0_ = p.dot(ship_frame.x)
x0_
xt,yt,psit = sym.symbols('xt yt psit')
x0_t = x0_.subs([(x,xt),(y,yt),(psi,psit)])
x0_t
y0_ = p.dot(ship_frame.y)
y0_
y0_t = y0_.subs([(x,xt),(y,yt),(psi,psit)])
y0_t
f_x0 = sym.lambdify((xt,yt,psit),x0_t,"numpy")
f_y0 = sym.lambdify((xt,yt,psit),y0_t,"numpy")
x0_sim = f_x0(result[:,0],result[:,1],result[:,2])
fig,ax = plt.subplots()
ax.plot(times,x0_sim)
ax.set_xlabel('time [s]')
ax.set_ylabel('x0 [m]')
y0_sim = f_y0(result[:,0],result[:,1],result[:,2])
fig,ax = plt.subplots()
ax.plot(times,y0_sim)
ax.set_xlabel('time [s]')
ax.set_ylabel('y0 [m]')
fig,ax = plt.subplots()
ax.plot(y0_sim,x0_sim)
ax.set_xlabel('y0 [m]')
ax.set_ylabel('x0 [m]')
a = ax.axis('equal')
N = 100
x_ = np.ones(N,)
y_ = np.zeros(N,)
psi_ = np.linspace(0,2*np.pi,N)
x0_ = f_x0(x_,y_,psi_)
y0_ = f_y0(x_,y_,psi_)
fig,ax = plt.subplots()
ax.plot(y0_,x0_)
ax.set_xlabel('y0 [m]')
ax.set_ylabel('x0 [m]')
a = ax.axis('equal')
dcm = ship_frame.dcm(otherframe=earth_frame)
dcm
xs = result[:,0]
ys = result[:,1]
zs = np.zeros(len(xs))
psis = result[:,2]
x0s = []
y0s = []
for x,y,z,psi in zip(xs,ys,zs,psis):
p = np.array([[x],[y]])
M = np.array([[np.cos(psi),np.sin(psi)],[-np.sin(psi),np.cos(psi)]])
p0 = M.dot(p)
x0 = p0[0]
y0 = p0[1]
x0s.append(x0)
y0s.append(y0)
fig,ax = plt.subplots()
ax.plot(y0s,x0s)
ax.set_xlabel('y0 [m]')
ax.set_ylabel('x0 [m]')
a = ax.axis('equal')
fig,ax = plt.subplots()
ax.plot(times_prime,psis)
###Output
_____no_output_____ |
dust/.ipynb_checkpoints/mie_Q-checkpoint.ipynb | ###Markdown
Introduction to the Interstellar Medium Jonathan Williams Figure 4.3: Extinction efficiency calculated from Mie theory uses miepython.py from https://pypi.org/project/miepython/
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import sys
!{sys.executable} -m pip install miepython
import miepython as mp
npts = 300
x = np.linspace(0.01,5.0,npts)
# index of refraction
m = 1.3 - 0.05j
#m = 1.3 - 0.01j
xm = abs(m-1)*x
# Mie theory calculation
qext, qsca, qback, g = mp.mie(m,2*np.pi*x)
qabs = qext - qsca
fig = plt.figure(figsize=(6,4))
ax1 = fig.add_subplot(1,1,1)
ax1.set_xlabel(r"$a/\lambda$", fontsize=16)
ax1.set_ylabel("Efficiency", fontsize=16)
plt.plot(x, qext, 'k-', lw=3, label='$Q_{ext}$')
plt.plot(x, qsca, 'k--', lw=2, label='$Q_{sca}$')
plt.plot(x, qabs, 'k:', lw=2, label='$Q_{abs}$')
ax1.legend(fontsize=12)
ax1.set_xlim(0,5.0)
ax1.set_ylim(0,3.5)
# -------------------------------------------------
# show that Q_abs > Q_sca for small a/lambda
#ax1.set_xlim(0,0.25)
#ax1.set_ylim(0,0.35)
# -------------------------------------------------
plt.savefig('mie_Q.pdf')
###Output
_____no_output_____ |
geo/gis/examples/raster.ipynb | ###Markdown
Simple Raster Operations This is written with Australian datasets in mind, particularly for NSW, but should be transferable elsewhere. The dominant factor is that output maps and datasets use a UTM coordinate system. Import required modules
###Code
import os
import rasterio
import matplotlib.pyplot as plt
from rasterio import plot as rplot, windows
import numpy as np
from rasterio.warp import calculate_default_transform, reproject, Resampling
import zipfile
###Output
_____no_output_____
###Markdown
Load and plot SRTM elevation data A 1-arc-second (approximately 30 m resolution) SRTM tile covering the north of Sydney and the Central Coast is provided in the 'data' directory and is called 'srtm.tif'.
###Code
data_list = os.listdir('data')
print(data_list)
srtm_path = os.path.join(os.getcwd(), 'data', 'srtm.tif')
print(srtm_path)
###Output
['srtm.zip', 'srtm.zip.aux.xml', 'srtm_utm.tif', 'srtm.tif']
/media/geo/jobs/_67/geo/geo/gis/examples/data/srtm.tif
###Markdown
Load the data using the rasterio module.
###Code
src = rasterio.open(srtm_path)
print(src)
###Output
<open DatasetReader name='/media/geo/jobs/_67/geo/geo/gis/examples/data/srtm.tif' mode='r'>
###Markdown
Plot the data using the plot.show method from the rasterio package.
###Code
fig, ax = plt.subplots()
rplot.show(src, ax=ax)
#rplot.show(src, ax=ax, transform=src.transform) # note: the transform option was tried previously but doesn't seem to have an effect here
plt.close()
###Output
_____no_output_____
###Markdown
Reproject a raster to a preferred coordinate system Typically in NSW a UTM coordinate system using the GDA94 datum is used in field GPS devices and on output maps. It may be necessary to plot vector data over the SRTM map, however, the vector data may be represented by a preferred UTM coordinate system. It is preferable to convert the SRTM data to the required UTM coordinate system. The rasterio documentation provides code to reproject a 'tif' dataset. The following uses that code:
###Code
srtm_utm_path = os.path.join(os.getcwd(), 'data', 'srtm_utm.tif')
if os.path.isfile(srtm_utm_path):
os.remove(srtm_utm_path)
dst_crs = 'EPSG:28356'
with rasterio.open(srtm_path) as src:
transform, width, height = calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds)
kwargs = src.meta.copy()
kwargs.update({
'crs': dst_crs,
'transform': transform,
'width': width,
'height': height
})
with rasterio.open(srtm_utm_path, 'w', **kwargs) as dst:
for i in range(1, src.count + 1):
reproject(
source=rasterio.band(src, i),
destination=rasterio.band(dst, i),
src_transform=src.transform,
src_crs=src.crs,
dst_transform=transform,
dst_crs=dst_crs,
resampling=Resampling.nearest)
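# Quick check (a sketch): the new file should report the target CRS and a sensible shape.
with rasterio.open(srtm_utm_path) as check_ds:
    print(check_ds.crs, check_ds.width, check_ds.height)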
###Output
_____no_output_____
###Markdown
The reprojected SRTM data has been generated.
###Code
print(os.path.isfile(srtm_utm_path))
###Output
True
###Markdown
Plot the reprojected data and notice the difference in the x and y axis values.
###Code
src = rasterio.open(srtm_utm_path)
fig, ax = plt.subplots()
rplot.show(src, ax=ax)
plt.close()
###Output
_____no_output_____
###Markdown
Typically, a specific area within the dataset would be of interest. You can set the x and y axis limits to define such an area. Defining an arbitrary box from the axis labels:
###Code
fig, ax = plt.subplots()
ax.set_xlim(320000,340000)
ax.set_ylim(6280000,6300000)
rplot.show(src, ax=ax)
plt.close()
###Output
_____no_output_____
###Markdown
Notice that the colours are the same as in the original image. In the closeup dataset, the data range is likely to be smaller and it may be better to stretch the colour map between the maximum and minimum values.
###Code
fig, ax = plt.subplots()
ax.set_xlim(320000,340000)
ax.set_ylim(6280000,6300000)
rplot.show(src, ax=ax, vmin=0, vmax=250)
plt.close()
###Output
_____no_output_____
###Markdown
Load a file from a compressed container and extract a subset Some very large datasets (e.g. Geoscience Australia 2015. Digital Elevation Model (DEM) of Australia derived from LiDAR 5 Metre Grid. Geoscience Australia, Canberra. http://pid.geoscience.gov.au/dataset/ga/89644) are available in compressed formats, such as zip files. These can be read directly using rasterio. First place the previously loaded SRTM data into a zip container.
###Code
zip_path = os.path.join('data', 'srtm.zip')
z = zipfile.ZipFile(zip_path, mode='w', compression=zipfile.ZIP_DEFLATED)
z.write(srtm_path)
z.close()
###Output
_____no_output_____
###Markdown
Load the data as previously but with a specially formatted path.
###Code
src = rasterio.open('zip://' + zip_path)
print(src)
###Output
<open DatasetReader name='zip://data/srtm.zip' mode='r'>
###Markdown
The LiDAR dataset from Geoscience Australia (GA) that covers Sydney is over 9GB in the provided zip file. Uncompressed it is over 300GB. It is therefore preferable to extract a smaller subset for use in a project. The GA dataset is too big for use here, so instead we will simulate the procedure using the zipped SRTM data.
###Code
with rasterio.open('zip://' + zip_path) as ds:
# convert a coordinate representing the top-left of the required
# data to an integer representing a row and column
row, col = ds.index(151.222,-33.309)
e, n = col, row
# get data in a square of 1000 pixels from the origin coordinate
N = 1000
window = windows.Window(e, n, N, N)
w = ds.read(1, window=window)
print(w.shape)
meta = ds.meta
meta['width'], meta['height'] = N, N
meta['transform'] = windows.transform(window, ds.transform)
outfile = os.path.join('data', 'srtm_utm_subset.tif')
with rasterio.open(outfile, mode='w', **meta) as dst:
dst.write(w, 1)
###Output
(1000, 1000)
###Markdown
Plot the data subset
###Code
src = rasterio.open(outfile)
fig, ax = plt.subplots()
rplot.show(src, ax=ax)
###Output
_____no_output_____ |
Google Stock Prediction/Quandal Data.ipynb | ###Markdown
This project follows the sentdex tutorial on YouTube.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import quandl as qd
import math
import pickle
###Output
_____no_output_____
###Markdown
THE DATA BELOW IS GOOGLE STOCK DATA
###Code
df=pd.read_csv('GOOGL Stock Data.csv',header=0, index_col='Date', parse_dates=True)
df.shape
df = df[['Adj. Open','Adj. High','Adj. Low','Adj. Close','Adj. Volume']]
df['HL_PCT'] = (df['Adj. High']-df['Adj. Close'])/df['Adj. Close'] * 100
df['PCT_change'] = (df['Adj. Close']-df['Adj. Open'])/df['Adj. Open'] * 100
df = df[['Adj. Close','HL_PCT','PCT_change','Adj. Volume']]
df.head()
forecast_col = 'Adj. Close'
df.fillna(-99999,inplace=True)
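# forecast_out is the number of days to project into the future (here about 1% of the dataset).
# The 'label' column is the Adj. Close shifted that many rows up, so each row's label is the
# closing price forecast_out days later.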
forecast_out = int(math.ceil(0.01*len(df)))
df['label'] = df[forecast_col].shift(-forecast_out)
df.head()
df.dropna(inplace=True)
df.tail()
from sklearn import preprocessing , svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
X = np.array(df.drop(['label'],1))
X = preprocessing.scale(X)
y = np.array(df['label'])
print(len(X),len(y))
xtrain , xtest , ytrain , ytest = train_test_split(X, y, test_size = 0.25 ,random_state = 0)
print(xtrain.shape)
print(ytrain.shape)
print(xtest.shape)
print(ytest.shape)
lr = LinearRegression()
lr.fit(xtrain,ytrain)
with open('LinearRegression.pickle','wb') as f:
pickle.dump(lr,f)
pickle_in = open('LinearRegression.pickle','rb')
lr=pickle.load(pickle_in)
accuracy = lr.score(xtest,ytest)
print(accuracy)
print(forecast_out)
X_lately = X[-forecast_out:]  # the most recent rows, used below to generate the forecast
X = X[:-forecast_out]
df.dropna(inplace=True)
forecast_set = lr.predict(X_lately)
print("For next",forecast_out,"days the forecast is: ")
print("")
print(forecast_set)
print("")
print("With a accuracy of :",accuracy*100,"%")
import datetime
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400
next_unix = last_unix + one_day
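# Each forecast value gets its own future date: append one new row per forecast day with
# NaN in every feature column and the predicted price in the new 'Forecast' column.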
for i in forecast_set:
next_date = datetime.datetime.fromtimestamp(next_unix)
next_unix = next_unix + one_day
df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)] + [i]
plt.figure(figsize=(18,9))
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
###Output
_____no_output_____ |
AI_for_Medical_Prognosis/W_2/C2M2_Assignment.ipynb | ###Markdown
Risk Models Using Machine LearningWelcome to the second assignment of Course 2! Outline- [1. Import Packages](1)- [2. Load the Dataset](2)- [3. Explore the Dataset](3)- [4. Dealing with Missing Data](4) - [Exercise 1](Ex-1)- [5. Decision Trees](5) - [Exercise 2](Ex-2)- [6. Random Forests](6) - [Exercise 3](Ex-3)- [7. Imputation](7)- [8. Error Analysis](8) - [Exercise 4](Ex-4)- [9. Imputation Approaches](Ex-9) - [Exercise 5](Ex-5) - [Exercise 6](Ex-6)- [10. Comparison](10)- [11. Explanations: SHAP]() In this assignment, you'll gain experience with tree based models by predicting the 10-year risk of death of individuals from the NHANES I epidemiology dataset (for a detailed description of this dataset you can check the [CDC Website](https://wwwn.cdc.gov/nchs/nhanes/nhefs/default.aspx/)). This is a challenging task and a great test bed for the machine learning methods we learned this week.As you go through the assignment, you'll learn about: - Dealing with Missing Data - Complete Case Analysis. - Imputation- Decision Trees - Evaluation. - Regularization.- Random Forests - Hyperparameter Tuning. 1. Import PackagesWe'll first import all the common packages that we need for this assignment. - `shap` is a library that explains predictions made by machine learning models.- `sklearn` is one of the most popular machine learning libraries.- `itertools` allows us to conveniently manipulate iterable objects such as lists.- `pydotplus` is used together with `IPython.display.Image` to visualize graph structures such as decision trees.- `numpy` is a fundamental package for scientific computing in Python.- `pandas` is what we'll use to manipulate our data.- `seaborn` is a plotting library which has some convenient functions for visualizing missing data.- `matplotlib` is a plotting library.
###Code
import shap
import sklearn
import itertools
import pydotplus
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import Image
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
# We'll also import some helper functions that will be useful later on.
from util import load_data, cindex
###Output
_____no_output_____
###Markdown
2. Load the Dataset Run the next cell to load in the NHANES I epidemiology dataset. This dataset contains various features of hospital patients as well as their outcomes, i.e. whether or not they died within 10 years.
###Code
X_dev, X_test, y_dev, y_test = load_data(10)
###Output
_____no_output_____
###Markdown
The dataset has been split into a development set (or dev set), which we will use to develop our risk models, and a test set, which we will use to test our models.We further split the dev set into a training and validation set, respectively to train and tune our models, using a 75/25 split (note that we set a random state to make this split repeatable).
###Code
X_train, X_val, y_train, y_val = train_test_split(X_dev, y_dev, test_size=0.25, random_state=10)
###Output
_____no_output_____
###Markdown
3. Explore the DatasetThe first step is to familiarize yourself with the data. Run the next cell to get the size of your training set and look at a small sample.
###Code
print("X_train shape: {}".format(X_train.shape))
X_train.head()
###Output
_____no_output_____
###Markdown
Our targets `y` will be whether or not the patient died within 10 years. Run the next cell to see the target data series.
###Code
y_train.head(20)
###Output
_____no_output_____
###Markdown
Use the next cell to examine individual cases and familiarize yourself with the features.
###Code
i = 10
print(X_train.iloc[i,:])
print("\nDied within 10 years? {}".format(y_train.loc[y_train.index[i]]))
###Output
_____no_output_____
###Markdown
4. Dealing with Missing DataLooking at our data in `X_train`, we see that some of the data is missing: some values in the output of the previous cell are marked as `NaN` ("not a number").Missing data is a common occurrence in data analysis, that can be due to a variety of reasons, such as measuring instrument malfunction, respondents not willing or not able to supply information, and errors in the data collection process.Let's examine the missing data pattern. `seaborn` is an alternative to `matplotlib` that has some convenient plotting functions for data analysis. We can use its `heatmap` function to easily visualize the missing data pattern.Run the cell below to plot the missing data:
###Code
sns.heatmap(X_train.isnull(), cbar=False)
plt.title("Training")
plt.show()
sns.heatmap(X_val.isnull(), cbar=False)
plt.title("Validation")
plt.show()
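# A complementary numeric view (a sketch): fraction of missing values in each training set column.
print(X_train.isnull().mean().sort_values(ascending=False))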
###Output
_____no_output_____
###Markdown
For each feature, represented as a column, values that are present are shown in black, and missing values are set in a light color.From this plot, we can see that many values are missing for systolic blood pressure (`Systolic BP`). Exercise 1In the cell below, write a function to compute the fraction of cases with missing data. This will help us decide how we handle this missing data in the future. Hints The pandas.DataFrame.isnull() method is helpful in this case. Use the pandas.DataFrame.any() method and set the axis parameter. Divide the total number of rows with missing data by the total number of rows. Remember that in Python, True values are equal to 1.
###Code
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def fraction_rows_missing(df):
'''
Return percent of rows with any missing
data in the dataframe.
Input:
df (dataframe): a pandas dataframe with potentially missing data
Output:
frac_missing (float): fraction of rows with missing data
'''
### START CODE HERE (REPLACE 'Pass' with your 'return' code) ###
return sum(df.isnull().any(axis=1)) / len(df)
### END CODE HERE ###
###Output
_____no_output_____
###Markdown
Test your function by running the cell below.
###Code
df_test = pd.DataFrame({'a':[None, 1, 1, None], 'b':[1, None, 0, 1]})
print("Example dataframe:\n")
print(df_test)
print("\nComputed fraction missing: {}, expected: {}".format(fraction_rows_missing(df_test), 0.75))
print(f"Fraction of rows missing from X_train: {fraction_rows_missing(X_train):.3f}")
print(f"Fraction of rows missing from X_val: {fraction_rows_missing(X_val):.3f}")
print(f"Fraction of rows missing from X_test: {fraction_rows_missing(X_test):.3f}")
###Output
_____no_output_____
###Markdown
We see that our train and validation sets have missing values, but luckily our test set has complete cases. As a first pass, we will begin with a **complete case analysis**, dropping all of the rows with any missing data. Run the following cell to drop these rows from our train and validation sets.
###Code
X_train_dropped = X_train.dropna(axis='rows')
y_train_dropped = y_train.loc[X_train_dropped.index]
X_val_dropped = X_val.dropna(axis='rows')
y_val_dropped = y_val.loc[X_val_dropped.index]
###Output
_____no_output_____
###Markdown
5. Decision TreesHaving just learned about decision trees, you choose to use a decision tree classifier. Use scikit-learn to build a decision tree for the hospital dataset using the train set.
###Code
dt = DecisionTreeClassifier(max_depth=None, random_state=10)
dt.fit(X_train_dropped, y_train_dropped)
###Output
_____no_output_____
###Markdown
Next we will evaluate our model. We'll use C-Index for evaluation.> Remember from lesson 4 of week 1 that the C-Index evaluates the ability of a model to differentiate between different classes, by quantifying how often, when considering all pairs of patients (A, B), the model says that patient A has a higher risk score than patient B when, in the observed data, patient A actually died and patient B actually lived. In our case, our model is a binary classifier, where each risk score is either 1 (the model predicts that the patient will die) or 0 (the patient will live).>> More formally, defining _permissible pairs_ of patients as pairs where the outcomes are different, _concordant pairs_ as permissible pairs where the patient that died had a higher risk score (i.e. our model predicted 1 for the patient that died and 0 for the one that lived), and _ties_ as permissible pairs where the risk scores were equal (i.e. our model predicted 1 for both patients or 0 for both patients), the C-Index is equal to:>> $$\text{C-Index} = \frac{\text{concordant pairs} + 0.5\times \text{ties}}{\text{permissible pairs}}$$Run the next cell to compute the C-Index on the train and validation set (we've given you an implementation this time).
###Code
y_train_preds = dt.predict_proba(X_train_dropped)[:, 1]
print(f"Train C-Index: {cindex(y_train_dropped.values, y_train_preds)}")
y_val_preds = dt.predict_proba(X_val_dropped)[:, 1]
print(f"Val C-Index: {cindex(y_val_dropped.values, y_val_preds)}")
###Output
_____no_output_____
###Markdown
Unfortunately your tree seems to be overfitting: it fits the training data so closely that it doesn't generalize well to other samples such as those from the validation set.> The training C-index comes out to 1.0 because, when initializing `DecisionTreeClassifier`, we have left `max_depth` and `min_samples_split` unspecified. The resulting decision tree will therefore keep splitting as far as it can, which pretty much guarantees a pure fit to the training data.To handle this, you can change some of the hyperparameters of our tree. Exercise 2Try and find a set of hyperparameters that improves the generalization to the validation set and recompute the C-index. If you do it right, you should get C-index above 0.6 for the validation set. You can refer to the documentation for the sklearn [DecisionTreeClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html). Hints Try limiting the depth of the tree ('max_depth').
###Code
# Experiment with different hyperparameters for the DecisionTreeClassifier
# until you get a c-index above 0.6 for the validation set
dt_hyperparams = {
# set your own hyperparameters below, such as 'min_samples_split': 1
### START CODE HERE ###
'max_depth': 3,
### END CODE HERE ###
}
###Output
_____no_output_____
###Markdown
Run the next cell to fit and evaluate the regularized tree.
###Code
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
dt_reg = DecisionTreeClassifier(**dt_hyperparams, random_state=10)
dt_reg.fit(X_train_dropped, y_train_dropped)
y_train_preds = dt_reg.predict_proba(X_train_dropped)[:, 1]
y_val_preds = dt_reg.predict_proba(X_val_dropped)[:, 1]
print(f"Train C-Index: {cindex(y_train_dropped.values, y_train_preds)}")
print(f"Val C-Index (expected > 0.6): {cindex(y_val_dropped.values, y_val_preds)}")
###Output
_____no_output_____
###Markdown
If you used a low `max_depth` you can print the entire tree. This allows for easy interpretability. Run the next cell to print the tree splits.
###Code
dot_data = StringIO()
export_graphviz(dt_reg, feature_names=X_train_dropped.columns, out_file=dot_data,
filled=True, rounded=True, proportion=True, special_characters=True,
impurity=False, class_names=['neg', 'pos'], precision=2)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
###Output
_____no_output_____
###Markdown
> **Overfitting, underfitting, and the bias-variance tradeoff**>> If you tested several values of `max_depth`, you may have seen that a value of `3` gives training and validation C-Indices of about `0.689` and `0.630`, and that a `max_depth` of `2` gives better agreement with values of about `0.653` and `0.607`. In the latter case, we have further reduced overfitting, at the cost of a minor loss in predictive performance.>> Contrast this with a `max_depth` value of `1`, which results in C-Indices of about `0.597` for the training set and `0.598` for the validation set: we have eliminated overfitting but with a much stronger degradation of predictive performance.>> Lower predictive performance on the training and validation sets is indicative of the model _underfitting_ the data: it neither learns enough from the training data nor is able to generalize to unseen data (the validation data in our case).>> Finding a model that minimizes and acceptably balances underfitting and overfitting (e.g. selecting the model with a `max_depth` of `2` over the other values) is a common problem in machine learning that is known as the _bias-variance tradeoff_. 6. Random ForestsNo matter how you choose hyperparameters, a single decision tree is prone to overfitting. To solve this problem, you can try **random forests**, which combine predictions from many different trees to create a robust classifier. As before, we will use scikit-learn to build a random forest for the data. We will use the default hyperparameters.
###Code
rf = RandomForestClassifier(n_estimators=100, random_state=10)
rf.fit(X_train_dropped, y_train_dropped)
###Output
_____no_output_____
###Markdown
Now compute and report the C-Index for the random forest on the training and validation set.
###Code
y_train_rf_preds = rf.predict_proba(X_train_dropped)[:, 1]
print(f"Train C-Index: {cindex(y_train_dropped.values, y_train_rf_preds)}")
y_val_rf_preds = rf.predict_proba(X_val_dropped)[:, 1]
print(f"Val C-Index: {cindex(y_val_dropped.values, y_val_rf_preds)}")
###Output
_____no_output_____
###Markdown
Training a random forest with the default hyperparameters results in a model that has better predictive performance than individual decision trees as in the previous section, but this model is overfitting.We therefore need to tune (or optimize) the hyperparameters, to find a model that both has good predictive performance and minimizes overfitting.The hyperparameters we choose to adjust will be:- `n_estimators`: the number of trees used in the forest.- `max_depth`: the maximum depth of each tree.- `min_samples_leaf`: the minimum number (if `int`) or proportion (if `float`) of samples in a leaf.The approach we implement to tune the hyperparameters is known as a grid search:- We define a set of possible values for each of the target hyperparameters.- A model is trained and evaluated for every possible combination of hyperparameters.- The best performing set of hyperparameters is returned.The cell below implements a hyperparameter grid search, using the C-Index to evaluate each tested model.
###Code
def holdout_grid_search(clf, X_train_hp, y_train_hp, X_val_hp, y_val_hp, hyperparams, fixed_hyperparams={}):
'''
Conduct hyperparameter grid search on hold out validation set. Use holdout validation.
Hyperparameters are input as a dictionary mapping each hyperparameter name to the
range of values they should iterate over. Use the cindex function as your evaluation
function.
Input:
clf: sklearn classifier
X_train_hp (dataframe): dataframe for training set input variables
y_train_hp (dataframe): dataframe for training set targets
X_val_hp (dataframe): dataframe for validation set input variables
y_val_hp (dataframe): dataframe for validation set targets
hyperparams (dict): hyperparameter dictionary mapping hyperparameter
names to range of values for grid search
fixed_hyperparams (dict): dictionary of fixed hyperparameters that
are not included in the grid search
Output:
best_estimator (sklearn classifier): fitted sklearn classifier with best performance on
validation set
best_hyperparams (dict): hyperparameter dictionary mapping hyperparameter
names to values in best_estimator
'''
best_estimator = None
best_hyperparams = {}
# hold best running score
best_score = 0.0
# get list of param values
lists = hyperparams.values()
# get all param combinations
param_combinations = list(itertools.product(*lists))
total_param_combinations = len(param_combinations)
# iterate through param combinations
for i, params in enumerate(param_combinations, 1):
# fill param dict with params
param_dict = {}
for param_index, param_name in enumerate(hyperparams):
param_dict[param_name] = params[param_index]
# create estimator with specified params
estimator = clf(**param_dict, **fixed_hyperparams)
# fit estimator
estimator.fit(X_train_hp, y_train_hp)
# get predictions on validation set
preds = estimator.predict_proba(X_val_hp)
# compute cindex for predictions
estimator_score = cindex(y_val_hp, preds[:,1])
print(f'[{i}/{total_param_combinations}] {param_dict}')
print(f'Val C-Index: {estimator_score}\n')
# if new high score, update high score, best estimator
# and best params
if estimator_score >= best_score:
best_score = estimator_score
best_estimator = estimator
best_hyperparams = param_dict
# add fixed hyperparamters to best combination of variable hyperparameters
best_hyperparams.update(fixed_hyperparams)
return best_estimator, best_hyperparams
###Output
_____no_output_____
###Markdown
Exercise 3In the cell below, define the values you want to run the hyperparameter grid search on, and run the cell to find the best-performing set of hyperparameters.Your objective is to get a C-Index above `0.6` on both the train and validation set. Hints n_estimators: try values greater than 100 max_depth: try values in the range 1 to 100 min_samples_leaf: try float values below .5 and/or int values greater than 2
###Code
def random_forest_grid_search(X_train_dropped, y_train_dropped, X_val_dropped, y_val_dropped):
    # Define ranges for the chosen random forest hyperparameters
    hyperparams = {
        ### START CODE HERE (REPLACE array values with your code) ###
        # how many trees should be in the forest (int)
        'n_estimators': [100, 150, 200],
        # the maximum depth of trees in the forest (int)
        'max_depth': [3, 4, 5],
        # the minimum number of samples in a leaf as a fraction
        # of the total number of samples in the training set
        # Can be int (in which case that is the minimum number)
        # or float (in which case the minimum is that fraction of the
        # number of training set samples)
        'min_samples_leaf': [3, 4],
        ### END CODE HERE ###
    }
    fixed_hyperparams = {
        'random_state': 10,
    }
    rf = RandomForestClassifier
    best_rf, best_hyperparams = holdout_grid_search(rf, X_train_dropped, y_train_dropped,
                                                    X_val_dropped, y_val_dropped, hyperparams,
                                                    fixed_hyperparams)
    print(f"Best hyperparameters:\n{best_hyperparams}")
    y_train_best = best_rf.predict_proba(X_train_dropped)[:, 1]
    print(f"Train C-Index: {cindex(y_train_dropped, y_train_best)}")
    y_val_best = best_rf.predict_proba(X_val_dropped)[:, 1]
    print(f"Val C-Index: {cindex(y_val_dropped, y_val_best)}")
    # add fixed hyperparameters to best combination of variable hyperparameters
    best_hyperparams.update(fixed_hyperparams)
    return best_rf, best_hyperparams
best_rf, best_hyperparams = random_forest_grid_search(X_train_dropped, y_train_dropped, X_val_dropped, y_val_dropped)
###Output
_____no_output_____
###Markdown
Finally, evaluate the model on the test set. This is a crucial step, as trying out many combinations of hyperparameters and evaluating them on the validation set could result in a model that ends up overfitting the validation set. We therefore need to check if the model performs well on unseen data, which is the role of the test set, which we have held out until now.
###Code
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
y_test_best = best_rf.predict_proba(X_test)[:, 1]
print(f"Test C-Index: {cindex(y_test.values, y_test_best)}")
###Output
_____no_output_____
###Markdown
Your C-Index on the test set should be greater than `0.6`. 7. ImputationYou've now built and optimized a random forest model on our data. However, there was still a drop in test C-Index. This might be because you threw away more than half of our data because of missing values for systolic blood pressure. Instead, we can try filling in, or imputing, these values. First, let's explore to see if our data is missing at random or not. Let's plot histograms of the dropped rows against each of the covariates (aside from systolic blood pressure) to see if there is a trend. Compare these to the histograms of the feature in the entire dataset. Try to see if one of the covariates has a significantly different distribution in the two subsets.
###Code
dropped_rows = X_train[X_train.isnull().any(axis=1)]
columns_except_Systolic_BP = [col for col in X_train.columns if col not in ['Systolic BP']]
for col in columns_except_Systolic_BP:
    sns.distplot(X_train.loc[:, col], norm_hist=True, kde=False, label='full data')
    sns.distplot(dropped_rows.loc[:, col], norm_hist=True, kde=False, label='without missing data')
    plt.legend()
    plt.show()
###Output
_____no_output_____
###Markdown
Most of the covariates are distributed similarly whether or not we have discarded rows with missing data. In other words, the missingness of the data is independent of these covariates.If this had been true across *all* covariates, then the data would have been said to be **missing completely at random (MCAR)**.But when considering the age covariate, we see that much more data tends to be missing for patients over 65. The reason could be that blood pressure was measured less frequently for older people to avoid placing additional burden on them.As missingness is related to one or more covariates, the missing data is said to be **missing at random (MAR)**.Based on the information we have, there is however no reason to believe that the _values_ of the missing data (specifically, the values of the missing systolic blood pressures) are related to the age of the patients. If this were the case, then this data would be said to be **missing not at random (MNAR)**. 8. Error Analysis Exercise 4Using the information from the plots above, try to find a subgroup of the test data on which the model performs poorly. You should be able to easily find a subgroup of at least 250 cases on which the model has a C-Index of less than 0.69. Hints Define a mask using a feature and a threshold, e.g. patients with a BMI below 20: mask = X_test['BMI'] < 20. Try to find a subgroup for which the model had little data.
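Before writing the mask, it can help to confirm where the model saw little data. Below is a minimal sketch (assuming the `Age` and `Systolic BP` columns used in the plots above) comparing how often systolic blood pressure is missing for older versus younger patients:
###Code
# Compare the missingness rate of 'Systolic BP' for patients over and under 65
missing_bp = X_train['Systolic BP'].isnull()
print("Missing rate, Age > 65 :", missing_bp[X_train['Age'] > 65].mean())
print("Missing rate, Age <= 65:", missing_bp[X_train['Age'] <= 65].mean())
###Output
_____no_output_____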
###Code
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def bad_subset(forest, X_test, y_test):
    # define mask to select large subset with poor performance
    # currently mask defines the entire set
    ### START CODE HERE (REPLACE the code after 'mask =' with your code) ###
    mask = X_test.Age < 40
    ### END CODE HERE ###
    X_subgroup = X_test[mask]
    y_subgroup = y_test[mask]
    subgroup_size = len(X_subgroup)
    y_subgroup_preds = forest.predict_proba(X_subgroup)[:, 1]
    performance = cindex(y_subgroup.values, y_subgroup_preds)
    return performance, subgroup_size
###Output
_____no_output_____
###Markdown
Test Your Work
###Code
performance, subgroup_size = bad_subset(best_rf, X_test, y_test)
print("Subgroup size should greater than 250, performance should be less than 0.69 ")
print(f"Subgroup size: {subgroup_size}, C-Index: {performance}")
###Output
_____no_output_____
###Markdown
Expected OutputNote, your actual output will vary depending on the hyper-parameters that you chose and the mask that you chose.- Make sure that the c-index is less than 0.69```PythonSubgroup size: 586, C-Index: 0.6275```**Bonus**: - See if you can get a c-index as low as 0.53```Subgroup size: 251, C-Index: 0.5331``` 9. Imputation ApproachesSeeing that our data is not missing completely at random, we can handle the missing values by replacing them with substituted values based on the other values that we have. This is known as imputation.The first imputation strategy that we will use is **mean substitution**: we will replace the missing values for each feature with the mean of the available values. In the next cell, use the `SimpleImputer` from `sklearn` to use mean imputation for the missing values.
###Code
# Impute values using the mean
imputer = SimpleImputer(strategy='mean')
imputer.fit(X_train)
X_train_mean_imputed = pd.DataFrame(imputer.transform(X_train), columns=X_train.columns)
X_val_mean_imputed = pd.DataFrame(imputer.transform(X_val), columns=X_val.columns)
###Output
_____no_output_____
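###Markdown
A quick sanity check (a sketch): after mean imputation no missing values should remain, and the value filled in for `Systolic BP` should equal the mean observed in the training data.
###Code
print("Remaining missing values :", X_train_mean_imputed.isnull().sum().sum())
print("Observed mean Systolic BP:", X_train['Systolic BP'].mean())
print("Value used by the imputer:", imputer.statistics_[list(X_train.columns).index('Systolic BP')])
###Output
_____no_output_____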
###Markdown
Exercise 5Now perform a hyperparameter grid search to find the best-performing random forest model, and report results on the test set. Define the parameter ranges for the hyperparameter search in the next cell, and run the cell. Target performanceMake your test c-index at least 0.74 or higher Hints n_estimators: try values greater than 100 max_depth: try values in the range 1 to 100 min_samples_leaf: try float values below .5 and/or int values greater than 2
###Code
# Define ranges for the random forest hyperparameter search
hyperparams = {
    ### START CODE HERE (REPLACE array values with your code) ###
    # how many trees should be in the forest (int)
    'n_estimators': [100, 150, 200],
    # the maximum depth of trees in the forest (int)
    'max_depth': [3, 4, 5],
    # the minimum number of samples in a leaf as a fraction
    # of the total number of samples in the training set
    # Can be int (in which case that is the minimum number)
    # or float (in which case the minimum is that fraction of the
    # number of training set samples)
    'min_samples_leaf': [3, 4],
    ### END CODE HERE ###
}
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
rf = RandomForestClassifier
rf_mean_imputed, best_hyperparams_mean_imputed = holdout_grid_search(rf, X_train_mean_imputed, y_train,
X_val_mean_imputed, y_val,
hyperparams, {'random_state': 10})
print("Performance for best hyperparameters:")
y_train_best = rf_mean_imputed.predict_proba(X_train_mean_imputed)[:, 1]
print(f"- Train C-Index: {cindex(y_train, y_train_best):.4f}")
y_val_best = rf_mean_imputed.predict_proba(X_val_mean_imputed)[:, 1]
print(f"- Val C-Index: {cindex(y_val, y_val_best):.4f}")
y_test_imp = rf_mean_imputed.predict_proba(X_test)[:, 1]
print(f"- Test C-Index: {cindex(y_test, y_test_imp):.4f}")
###Output
_____no_output_____
###Markdown
Expected outputNote, your actual c-index values will vary depending on the hyper-parameters that you choose. - Try to get a good Test c-index, similar to the numbers below:```PythonPerformance for best hyperparameters:- Train C-Index: 0.8109- Val C-Index: 0.7495- Test C-Index: 0.7805``` Next, we will apply another imputation strategy, known as **multivariate feature imputation**, using scikit-learn's `IterativeImputer` class (see the [documentation](https://scikit-learn.org/stable/modules/impute.html#iterative-imputer)).With this strategy, for each feature that is missing values, a regression model is trained to predict observed values based on all of the other features, and the missing values are inferred using this model.As a single iteration across all features may not be enough to impute all missing values, several iterations may be performed, hence the name of the class `IterativeImputer`.In the next cell, use `IterativeImputer` to perform multivariate feature imputation.> Note that the first time the cell is run, `imputer.fit(X_train)` may fail with the message `LinAlgError: SVD did not converge`: simply re-run the cell.
###Code
# Impute using regression on other covariates
imputer = IterativeImputer(random_state=0, sample_posterior=False, max_iter=1, min_value=0)
imputer.fit(X_train)
X_train_imputed = pd.DataFrame(imputer.transform(X_train), columns=X_train.columns)
X_val_imputed = pd.DataFrame(imputer.transform(X_val), columns=X_val.columns)
###Output
_____no_output_____
###Markdown
Exercise 6Perform a hyperparameter grid search to find the best-performing random forest model, and report results on the test set. Define the parameter ranges for the hyperparameter search in the next cell, and run the cell. Target performanceTry to get a test C-Index of at least 0.74. Hints n_estimators: try values greater than 100 max_depth: try values in the range 1 to 100 min_samples_leaf: try float values below .5 and/or int values greater than 2
###Code
# Define ranges for the random forest hyperparameter search
hyperparams = {
    ### START CODE HERE (REPLACE array values with your code) ###
    # how many trees should be in the forest (int)
    'n_estimators': [100, 150, 200],
    # the maximum depth of trees in the forest (int)
    'max_depth': [3, 4, 5],
    # the minimum number of samples in a leaf as a fraction
    # of the total number of samples in the training set
    # Can be int (in which case that is the minimum number)
    # or float (in which case the minimum is that fraction of the
    # number of training set samples)
    'min_samples_leaf': [3, 4],
    ### END CODE HERE ###
}
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
rf = RandomForestClassifier
rf_imputed, best_hyperparams_imputed = holdout_grid_search(rf, X_train_imputed, y_train,
X_val_imputed, y_val,
hyperparams, {'random_state': 10})
print("Performance for best hyperparameters:")
y_train_best = rf_imputed.predict_proba(X_train_imputed)[:, 1]
print(f"- Train C-Index: {cindex(y_train, y_train_best):.4f}")
y_val_best = rf_imputed.predict_proba(X_val_imputed)[:, 1]
print(f"- Val C-Index: {cindex(y_val, y_val_best):.4f}")
y_test_imp = rf_imputed.predict_proba(X_test)[:, 1]
print(f"- Test C-Index: {cindex(y_test, y_test_imp):.4f}")
###Output
_____no_output_____
###Markdown
Expected OutputNote, your actual output will vary depending on the hyper-parameters that you chose and the mask that you chose.```PythonPerformance for best hyperparameters:- Train C-Index: 0.8131- Val C-Index: 0.7454- Test C-Index: 0.7797``` 10. ComparisonFor good measure, retest on the subgroup from before to see if your new models do better.
###Code
performance, subgroup_size = bad_subset(best_rf, X_test, y_test)
print(f"C-Index (no imputation): {performance}")
performance, subgroup_size = bad_subset(rf_mean_imputed, X_test, y_test)
print(f"C-Index (mean imputation): {performance}")
performance, subgroup_size = bad_subset(rf_imputed, X_test, y_test)
print(f"C-Index (multivariate feature imputation): {performance}")
###Output
_____no_output_____
###Markdown
We should see that avoiding complete case analysis (i.e. analysis only on observations for which there is no missing data) allows our model to generalize a bit better. Remember to examine your missing cases to judge whether they are missing at random or not! 11. Explanations: SHAPUsing a random forest has improved results, but we've lost some of the natural interpretability of trees. In this section we'll try to explain the predictions using slightly more sophisticated techniques. You choose to apply **SHAP (SHapley Additive exPlanations)**, a cutting-edge method that explains predictions made by black-box machine learning models (i.e. models which are too complex to be understandable by humans as is).> Given a prediction made by a machine learning model, SHAP values explain the prediction by quantifying the additive importance of each feature to the prediction. SHAP values have their roots in cooperative game theory, where Shapley values are used to quantify the contribution of each player to the game.> > Although it is computationally expensive to compute SHAP values for general black-box models, in the case of trees and forests there exists a fast polynomial-time algorithm. For more details, see the [TreeShap paper](https://arxiv.org/pdf/1802.03888.pdf).We'll use the [shap library](https://github.com/slundberg/shap) to do this for our random forest model. Run the next cell to output the most at-risk individuals in the test set according to our model.
###Code
X_test_risk = X_test.copy(deep=True)
X_test_risk.loc[:, 'risk'] = rf_imputed.predict_proba(X_test_risk)[:, 1]
X_test_risk = X_test_risk.sort_values(by='risk', ascending=False)
X_test_risk.head()
###Output
_____no_output_____
###Markdown
We can use SHAP values to try and understand the model output on specific individuals using force plots. Run the cell below to see a force plot on the riskiest individual.
###Code
explainer = shap.TreeExplainer(rf_imputed)
i = 0
shap_value = explainer.shap_values(X_test.loc[X_test_risk.index[i], :])[1]
shap.force_plot(explainer.expected_value[1], shap_value, feature_names=X_test.columns, matplotlib=True)
###Output
_____no_output_____
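###Markdown
Because SHAP values are additive, the base (expected) value plus the SHAP values for this individual should approximately reconstruct the model's predicted risk. A small sketch to verify this for the individual plotted above:
###Code
# Additivity check: base value + sum of SHAP values ~= predicted probability of the positive class
reconstructed = explainer.expected_value[1] + shap_value.sum()
predicted = rf_imputed.predict_proba(X_test.loc[[X_test_risk.index[i]], :])[0, 1]
print(f"base + sum(SHAP) = {reconstructed:.4f}, model output = {predicted:.4f}")
###Output
_____no_output_____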
###Markdown
How to read this chart:- The red sections on the left are features which push the model towards the final prediction in the positive direction (i.e. a higher Age increases the predicted risk).- The blue sections on the right are features that push the model towards the final prediction in the negative direction (if an increase in a feature leads to a lower risk, it will be shown in blue).- Note that the exact output of your chart will differ depending on the hyper-parameters that you choose for your model.We can also use SHAP values to understand the model output in aggregate. Run the next cell to initialize the SHAP values (this may take a few minutes).
###Code
shap_values = shap.TreeExplainer(rf_imputed).shap_values(X_test)[1]
###Output
_____no_output_____
###Markdown
Run the next cell to see a summary plot of the SHAP values for each feature on each of the test examples. The colors indicate the value of the feature.
###Code
shap.summary_plot(shap_values, X_test)
###Output
_____no_output_____
###Markdown
Clearly we see that being a woman (`sex = 2.0`, as opposed to men for which `sex = 1.0`) has a negative SHAP value, meaning that it reduces the risk of dying within 10 years. High age and high systolic blood pressure have positive SHAP values, and are therefore related to increased mortality. You can see how features interact using dependence plots. These plot the SHAP value for a given feature for each data point, and color the points in using the value for another feature. This lets us begin to explain the variation in SHAP value for a single value of the main feature.Run the next cell to see the interaction between Age and Sex.
###Code
shap.dependence_plot('Age', shap_values, X_test, interaction_index='Sex')
###Output
_____no_output_____
###Markdown
We see that while Age > 50 is generally bad (positive SHAP value), being a woman generally reduces the impact of age. This makes sense since we know that women generally live longer than men.Let's now look at poverty index and age.
###Code
shap.dependence_plot('Poverty index', shap_values, X_test, interaction_index='Age')
###Output
_____no_output_____ |
src/scene_segmentation_module/scene_segmentation_model.ipynb | ###Markdown
Scene Segmentation Model
###Code
import inspect
from viper_toolkit import Dissect
from model_server import ViperModel, NeuralNetworkLoader
from scene_segmentation_module import SceneSegmentationModel
###Output
_____no_output_____
###Markdown
Scene Segmentation ClassThe Scene Segmentation Model is a class of NeuralNetworkLoader models which performs scene segmentation on the image, classifying its pixels into four classes:* Roadway* Curb* Background* Marker Using the neural network and the model weights found in the module's "Model" folder, the module first loads a ViperModel object:
###Code
source = inspect.getsource(ViperModel)
print (source)
###Output
class ViperModel(object):
# A ViperModel contains the location of the model architecture
# and the model weights which are stored in the node package.
def __init__(self, package_name, model_xml, weights_bin):
self.pkg = package_name
self.model = model_xml
self.weights = weights_bin
self.setup_model()
def setup_model(self):
self.dir = roslib.packages.get_pkg_dir(self.pkg)
self.location = os.path.join(
self.dir,
self.model
)
self.weights = os.path.join(
self.dir,
self.weights
)
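###Markdown
As a usage illustration only (the package and file names below are made up, and the call will only succeed inside a ROS workspace where such a package actually exists), constructing a ViperModel amounts to pointing it at a package plus the model architecture and weights files:
###Code
# Hypothetical example: the package name and file paths are placeholders, not the module's real values
example_model = ViperModel(
    package_name="scene_segmentation_module",            # assumed ROS package name
    model_xml="models/road-segmentation-adas-0001.xml",  # assumed relative path to the network architecture
    weights_bin="models/road-segmentation-adas-0001.bin" # assumed relative path to the trained weights
)
###Output
_____no_output_____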
###Markdown
Instantiating the class NeuralNetworkLoaderWhen the VPU is initialized, the Model Server will provide the location of these parameters to the inference engine, as well as create a class object for this module, which I call a NeuralNetworkLoader. This object allows the model to be initialized at the time of the VPU instantiation, which is asynchronous to the instantiation of this module:***(Note: While I am providing the class definition here, the NeuralNetworkLoader class is shared amongst all Model Nodes; I will break down how this works in more detail on the TBA page)***
###Code
source = inspect.getsource(NeuralNetworkLoader)
print (source)
###Output
class NeuralNetworkLoader(object):
"""
A NeuralNetworkLoader is a class object which loads a pretrained
model (architecture and weights) onto a initialized OpenVino
inference engine.
Keyword Arguments:
ie -- an Inference Engine instance set up in the parent node which
we will load this model to.
ViperModel -- a instance of class ViperModel which contains the
models weights and the structure of the neural network.
device -- the inference device to be used to predict on (i.e.,
"MYRIAD", CPU, GPU, etc.)
model_tag -- a three letter abbreviation used by the VIPER Logger
module which identifies log messages as originating from within this
modules code.
model_name -- a logger attribute which identifies this model.
"""
def __init__(self,
ie: IECore,
viper_model: ViperModel,
device: str,
model_tag: str = "M..",
model_name: str = "Model",
*args,
**kwargs):
# Creates our helper tools from our Viper Toolkkit such as
# the parameter manager, our log manager, and our timer.
self.setup_parameters(
model_name = model_name,
model_tag = model_tag)
# Prepare this model for loading
self.setup_inference_engine(
ie = ie,
viper_model = viper_model,
device = device)
# Load the read network onto the initialized device.
self.load_inference_engine(device=device, ie = ie)
# Retrieve the architecture of the model to load, including
# inputs and outputs and stores these on the parameter server
self.get_network_info()
# Retrieves the image shapes for the input and output from the
# now loaded model and stores these on the parameter server
self.get_model_info()
def setup_parameters(self, model_name: str, model_tag: str):
# Instantiate our logger tool naming these processes and
# setting the tag. The ("XX.") convention indicates this is a
# model and log messages are coming from within the
# model processing script and not the main node.
self.logger = Logger(
name = model_name,
tag = model_tag)
# Instantiate our timer tool which will output the times of
# the processes within the model, and indicate that the
# process originated from within the model, and not the module.
self.timer = ProcessTimer(logger=self.logger)
# Creates a parameter manager
self.NeuralNetworkParams = Parameters(logger=self.logger)
def setup_inference_engine(self, ie: IECore, viper_model: ViperModel, device: str):
# Link the internal inference engine with the initialized engine
# and read the network architecture.
self._ie = ie
# Load the Viper Model class object, which contains the address
# for the neural network architecture and well as the weights
# of the trained model.
self._net = ie.read_network(
model=viper_model.location,
weights=viper_model.weights
)
def load_inference_engine(self, device, ie):
# Load the network architecture and weights into the initialized
# inference engine. We must indicate the device name which
# is passed through the main node.
self._exec_net = ie.load_network(
network = self._net,
device_name = device
)
def get_network_info(self):
# Set the input and output blobs
self._input_blob = next(iter(self._exec_net.input_info))
self._output_blob = next(iter(self._exec_net.outputs))
# Get the input shape
#self._input_shape = self._net.inputs[self._input_blob].shape
#self.logger.i(f'Input shape: {self._input_shape}')
# Save these parameters to the parameter server
#self.NeuralNetworkParams.add(
# Parameter(
# name = "Input_shape",
# value = self._input_shape,
# dynamic = False))
# Get the output shape
self._output_shape = self._net.outputs[self._output_blob].shape
self.logger.i(f'Output shape: {self._output_shape}')
# Save these parameters to the parameter server
self.NeuralNetworkParams.add(
Parameter(
name="Output_shape",
value=self._output_shape,
dynamic=False))
def get_model_info(self):
# Accesses the shape of the input layer and the output layer
self._input_key = list(self._exec_net.input_info)[0]
self._output_keys = list(self._exec_net.outputs.keys())
self._tensors = self._exec_net.input_info[self._input_key].tensor_desc
# Saves the shapes to variables representing
self.n, self.c, self.h, self.w = self._tensors.dims
self.logger.i(f'Tensor shape (NCHW): ({self.n}, {self.c}, {self.h}, {self.w})')
self.NeuralNetworkParams.add(
Parameter(
name="Input_height",
value=self.h,
dynamic=False))
self.NeuralNetworkParams.add(
Parameter(
name="Input_width",
value=self.w,
dynamic=False))
###Markdown
The effect of this is that the inference engine object is initialized once, and all models are loaded to the inference engine object at initialization. This module then instantiates a SceneSegmentationModel, which is a child class of the NeuralNetworkLoader already instantiated on behalf of this module. We allow this module to inherit all methods and properties of the parent class.In effect, the class object that was initiated prior to this module's initialization can then be "passed on" to this module as if it were this module which had instantiated the class. Scene Segmentation ModelThe SceneSegmentationModel is a class object of type NeuralNetworkLoader, and contains the methods particular to the performance of scene segmentation. It contains two class methods:* export_parameters(), which exports the shape parameters specifically needed by this model* run_scene_segmentation(), which is the method by which the image is transformed and passed to the inference engine for prediction. After successfully invoking this method, an image mask is returned with each pixel classified as one of the four classes.
###Code
source = inspect.getsource(SceneSegmentationModel)
print (source)
###Output
class SceneSegmentationModel(NeuralNetworkLoader):
"""
A SceneSegmentationModel is a class object which uses a Convolutional
Neural Network (CNN) to classify every pixel within an image as a
member of a certain class (segments the image). In this pytorch
pretrained model we are predicting on 4 classes:
(a) Roadway, (b) Curb, (c) Background, and (d) marker.
The model specifications can be found at:
https://docs.openvino.ai/2018_R5/_docs_Transportation_segmentation_curbs_release1_caffe_desc_road_segmentation_adas_0001.html
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def export_parameters(export_state=True, **kwargs):
# Exports parameters sent to this function for debugging
# purposes, and then turns off function after export_state=False
# is received.
self.export_state = False
for arg in kwargs:
self.parameters.add(
Parameter(
name = arg,
value = kwargs[arg],
dynamic = False))
self.export_state = export_state
def run_scene_segmentation(self, frame):
# The default ROS image is BGR color, however the model is
# expecting RGB, so we will need to convert this first.
rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# We will now attempt to resize the image to fit the model input
#try:
input_img = cv2.resize(rgb_image, (self.w, self.h))
rospy.loginfo("resized")
#self.logger.d(f"Resized Shape: {input_img.shape}")
#except:
#self.logger.e(f"Cannot resize image to shape ({self.w}, {self.h})")
#return
if self.export_state:
self.export_parameters(resized_shape = input_img.shape)
# We need to wrangle the image from into the NCHW format.
#try:
transposed_img = np.expand_dims(
a = input_img.transpose(2, 0, 1),
axis = 0)
rospy.loginfo("transposed")
#self.logger.d(f"Transposed Shape: {transposed_img.shape}")
#except:
# self.logger.e("Error converting to NCHW format")
# return
if self.export_state:
self.export_parameters(transposed_shape=transposed_img.shape)
# We will now perform inference on the input object using the
# inference engine we loaded to the Visual Processing Unit
results = self._exec_net.infer(
inputs = {self._input_layer: transposed_img}
)
rospy.loginfo("infered")
# Extract the inference blob from the results array
result_ir = results[self._output_blob]
# We then compute the maximum value along axis 1 indicating
# the max likelyhood class for which that pixel belongs, and
# return this classification map of the original image.
mask = np.argmax(
a = result_ir,
axis=1)
# We export the successful shape and then turn off exporting.
if self.export_state:
self.export_parameters(mask_shape=mask.shape, export_state=False)
self.logger.i(f"Returning shape: {mask.shape}")
return mask
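###Markdown
As a rough illustration of how the returned mask might be consumed downstream (a sketch; the class-id-to-color mapping and the use of OpenCV here are assumptions, not part of the module):
###Code
import cv2
import numpy as np

def colorize_mask(mask, frame, alpha=0.4):
    # mask is expected with shape (1, H', W'); drop the batch axis
    class_map = mask[0].astype(np.uint8)
    # one BGR color per class id (the ordering here is illustrative only)
    palette = np.array([[0, 0, 0],       # background
                        [0, 255, 0],     # roadway
                        [0, 255, 255],   # curb
                        [0, 0, 255]],    # marker
                       dtype=np.uint8)
    colored = palette[class_map]
    # resize the colored mask back to the original frame size and blend it over the frame
    colored = cv2.resize(colored, (frame.shape[1], frame.shape[0]), interpolation=cv2.INTER_NEAREST)
    return cv2.addWeighted(frame, 1 - alpha, colored, alpha, 0)
###Output
_____no_output_____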
|
notebooks/03.00-Probability-and-Information-Theory.ipynb | ###Markdown
*This notebook contains an excerpt from the [Deep Learning with Tensorflow 2.0](https://www.adhiraiyan.org/DeepLearningWithTensorflow.html) by Mukesh Mithrakumar. The code is released under the [MIT license](https://opensource.org/licenses/MIT) and is available for FREE [on GitHub](https://github.com/adhiraiyan/DeepLearningWithTF2.0).**Open Source runs on love, laughter and a whole lot of coffee. Consider buying me [one](https://www.buymeacoffee.com/mmukesh) if you find this content useful!* 03.00 - Probability and Information Theory Probability theory is a mathematical framework for representing uncertain statements. But probability is not just an abstract concept in the math world, probability is all around us, and it can be fun to calculate the probability of events in our life. For example, the next time you get late in traffic and your boss calls you, in addition to the time you will reach, you can also say the probability of you reaching in that time, of course it would take you a little bit of math but if you did miss a meeting, you can tell your boss you did tell him there was a chance you might not make it right ๐.In artificial intelligence applications, we use probability theory in two major ways. - First, the laws of probability tell us how AI systems should reason, so we design our algorithms to compute or approximatevarious expressions derived using probability theory. - Second, we can use probability and statistics to theoretically analyze the behavior of proposed AI systems.While probability theory allows us to make uncertain statements and to reason in the presence of uncertainty, information theory enables us to quantify the amount of uncertainty in a probability distribution.If you are already familiar with probability theory and information theory, you may wish to skip this chapter except for section 3.14, which describes the graphs we use to describe structured probabilistic models for machine learning. If you have absolutely no prior experience with these subjects, this chapter should be sufficient to successfully carry out deep learning research projects, but I do suggest that you consult an additional resource. Tensorflow ProbabilityIn this notebook, I will be introducing you to [Tensorflow Probability](https://www.tensorflow.org/probability), which was a toolbox introduced at the 2018 Tensorflow Developer Summit. This is a probabilistic programming toolbox for machine learning researchers and practitioners to build models. I won't be going exhaustively through all the functionalities of the toolbox but give you practical examples and help you get comfortable enough to go on your own and use some of the other amazing functions. You can read about an introduction to tensorflow probability [here](https://medium.com/tensorflow/introducing-tensorflow-probability-dca4c304e245). So even if you are familiar with the probability concepts, atleast go through the code to see how tensorflow probability works, this really is a very powerful API.As always, go through the material first and then try playing around with the code, specially with probability, manipulate the values and see if the results make sense, you can find the link to the Google Colab notebook at the end of the chapter.
###Code
"""
At the moment of writing (05.20.2019) the only build that supports tensorflow probability is the tensorflow
nightly build so we will use that to install tensorflow 2.0 and tensorflow probability.
"""
# Install tensorflow 2.0 and tensorflow probability from the nightly build
!pip install --upgrade tf-nightly-2.0-preview tfp-nightly
# Imports
import os
import random
import sys
import tensorflow as tf
import tensorflow_probability as tfp
# By convention, we generally refer to the tf probability distributions library as tfd.
tfd = tfp.distributions
import seaborn as sns
from matplotlib import pyplot as plt
from collections import defaultdict
# Import helpers file
"""
For some plots we need to convert tensors into numpy ndarrays. For that we use the evaluate function in
the helpers.py. If you are running this in Google Colab, make sure you upload the helpers.py found in the
notebooks folder to Google Colab but if you are running this in binder, you should be fine.
"""
from helpers import evaluate
# turning of tensorflow INFO, WARNING, and ERROR messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# plt axis colors setup
plt.rc_context({'axes.edgecolor':'orange', 'xtick.color':'red', 'ytick.color':'red', 'text.color':'orange'})
color_b = 'dodgerblue'
color_o = '#FF9A13'
color_sb = '#0504aa'
color_do = 'darkorange'
# Check the versions of tensorflow 2.0 and tensorflow probability
print("Tensorflow version: {}".format(tf.__version__))
print("Tensorflow probability version: {}".format(tfp.__version__))
###Output
Tensorflow version: 2.0.0-dev20190602
Tensorflow probability version: 0.8.0-dev20190602
###Markdown
03.01 - Why Probability? Unlike the world of computer scientists and software engineers where things are entirely deterministic and certain, the world of machine learning must always deal with uncertain quantities and sometimes stochastic (non-deterministic or randomly determined) quantities.There are three possible sources of uncertainty:1. Inherent stochasticity: These are systems that have inherent randomness. Like using the python rand() function which outputs random numbers each time you run, or the dynamics of subatomic particles in quantum mechanics which are described as probabilistic in quantum mechanics.2. Incomplete observability: The best example for this is the Monty Hall problem, the one in the movie 21 Jim Sturgess gets asked, there are three doors and there's a ferrari behind one door and the other two lead to a goat. Watch the [scene](https://www.youtube.com/watch?v=cXqDIFUB7YU) to understand how to solve the Monty Hall problem. In this even though the contestant's choice is deterministic, but from the contestant's point of view the outcome is uncertain and deterministic systems appear to be stochastic when you can't observe all the variables.3. Incomplete modeling: Spoiler Warning! Well, at the end of End Game, when Iron man snapped away all of Thanos' forces, (I know, still recovering from the scene), we are left to wonder what happened to Gamora right, was she snapped away because she was with Thanos's forces initially or was she saved because she turned against Thanos. When we discard some information about the model the discarded information in this case whether Tony knew Gamora was good or bad results in an uncertainty in the model's predictions, in this case we don't know for certain if she is alive or not.Okay, swear, last Avengers reference. When Dr. Strange said we have 1 in 14 million chances of winning the war, he practically saw those 14 million futures, this is called __frequentist probability__, which defines an event's probability as the limit of its relative frequency in a large number of trials. But not always do we have Dr. Strange's time stone to see all the possible futures or events that are repeatable, in this case we turn to __Bayesian probability__, which uses probability to represent a degree of belief for certain events, with 1 indicating absolute certainty and 0 indicating absolute uncertainty.Even though the frequentist probability is related to rates at which events occur and Bayesian probability is related to qualitative levels of certainty, we treat both of them as behaving the same and we use the exact same formulas to compute the probability of events. 03.02 - Random Variables A __random variable__ is a variable that can take on different values randomly. On its own, a random variable is just a description of the states that are possible (you can think of these like functions), which must be coupled with a probability distribution that specifies how likely each of these states is.Well, if that doesn't make sense, let me give you an example, when I first heard about random variables, I thought this must work like a random number generator spitting out random values at each call, this is partly correct, let me clear it up. So, random number generators have two main components, a sampler, which is nothing more than a happy soul that flips a coin over and over again, reporting the results. 
And after this sampler, we have a random variable, the job of the random variable is to translate these Heads or Tails events into numbers based on our rules.Random variables can be discrete or continuous. A discrete random variable is one that has a finite or countably infinite number of states. Note that these states are not necessarily the integers; they can also just be named states that are not considered to have any numerical value. For example, gender (male, female, etc), for which we use an indicator function $\mathbb{I}$ to map non-numeric values to numbers, e.g. male=0, female=1. A continuous random variable is associated with real value.
###Code
"""
The Rademacher and Rayleigh are two types of distributions we will use to generate our samples.
Rademacher: is a discrete probability distribution where a random variate X has a 50% chance of being +1 and a
50% chance of being -1.
Rayleigh: is a continuous probability distribution for non-negative valued random variables.
Do not worry about what probability distributions mean, we will be looking at it in the next section, for now,
you can think of Rademacher as the sampler, the happy guy who tosses coins over and over again where
heads represent +1 and tails -1.
And Rayleigh is the guy who works at a gas/petrol station who helps you to fill the tank and notes down how much
you filled your tank (eg. 1.2l, 4.5l) which are continuous values.
"""
# Discrete random variable
rademacher = tfp.math.random_rademacher([1, 100], dtype=tf.int32)
# Continuous random variable
rayleigh = tfp.math.random_rayleigh([1, 100], dtype=tf.float32)
# Plot discrete random variable 1 and -1
plt.title("Rademacher Discrete Random Variables")
plt.hist(rademacher, color=color_b)
plt.show()
# Plot continuous random variable
plt.title("Rayleigh Continuous Random Variables")
plt.hist(rayleigh, color=color_o)
plt.show()
###Output
_____no_output_____
###Markdown
03.03 - Probability Distributions A __probability distribution__ is a description of how likely a random variable or set of random variables is to take on each of its possible states. The way we describe probability distributions depends on whether the variables are discrete or continuous. 3.3.1 Discrete Variables and Probability Mass functions A probability distribution over discrete variables may be described using a __probability mass function (PMF)__. A probability mass function maps from a state of a random variable to the probability of that random variable taking on that state.For example the roll of a dice is random and a discrete variable means the roll can only have 1, 2, 3, 4, 5 or 6 on a die and no values inbetween. We denote probability mass functions with $P$, where we denote a __PMF__ equation as $P(X = x)$. Here $x$ can be a number on the dice when $X$ is the event of rolling the dice.
###Code
"""
In a fair 6 sided dice, when you roll, each number has a chance of 1/6 = 16.7% of landing and we can show
this by running long enough rolls. So in this example, we do 10000 rolls and we verify that P(X=4) = 16.7%.
In short, the probability from a PMF says what chance x has. Play around with the different x values,
number of rolls and sides and see what kind of probability you get and see if it makes sense.
"""
def single_dice(x, sides, rolls):
    """Calculates and prints the probability of rolls.
    Arguments:
        x (int)     : is the number you want to calculate the probability for.
        sides (int) : Number of sides for the dice.
        rolls (int) : Number of rolls.
    Returns:
        a printout.
    """
    result = roll(sides, rolls)
    for i in range(1, sides + 1):
        plt.bar(i, result[i] / rolls)
    print("P(X = {}) = {}%".format(x, tf.divide(tf.multiply(result[x], 100), rolls)))

def roll(sides, rolls):
    """Returns a dictionary of rolls and the sides of each roll.
    Arguments:
        sides (int) : Number of sides for the dice.
        rolls (int) : Number of rolls.
    Returns:
        a dictionary.
    """
    d = defaultdict(int)  # creating a default dictionary
    for _ in range(rolls):
        d[random.randint(1, sides)] += 1  # The random process
    return d

single_dice(x=6, sides=6, rolls=10000)
###Output
P(X = 6) = 16.43%
###Markdown
To be a __PMF__ on a random variable x, a function $P$ must satisfy the following properties:- The domain of $P$ must be the set of all possible states of x. In our example above the possible states of x are from 1-6, try plugging in 7 for x and see what value you get.- $\forall x \in \mathrm{x}, 0 \leq P(x) \leq 1$. An impossible event has probability 0, and no state can be less probable than that. Likewise, an event that is guaranteed to happen has probability 1, and no state can have a greater chance of occurring. If you tried plugging in 7 for our example above, you would have seen the probability of obtaining a 7 would be zero, that is an impossible event because 7 is not in our set.- $\sum_{x \in \mathrm{x}} P(x) = 1$. This normalization property prevents us from obtaining probabilities greater than one. Meaning if you add up the individual probabilities of all our die faces, they should sum to 1, or 100%. Probability mass functions can act on many variables at the same time. Such a probability distribution over many variables is known as a __joint probability mass function__. $P(\mathrm{x} = x, \mathrm{y} = y)$ denotes the probability that $\mathrm{x} = x$ and $\mathrm{y} = y$ simultaneously; when x and y are independent, as with two separate dice, this factorizes as $P(x)P(y)$.
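A quick numeric check of the normalization property, using the `roll` helper defined earlier (a minimal sketch; the empirical sum is exactly 1 because every roll lands on some face):
###Code
# Empirical check that the face probabilities sum to 1
n_rolls = 10000
counts = roll(sides=6, rolls=n_rolls)
total = sum(counts[i] / n_rolls for i in range(1, 7))
print("Sum of P(X = x) over all faces: {}".format(total))
###Output
_____no_output_____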
###Code
"""
In this example, we are rolling two dices, there are ways to simplify the code so it's not this long but
I wanted to show that we are rolling two dice 1000 times, and in the example we are calculating the probability
of rolling x=4 and y=1, this can be easily calculated by multiplying the individual probabilities of x and y.
"""
def multi_dice(x, y, sides, rolls, plot=True):
    """Calculates the joint probability of two dice.
    Arguments:
        x (int)     : is the number you want to calculate the probability for.
        y (int)     : is the number you want to calculate the probability for.
        sides (int) : Number of sides for the dice.
        rolls (int) : Number of rolls.
        plot (bool) : Whether you want to plot the data or not.
    Returns:
        probabilities (float).
    """
    result1 = roll(sides, rolls)              # first result from the rolls
    result2 = roll(sides, rolls)              # second result from the rolls
    prob_x = tf.divide(result1[x], rolls)     # calculates the probability of x
    prob_y = tf.divide(result2[y], rolls)     # calculates the probability of y
    joint_prob = tf.multiply(prob_x, prob_y)  # calculates the joint probability of x&y by multiplying
    if plot:
        for i in range(1, sides + 1):
            plt.title("Dice 1 {} Rolls".format(rolls))
            plt.bar(i, result1[i] / rolls, color=color_b)
        plt.show()
        for i in range(1, sides + 1):
            plt.title("Dice 2 {} Rolls".format(rolls))
            plt.bar(i, result2[i] / rolls, color=color_o)
        plt.show()
    return prob_x, prob_y, joint_prob
prob_x, prob_y, joint_prob = multi_dice(x=4, y=1, sides=6, rolls=10000, plot=True)
print("P(x = {:.4}%), P(y = {:.4}%), P(x = {}; y = {}) = {:.4}%\n\n".format(tf.multiply(prob_x, 100),
tf.multiply(prob_y, 100),
4, 1, tf.multiply(joint_prob, 100)))
###Output
_____no_output_____
###Markdown
3.3.2 Continuous Variables and Probability Density Functions When working with continuous random variables, we describe probability distributions using a __probability density function (PDF)__. Let's play a game shall we, what if I ask you to guess the integer that I am thinking of between 1 to 10, regardless of the number you pick, the probability of each of the options is the same (1/10) because you have 10 options and the probabilities must add up to 1. But what if I told you to guess the real number I am thinking between 0 and 1. Now this gets tricky, I can be thinking of 0.2, 0.5, 0.0004 and it can go on and on and the possibilities are endless. So we run into problems like how are we going to describe the probability of each option since there are infinite numbers. This is where __PDF__ comes to help, instead of asking the exact probability, we look for a probability that is close to a single number.
###Code
"""
In our guessing game example, I told you how difficult it would be for you to guess a real number I am thinking of
between 0 and 1 and below, we plot such a graph with minval of 0 and maxval of 1 and we "guess" the values 500
times and the resulting distribution is plotted.
"""
# Outputs random values from a uniform distribution
continuous = tf.random.uniform([1, 500], minval=0, maxval=1, dtype=tf.float32)
g = sns.distplot(continuous, color=color_b)
plt.grid()
###Output
_____no_output_____
###Markdown
To be a probability density function, a function $p$ must satisfy thefollowing properties:- The domain of $p$ must be the set of all possible states of x- $\forall x \in \mathrm{x}, p(x) \geq 0$. Note that we do not require $p(x) \leq 1$- $\int p(x)dx = 1$ A probability density function $p(x)$ does not give the probability of a specific state directly; instead the probability of landing inside an infinitesimal region with volume $\delta x$ is given by $p(x) \delta x$
###Code
"""
Below is the same histogram plot of our continuous random variable, note that the values of y axis looks different
between the seaborn distplot and the histogram plot because the sns distplot is also drawing a density plot.
You can turn it off by setting โkde=Falseโ and you will get the same plot as you see below.
The goal of the following plot is to show you that if you want to calculate the p(0.3) then you would need to
calculate the volume of the region delta x
"""
n, bins, patches = plt.hist(continuous, color=color_b)
patches[3].set_fc(color_o)
plt.grid()
###Output
_____no_output_____
###Markdown
We can integrate the density function to find the actual probability mass of a set of points. Specifically, the probability that $x$ lies in some set $\mathbb{S}$ is given by the integral of $p(x)$ over that set ($\int_{[a,b]} p(x) dx$) __Tensorflow Probability Distribution Library__From here onwards, we will be using [TFP distributions](https://www.tensorflow.org/probability/api_docs/python/tfp/distributions) module often and we will be calling it as tfd (=tfp.distributions). So, before getting started, let me explain a few things about the module.The TF Probability uses distribution subclasses to represent stochastic, random variables. Recall the first cause of uncertainty, inherent stochasticity. This means that even if we knew all the values of the variables' parameters, it would still be random. We would see examples of these distributions in Section 9. In the previous example, we created the distribution using a random variable but extracting samples from it and manipulating those will not be as intuitive as it would when you are using the tfp distributions library. We usually start by creating a distribution and then when we draw samples from it, those samples become tensorflow tensors which can be deterministically manipulated.Some common methods in tfd:- sample(sample_shape=(), seed=None): Generates a specified sample size- mean(): Calculates the mean- mode(): Calculates the mode- variance(): Calculates the variance- stddev(): Calculates the standard deviation- prob(value): Calculates both the Probability density/mass function- log_prob(value): Calculates the Log probability density/mass function.- entropy(): Shannon entropy in nats.
###Code
"""
Let's say we want to find the probability of 1.5 (p(1.5)) from a continuous distribution. We can of course
do the integral and find it but in tensorflow probability you have "prob()" which allows you to calculate
both Probability Mass Function and Probability Density Function.
For tfp.distributions.Normal "loc" is the mean and "scale" is the std deviation. Don't worry if you don't
understand those, we will go through distributions in Section 9. And I recommend you come back and go through
these examples again after you finish section 9.
Also, there's nothing special about these numbers, play around with the scale, p(x) values and the k limits to
get a better understanding.
"""
# creating an x axis
samples = tf.range(-10, 10, 0.001)
# Create a Normal distribution with mean 0 and std deviation 3
normal_distribution = tfd.Normal(loc=0., scale=3)
# Then we calculate the PDFs of drawing 1.5
pdf_x = normal_distribution.prob(1.5)
# We can't plot tensors so evaluate is a helper function to convert to ndarrays
[pdf_x_] = evaluate([pdf_x])
# Finally, we plot both the PDF of the samples and p(1.5)
plt.plot(samples, normal_distribution.prob(samples), color=color_b)
plt.fill_between(samples, normal_distribution.prob(samples), color=color_b)
plt.bar(1.5, pdf_x_, color=color_o)
plt.grid()
print("Probability of drawing 1.5 = {:.4}% from the normal distribution".format(pdf_x*100))
###Output
Probability of drawing 1.5 = 11.74% from the normal distribution
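###Markdown
To connect this back to the integral above: the probability of an interval comes from integrating the density, which for a tfp distribution is available through its `cdf` method (a small sketch reusing the same `normal_distribution` as the previous cell):
###Code
# P(0 <= x <= 3) for the Normal(0, 3) distribution, i.e. the integral of the PDF from 0 to 3
interval_prob = normal_distribution.cdf(3.) - normal_distribution.cdf(0.)
print("P(0 <= x <= 3) = {:.4}".format(float(interval_prob)))
###Output
_____no_output_____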
###Markdown
03.04 - Marginal Probability Sometimes we know the probability distribution over a set of variables and we want to know the probability distribution over just a subset of them. The probability distribution over the subset is known as the __marginal probability distribution__.For discrete random variables x and y, we use the __sum rule__ to find the marginal probability distribution.$$\color{orange}{\forall x \in \mathrm{x}, \ P(\mathrm{x} = x) = \displaystyle\sum_{y} P(\mathrm{x} = x, \mathrm{y} = y) = \displaystyle\sum_{y} P(\mathrm{x} = x \ | \ \mathrm{y} = y) \, P(\mathrm{y} = y) \tag{1}}$$For example, I am all in for self-driving cars but just for fun, let's say: what are your chances of getting hit by Uber's, Tesla's or Google's self-driving car Waymo? You probably don't care about the individual probabilities, you just want to make sure you don't get hit by any of them, so in this case, to calculate the probability of getting hit by any of the three cars, you would just be summing up the individual joint probabilities of getting hit by each of the cars, and that is called marginal probability. Make sense? If not, don't worry, let's see an example below:
###Code
"""
Let's start by creating three distributions for Waymo, Uber and Tesla (W, U and T) and use Bernoulli distribution
since for Bernoulli distribution the outcome can only be 0 or 1, in our case, not hit and hit.
Say the probabilities of getting hit for W = 0.1, U = 0.2 and T = 0.3. Also, nothing against Tesla ๐.
With tfp.distributions, you don't have to create individual distributions line by line, you can specify the
probabilities inside "probs" argument. This call defines three independent Bernoulli distributions, which happen
to be contained in the same Python Distribution object (self_driving).
The three events W, U and T are independent but we would like to specify a joint distribution to be able to
calculate the marginal probability of the event [0, 0, 0], the probability of not getting hit by any three.
For this we would be using a higher order distribution called Independent, which takes independent distributions
and yields a new distribution.
"""
# Lets create three Bernoulli distributions for Waymo, Uber and Tesla
self_driving = tfd.Bernoulli(probs=[.1, .2, .3])
# Individual probabilities of getting hit by W, U and T and these should match with the specified probs
print("Individual probabilities: {}".format(self_driving.prob([1, 1, 1])))
# Combining the distributions to create the independent distribution
self_driving_joint = tfd.Independent(self_driving, reinterpreted_batch_ndims=1)
# Finally let's calculate the marginal probability of [0, 0, 0]
print("Marginal Probability of event [0, 0, 0]: {:.4}".format(self_driving_joint.prob([0, 0, 0])))
###Output
Individual probabilities: [0.09999999 0.19999999 0.29999998]
Marginal Probability of event [0, 0, 0]: 0.504
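###Markdown
We can also verify the sum rule in equation (1) directly on this joint distribution: the marginal probability of not being hit by Waymo is obtained by summing the joint probability over every outcome of the other two events (a small sketch; with the probabilities above it should come out near 0.9):
###Code
import itertools
# Sum the joint probability over all Uber/Tesla outcomes to marginalize them out
p_w0 = sum(self_driving_joint.prob([0., u, t]) for u, t in itertools.product([0., 1.], repeat=2))
print("P(not hit by Waymo) via the sum rule: {:.4}".format(float(p_w0)))
###Output
_____no_output_____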
###Markdown
Now now, the value can be scary, I mean just a 50% chance of surviving, not so good, but note that this is just probability; what you should worry about is the odds. We use probability and odds interchangeably in day to day life, but they mean different things: if you recall in my first example, Dr. Strange gave us the odds of winning as 1 in 14 million, so in the self-driving car case, even though the probability may seem low, your odds may be higher. For a detailed difference between odds and probability, check ["The Difference Between "Probability" and "Odds""](http://sphweb.bumc.bu.edu/otlt/MPH-Modules/BS/BS704_Confidence_Intervals/BS704_Confidence_Intervals10.html). Finally, for continuous variables, we need to use integration instead of summation.$$\color{orange}{p(x) = \int p(x, y) dy \tag{2}}$$And the way to find the marginal probabilities would be the same, except you would be using a different distribution based on continuous random variables; three more sections and you will have a better idea of which distributions to use. 03.05 - Conditional Probability In many cases, we are interested in the probability of some event, given that some other event has happened. This is called a __conditional probability__. We denote the conditional probability that $\mathrm{x} = x$ given $\mathrm{y} = y$ as:$$\color{orange}{P(\mathrm{x} = x \ | \ \mathrm{y} = y) = \frac{P(\mathrm{x} = x, \mathrm{y} = y)}{P(\mathrm{y} = y)} \tag{3a}}$$This can be really useful for uncertain events. For example, if I ask you, do you think I walk to work? By now you should say something like a 40% chance or some arbitrary number that I walk to work, but if I give you additional information, like say I live 10 minutes walking distance from work, then the probability of me walking to work gets higher, right? This can happen the other way too: if I say I am 1 hour walking distance from work, then the probability of me walking to work gets smaller. This is conditional probability. You would word it as "the probability of me walking to work given I live 10 minutes from work". The conditional probability can also be given in terms of sets as below:$$\color{orange}{P(\mathrm{x} = x \ | \ \mathrm{y} = y) = \frac{P( x \cap y)}{P(y)} \tag{3b}}$$The worked example in the next cell illustrates this.
###Code
"""
Let's say I roll a fair die twice and obtain two numbers.
X1 = result of the first roll and X2 = result of the second roll.
Given that I know X1+X2 = 7, what is the probability that X1=4 or X2=4?
We start by doing 100 trials and we create a Multinomial distribution where total_count= number of trials,
probs = probability vectors for the events. We use Multinomial because it can have multiple outcomes and we
take 2 samples from this distribution to resemble dice_1 and dice_2.
"""
for i in range(100):
    dice_roll_distribution = tfd.Multinomial(total_count=1., probs=[1/6.]*6)
    dice_sample = dice_roll_distribution.sample(2)
    # tf.where returns the index of the samples and we add 1 because indexing starts from 0
    dice_1 = tf.where(dice_sample[0])[0] + 1
    dice_2 = tf.where(dice_sample[1])[0] + 1
    if tf.equal((dice_1 + dice_2), 7):
        # There are two conditions (4, 3) and (3, 4) that would result in a sum of 7 with either x1=4 or x2=4
        prob_7 = (2/36.)
        # There are 6 combinations that would result in getting a 7 {(6,1),(5,2),(4,3),(3,4),(2,5),(1,6)}
        prob_4 = (6/36.)
        prob_4_given_7 = tf.divide(prob_7, prob_4)*100
        def f1(): return tf.print("X1: {} \t X2: {} \t P(X1 or X2=4 | 7): {:.4}%".format(dice_1, dice_2,
                                                                                         prob_4_given_7),
                                  output_stream=sys.stdout)
        def f2(): return None
        tf.cond(tf.logical_or(tf.equal(dice_1, 4), tf.equal(dice_2, 4)), f1, f2)
    else:
        continue
###Output
X1: [4] X2: [3] P(X1 or X2=4 | 7): 33.33%
X1: [3] X2: [4] P(X1 or X2=4 | 7): 33.33%
X1: [3] X2: [4] P(X1 or X2=4 | 7): 33.33%
X1: [4] X2: [3] P(X1 or X2=4 | 7): 33.33%
X1: [4] X2: [3] P(X1 or X2=4 | 7): 33.33%
###Markdown
03.06 - The Chain Rule of Conditional Probabilities Any joint probability distribution over many random variables may be decomposed into conditional distributions over only one variable:$$\color{orange}{P(\mathrm{x^{(1)}, \cdots, x^{(n)}}) = P(\mathrm{x^{(1)}}) \textstyle\prod_{i = 2}^{n} P (\mathrm{x^{(i)}} | \mathrm{x^{(1)}, \cdots, x^{(i-1)}}) \tag{4}}$$This observation is known as the __chain rule__, or __product rule__, of probability.
###Code
"""
Lets see an example for three events, but before that, let's break down the steps of finding the
conditional probability with three events:
P(a, b, c) = P(a|b, c) P(b, c)
= P(a|b, c) P(b|c) P(c)
Now, to the example, in a factory there are 100 units of a certain product, 5 of which are defective.
We pick three units from the 100 units at random. What is the probability that none of them are defective?
We start by creating three Bernoulli distributions for the three events.
Event 1: The probability of choosing a good part (95/100)
Event 2: The probability of choosing a 2nd good part (94/99)
Event 3: The probability of choosing a 3rd good part (93/98)
We can do this example without using the tfp.Independent call but it would take few more lines but this way, you
can keep chaining the conditional probabilities
"""
# Let's start by creating three Bernoulli distributions
units_distribution = tfd.Bernoulli(probs=[95/100., 94/99., 93/98.])
# Let's join these probability distributions
joint_unit_distribution = tfd.Independent(units_distribution, reinterpreted_batch_ndims=1)
# Finally let's calculate the probability of picking one after the other
print("Probability of P(a, b, c) = P(1, 1, 1): {:.4}".format(joint_unit_distribution.prob([1, 1, 1])))
###Output
Probability of P(a, b, c) = P(1, 1, 1): 0.856
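###Markdown
As a cross-check of the chain rule decomposition, the same number can be computed directly as $P(c) \, P(b \mid c) \, P(a \mid b, c)$ using the fractions listed in the comments above:
###Code
# Direct chain-rule product: P(c) * P(b|c) * P(a|b,c)
p_chain = (95/100.) * (94/99.) * (93/98.)
print("Chain-rule product: {:.4}".format(p_chain))  # matches the joint probability above (~0.856)
###Output
_____no_output_____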
###Markdown
03.07 - Independence and Conditional Independence Two random variables x and y are __independent__ if their probability distribution can be expressed as a product of two factors, one involving only x and one involving only y:$$\color{orange}{\forall x \in \mathrm{x}, y \in \mathrm{y}, \ p(\mathrm{x} = x, \mathrm{y} = y) = p(\mathrm{x} = x)p(\mathrm{y} = y) \tag{5}}$$For example, let's say Scooby-Doo and Tom live in Coolsville.The probability that Scooby gets home in time for dinner, and the probability that Tom gets home in time for dinner are independent, this is we wouldn't expect one to have an affect on the other. These are independent events.
###Code
"""
Let's revisit our multi_dice example, where we calculated the probabilities of getting 4 on dice 1 and 1 on
dice 2. Here, let's expand that and compute the probability of getting 4 on dice 1 given that we
get 1 on dice 2, over 100 rolls: P(x = 4 | y = 1) = P(x=4, y=1) / P(y=1).
"""
# calculating the prob of x, y and the joint probability
_, prob_y, prob_numerator = multi_dice(x=4, y=1, sides=6, rolls=100, plot=False)
prob_denominator = prob_y
conditional_prob = tf.divide(prob_numerator, prob_denominator)
print("Probability of getting 4 on dice 1 given I get 1 on Dice 2: {}%".format(tf.multiply(conditional_prob, 100)))
###Output
Probability of getting 4 on dice 1 given I get 1 on Dice 2: 21.0%
###Markdown
Note that the dice 1 throw resulting in a four can happen regardless of the dice 2 throw; these two events are independent. Two random variables x and y are __conditionally independent__ given a random variable z if the conditional probability distribution over x and y factorizes in this way for every value of z:$$\color{orange}{\forall x \in \mathrm{x}, y \in \mathrm{y}, z \in \mathrm{z}, \ p(\mathrm{x} = x, \mathrm{y} = y \ | \ \mathrm{z} = z) = p(\mathrm{x} = x \ | \ \mathrm{z} = z)p(\mathrm{y} = y \ | \ \mathrm{z} = z) \tag{6}}$$Let's continue with our Scooby-Doo and Tom example; now let's say there was a monster mouse attack in the city. The probabilities of Scooby getting home in time and Tom getting home in time for dinner will depend on the monster mouse attack. However, Scooby and Tom have nothing to do with each other, so, given the attack, their probabilities of getting home in time are independent of each other: we can't deduce from Scooby getting home late that Tom will be late as well. We write $x \perp y$ to mean that x and y are independent, while $x \perp y \ | \ z$ means that x and y are conditionally independent given z. 03.08 - Expectation, Variance and Covariance The __expectation__, or __expected value__, of some function $f(x)$ with respect to a probability distribution $P(x)$ is the average, or mean value, that $f$ takes on when $x$ is drawn from $P$. For discrete variables this can be computed with a summation:$$\color{orange}{\mathbb{E}_{x \sim P} [f(x)] = \displaystyle\sum_{x} P(x) f(x) \tag{7}}$$Let me motivate this with a simple example: let's say I take you to a casino, my treat for you working up to section 8 😉, and we play a simple game. You pay, well I pay, 2 bitcoins and you roll a fair die. In this game, if you get {1, 2, 3} (let's call this event A), you win 1 bitcoin; if event B={4, 5} occurs, you win 2 bitcoins; and if C={6} occurs, you win 6 bitcoins. So, we being smart want to calculate how much we expect to make every time we play this game. If A occurs, we make -1 bitcoins; if B occurs, we don't make any bitcoin; and if C occurs, we make 4 bitcoins. So, we can compute the average profit as:$(-1) \times (1/6 + 1/6 + 1/6) + (0) \times (1/6 + 1/6) + (4) \times (1/6) = 1/6$And we can expect to make 1/6 bitcoins on average every time we play this game. So before you bet on something or play a game next time, just check how much you can expect to make out of it.
###Code
# Let's see the bitcoin example in code.
bitcoins = [-1, -1, -1, 0, 0, 4] # net winnings f(x) for each face of the die (event A: -1, B: 0, C: +4)
dice = [1/6.]*6 # probability of landing a face
expectation = 0
for i in range(0, len(dice)):
expectation += (dice[i] * bitcoins[i]) # summing p(x) * f(x)
# Calculate the expectation
print( "Expectation of the bitcoin game E(X) is : {:.4}".format(expectation))
###Output
Expectation of the bitcoin game E(X) is : 0.167
###Markdown
For continuous variables, it is computed with an integral:$$\color{orange}{\mathbb{E}_{x \sim P} [f(x)] = \int p(x) f(x) dx \tag{8}}$$
###Code
# Let's create a uniform distribution such that the limit be between 0 and 1
uniform_distribution = tfd.Uniform(low=0.0, high=1.0)
# Here we find the expectation of the uniform distribution
continuous_expectation = uniform_distribution.mean()
print("The Expectation of f(x) = 1 for the limit 0 to 1: {}".format(continuous_expectation))
# Plotting the expectation
plt.hist(continuous_expectation, color=color_b)
plt.grid()
###Output
The Expectation of f(x) = 1 for the limit 0 to 1: 0.5
###Markdown
Expectations are linear, for example,$$\color{orange}{\mathbb{E}_{\mathrm{x}}[\alpha f(x) + \beta g(x)] = \alpha \mathbb{E}_{\mathrm{x}}[f(x)] + \beta \mathbb{E}_{\mathrm{x}}[g(x)] \tag{9}}$$where $\alpha \ \text{and} \ \beta$ are not dependent on $x$. When we predict the expected value of an experiment, we would like our outcome to not deviate too much from the expected value and __variance__ gives a measure of how much the values of a function of a random variable x vary as we sample different values of x from its probability distribution:$$\color{orange}{Var(f(x)) = \mathbb{E} \Big[ (f(x) - \mathbb{E}[f(x)])^2 \Big] \tag{10}}$$When the variance is low, the values of $f(x)$ cluster near their expected value. The square root of the variance is known as the __standard deviation__.
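Equation 9 (linearity of expectation) is easy to check by Monte Carlo sampling before we look at variance in code. The sketch below is illustrative only; the choice of f, g, alpha and beta is arbitrary, and tf/tfd are assumed to be available as earlier in the notebook (the imports are repeated just to keep the snippet self-contained).
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

# Illustrative check of E[a*f(x) + b*g(x)] = a*E[f(x)] + b*E[g(x)] on a common sample
alpha, beta = 2.0, -3.0
x = tfd.Normal(loc=0., scale=1.).sample(100000)
f_x = tf.square(x)      # f(x) = x^2
g_x = tf.math.cos(x)    # g(x) = cos(x)
lhs = tf.reduce_mean(alpha * f_x + beta * g_x)
rhs = alpha * tf.reduce_mean(f_x) + beta * tf.reduce_mean(g_x)
# The two sides agree up to floating-point error, mirroring the identity
print("E[a*f + b*g] = {:.4f},  a*E[f] + b*E[g] = {:.4f}".format(float(lhs), float(rhs)))
```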
###Code
# creating an x axis from -3 to 3 with 0.001 increments
x_axis = tf.range(-3, 3, 0.001)
# Let's create two distributions to see how variance affects the distributions. loc(=mean) and std.deviation(=scale)
distribution_1 = tfd.Normal(loc=0., scale=0.5)
distribution_2 = tfd.Normal(loc=0., scale=1.5)
# Distribution plot 1
plt.plot(x_axis, distribution_1.prob(x_axis), color=color_b)
plt.fill_between(x_axis, distribution_1.prob(x_axis), color=color_b)
# Distribution plot 2
plt.plot(x_axis, distribution_2.prob(x_axis), color=color_o)
plt.fill_between(x_axis, distribution_2.prob(x_axis), color=color_o)
plt.grid()
print("Blue Plot Variance: {} \nOrange Plot Variance: {}".format(distribution_1.variance(), distribution_2.variance()))
###Output
Blue Plot Variance: 0.25
Orange Plot Variance: 2.25
###Markdown
The __covariance__ gives some sense of how much two values are linearly related to each other, as well as the scale of these variables:$$\color{orange}{Cov(f(x), g(y)) = \mathbb{E} \big[ (f(x) - \mathbb{E}[f(x)]) (g(y) - \mathbb{E}[g(y)]) \big] \tag{11}}$$In simple terms, covariance measures how two random variables in a data set change together.- High absolute values of the covariance mean that the values change very much and are both far from their respective means at the same time. - If the sign of the covariance is positive, then both variables tend to take on relatively high values simultaneously. - If the sign of the covariance is negative, then one variable tends to take on a relatively high value at the times that the other takes on a relatively low value and vice versa.
###Code
"""
To find the covariance we will be using the TensorFlow Probability stats module (tfp.stats).
"""
# We start by creating two normal distributions to represent f(x) and g(y)
f_x = tf.random.normal(shape=(100, 1, 3))
g_y = tf.random.normal(shape=(100, 1, 3))
# cov[i, j] is the sample covariance between fx[:, i, j] and fy[:, i, j].
covariance = tfp.stats.covariance(f_x, g_y, sample_axis=0, event_axis=None)
print("Covariance of f(x) and g(y): {}".format(covariance[0]))
###Output
Covariance of f(x) and g(y): [ 0.10961602 -0.08872931 0.03041128]
###Markdown
Covariance is sometimes confused with __correlation__. Both indicate whether variables are positively or negatively related to each other, but they differ in that covariance depends on the units and scale of the variables (it only tells us the direction in which they increase or decrease together), while correlation measures how closely the variables move together by normalizing the contribution of each variable.
###Code
# correlation of f(x)
correlation = tfp.stats.correlation(f_x, g_y, sample_axis=0, event_axis=None)
print("Correlation of f(x) and g(y): {}".format(correlation[0]))
###Output
Correlation of f(x) and g(y): [ 0.10907104 -0.09540905 0.02875604]
###Markdown
The notions of covariance and dependence are related but distinct concepts. They are related because two variables that are independent have zero covariance, and two variables that have nonzero covariance are dependent. Independence, however, is a distinct property from covariance. For two variables to have zero covariance, there must be no linear dependence between them. Independence is a stronger requirement than zero covariance, because independence also excludes nonlinear relationships. The __covariance matrix__ of a random vector $x \in \mathbb{R}^n$ is an *n x n* matrix, such that:$$\color{orange}{Cov(\mathbf{x})_{i, j} = Cov(\mathrm{x}_i, \mathrm{x}_j) \tag{12}}$$The diagonal elements of the covariance matrix give the variances:$$\color{orange}{ Cov(\mathrm{x}_i, \mathrm{x}_i) = Var(\mathrm{x}_i) \tag{13}}$$
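A quick way to see equation 13 in action is to compare the diagonal of a covariance matrix with variances computed directly. The sketch below uses tfp.stats on a small random sample; the shapes are chosen only for illustration.
```python
import tensorflow as tf
import tensorflow_probability as tfp

# Illustrative check: diag(Cov(x)) equals Var(x_i) for each component
x = tf.random.normal(shape=(1000, 3))                             # 1000 samples of a 3-d vector
cov_mat = tfp.stats.covariance(x, sample_axis=0, event_axis=-1)   # 3 x 3 covariance matrix
variances = tfp.stats.variance(x, sample_axis=0)                  # per-component variances
print("Diagonal of the covariance matrix: {}".format(tf.linalg.diag_part(cov_mat)))
print("Variances computed directly:       {}".format(variances))
```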
###Code
# covariance matrix of f(x)
cov_matrix = tfp.stats.covariance(f_x, sample_axis=0, event_axis=-1)
print("Variance of x: {} \nCovariance Matrix for x: \n{}".format(covariance[0], cov_matrix[0]))
###Output
Variance of x: [ 0.10961602 -0.08872931 0.03041128]
Covariance Matrix for x:
[[ 0.77077645 0.01786118 -0.01279851]
[ 0.01786118 1.0053835 0.04651672]
[-0.01279851 0.04651672 1.0021778 ]]
###Markdown
03.09 - Common Probability Distributions A probability distribution is a function that describes how likely you are to obtain the different possible values of the random variable. Following are a few examples of popular distributions. 3.9.1 Bernoulli Distribution The __Bernoulli distribution__ is a distribution over a single binary random variable. It is controlled by a single parameter $\phi \in [0, 1]$, which gives the probability of the random variable being equal to 1. It has the following properties:$$\color{orange}{P(\mathrm{x} = 1) = \phi} \\\color{orange}{P(\mathrm{x} = 0) = 1 - \phi} \\\color{orange}{P(\mathrm{x} = x) = \phi^x (1 - \phi)^{1-x}} \\\color{orange}{\mathbb{E}_{\mathrm{x}} [\mathrm{x}] = \phi}\\\color{orange}{ Var_{\mathrm{x}}(\mathrm{x}) = \phi(1 - \phi) \tag{14}}$$ The Bernoulli distribution is a special case of the __Binomial distribution__ where there is only one trial. A binomial distribution is the sum of independent and identically distributed Bernoulli random variables. For example, let's say you do a single coin toss where the probability of getting heads is p. The random variable that represents your winnings after one coin toss is a Bernoulli random variable. So, what about the number of heads you land in 100 tosses? This is where Bernoulli trials come in: in general, if there are n Bernoulli trials, then the sum of those trials follows a binomial distribution with parameters n and p. Below, we will see an example for 1000 trials and the resulting Binomial distribution is plotted.
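To make the Bernoulli-Binomial connection concrete before the code below, here is a minimal sketch that compares the sum of n independent Bernoulli draws with the analytic mean and variance of a Binomial distribution with the same parameters; n = 100 and p = 0.5 are arbitrary illustration values.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

n, p = 100, 0.5
# Sum of n Bernoulli trials, repeated 1000 times
bernoulli_sums = tf.cast(
    tf.reduce_sum(tfd.Bernoulli(probs=p).sample([1000, n]), axis=1), tf.float32)
# Binomial(n, p) with the same parameters
binomial = tfd.Binomial(total_count=float(n), probs=p)
print("Empirical mean of Bernoulli sums: {:.2f}  vs  Binomial mean: {}".format(
    float(tf.reduce_mean(bernoulli_sums)), binomial.mean()))
print("Empirical variance of Bernoulli sums: {:.2f}  vs  Binomial variance: {}".format(
    float(tf.math.reduce_variance(bernoulli_sums)), binomial.variance()))
```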
###Code
# Create a Bernoulli distribution with a probability .5 and sample size of 1000
bernoulli_distribution = tfd.Bernoulli(probs=.5)
bernoulli_trials = bernoulli_distribution.sample(1000)
# Plot Bernoulli Distribution
sns.distplot(bernoulli_trials, color=color_b)
# Properties of Bernoulli distribution
property_1 = bernoulli_distribution.prob(1)
print("P(x = 1) = {}".format(property_1))
property_2 = bernoulli_distribution.prob(0)
print("P(x = 0) = 1 - {} = {}".format(property_1, property_2))
print("Property three is a generalization of property 1 and 2")
print("For Bernoulli distribution The expected value of a Bernoulli random variable X is p (E[X] = p)")
# Variance is calculated as Var = E[(X - E[X])**2]
property_5 = bernoulli_distribution.variance()
print("Var(x) = {0} (1 - {0})".format(property_5))
###Output
P(x = 1) = 0.5
P(x = 0) = 1 - 0.5 = 0.5
Property three is a generalization of property 1 and two
For Bernoulli distribution The expected value of a Bernoulli random variable X is p (E[X] = p)
Var(x) = 0.25 (1 - 0.25)
###Markdown
3.9.2 Multinoulli Distribution The __multinoulli__ or __categorical distribution__ is a distribution over a single discrete variable with *k* different states, where *k* is finite. The multinoulli distribution is a special case of the __multinomial distribution__, which is a generalization of the Binomial distribution. A multinomial distribution is the distribution over vectors in $\{0, \cdots, n\}^k$ representing how many times each of the *k* categories is visited when *n* samples are drawn from a multinoulli distribution.
###Code
# For a fair dice
p = [1/6.]*6
# Multinomial distribution (the generalization of the multinoulli) with 60 trials, sampled once
multinoulli_distribution = tfd.Multinomial(total_count=60., probs=p)
multinoulli_pdf = multinoulli_distribution.sample(1)
print("""Dice throw values: {}
In sixty trials, index 0 represents the times the dice landed on 1 (= {} times) and
index 1 represents the times the dice landed on 2 (= {} times)\n""".format(multinoulli_pdf,
multinoulli_pdf[0][0],
multinoulli_pdf[0][1]))
g = sns.distplot(multinoulli_pdf, color=color_b)
plt.grid()
###Output
Dice throw values: [[ 8. 10. 13. 12. 9. 8.]]
In sixty trials, index 0 represents the times the dice landed on 1 (= 8.0 times) and
index 1 represents the times the dice landed on 2 (= 10.0 times)
###Markdown
There are other discrete distributions like:- Hypergeometric Distribution: models sampling without replacement- Poisson Distribution: expresses the probability of a given number of events occurring in a fixed interval of time and/or space if these events occur with a known average rate and independently of the time since the last event.- Geometric Distribution: counts the number of Bernoulli trials needed to get one success.Since this will not be an exhaustive introduction to distributions, I presented only the major ones and for the curious ones, if you want to learn more, you can take a look at the references I mention at the end of the notebook.Next, we will take a look at some continuous distributions. 3.9.3 Gaussian Distribution The most commonly used distribution over real numbers is the __normal distribution__, also known as the __Gaussian distribution__:$$\color{orange}{\mathcal{N}(x; \mu, \sigma^2) = \sqrt{\frac{1}{2 \pi \sigma^2}} exp \Big(- \frac{1}{2 \sigma^2} (x - \mu)^2 \Big) \tag{15}}$$The two parameters $\mu \in \mathbb{R}$ and $\sigma \in (0, \infty)$ control the normal distribution. The parameter $\mu$ gives the coordinate of the central peak. This is also the mean of the distribution: $\mathbb{E}[\mathrm{x}] = \mu$. The standard deviation of the distribution is given by $\sigma$, and the variance by $\sigma^2$.
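Before plotting, equation 15 can be checked directly against TFP's implementation; the sketch below evaluates the closed-form density at a single (arbitrary) point and compares it with tfd.Normal(...).prob.
```python
import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions

mu, sigma, x = 0., 1., 1.
# Closed-form Gaussian density from equation 15
manual_pdf = np.sqrt(1. / (2. * np.pi * sigma**2)) * np.exp(-(x - mu)**2 / (2. * sigma**2))
tfp_pdf = tfd.Normal(loc=mu, scale=sigma).prob(x)
print("Manual N(x; mu, sigma^2): {:.5f}   TFP prob: {:.5f}".format(manual_pdf, float(tfp_pdf)))
```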
###Code
# We use linspace to create a range of values from -8 to 8 with increments of (stop - start) / (num - 1)
rand_x= tf.linspace(start=-8., stop=8., num=150)
# Gaussian distribution with a standard deviation of 1 and mean 0
sigma = float(1.)
mu = float(0.)
gaussian_pdf = tfd.Normal(loc=mu, scale=sigma).prob(rand_x)
# convert tensors into numpy ndarrays for plotting
[rand_x_, gaussian_pdf_] = evaluate([rand_x, gaussian_pdf])
# Plot of the Gaussian distribution
plt.plot(rand_x_, gaussian_pdf_, color=color_b)
plt.fill_between(rand_x_, gaussian_pdf_, color=color_b)
plt.grid()
###Output
_____no_output_____
###Markdown
Normal distributions are a sensible choice for many applications. In the absence of prior knowledge about what form a distribution over the real numbers should take, the normal distribution is a good default choice for two major reasons.1. Many distributions we wish to model are truly close to being normal distributions. The __central limit theorem__ shows that the sum of many independent random variables is approximately normally distributed.2. Out of all possible probability distributions with the same variance, the normal distribution encodes the maximum amount of uncertainty over the real numbers. We can thus think of the normal distribution as being the one that inserts the least amount of prior knowledge into a model.The normal distribution generalizes to $\mathbb{R}^n$, in which case it is known as the __multivariate normal distribution__. It may be parameterized with a positive definite symmetric matrix $\Sigma$:$$\color{orange}{\mathcal{N}(x; \mu, \Sigma) = \sqrt{\frac{1}{(2 \pi)^n det(\Sigma)}} exp \Bigg(- \frac{1}{2} (x - \mu)^\top \Sigma^{-1} (x - \mu) \Bigg) \tag{16}}$$The parameter $\mu$ still gives the mean of the distribution, though now it is vector valued. The parameter $\Sigma$ gives the covariance matrix of the distribution.
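The next cell uses a diagonal covariance for simplicity. For a general positive definite $\Sigma$, one option is tfd.MultivariateNormalTriL, which is parameterized by the Cholesky factor of $\Sigma$; a minimal sketch with an arbitrary example covariance follows.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

# Example full covariance matrix (must be positive definite); the values are illustrative only
Sigma = tf.constant([[2.0, 0.8],
                     [0.8, 1.0]])
mvn_full = tfd.MultivariateNormalTriL(loc=[0., 0.], scale_tril=tf.linalg.cholesky(Sigma))
print("Covariance implied by the distribution:\n{}".format(mvn_full.covariance()))
print("A few samples:\n{}".format(mvn_full.sample(3)))
```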
###Code
# We create a multivariate normal distribution with two distributions with mean 0. and std.deviation of 2.
mvn = tfd.MultivariateNormalDiag(loc=[0., 0.], scale_diag = [2., 2.])
# we take 1000 samples from the distribution
samples = mvn.sample(1000)
# Plot of multi variate distribution
g = sns.jointplot(samples[:, 0], samples[:, 1], kind='scatter', color=color_b)
plt.show()
###Output
_____no_output_____
###Markdown
3.9.4 Exponential and Laplace Distributions In the context of deep learning, we often want to have a probability distribution with a sharp point at $x = 0$. To accomplish this, we can use the __exponential distribution__: $$\color{orange}{p(x; \lambda) = \lambda \mathbf{1}_{x \geq 0} exp(- \lambda x) \tag{17}}$$The exponential distribution uses the indicator function $\mathbf{1}_{x \geq 0}$ to assign probability zero to all negative values of $x$.
###Code
# We use linspace to create a range of values from 0 to 4 with increments of (stop - start) / (num - 1)
a = tf.linspace(start=0., stop=4., num=41)
# the tf.newaxis expression is used to increase the dimension of the existing array by one more dimension
a = a[..., tf.newaxis]
lambdas = tf.constant([1.])
# We create a Exponential distribution and calculate the PDF for a
expo_pdf = tfd.Exponential(rate=1.).prob(a)
# convert tensors into numpy ndarrays for plotting
[a_, expo_pdf_] = evaluate([a,expo_pdf])
# Plot of Exponential distribution
plt.figure(figsize=(12.5, 4))
plt.plot(a_.T[0], expo_pdf_.T[[0]][0], color=color_sb)
plt.fill_between(a_.T[0], expo_pdf_.T[[0]][0],alpha=.33, color=color_b)
plt.title(r"Probability density function of Exponential distribution with $\lambda$ = 1")
plt.grid()
###Output
_____no_output_____
###Markdown
A closely related probability distribution that allows us to place a sharp peak of probability mass at an arbitrary point $\mu$ is the __Laplace distribution__:$$\color{orange}{\text{Laplace} (x; \mu, \gamma) = \frac{1}{2 \gamma} exp \Big( - \frac{|x - \mu|}{\gamma} \Big) \tag{18}}$$
###Code
# We use linspace to create a range of values from 0 to 4 with increments of (stop - start) / (num - 1)
a = tf.linspace(start=0., stop=4., num=41)
# the tf.newaxis expression is used to increase the dimension of the existing array by one more dimension
a = a[..., tf.newaxis]
lambdas = tf.constant([1.])
# We create a Laplace distribution and calculate the PDF for a
laplace_pdf = tfd.Laplace(loc=1., scale=1.).prob(a)
# convert tensors into numpy ndarrays for plotting
[a_, laplace_pdf_] = evaluate([a, laplace_pdf])
# Plot of laplace distribution
plt.figure(figsize=(12.5, 4))
plt.plot(a_.T[0], laplace_pdf_.T[[0]][0], color=color_sb)
plt.fill_between(a_.T[0], laplace_pdf_.T[[0]][0],alpha=.33, color=color_b)
plt.title(r"Probability density function of Laplace distribution")
plt.grid()
###Output
_____no_output_____
###Markdown
3.9.5 The Dirac Distribution and Empirical Distribution In some cases, we wish to specify that all the mass in a probability distribution clusters around a single point. This can be accomplished by defining a PDF using the Dirac delta function, $\delta (x)$:$$\color{orange}{p(x) = \delta(x - \mu) \tag{19}}$$The Dirac delta function is defined such that it is zero valued everywhere except 0, yet integrates to 1. We can think of the Dirac delta function as being the limit point of a series of functions that put less and less mass on all points other than zero.By defining $p(x)$ to be $\delta$ shifted by $-\mu$ we obtain an infinitely narrow infinitely high peak of probability mass where $x = \mu$
###Code
"""
There is no Dirac distribution in TensorFlow Probability. You could approximate one via the fast Fourier
transform in tf.signal, but that would take us outside the scope of the book, so instead we use a normal
distribution with a very small scale to approximate a Dirac distribution. Play around with the delta and mu values to see how the distribution moves.
"""
# We use linspace to create a range of values from -8 to 8 with increments of (stop - start) / (num - 1)
rand_x= tf.linspace(start=-8., stop=8., num=150)
# Gaussian distribution with a standard deviation of 1/6 and mean 2
delta = float(1./6.)
mu = float(2.)
dirac_pdf = tfd.Normal(loc=mu, scale=delta).prob(rand_x)
# convert tensors into numpy ndarrays for plotting
[rand_x_, dirac_pdf_] = evaluate([rand_x, dirac_pdf])
# Plot of the dirac distribution
plt.plot(rand_x_, dirac_pdf_, color=color_sb)
plt.fill_between(rand_x_, dirac_pdf_, color=color_b)
plt.grid()
###Output
_____no_output_____
###Markdown
A common use of the Dirac delta distribution is as a component of an __empirical distribution__:$$\color{orange}{\hat{p}(x) = \frac{1}{m} \displaystyle\sum_{i = 1}^m \delta (x - x^{(i)}) \tag{20}}$$which puts probability mass $\frac{1}{m}$ on each of the $m$ points $x^{(1)}, \cdots, x^{(m)}$, forming a given data set or collection of samples. The Dirac delta distribution is only necessary to define the empirical distribution over continuous variables. For discrete variables, the situation is simpler: an empirical distribution can be conceptualized as a multinoulli distribution, with a probability associated with each possible input value that is simply equal to the __empirical frequency__ of that value in the training set. We can view the empirical distribution formed from a dataset of training examples as specifying the distribution that we sample from when we train a model on this dataset. Another important perspective on the empirical distribution is that it is the probability density that maximizes the likelihood of the training data. 3.9.6 Mixtures of Distributions One common way of combining simpler distributions to define a probability distribution is to construct a __mixture distribution__. A mixture distribution is made up of several component distributions. On each trial, the choice of which component distribution should generate the sample is determined by sampling a component identity from a multinoulli distribution:$$\color{orange}{P(\mathrm{x}) = \displaystyle\sum_i P(c = i) \ P(\mathrm{x} | c = i) \tag{21}}$$where $P(c)$ is the multinoulli distribution over component identities.
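TFP also ships a tfd.Empirical distribution that implements exactly this construction for a set of samples, placing mass 1/m on each observed point (assuming the installed TFP version includes it). A minimal sketch with made-up data:
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

# A small made-up data set x^(1), ..., x^(m)
data = tf.constant([0.2, 0.2, 0.5, 1.0, 1.3])
empirical = tfd.Empirical(samples=data)
print("Mass placed on x = 0.2: {}".format(empirical.prob(0.2)))          # 2/5, since 0.2 appears twice
print("Mean of the empirical distribution: {}".format(empirical.mean())) # equals the sample mean
```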
###Code
"""
We will be creating two variables, each with two components, to plot the mixture of distributions.
The tfd.MixtureSameFamily distribution implements a batch of mixture distributions where all components are from
different parameterizations of the same distribution type. In our example, we will be using tfd.Categorical to
manage the probability of selecting components, followed by tfd.MultivariateNormalDiag as the components.
MultivariateNormalDiag constructs a multivariate normal distribution on R^k.
"""
num_vars = 2 # Number of variables (`n` in formula).
var_dim = 1 # Dimensionality of each variable `x[i]`.
num_components = 2 # Number of components for each mixture (`K` in formula).
sigma = 5e-2 # Fixed standard deviation of each component.
# Set seed.
tf.random.set_seed(77)
# categorical distribution
categorical = tfd.Categorical(logits=tf.zeros([num_vars, num_components]))
# Choose some random (component) modes.
component_mean = tfd.Uniform().sample([num_vars, num_components, var_dim])
# component distribution for the mixture family
components = tfd.MultivariateNormalDiag(loc=component_mean, scale_diag=[sigma])
# create the mixture same family distribution
distribution_family = tfd.MixtureSameFamily(mixture_distribution=categorical, components_distribution=components)
# Combine the distributions
mixture_distribution = tfd.Independent(distribution_family, reinterpreted_batch_ndims=1)
# Extract a sample from the distribution
samples = mixture_distribution.sample(1000).numpy()
# Plot the distributions
g = sns.jointplot(x=samples[:, 0, 0], y=samples[:, 1, 0], kind="scatter", color=color_b, marginal_kws=dict(bins=50))
plt.show()
###Output
_____no_output_____
###Markdown
The mixture model allows us to briefly glimpse a concept that will be of paramount importance later: the __latent variable__. A latent variable is a random variable that we cannot observe directly. Latent variables may be related to x through the joint distribution. A very powerful and common type of mixture model is the __Gaussian mixture model__, in which the components $p(\mathrm{x} | c =i)$ are Gaussians. Each component has a separately parametrized mean $\mu^{(i)}$ and covariance $\Sigma^{(i)}$. As with a single Gaussian distribution, the mixture of Gaussians might constrain the covariance matrix for each component to be diagonal or isotropic. A Gaussian mixture model is a __universal approximator__ of densities, in the sense that any smooth density can be approximated with any specific nonzero amount of error by a Gaussian mixture model with enough components. Some of the other continuous distribution functions include:- Erlang Distribution: in a Poisson process of rate $\lambda$, the waiting time across $k$ events has an Erlang distribution.- Gamma Distribution: in a Poisson process with rate $\lambda$, the gamma distribution gives the time to the $k^{th}$ event.- Beta Distribution: represents a family of probabilities and is a versatile way to represent outcomes for percentages or proportions.- Dirichlet Distribution: is a multivariate generalization of the Beta distribution. Dirichlet distributions are commonly used as prior distributions in Bayesian statistics. I recommend you go back to some of the examples before section 9 to see how these distributions are applied. 03.10 - Useful Properties of Common Functions Logistic Sigmoid:$$\color{orange}{\sigma(x) = \frac{1}{1 + exp(-x)} \tag{22}}$$The logistic sigmoid is commonly used to produce the $\phi$ parameter of a Bernoulli distribution because its range is (0, 1), which lies within the valid range of values for the $\phi$ parameter. The sigmoid function saturates when its argument is very positive or very negative, meaning that the function becomes very flat and insensitive to small changes in its input.
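Before the logistic sigmoid example below, here is a minimal one-dimensional Gaussian mixture sketch built with tfd.MixtureSameFamily; the weights, means and scales are arbitrary illustration values.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions

# Two-component 1-D Gaussian mixture: 0.3 * N(-2, 0.5) + 0.7 * N(1, 1)
gmm = tfd.MixtureSameFamily(
    mixture_distribution=tfd.Categorical(probs=[0.3, 0.7]),
    components_distribution=tfd.Normal(loc=[-2., 1.], scale=[0.5, 1.]))
print("Mixture mean: {}".format(gmm.mean()))        # 0.3*(-2) + 0.7*1 = 0.1
print("Density at x = 0: {}".format(gmm.prob(0.)))
samples = gmm.sample(1000)                          # could be plotted with sns.distplot(samples)
```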
###Code
def logistic(x, phi):
    """Calculates the logistic sigmoid 1 / (1 + exp(-phi * x)).
    Arguments:
        x (tf.Tensor) : the input values.
        phi (float) : steepness parameter.
    Returns:
        Values in the range (0, 1).
    """
    return 1.0 / (1.0 + tf.exp(-phi * x))
# create a range of values from -4 to 4 with increments of (stop - start) / (num - 1)
x_vals = tf.linspace(start=-4., stop=4., num=100)
# Create three logistic functions to see the effect of the parameter phi
log_phi_1 = logistic(x_vals, 1.)
log_phi_3 = logistic(x_vals, 3.)
log_phi_5 = logistic(x_vals, -5.)
# convert tensors into numpy ndarrays for plotting
[x_vals_, log_phi_1_, log_phi_3_, log_phi_5_] = evaluate([x_vals, log_phi_1, log_phi_3, log_phi_5])
# Plot of the logistic function
plt.figure(figsize = (12, 5))
plt.plot(x_vals_, log_phi_1_, label=r"$\phi = 1$")
plt.plot(x_vals_, log_phi_3_, label=r"$\phi = 3$")
plt.plot(x_vals_, log_phi_5_, label=r"$\phi = -5$")
plt.legend()
plt.grid()
###Output
_____no_output_____
###Markdown
Softplus Function:$$\color{orange}{\zeta (x) = log \ (1 + exp(x)) \tag{23}}$$The softplus function can be useful for producing the $\beta$ or $\sigma$ parameter of a normal distribution because its range is (0, $\infty$). It also arises commonly when manipulating expressions involving sigmoids. The name of the softplus function comes from the fact that it is a smoothed, or โsoftened,โ version of $x^+ = max(0, x)$
###Code
def softplus(x, beta):
    """Calculates the softplus function log(1 + exp(beta * x)).
    Arguments:
        x (tf.Tensor) : the input values.
        beta (float) : steepness parameter.
    Returns:
        Values in the range (0, infinity).
    """
    return tf.math.log(1 + tf.math.exp(beta * x))
# create a range of values from -4 to 4 with increments of (stop - start) / (num - 1)
x_vals = tf.linspace(start=-4., stop=4., num=100)
# Create three softplus functions to see the effect of the parameter beta
log_beta_1 = softplus(x_vals, 1.)
log_beta_3 = softplus(x_vals, 3.)
log_beta_5 = softplus(x_vals, -5.)
# convert tensors into numpy ndarrays for plotting
[x_vals_, log_beta_1_, log_beta_3_, log_beta_5_] = evaluate([x_vals, log_beta_1, log_beta_3, log_beta_5])
# Plot of the softplus function
plt.figure(figsize = (12, 5))
plt.plot(x_vals_, log_beta_1_, label=r"$\beta = 1$")
plt.plot(x_vals_, log_beta_3_, label=r"$\beta = 3$")
plt.plot(x_vals_, log_beta_5_, label=r"$\beta = -5$")
plt.legend()
plt.grid()
###Output
_____no_output_____
###Markdown
The following are some useful properties you may wish to memorize: The function $\sigma^{-1}(x)$ is called the __logit__ in statistics, but this term is rarely used in machine learning. 03.11 - Bayes' Rule __Bayes' rule__ is a useful tool that computes the conditional probability $P( x | y)$ from $P(y | x)$. Here - $P( x | y)$ is called the _posterior_; this is what we are trying to estimate, - $P(y | x)$ is called the _likelihood_; this is the probability of observing the new evidence, given our initial hypothesis, - $P(x)$ is called the _prior_; this is the probability of our hypothesis without any additional prior information,- $P(y)$ is called the _marginal likelihood_; this is the total probability of observing the evidence.Bayes' rule can be summed up as:$$\color{orange}{P(x | y) = \frac{P(x) \ P(y | x)}{P(y)} \tag{24}}$$Even though $P(y)$ appears in the formula, it is usually feasible to compute $P(y) = \sum_x P(y | x) P(x)$, so we do not need to begin with knowledge of $P(y)$.
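Before moving on to Bayes' rule in code, the logit property mentioned above is easy to verify numerically; the sketch below is purely illustrative.
```python
import tensorflow as tf

x = tf.constant([-2.0, 0.0, 3.0])
p = tf.math.sigmoid(x)               # sigma(x), values in (0, 1)
logit = tf.math.log(p / (1. - p))    # sigma^{-1}(p), the logit
print("x:               {}".format(x))
print("logit(sigma(x)): {}".format(logit))   # recovers x up to floating-point error
```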
###Code
"""
There are 2 cookie jars:
Jar 1 has 30 vanilla cookies and 10 chocolate cookies
Jar 2 has 20 vanilla cookies and 20 chocolate cookies
You randomly pick one cookie from one randomly chosen jar. It is vanilla. What is the probability it was from Jar 1?
So we are looking for P(Jar 1 | Vanilla).
"""
# probability of picking between Jar 1 and 2: The Prior probability
p_jar_1 = 1/2
# Vanilla / total cookies: The likelihood
p_vanilla_given_jar_1 = 30/(30+10)
# total vanilla cookies / Total cookies: The marginal likelihood
p_vanilla = (30+20) / (30+10 + 20+20)
# Bayes' rule
p_jar_1_given_vanilla = (p_jar_1 * p_vanilla_given_jar_1) / p_vanilla
print('P(Jar 1 | Vanilla) = {}'.format(p_jar_1_given_vanilla))
###Output
P(Jar 1 | Vanilla) = 0.6
###Markdown
03.12 - Technical Details of Continuous Variables In this section, we start with __Measure Theory__, and I will be giving you a no-math introduction to it. The reason you aren't seeing any implementations is that most of the time these implementations are built directly into the tools you are using, unless, of course, you are a researcher studying new types of data. So I won't be giving you any implementations, but nevertheless this is an important area, and understanding it may help you reformulate certain distribution problems. Measure theory studies ways of generalizing the intuitive notions of length, area and volume. For example, see the image below: It's pretty challenging to see how we can measure the area of such a shape, and that's in 2D; most of the time we are playing with higher-dimensional data. By the way, what do you see in the image? I see Darth Vader from Star Wars and I can't unsee it now 😆. Anyways, let's see how measure theory can help in such situations. For example, let's say you want to measure how long it would take for you to read the [Deep Learning with Tensorflow 2.0](https://www.adhiraiyan.org/DeepLearningWithTensorflow.html) book. What would be a good measure? If you go by the number of pages, would that be a good metric? The page count would differ if the book was printed in hardcover, paperback or, in our case, as an ebook. If you take this article instead, the read time you see at the top of the page is measured by the number of words; that is a good metric, because no matter hardcover, paperback or ebook, it has the same number of words. So now we have version invariance between the different published formats, and if I want to add an extra chapter to this book, I just calculate the number of words in that chapter and add it to the book's total. This is the key contribution of measure theory. Measure theory also provides a rigorous way of describing a set of points as negligibly small. Such a set is said to have __measure zero__. Let me explain this by continuing my book example: in this chapter and the rest, you see some pictures, and we can't measure the read time of pictures using words, so we set the pictures to have measure zero, and our read time based on words will still be a good approximation for the length of the book. Another useful term from measure theory is __almost everywhere__. A property that holds almost everywhere holds throughout all space except for on a set of measure zero. Now this is where we generalize our measure: even though the example we saw had little to do with probability, measure theory helps make certain probability statements precise. Another technical detail of continuous variables relates to handling continuous random variables that are deterministic functions of one another. Suppose we have two random variables, __x__ and __y__, such that $y = g(x)$, where *g* is an invertible, continuous, differentiable transformation. One might expect that $p_y(y) = p_x(g^{-1}(y))$. This is not actually the case. The problem with this approach is that it fails to account for the distortion of space introduced by the function *g*. Recall that the probability of $x$ lying in an infinitesimally small region with volume $\delta x$ is given by $p(x) \delta x$. Since *g* can expand or contract space, the infinitesimal volume surrounding $x$ in $x$ space may have different volume in $y$ space. 
We can correct this problem by defining:$$\color{orange}{p_x(x) = p_y(g(x)) \Bigg|\frac{\partial g(x)}{\partial x} \Bigg| \tag{25}}$$In higher dimensions, the derivative generalizes to the determinant of the __Jacobian matrix__, the matrix with $J_{i, j} = \frac{\partial{x}_i}{\partial{y}_j}$. Thus for real-valued vectors $x$ and $y$:$$\color{orange}{p_x(x) = p_y(g(x)) \Bigg| det \Big( \frac{\partial g(x)}{\partial x} \Big) \Bigg| \tag{26}}$$ 03.13 - Information Theory Information theory is a branch of applied mathematics that revolves around quantifying how much information is present in a signal. In the context of machine learning, we can also apply information theory to continuous variables where some of these message length interpretations do not apply. The basic intuition behind information theory is that a likely event should have low information content, less likely events should have higher information content and independent events should have additive information. Let me give you a simple example: let's say you have a male friend, and he is head over heels in love with this girl, so he asks her out pretty much every week, and there's a 99% chance she says no. You being his best friend, he texts you every time after he asks her out to let you know what happened: "Hey guess what she said, NO 😭😭😭". This is of course wasteful; considering he has a very low chance, it makes more sense for your friend to just send "😭", but if she says yes then he can of course send a longer text. This way, the number of bits used to convey the message (and your corresponding data bill) will be minimized. P.S. don't tell your friend he has a low chance, that's how you lose friends 😬. To satisfy these properties, we define the __self-information__ of an event $\mathrm{x} = x$ to be:$$\color{orange}{I(x) = -log \ P(x) \tag{27}}$$In this book, we always use log to mean the natural logarithm, with base e. Our definition of $I(x)$ is therefore written in units of __nats__. One nat is the amount of information gained by observing an event of probability $\frac{1}{e}$. Other texts use base-2 logarithms and units called __bits__ or __shannons__; information measured in bits is just a rescaling of information measured in nats.
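For a quick feel for equation 27 before the entropy example below, the sketch computes the self-information of rolling one particular face of a fair die, in nats and in bits; the event is arbitrary.
```python
import numpy as np

p = 1. / 6.                 # probability of a particular face of a fair die
info_nats = -np.log(p)      # natural log -> nats
info_bits = -np.log2(p)     # base-2 log -> bits (just a rescaling of nats)
print("I(x) = {:.4f} nats = {:.4f} bits".format(info_nats, info_bits))
```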
###Code
"""
No matter what combination of tosses you get, the entropy remains the same; but if you change the probability
of the trial, the entropy changes. Play around with the probs and see how the entropy changes, and check whether
the increase or decrease makes sense.
"""
coin_entropy = [0] # creating the coin entropy list
for i in range(10, 11):
coin = tfd.Bernoulli(probs=0.5) # Bernoulli distribution
    coin_sample = coin.sample(i)  # draw i samples (here, 10 coin tosses)
coin_entropy.append(coin.entropy()) # append the coin entropy
sns.distplot(coin_entropy, color=color_o, hist=False, kde_kws={"shade": True}) # Plot of the entropy
print("Entropy of 10 coin tosses in nats: {} \nFor tosses: {}".format(coin_entropy[1], coin_sample))
plt.grid()
###Output
Entropy of 10 coin tosses in nats: 0.6931471824645996
For tosses: [0 1 1 1 0 1 1 1 0 1]
###Markdown
Self information deals only with a single outcome. We can quantify the amount of uncertainty in an entire probability distribution using the __Shannon entropy__:$$\color{orange}{H(\mathrm{x}) = \mathbb{E}_{x \sim P} [I(x)] = -\mathbb{E}_{x \sim P}[log \ P(x)] \tag{28}}$$also denoted as $H(P)$.Shannon entropy of a distribution is the expected amount of information in an event drawn from that distribution. It gives a lower bound on the number of bits needed on average to encode symbols drawn from a distribution P. Distributions that are nearly deterministic (where the outcome is nearly certain) have low entropy; distributions that are closer to uniform have high entropy. When $\mathrm{x}$ is continuous, the Shannon entropy is known as the __differential entropy__.
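For a discrete distribution, equation 28 is just $-\sum_x P(x) \log P(x)$. The sketch below computes it by hand for a categorical distribution and compares it with TFP's entropy method; the probabilities are arbitrary.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

probs = tf.constant([0.1, 0.2, 0.3, 0.4])
manual_entropy = -tf.reduce_sum(probs * tf.math.log(probs))   # -sum P(x) log P(x), in nats
tfp_entropy = tfd.Categorical(probs=probs).entropy()
print("Manual entropy: {:.4f}   TFP entropy: {:.4f}".format(float(manual_entropy), float(tfp_entropy)))
```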
###Code
"""
Note: for a Bernoulli distribution with probs=0.5, the entropy -E[log P(x)] reduces to -log(0.5), so we can
simply use -log(p.mean()) here. For a different probability or distribution, compute the full expectation instead.
"""
def shannon_entropy_func(p):
"""Calculates the shannon entropy.
Arguments:
p (int) : probability of event.
Returns:
shannon entropy.
"""
return -tf.math.log(p.mean())
# Create a Bernoulli distribution
bernoulli_distribution = tfd.Bernoulli(probs=.5)
# Use TFPs entropy method to calculate the entropy of the distribution
shannon_entropy = bernoulli_distribution.entropy()
print("TFPs entropy: {} matches with the Shannon Entropy Function we wrote: {}".format(shannon_entropy,
shannon_entropy_func(bernoulli_distribution)))
###Output
TFPs entropy: 0.6931471824645996 matches with the Shannon Entropy Function we wrote: 0.6931471824645996
###Markdown
Entropy isn't remarkable for its interpretation, but for its properties. For example, entropy doesn't care about the actual *x* values like variance, it only considers their probability. So if we increase the number of values *x* may take then the entropy will increase and the probabilities will be less concentrated.
###Code
# You can see below by changing the values of x we increase the entropy
shannon_list = []
for i in range(1, 20):
uniform_distribution = tfd.Uniform(low=0.0, high=i) # We create a uniform distribution
shannon_entropy = uniform_distribution.entropy() # Calculate the entropy of the uniform distribution
shannon_list.append(shannon_entropy) # Append the results to the list
# Plot of Shannon Entropy
plt.hist(shannon_list, color=color_b)
plt.grid()
###Output
_____no_output_____
###Markdown
If we have two separate probability distributions P(x) and Q(x) over the same random variable x, we can measure how different these two distributions are using the __Kullback-Leibler (KL) divergence__:$$\color{orange}{D_{KL} (P \| Q) = \mathbb{E}_{x \sim P} \Big[ log \ \frac{P(x)}{Q(x)} \Big] = \mathbb{E}_{x \sim P} [log \ P(x) - log \ Q(x)] \tag{29}}$$In the case of discrete variables, it is the extra amount of information needed to send a message containing symbols drawn from probability distribution P, when we use a code that was designed to minimize the length of messages drawn from probability distribution Q.
###Code
def kl_func(p, q):
"""Calculates the KL divergence of two distributions.
Arguments:
p : Distribution p.
q : Distribution q.
Returns:
the divergence value.
"""
r = p.loc - q.loc
return (tf.math.log(q.scale) - tf.math.log(p.scale) -.5 * (1. - (p.scale**2 + r**2) / q.scale**2))
# We create two normal distributions
p = tfd.Normal(loc=1., scale=1.)
q = tfd.Normal(loc=0., scale=2.)
# Using TFPs KL Divergence
kl = tfd.kl_divergence(p, q)
print("TFPs KL_Divergence: {} matches with the KL Function we wrote: {}".format(kl, kl_func(p, q)))
###Output
TFPs KL_Divergence: 0.4431471824645996 matches with the KL Function we wrote: 0.4431471824645996
###Markdown
The KL divergence has many useful properties, most notably being nonnegative. The KL divergence is 0 if and only if P and Q are the same distribution in the case of discrete variables, or equal โalmost everywhereโ in the case of continuous variables. A quantity that is closely related to the KL divergence is the __cross-entropy__ $H(P, Q) = H(P) + D_{KL} (P \| Q)$, which is similar to the KL divergence but lacking the term on the left:$$\color{orange}{H(P, Q) = - \mathbb{E}_{x \sim P} \ log \ Q(x) \tag{30}}$$Minimizing the cross-entropy with respect to Q is equivalent to minimizing the KL divergence, because Q does not participate in the omitted term.
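The identity $H(P, Q) = H(P) + D_{KL}(P \| Q)$ can be verified with the same two normal distributions used in the next cell; this sketch simply compares the two sides.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions

p = tfd.Normal(loc=1., scale=1.)
q = tfd.Normal(loc=0., scale=2.)
lhs = p.cross_entropy(q)                      # H(P, Q) = E_p[-log q(X)]
rhs = p.entropy() + tfd.kl_divergence(p, q)   # H(P) + KL(P || Q)
print("H(P, Q) = {:.4f},  H(P) + KL(P||Q) = {:.4f}".format(float(lhs), float(rhs)))
```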
###Code
"""
The cross_entropy method computes Shannon's cross entropy, defined as:
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
Note that q.cross_entropy(p) below therefore computes H[Q, P] = E_q[-log p(X)].
"""
# We create two normal distributions
p = tfd.Normal(loc=1., scale=1.)
q = tfd.Normal(loc=0., scale=2.)
# Calculating the cross entropy
cross_entropy = q.cross_entropy(p)
print("TFPs cross entropy: {}".format(cross_entropy))
###Output
TFPs cross entropy: 3.418938636779785
|
notebooks/20210218-mdl-daen690-focus_q2.ipynb | ###Markdown
Determine Data Subset for Focus Question 2 The purpose of this analysis is to identify the data subset necessary to explore possible analytic solutions for **Focus Question 2**. This analysis does not create the dataset needed; it is simply an attempt to discover what dataset joins and columns will be used for the actual analysis. An Entity Relationship Diagram (ERD) model displays the inter-relationships between the Patients, Procedures, and Medications datasets. Dataset Assumptions:1. Datasets provided were built as views and queried from original base tables2. PatientId and FRDPersonnelID columns are unique to each patient and personnel in the base tables but are not a primary key in each provided dataset and may appear more than one time in any of the datasets3. PatientId and FRDPersonnelID in the Procedures and Medications datasets will be treated as foreign keys to the Patients dataset, even though in reality they would not be if we were using all the base tables in the data warehouse Restating Focus Question 2 as a user story: determine the data subset needed to explore what, if any, relationship there is between 1. EMS procedures performed on an individual patient by a provider with calculated tenure and 2. medications given to an individual patient by a provider with calculated tenure. Dataset Observations:- To retrieve EMS procedures performed will require most or all rows from the Procedures dataset.- To retrieve EMS medications given will require most or all rows from the Medications dataset.- To complete the view required, the rows from the Procedures and Medications datasets will need to be joined with the Patients dataset. At a minimum, to begin exploring Focus Question 2 the following columns are needed for- EMS procedures performed on an individual patient by a provider with calculated tenure - Patients.PatientId - Patients.FRDPersonnelID - Patients.PatientOutcome - Patients.DispatchTime - Patients.FRDPersonnelStartTime - Calculated value for months and years of tenure of the provider - Procedures.PatientId - Procedures.Procedure_Performed_Code - Procedures.Procedure_Performed_Description - Procedures.FRDPersonnelID- Medications given to an individual patient by a provider with calculated tenure - Patients.PatientId - Patients.FRDPersonnelID - Patients.PatientOutcome - Patients.DispatchTime - Patients.FRDPersonnelStartTime - Calculated value for months and years of tenure of the provider - Medications.PatientId - Medications.Medication_Given_RXCUI_Code - Medications.Medication_Given_Description - Medications.FRDPersonnelID
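Since the calculated tenure is not a column in any provided dataset, it will have to be derived from DispatchTime and FRDPersonnelStartTime after the joins. A minimal sketch of that derivation is below; the column names follow the list above, the exact datetime formats in the raw file are an assumption, and the helper name is hypothetical.
```python
import pandas as pd

# Illustrative only: derive provider tenure at the time of dispatch.
# Assumes DispatchTime and FRDPersonnelStartTime parse as datetimes; adjust formats as needed.
def add_tenure_columns(df):
    df = df.copy()
    df['DispatchTime'] = pd.to_datetime(df['DispatchTime'])
    df['FRDPersonnelStartTime'] = pd.to_datetime(df['FRDPersonnelStartTime'])
    tenure = df['DispatchTime'] - df['FRDPersonnelStartTime']
    df['TenureYears'] = tenure.dt.days / 365.25    # approximate years of tenure
    df['TenureMonths'] = tenure.dt.days / 30.44    # approximate months of tenure
    return df

# Example usage after the Patients/Procedures or Patients/Medications merge below:
# df_proc_ex = add_tenure_columns(df_proc_ex)
```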
###Code
# Import libraries
import pandas as pd
import numpy as np
# Reads all sheets into a dictionary of DataFrames. Only importing the first thousand rows because that is all
# that's needed to get an example from the original, unsorted datasets provided.
# Note: in the provided dataset 20210214-ems-raw-v03.xlsx, the Medications sheet column
# Personnel_Performer_ID_Internal was manually renamed to FRDPersonnelID
all_dfs = pd.read_excel(r'./data/20210214-ems-raw-v03.xlsx',
sheet_name=None,
nrows=1000,
na_values=['NA'])
# Display dictionary keys created
all_dfs.keys()
# Display Patients dataset row and column count
all_dfs['Patients'].shape
# Display Procedures dataset row and column count
all_dfs['Procedures'].shape
# Display Medications dataset row and column count
all_dfs['Medications'].shape
# Select single row for example from Patients dataset where PatientId equals 479862 and display
df_pat_ex = all_dfs['Patients'].loc[(all_dfs['Patients']['PatientId'] == 479862)]
df_pat_ex
# Select single row for example from Procedures dataset where PatientId equals 479862 and display
df_proc_ex = all_dfs['Procedures'].loc[(all_dfs['Procedures']['PatientId'] == 479862)]
df_proc_ex
# Select single row for example from Medications dataset where PatientId equals 479862 and display
df_med_ex = all_dfs['Medications'].loc[(all_dfs['Medications']['PatientId'] == 479862)]
df_med_ex
# Inner join on Patients and Procedures example and display results
df_proc_ex = df_pat_ex.merge(df_proc_ex,
on=('PatientId','FRDPersonnelID'),
how='inner')
df_proc_ex
# Inner join on Patients and Medications example and display results
df_med_ex = df_pat_ex.merge(df_med_ex,
on=('PatientId','FRDPersonnelID'),
how='inner')
df_med_ex
###Output
_____no_output_____ |
notebooks/Tutorial - FHIRstorm Authorization (confidential).ipynb | ###Markdown
**Note**: This tutorial explains how to register a 'confidential' app. Many FHIR servers don't support "confidential" apps (those which can protect a client_secret), so you may be stuck doing things the public way for a while. Register the clientWe're going to be using the [SmartHealthIT][smarthealthit] Sandboxes, so we need to go to the [sandbox][sb] and register a new client there, clicking 'Register Manually', and filling in the boxes:- App Type: Confidential Client- App Name: FHIRstorm test- App Launch URI: http://localhost:8000/launch- App Redirect URIs: http://localhost:8000/callback- Allow Offline Access: (checked)- Patient Scoped App: (checked)- App logo: (skip it for now)Register it, and then save the client ID and secret that it gives you to a configuration file. The one we'll here is saved in example/auth-tutorial/config.py[smarthealthit]: http://docs.smarthealthit.org/[sb]: https://sandbox.smarthealthit.org/smartdstu2//manage-apps
###Code
# %load ../example/auth-tutorial/config.py
SMART_CLIENT_ID = '9644d85e-07f0-4962-a78b-ab1bfe39c6d8'
SMART_CLIENT_SECRET = 'APOO8c_OjK4DrcwoTYp82KUv2LrRvD_hlVinoZzqoxO5EPUZhprbb4azfXp8qTdMFxdviSQIKr7SMswVpR4SSVc'
SMART_SERVICE_ROOT = 'https://sb-fhir-dstu2.smarthealthit.org/smartdstu2/data'
SMART_SCOPE = 'openid profile offline_access patient/*.*'
JWT_SECRET = 'itsaseekrit'
###Output
_____no_output_____
###Markdown
To actually handle the redirect and such, we'll need to have a (minimal) web server. There's a basic one in example/auth-tutorial/app.pyLet's walk through that file step by step:
###Code
# %load -r 1-10 ../example/auth-tutorial/app.py
import os
import pkg_resources
from flask import Flask, request, redirect, jsonify, url_for, abort
from fhirstorm import Connection, auth
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'
app = Flask(__name__)
app.config.from_pyfile(
pkg_resources.resource_filename(
'fhirstorm', '../example/auth-tutorial/config.py'))
###Output
_____no_output_____
###Markdown
Here, we have the imports and global variables we'll need. In particular, we have- `os` to give us `os.environ`, which lets us tell the Python oauth library that it's OK for us to use insecure (http) redirect urls (they're localhost urls anyway, so they're pretty secure...)- From `flask` we import a number of names to help us with our app (more on those when we use them)- `Connection` and `auth` is what we need from `fhirstorm` to let us connect to the server- `app` is the Flask application - a little bit of `pkg_resources` magic to help us locate the config file Next, we have the actual web routes we'll be handling.For this tutorial, we'll need to handle 3 paths:- `/launch` will handle cases where the FHIR server initiates a session with *us* (the so-called 'launch profile')- `/callback` will handle the callback from the FHIR server with an authorization **code** (that we'll exchange for a **access_token** SMART "launch sequences"There are two ways that your app might be started: either from the FHIR server (via an EHR interface or patient portal), or 'standalone' (the user starts at your app and doesn't have to log into a patient portal before using your app).We'll refer to these as the **EHR launch sequence** and the **Standalone launch sequence** below.The general idea is as follows:1. [launch only] The EHR user chooses to launch your app 1. [launch only] The EHR redirects the user to your "App launch URI"1. [both] Your app redirects the user to the EHR's `authorization_uri`1. [both] The EHR user confirms that access is authorized1. [both] The EHR redirects the user back to your "App redirect URI" with an authorization `code`1. [both] Your app (not involving the user) exchanges the `code` for an `access_token` via a `POST` request1. [both] Your app uses the `access_token` for subsequent callsIn the **EHR launch sequence**, everything starts at step 1. In the **Standalone launch sequence**, everything starts in step 3. Otherwise they're basically the same Handling the EHR launch sequenceOur `/launch` route's purpose is just to send a specially-crafted redirect _back_ to the FHIR server. This URL (which we configured earlier as the 'App Launch URI') is invoked via a GET request by the FHIR server with two parameters: `iss` and `launch`:- `iss` is a reference to the service root used by the server. That way your app can be configured to work with a number of different EHRs, all hitting the same `/launch` URI- `launch` is an opaque string containing a value that you must send _back_ to the FHIR server with the authorization redirect. In the code below, we:- inspect the `iss` argument (to make sure we recognize the server),- grab the `service` object from a FHIRstorm `Connection` back to the FHIR server,- generate a `state` parameter that will let us verify the callback we receive later,- use FHIRstorm's `auth.authorization_url` function to obtain a URL that we'll redirect the user to, and - finally, actually perform the redirect
###Code
# %load -s launch ../example/auth-tutorial/app.py
@app.route('/launch')
def launch():
iss = request.args['iss']
if iss != app.config['SMART_SERVICE_ROOT']:
abort(403)
launch = request.args.get('launch', None)
conn = Connection(iss)
service = conn.service()
state = auth.jwt_state(app.config['JWT_SECRET'])
authorization_url, state = auth.authorization_url(
service,
client_id=app.config['SMART_CLIENT_ID'],
client_secret=app.config['SMART_CLIENT_SECRET'],
redirect_uri=url_for('callback', _external=True),
scope=app.config['SMART_SCOPE'] + ' launch',
state=state,
aud=iss,
launch=launch)
return redirect(authorization_url)
###Output
_____no_output_____
###Markdown
Handling the callback (used for both EHR launch and standalone launch) After the user verifies that access is granted, the EHR will redirect the user to your "App redirect URI" (which we specified both at app registration time _and_ in the construction of our `authorization_url`). In the code below, we:- get `Connection` and `service` objects from FHIRstorm again, - use the `service` and `auth.fetch_token` to exchange our authorization code (which is embedded in `request.url`) for an access token,- save the token in a global variable (just for this tutorial; normally we'd store it in a database)
###Code
# %load -s callback ../example/auth-tutorial/app.py
@app.route('/callback')
def callback():
global ACCESS_TOKEN
conn = Connection(app.config['SMART_SERVICE_ROOT'])
service = conn.service()
token = auth.fetch_token(
service,
client_id=app.config['SMART_CLIENT_ID'],
client_secret=app.config['SMART_CLIENT_SECRET'],
redirect_uri=url_for('callback', _external=True),
authorization_response=request.url,
state_validator=auth.jwt_state_validator(
app.config['JWT_SECRET'],
iss=app.config['SMART_SERVICE_ROOT']))
print(token)
ACCESS_TOKEN = token
return jsonify(token)
###Output
_____no_output_____
###Markdown
Testing out the EHR launch sequence Now that we have the Flask application designed, we can go ahead and run it for a couple of requests (one to handle the `/launch` request and one to handle the `/callback`). If you want to do this on your own, you can either run the Flask app standalone as follows:```bashFLASK_APP=example/auth-tutorial/app.py python -m flask run```Or you can run the code interactively to handle a couple of requests as shown in the following cell. Either way, once you have the server listening for connections, you'll need to return to the sandbox and launch your app from there.
###Code
from wsgiref.simple_server import make_server
with make_server('', 8000, app) as httpd:
httpd.handle_request()
httpd.handle_request()
###Output
127.0.0.1 - - [09/Dec/2017 16:48:21] "GET /launch?iss=https%3A%2F%2Fsb-fhir-dstu2.smarthealthit.org%2Fsmartdstu2%2Fdata&launch=7E5ooK HTTP/1.1" 302 1307
###Markdown
At this point, we have an `access_token` which we should save (but in this tutorial we're just keeping it in the `ACCESS_TOKEN` variable:
###Code
ACCESS_TOKEN
###Output
_____no_output_____
###Markdown
Testing out the standalone launch sequenceWe can also test out the standalone launch sequence. First, we'll need to create the authorization URL:
###Code
conn = Connection(SMART_SERVICE_ROOT)
service = conn.service()
state = auth.jwt_state(app.config['JWT_SECRET'])
scope = 'launch launch/patient ' + SMART_SCOPE
authorization_url, state = auth.authorization_url(
service,
client_id=SMART_CLIENT_ID,
client_secret=SMART_CLIENT_SECRET,
redirect_uri='http://localhost:8000/callback',
scope=scope,
state=state,
aud=SMART_SERVICE_ROOT)
scope
###Output
_____no_output_____
###Markdown
Things to notice in the above code:- we added the `launch/patient` scope to tell the FHIR server we want a "patient context" to be selected for us- we don't need to pass the `aud` and `launch` parameters this time.To actually perform the authorization, we can visit the authorization URL while running our little server for one request:
###Code
authorization_url
import webbrowser
webbrowser.open_new_tab(authorization_url)
with make_server('', 8000, app) as httpd:
httpd.handle_request()
ACCESS_TOKEN
###Output
_____no_output_____
###Markdown
Accessing resources using the access tokenNow that we have our access token, we can actually start to use it to get resources. There will be more on that in the Resources tutorial, but for now, we'll just fetch the `Patient`:
###Code
from requests_oauthlib import OAuth2Session
conn = Connection(
SMART_SERVICE_ROOT,
session=OAuth2Session(token=ACCESS_TOKEN))
svc = conn.service()
conn.get(f'/Patient/{ACCESS_TOKEN["patient"]}')
###Output
_____no_output_____
###Markdown
You can also retrieve resources by their type (all the advertised resources are available under the `svc.r` attribute):
###Code
dir(svc.r)
p = svc.r.Patient.fetch(ACCESS_TOKEN['patient'])
p.name
###Output
_____no_output_____ |
src/models/2021-06-09_heart-modes-normal-abnorm_01.ipynb | ###Markdown
Neural Nets to Predict Normal Abnormal HR from Modes
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
%%javascript
utils.load_extension('collapsible_headings/main')
utils.load_extension('hide_input/main')
utils.load_extension('execute_time/ExecuteTime')
utils.load_extension('code_prettify/code_prettify')
utils.load_extension('scroll_down/main')
utils.load_extension('jupyter-js-widgets/extension')
from fastai.tabular import *
PATH = "/home/tris/Github/SysID_EEGdynamics/src/data/preprocessed/tabular_DMD"
# col_names=['subject','trial','mode_no','real1','real2','real3','real4','real5','real6','real7','real8','real9','real10','real11','real12','real13','real14','real15','real16','real17','real18','real19','real20','real21','real22','real23','real24','real25','real26','real27','real28','real29','real30','real31','real32','imag1','imag2','imag3','imag4','imag5','imag6','imag7','imag8','imag9','imag10','imag11','imag12','imag13','imag14','imag15','imag16','imag17','imag18','imag19','imag20','imag21','imag22','imag23','imag24','imag25','imag26','imag27','imag28','imag29','imag30','imag31','imag32','fn','zeta'];
df_raw = pd.read_csv('/home/tris/Github/SysID_EEGdynamics/src/data/preprocessed/2021_06_09_hr_ex_06.csv')
df_raw
df_raw.iloc[[40],:]
# os.makedirs('tmp', exist_ok=True)
# df_raw.to_feather('tmp/eeg-raw')
import pandas as pd
#df_raw = pd.read_feather('tmp/eeg-raw') #lol raw sashimis and sushis
df_raw.head()
# df_raw.iloc[[200],:]
# fig, axs = plt.subplots(1, 3, figsize=(15, 5))
# axs[0].hist(df_raw.norm)
# axs[0].set_title('Label')
# axs[1].hist(df_raw.sex)
# axs[1].set_title('Sex')
# axs[2].hist(df_raw.Phi1)
# axs[2].set_title('Mode1c1')
valid_idx=np.random.randint(low=0, high=len(df_raw), size=250)
valid_idx
dep_var= 'norm'
path = "/media/tris/tris_files/github/SysID_EEGdynamics/models"
data = TabularDataBunch.from_df(path, df_raw, dep_var, valid_idx=valid_idx)
data.show_batch()
# max_log_y = np.log(np.max(df_raw['norm'])*1.2)
# y_range = torch.tensor([0, max_log_y])
learn = tabular_learner(data, layers=[1000,500], ps=[0.001,0.01], emb_drop=0.04, metrics=accuracy, emb_szs={'norm': 2})
#learn = tabular_learner(data, layers=[10000,500], emb_drop=0.04, metrics=accuracy)
learn.model
learn.model_dir='/home/tris/Github/SysID_EEGdynamics/models'
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(25, 0.8e-3, wd=0.2)
learn.recorder.plot_losses()
learn.show_results()
preds,y,losses=learn.get_preds(with_loss=True)
interp=ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix(figsize=(8,8))
###Output
_____no_output_____ |
4-Machine_Learning/3-Deep Learning/1-Redes Neuronales/ToDo_Exercise1 Deep Learning classification.ipynb | ###Markdown
Basic Classification: Predict an Image of Clothing This guide trains a neural network model to classify images of clothing, such as sneakers and shirts. It uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API for building and training models in TensorFlow.
###Code
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
###Output
2.0.0
###Markdown
Import the Fashion MNIST dataset This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset, which contains more than 70,000 images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here: <img src="https://tensorflow.org/images/fashion-mnist-sprite.png" alt="Fashion MNIST sprite" width="600"> Figure 1. Fashion-MNIST samples (by Zalando, MIT License). The dataset can be imported and loaded directly from TensorFlow. Each image is mapped to a single label, the *class* of clothing it represents: 0 T-shirt/top, 1 Trouser, 2 Pullover, 3 Dress, 4 Coat, 5 Sandal, 6 Shirt, 7 Sneaker, 8 Bag, 9 Ankle boot. Since the *class names* are not included in the dataset, we store them in the following list:
###Code
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
_____no_output_____
###Markdown
Explore the dataset * How many images are in train? * And in test? * How many pixels does each image consist of? * What are the label values? Preprocess the dataset: inspect and plot the first image of the train dataset. To do so, use matplotlib's `imshow` function. A sketch for loading the data follows below.
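A minimal sketch of one way to load and explore the data (the variable names `train_images`, `train_labels`, `test_images`, and `test_labels` are assumptions that the later cells rely on):

```python
# Load Fashion MNIST directly from TensorFlow (downloads on first use)
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# Explore the dataset: image counts, image size, and label values
print(train_images.shape)       # (60000, 28, 28): 60,000 train images of 28x28 pixels
print(test_images.shape)        # (10000, 28, 28): 10,000 test images
print(np.unique(train_labels))  # labels are the integers 0..9
```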
###Code
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
###Output
_____no_output_____
###Markdown
Scale the train and test sets so their values range from 0 to 1. No library is needed; dividing each set is enough, as in the sketch below. To verify that the data is in the right format and ready for building and training the network, let's display the first 25 images of the *training set* and show the class name below each image.
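A minimal sketch of the scaling step (dividing by the maximum pixel value, 255):

```python
# Scale pixel values from the 0-255 range to the 0-1 range
train_images = train_images / 255.0
test_images = test_images / 255.0
```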
###Code
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
###Output
_____no_output_____
###Markdown
Build the Model Building the neural network requires configuring the layers of the model and then compiling the model. Set up the layers: build all of the model's layers.
###Code
'''
The first layer of this network, `tf.keras.layers.Flatten`,
transforms the format of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional array
(of 28*28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up.
This layer has no parameters to learn; it only reformats the data.
After the pixels are "flattened", the sequence consists of two `tf.keras.layers.Dense` layers.
These are densely connected, or fully connected, layers. The first `Dense` layer has 128 nodes (or neurons).
The second (and last) layer is a 10-node *softmax* layer that returns an array of 10 probabilities that sum to 1.
Each node contains a score indicating the probability that the current image belongs to one of the 10 classes.
'''
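# A minimal sketch of the architecture described above (one possible answer for
# this exercise, not the only valid one): Flatten -> Dense(128, relu) -> Dense(10, softmax)
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])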
###Output
_____no_output_____
###Markdown
Compile the model Before the model is ready for training, a few more settings are needed. These are added during the model's compile step: * *Loss function*: measures how accurate the model is during training. You want to minimize this function to steer the model in the right direction. * *Optimizer*: how the model is updated based on the data it sees and on the loss function. * *Metrics*: used to monitor the training and testing steps. Try several optimizers in the training that follows. Since this is a multi-class classification problem, you will have to use `sparse_categorical_crossentropy` as the loss function. For the metrics, simply use `accuracy`. Train the Model Start by training it for 10 epochs, then try more.
###Code
'''
As the model trains, the loss and accuracy are displayed.
This model reaches an accuracy of about 0.88 (or 88%) on the training data.
'''
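# A minimal sketch of one way to compile and train (an assumption for this
# exercise; 'adam' is just one optimizer option among several to try):
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Start with 10 epochs, then experiment with more
model.fit(train_images, train_labels, epochs=10)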
###Output
Epoch 1/10
1875/1875 [==============================] - 3s 1ms/step - loss: 0.6271 - accuracy: 0.7858
Epoch 2/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3867 - accuracy: 0.8615
Epoch 3/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3386 - accuracy: 0.8769
Epoch 4/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3193 - accuracy: 0.8818
Epoch 5/10
1875/1875 [==============================] - 2s 1ms/step - loss: 0.2931 - accuracy: 0.8942
Epoch 6/10
1875/1875 [==============================] - 2s 1ms/step - loss: 0.2814 - accuracy: 0.8962
Epoch 7/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2672 - accuracy: 0.8997
Epoch 8/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2551 - accuracy: 0.9043
Epoch 9/10
1875/1875 [==============================] - 4s 2ms/step - loss: 0.2491 - accuracy: 0.9070
Epoch 10/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2405 - accuracy: 0.9091
###Markdown
Evaluate Accuracy Check the model's performance on the test data.
###Code
'''
It turns out that the accuracy on the test dataset is a bit lower than the accuracy on the training set.
This gap between training and test performance is due to *overfitting*.
Overfitting happens when a machine learning (ML) model performs worse on new data it has never seen before,
compared to the training data.
'''
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
###Output
313/313 - 0s - loss: 0.3352 - accuracy: 0.8809
Test accuracy: 0.8809000253677368
###Markdown
Make predictions With the model trained, you can use it to make predictions about images. Once the model has predicted a label for each image in the *test* set (see the sketch in the next cell), let's look at the first prediction:
###Code
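# A minimal sketch (an assumption for this exercise): generate one prediction
# per test image with the trained model
predictions = model.predict(test_images)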
predictions[0]
###Output
_____no_output_____
###Markdown
*One* prediction is an array of 10 numbers. These represent the model's "confidence" that the image corresponds to each of the 10 articles of clothing. You can check which one has the highest confidence value:
###Code
np.argmax(predictions[0])
###Output
_____no_output_____
###Markdown
So the model is most confident that this image is an ankle boot, or `class_names[9]`. Examining the *test* labels shows that this classification is correct:
###Code
test_labels[0]
###Output
_____no_output_____
###Markdown
**Plot** this so you can look at the full set of predictions over the 10 classes.
###Code
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
###Output
_____no_output_____
###Markdown
Let's look at image [0], its predictions, and the prediction array. Correct prediction labels are shown in blue and incorrect ones in red. The number gives the percentage (out of 100) for the predicted label.
###Code
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
###Output
_____no_output_____
###Markdown
Let's plot several images with their predictions. Note that the model can be wrong even when it is very confident.
###Code
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Finally, we use the trained model to make a prediction about a single image.
###Code
# Grab an image from the test dataset.
img = test_images[1]
print(img.shape)
###Output
(28, 28)
###Markdown
`tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. Accordingly, even though we are using a single image, we need to add it to a list:
###Code
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
###Output
(1, 28, 28)
###Markdown
Now predict the correct label for this image:
###Code
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
###Output
_____no_output_____
###Markdown
`model.predict` returns a list of lists, one for each image in the batch of data. Grab the prediction for our (only) image in the batch:
###Code
np.argmax(predictions_single[0])
###Output
_____no_output_____ |
tutorials/02_ome-zarr_basics.ipynb | ###Markdown
Basic zarr handling1. Open your OME-NGFF zarr and look at the info
###Code
z = zarr.open('/projects/researchit/djme/zarr_dask_examples/zarrs/histopath_example.zarr', mode='r')
z.info
###Output
_____no_output_____
###Markdown
2. Check the bioformats2raw layout version
###Code
z.attrs['bioformats2raw.layout']
###Output
_____no_output_____
###Markdown
3. This zarr was created from an NDPI file, which outputs three images, which is why there are three groups. Only the first image has the raw data we are looking for, so let's take a look at the info for that:
###Code
z['0'].info
###Output
_____no_output_____
###Markdown
4. This "image" is still considered a group, because zarrs produced by `bioformats2raw` are multiscale. Thus each of these ten arrays is a pyramid (resolution) level. The `0` array is full resolution, `1` is 1/2 scale along the x and y axes, `2` is 1/4 scale, and so on. Let's take a look at a few:
###Code
z['0/0'].info # Full resolution pyramid level
z['0/1'].info # 1/2 scale along x and y
z['0/2'].info # 1/4 scale along x and y
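# A small optional addition (not in the original notebook): print the shape of
# the first few pyramid levels to see the x/y downsampling pattern
for level in range(3):
    print(level, z[f'0/{level}'].shape)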
###Output
_____no_output_____
###Markdown
5. Let's dive into the full resolution array:
###Code
zim = z['0/0']
type(zim)
zim.shape
zim.chunks
zim.nchunks
###Output
_____no_output_____
###Markdown
6. Slicing a zarr returns a numpy array
###Code
zim[0,0,0,:50,:50]
type(zim[0,0,0,:50,:50])
plt.imshow(zim[0,0,0,10000:18000,16000:24000]);
###Output
_____no_output_____
###Markdown
7. This is actually an RGB image. `matplotlib` can handle that, but we need to reshape
###Code
rgb_zim = np.moveaxis(zim[0,:,0,10000:18000,16000:24000], 0, -1)
rgb_zim.shape
plt.imshow(rgb_zim);
###Output
_____no_output_____
###Markdown
8. Precomputed pyramids are great for when you don't need full resolution data, e.g., if you want to take a look at a global view of the whole slide
###Code
zim_small = z['0/7'] # remember that the second group in the path is pyramid level
zim_small.shape
zim_small_array = np.moveaxis(zim_small[0,:,0,:,:], 0, -1)
zim_small_array.shape
plt.imshow(zim_small_array);
###Output
_____no_output_____
###Markdown
9. We can also load a zarr from object storage over the internet. Here is the same image loaded from some S3-compatible object storage. Note that we are specifying the full resolution image, not the top level zarr group:
###Code
za = zarr.open('http://s3-far.jax.org/zarrtest/histopath_example.zarr/0/0', mode='r')
za.info
###Output
_____no_output_____
###Markdown
10. This new zarr loaded over http can be used just like the one we loaded from the filesystem.
###Code
plt.imshow(za[0,0,0,10000:18000,16000:24000]);
###Output
_____no_output_____
###Markdown
Using OME-NGFF/zarr for exploring high content screening data 11. Plate data can also be converted to an OME-NGFF zarr! Like our slide scan example, plates are also zarr groups, with the top level group indicating row. In this example, we have 8 rows of wells:
###Code
plate = zarr.open('/projects/researchit/djme/zarr_dask_examples/zarrs/plate_example.zarr/', mode='r')
plate.info
###Output
_____no_output_____
###Markdown
12. And we have 12 columns of wells:
###Code
plate['0'].info
###Output
_____no_output_____
###Markdown
13. When selecting info for a specific well (i.e., specifying both row and column of the plate in the path), you find that the well itself is also a group. This grouping contains "fields", which are independent images taken of a single well. This well has 25 fields:
###Code
plate['0/0'].info
###Output
_____no_output_____
###Markdown
14. The next level down is your multiscale level, so selecting '0' for that will give you your image:
###Code
plate['0/0/0/0'].info
###Output
_____no_output_____
###Markdown
15. zarr groups give you useful iterators that can be used to loop over your data. Here is how you might loop over all of the fields in a well:
###Code
_, axes = plt.subplots(nrows=5, ncols=5, figsize=(14, 14))
for field_index, field in plate['2/2'].groups():
field_index = int(field_index) # Because this starts as a string
axes.flatten()[field_index].imshow(field['0'][0,0,0,:,:]);
###Output
_____no_output_____ |
Python-f-strings-Slides.ipynb | ###Markdown
Python: f-strings and Q&A MEOPAR ATM 2020-10-19 Doug Latornell, UBC and 43ravens [[email protected]](mailto:[email protected]) Twitter: [@dlatornell](https://twitter.com/dlatornell) Learning Goals* Explain what a Python f-string is* Write f-strings* Use f-strings as an aid to debug Python code* Format variable values in f-strings* Use f-strings to compose structured file paths/names Combining Words and Variable Values in Strings String concatenation with a plus-sign seems like the simplest thing that might work:
###Code
name = "Doug"
print("Hello " + name)
###Output
Hello Doug
###Markdown
Now, try it with a number:
###Code
a = 43
print("The value of a is " + a)
###Output
_____no_output_____
###Markdown
Various solutions to that problem:
###Code
print("The value of a is " + str(a))
print("The value of a is %s" % a)
print("The value of a is {a}".format(a=a))
###Output
The value of a is 43
###Markdown
Python 3.6 brought a better way... "Formatted String Literals" or f-strings
###Code
print(f"Hello {name}")
###Output
Hello Doug
###Markdown
* Put an `f` (lowercase or uppercase, your choice) in front of the opening quote of the string* Enclose variable names in braces (`{}`) Live-coding...
###Code
print(f"The value of a is {a}")
a = 43
b = 71
print(f"a + b = {a + b}")
###Output
a + b = 114
###Markdown
Slides... Learning Goals* ✅ Explain what a Python f-string is* ✅ Write f-strings* Use f-strings as an aid to debug Python code* Format variable values in f-strings* Use f-strings to compose structured file paths/names f-strings and Debugging* A basic technique for debugging code in any language is to add print statements to the code to trace its execution and show values of variables* Sometimes all you need is `print("here")` to tell you that a particular piece of code is being executed* Sometimes you want to see the value of a variable: `print(f"var={var}")` Live-coding... Imagine that we have an ocean model that includes heat transfer between the ocean and the atmosphere. We notice in our model output that the ocean temperature doesn't seem to be changing as quickly as we expect: it doesn't warm up in the summer, nor cool down in the winter. After some tracing through the code with `print()` statements to help us figure out what is going on, we reach a function that calculates the radiative component of the heat flux between the atmosphere and the ocean. A really simple version of that function might be:
###Code
def calc_rad_flux(temp_atmos, temp_ocean):
# Stefan-Boltzmann constant [W m^2 K^-4]
sigma = 5.670_374_419e-8
rad_flux = sigma * (temp_atmos**4 - temp_ocean**4)
return rad_flux
temp_atmos = 25.2
temp_ocean = 10.1
rad_flux = calc_rad_flux(temp_atmos, temp_ocean)
print(f"rad_flux = {rad_flux}")
###Output
rad_flux = 0.022277186456082308
###Markdown
Add debugging print statements to see the values of atmosphere and ocean temperatures that are actually being used to calculate the flux:
###Code
def calc_rad_flux(temp_atmos, temp_ocean):
# Stefan-Boltzmann constant [W m^2 K^-4]
sigma = 5.670_374_419e-8
print(f"temp_atmos = {temp_atmos}")
print(f"temp_ocean = {temp_ocean}")
rad_flux = sigma * (temp_atmos**4 - temp_ocean**4)
return rad_flux
rad_flux = calc_rad_flux(temp_atmos, temp_ocean)
print(f"rad_flux = {rad_flux}")
###Output
temp_atmos = 25.2
temp_ocean = 10.1
rad_flux = 0.022277186456082308
###Markdown
And then it dawns on us: the temperatures have to be in Kelvin, not Celsius! So, fix the bug by adding 273.15 to the temperatures
###Code
def calc_rad_flux(temp_atmos, temp_ocean):
# Stefan-Boltzmann constant [W m^2 K^-4]
sigma = 5.670_374_419e-8
rad_flux = sigma * ((temp_atmos + 273.15)**4 - (temp_ocean + 273.15)**4)
return rad_flux
rad_flux = calc_rad_flux(temp_atmos, temp_ocean)
print(f"rad_flux = {rad_flux}")
###Output
rad_flux = 84.28000023251099
###Markdown
The pattern of showing a variable name and its value is so common that Python 3.8 added it as a new feature of f-strings - just put an equals sign (`=`) after the variable name:
###Code
print(f"{a=}")
print(f"{a = }")
###Output
a=43
a = 43
###Markdown
Slides... Add `print()` statements to show variable names and their values to help understand code:
###Code
temp_atmos = 15.2
print(f"temp_atmos = {temp_atmos}")
###Output
temp_atmos = 15.2
###Markdown
or, in Python>=3.8:
###Code
print(f"{temp_atmos = }")
###Output
temp_atmos = 15.2
###Markdown
Learning Goals* ✅ Explain what a Python f-string is* ✅ Write f-strings* ✅ Use f-strings as an aid to debug Python code* Format variable values in f-strings* Use f-strings to compose structured file paths/names Format Variable Values in f-strings Live-coding...
###Code
sigma = 5.670_374_419e-8
print(f"{sigma = }")
print(f"{sigma = :f}")
print(f"{sigma = :0.20f}")
print(f"{sigma = :1.4e}")
print(f"{a=:0.3f}")
print(f"{a=:04d}")
###Output
a=0043
###Markdown
Slides...
###Code
sigma = 5.670_374_419e-8
print(f"{sigma = :0.20f}")
print(f"{sigma = :1.4e}")
print(f"{a=:0.3f}")
print(f"{a=:04d}")
###Output
sigma = 0.00000005670374419000
sigma = 5.6704e-08
a=43.000
a=0043
###Markdown
Format Specification Mini-Language: https://docs.python.org/3.8/library/string.html#formatspec Learning Goals* ✅ Explain what a Python f-string is* ✅ Write f-strings* ✅ Use f-strings as an aid to debug Python code* ✅ Format variable values in f-strings* Use f-strings to compose structured file paths/names Use f-strings to Compose Structured File Paths/Names Large volumes of data like model output and ocean observations (e.g. Argo floats) are often stored using structured file paths and/or names. Examples:* URL for ECCC HRDPS forecast file: https://dd.weather.gc.ca/model_hrdps/west/grib2/12/017/CMC_hrdps_west_APCP_SFC_0_ps2.5km_2020101812_P017-00.grib2 * File path/name for UBC SalishSeaCast NEMO model output file: `/results2/SalishSea/nowcast-green.201905/18oct20/SalishSea_1h_20201018_20201018_grid_T.nc`
###Code
import datetime
day = datetime.date(2020, 10, 18)
print(f"/results2/SalishSea/nowcast-green.201905/18oct20/SalishSea_1h_{day:%Y%m%d}_{day:%Y%m%d}_grid_T.nc")
ddmmmyy = f"{day:%d%b%y}".lower()
print(f"/results2/SalishSea/nowcast-green.201905/{ddmmmyy}/SalishSea_1h_{day:%Y%m%d}_{day:%Y%m%d}_grid_T.nc")
for day_num in range(1, 11):
day = datetime.date(2020, 10, day_num)
ddmmmyy = f"{day:%d%b%y}".lower()
grid_T_path = f"/results2/SalishSea/nowcast-green.201905/{ddmmmyy}/SalishSea_1h_{day:%Y%m%d}_{day:%Y%m%d}_grid_T.nc"
print(grid_T_path)
with xarray.open_dataset(grid_T_path) as grid_T:
print(grid_T.data_vars)
###Output
_____no_output_____
###Markdown
Slides...
###Code
import datetime
for day_num in range(1, 11):
day = datetime.date(2020, 10, day_num)
ddmmmyy = f"{day:%d%b%y}".lower()
grid_T_path = f"/results2/SalishSea/nowcast-green.201905/{ddmmmyy}/SalishSea_1h_{day:%Y%m%d}_{day:%Y%m%d}_grid_T.nc"
print(grid_T_path)
###Output
/results2/SalishSea/nowcast-green.201905/01oct20/SalishSea_1h_20201001_20201001_grid_T.nc
/results2/SalishSea/nowcast-green.201905/02oct20/SalishSea_1h_20201002_20201002_grid_T.nc
/results2/SalishSea/nowcast-green.201905/03oct20/SalishSea_1h_20201003_20201003_grid_T.nc
/results2/SalishSea/nowcast-green.201905/04oct20/SalishSea_1h_20201004_20201004_grid_T.nc
/results2/SalishSea/nowcast-green.201905/05oct20/SalishSea_1h_20201005_20201005_grid_T.nc
/results2/SalishSea/nowcast-green.201905/06oct20/SalishSea_1h_20201006_20201006_grid_T.nc
/results2/SalishSea/nowcast-green.201905/07oct20/SalishSea_1h_20201007_20201007_grid_T.nc
/results2/SalishSea/nowcast-green.201905/08oct20/SalishSea_1h_20201008_20201008_grid_T.nc
/results2/SalishSea/nowcast-green.201905/09oct20/SalishSea_1h_20201009_20201009_grid_T.nc
/results2/SalishSea/nowcast-green.201905/10oct20/SalishSea_1h_20201010_20201010_grid_T.nc
###Markdown
Date/time formatting directives: https://docs.python.org/3/library/time.html#time.strftime Live-coding...
###Code
hrdps_url = "https://dd.weather.gc.ca/model_hrdps/west/grib2/12/017/CMC_hrdps_west_APCP_SFC_0_ps2.5km_2020101812_P017-00.grib2"
hrdps_gribs = "https://dd.weather.gc.ca/model_hrdps/west/grib2"
forecast = 12
hr = 17
day = datetime.date(2020, 10, 18)
var = "APCP_SFC_0"
hrdps_url = f"{hrdps_gribs}/{forecast}/{hr:03d}/CMC_hrdps_west_{var}_ps2.5km_{day:%y%m%d}{forecast}_P{hr:03d}-00.grib2"
print(hrdps_url)
hrdps_url_tmpl = "{hrdps_gribs}/{forecast}/{hr:03d}/CMC_hrdps_west_{var}_ps2.5km_{day:%y%m%d}{forecast}_P{hr:03d}-00.grib2"
print(
hrdps_url_tmpl.format(
hrdps_gribs=hrdps_gribs, forecast=forecast, hr=hr, var=var, day=day))
###Output
https://dd.weather.gc.ca/model_hrdps/west/grib2/12/017/CMC_hrdps_west_APCP_SFC_0_ps2.5km_20101812_P017-00.grib2
###Markdown
Slides...
###Code
hrdps_gribs = "https://dd.weather.gc.ca/model_hrdps/west/grib2"
forecast = 12
hr = 17
day = datetime.date(2020, 10, 18)
var = "APCP_SFC_0"
hrdps_url = f"{hrdps_gribs}/{forecast}/{hr:03d}/CMC_hrdps_west_{var}_ps2.5km_{day:%y%m%d}{forecast}_P{hr:03d}-00.grib2"
print(hrdps_url)
###Output
https://dd.weather.gc.ca/model_hrdps/west/grib2/12/017/CMC_hrdps_west_APCP_SFC_0_ps2.5km_20101812_P017-00.grib2
|
courses/udacity_intro_to_tensorflow_for_deep_learning/[JPN]l01c01_introduction_to_colab_and_python.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
**Introduction to Colab and Python** Welcome to this Colab. Here we give a quick overview of Colab, the environment used for the Python programming language and for the course exercises. Colab is a Python development environment that runs in the browser using Google Cloud. For example, to print "Hello World", hover the mouse over [ ] and press the play button in the upper left, or press shift-enter to run the cell.
###Code
# Never mind this statement, for compatibility reasons
# from __future__ import absolute_import, division, print_function, unicode_literals
print("Hello World")
###Output
Hello World
###Markdown
Functions, Conditionals, and Iteration Let's create a Python function and call it from a loop.
###Code
def HelloWorldXY(x, y):
if (x < 10):
print("Hello World, x was < 10")
elif (x < 20):
print("Hello World, x was >= 10 but < 20")
else:
print("Hello World, x was >= 20")
return x + y
for i in range(8, 25, 5): # i=8, 13, 18, 23 (start, stop, step)
print("--- Now running with i: {}".format(i))
r = HelloWorldXY(i,i)
print("Result from HelloWorld: {}".format(r))
print(HelloWorldXY(1,2))
###Output
Hello World, x was < 10
3
###Markdown
็ฐกๅใงใใญใ0ใใ2๏ผๆไป็๏ผใงๅงใพใใซใผใใๅฟ
่ฆใชๅ ดๅใฏใๆฌกใฎใใใใใๅฎ่กใงใใพใใ
###Code
print("Iterate over the items. `range(2)` is like a list [0,1].")
for i in range(2):
print(i)
print("Iterate over an actual list.")
for i in [0,1]:
print(i)
print("While works")
i = 0
while i < 2:
print(i)
i += 1
print("Python supports standard key words like continue and break")
while True:
print("Entered while")
break
###Output
Python supports standard key words like continue and break
Entered while
###Markdown
Numpy and lists Python has lists built into the language. However, here we use a library called numpy. Numpy provides many support functions that are useful when doing machine learning. The import is also shown here. This statement makes the entire numpy package available, and the abbreviated "np" syntax can be used to access its symbols.
###Code
import numpy as np # Make numpy available using np.
# Create a numpy array, and append an element
a = np.array(["Hello", "World"])
a = np.append(a, "!")
print("Current array: {}".format(a))
print("Printing each element")
for i in a:
print(i)
print("\nPrinting each element and their index")
for i,e in enumerate(a):
print("Index: {}, was: {}".format(i, e))
print("\nShowing some basic math on arrays")
b = np.array([0,1,4,3,2])
print("Max: {}".format(np.max(b)))
print("Average: {}".format(np.average(b)))
print("Max index: {}".format(np.argmax(b)))
print("\nYou can print the type of anything")
print("Type of b: {}, type of b[0]: {}".format(type(b), type(b[0])))
print("\nUse numpy to create a [3,3] dimension array with random number")
c = np.random.rand(3, 3)
print(c)
print("\nYou can print the dimensions of arrays")
print("Shape of a: {}".format(a.shape))
print("Shape of b: {}".format(b.shape))
print("Shape of c: {}".format(c.shape))
print("...Observe, Python uses both [0,1,2] and (0,1,2) to specify lists")
###Output
You can print the dimensions of arrays
Shape of a: (3,)
Shape of b: (5,)
Shape of c: (3, 3)
...Observe, Python uses both [0,1,2] and (0,1,2) to specify lists
###Markdown
Colab Specifics Colab is a virtual machine that you can access directly. To run commands in the VM's terminal, prefix the line with an exclamation mark (!).
###Code
print("\nDoing $ls on filesystem")
!ls -l
!pwd
print("Install numpy") # Just for test, numpy is actually preinstalled in all Colab instances
!pip install numpy
###Output
Install numpy
Requirement already satisfied: numpy in /Users/hironsuz/anaconda3/lib/python3.7/site-packages (1.17.4)
###Markdown
**Exercise** Create a code cell below this text cell and add code that does the following: * list the path of the current directory (pwd) * change to / (cd) and list its contents (ls -l)
###Code
!pwd
!cd /
!ls -l
print("Hello")
###Output
/Users/hironsuz/Documents/tensorflow/courses/udacity_intro_to_tensorflow_for_deep_learning
total 4000
-rw-r--r-- 1 hironsuz YAHOO\Domain Users 13113 12 3 15:30 [JPN]l01c01_introduction_to_colab_and_python.ipynb
-rw-r--r-- 1 hironsuz YAHOO\Domain Users 37215 12 2 18:37 [JPN]l02c01_celsius_to_fahrenheit.ipynb
-rw-r--r-- 1 hironsuz YAHOO\Domain Users 186770 12 3 09:49 [JPN]l03c01_classifying_images_of_clothing.ipynb
-rw-r--r-- 1 hironsuz YAHOO\Domain Users 186753 12 3 10:31 [JPN]l04c01_image_classification_with_cnns.ipynb
-rw-r--r-- 1 hironsuz YAHOO\Domain Users 426961 12 3 11:43 [JPN]l05c01_dogs_vs_cats_without_augmentation.ipynb
-rw-r--r-- 1 hironsuz YAHOO\Domain Users 249117 12 3 12:47 [JPN]l05c02_dogs_vs_cats_with_augmentation.ipynb
-rw-r--r-- 1 hironsuz YAHOO\Domain Users 44298 12 3 11:43 foo.png
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 13546 11 26 20:25 [32ml01c01_introduction_to_colab_and_python.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 45830 12 3 15:19 [32ml02c01_celsius_to_fahrenheit.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 36641 11 26 20:25 [32ml03c01_classifying_images_of_clothing.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 32720 12 2 12:59 [32ml04c01_image_classification_with_cnns.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 403684 12 2 14:08 [32ml05c01_dogs_vs_cats_without_augmentation.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 30894 11 26 20:25 [32ml05c02_dogs_vs_cats_with_augmentation.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 27773 11 26 20:25 [32ml05c03_exercise_flowers_with_data_augmentation.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 31467 11 26 20:25 [32ml05c04_exercise_flowers_with_data_augmentation_solution.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 28337 11 26 20:25 [32ml06c01_tensorflow_hub_and_transfer_learning.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 19832 11 26 20:25 [32ml06c02_exercise_flowers_with_transfer_learning.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 23799 11 26 20:25 [32ml06c03_exercise_flowers_with_transfer_learning_solution.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 29119 11 26 20:25 [32ml07c01_saving_and_loading_models.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 10084 11 26 20:25 [32ml08c01_common_patterns.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 9369 11 26 20:25 [32ml08c02_naive_forecasting.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 15388 11 26 20:25 [32ml08c03_moving_average.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 8765 11 26 20:25 [32ml08c04_time_windows.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 17530 11 26 20:25 [32ml08c05_forecasting_with_machine_learning.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 18249 11 26 20:25 [32ml08c06_forecasting_with_rnn.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 13494 11 26 20:25 [32ml08c07_forecasting_with_stateful_rnn.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 12623 11 26 20:25 [32ml08c08_forecasting_with_lstm.ipynb[m[m
-rwxr-xr-x@ 1 hironsuz YAHOO\Domain Users 18547 11 26 20:25 [32ml08c09_forecasting_with_cnn.ipynb[m[m
Hello
|
notebooks/Data Setup/data setup scikit-learn gaussians Initial.ipynb | ###Markdown
This notebook prepares an initial dataset for testing HDBSCAN* implementations
###Code
import numpy as np
import pandas as pd
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=4000, centers=3, n_features=7, random_state=0)
print(X.shape)
print(X[0])
df = pd.DataFrame({'Feature1': X[:, 0], 'Feature2': X[:, 1], 'Feature3': X[:, 2],
'Feature4': X[:, 3], 'Feature5': X[:, 4], 'Feature6': X[:, 5],
'Feature7': X[:, 6]})
df.head(10)
# This generates a csv file
# the same data is saved to 2 different files; the Tribuo and Python loaders use the header
df.to_csv('/Users/gstewart/temp/development/mscs/cpsc69700/RefImpl/HDBSCAN_Star/first-gaussians.csv', index=False, header=False)
df.to_csv('../../data/first-gaussians.csv', index=False, header=True)
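# Optional sanity check (an assumption, not part of the original setup): read the
# header-bearing copy back and confirm the shape matches what was written
df_check = pd.read_csv('../../data/first-gaussians.csv')
print(df_check.shape)  # expected: (4000, 7)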
print(y.tolist())
###Output
[0, 1, 2, 1, 0, 0, 1, 0, 0, 2, 2, 1, 2, 2, 0, 2, 1, 1, 0, 0, 0, 1, 2, 2, 1, 0, 1, 0, 1, 0, 1, 1, 2, 0, 1, 2, 2, 2, 2, 1, 1, 2, 2, 2, 0, 0, 1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 2, 0, 0, 0, 2, 1, 2, 2, 2, 2, 2, 1, 0, 2, 0, 0, 0, 2, 0, 0, 1, 0, 1, 0, 2, 0, 2, 1, 2, 0, 1, 2, 1, 0, 2, 2, 1, 0, 1, 2, 2, 1, 0, 0, 2, 0, 1, 0, 1, 2, 1, 0, 1, 1, 0, 1, 2, 2, 1, 0, 0, 1, 0, 0, 1, 2, 1, 2, 1, 0, 2, 0, 1, 1, 1, 0, 2, 2, 0, 1, 1, 2, 2, 1, 1, 2, 0, 2, 1, 1, 2, 1, 0, 0, 2, 0, 2, 1, 1, 1, 0, 0, 1, 1, 0, 0, 2, 1, 2, 2, 1, 0, 2, 1, 0, 0, 1, 2, 1, 0, 2, 1, 1, 0, 1, 1, 2, 0, 1, 2, 1, 0, 2, 1, 1, 1, 0, 0, 0, 2, 1, 1, 1, 2, 1, 1, 0, 0, 1, 2, 2, 2, 1, 0, 1, 0, 0, 2, 0, 1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 1, 2, 0, 0, 1, 2, 1, 1, 1, 1, 0, 1, 0, 1, 2, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 2, 2, 0, 1, 0, 0, 1, 2, 2, 1, 1, 0, 2, 2, 2, 1, 0, 1, 2, 1, 2, 0, 1, 2, 0, 0, 1, 0, 0, 1, 0, 1, 0, 2, 0, 2, 2, 1, 1, 1, 0, 1, 2, 2, 2, 0, 1, 1, 0, 2, 1, 0, 2, 1, 0, 2, 2, 2, 0, 2, 1, 1, 2, 1, 0, 2, 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 0, 0, 1, 2, 0, 1, 2, 1, 0, 2, 2, 2, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 2, 0, 1, 2, 0, 1, 2, 1, 1, 0, 2, 0, 2, 1, 1, 0, 1, 0, 2, 1, 1, 0, 2, 1, 0, 2, 0, 0, 0, 0, 1, 2, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 0, 1, 2, 1, 1, 0, 0, 2, 0, 2, 0, 0, 2, 2, 2, 1, 1, 1, 1, 0, 0, 1, 0, 0, 2, 0, 2, 2, 2, 0, 1, 0, 0, 1, 1, 2, 0, 1, 2, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 0, 1, 0, 0, 2, 0, 1, 0, 2, 0, 1, 2, 0, 0, 2, 1, 0, 1, 1, 1, 2, 2, 1, 2, 2, 1, 0, 1, 0, 2, 2, 1, 1, 2, 2, 2, 1, 0, 2, 1, 0, 0, 2, 2, 0, 1, 0, 0, 0, 1, 2, 2, 2, 2, 0, 0, 1, 2, 2, 1, 0, 2, 0, 1, 1, 1, 0, 2, 1, 0, 0, 0, 2, 1, 0, 1, 1, 0, 2, 1, 1, 1, 0, 0, 1, 2, 1, 2, 1, 1, 1, 0, 0, 1, 0, 2, 1, 1, 1, 1, 0, 2, 1, 1, 2, 0, 2, 0, 0, 1, 0, 0, 0, 2, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 2, 2, 2, 2, 2, 2, 0, 1, 0, 0, 2, 0, 2, 0, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 1, 2, 1, 2, 0, 1, 1, 0, 2, 0, 2, 1, 1, 0, 0, 1, 2, 2, 1, 0, 2, 0, 1, 2, 1, 1, 0, 1, 1, 2, 2, 1, 0, 0, 0, 1, 2, 0, 1, 1, 1, 2, 2, 1, 1, 0, 1, 1, 0, 2, 0, 1, 2, 2, 1, 0, 2, 2, 0, 2, 2, 0, 1, 0, 0, 2, 0, 1, 0, 2, 2, 2, 0, 0, 1, 1, 0, 2, 2, 1, 2, 1, 2, 2, 0, 0, 1, 2, 1, 0, 1, 2, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0, 1, 1, 0, 2, 2, 2, 0, 1, 0, 0, 2, 0, 1, 1, 1, 1, 2, 2, 1, 2, 1, 2, 0, 0, 2, 0, 0, 2, 2, 1, 1, 2, 0, 0, 0, 2, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 0, 2, 2, 0, 0, 0, 1, 0, 2, 1, 0, 1, 0, 0, 0, 0, 2, 2, 1, 2, 2, 0, 1, 1, 0, 2, 1, 2, 2, 1, 1, 2, 0, 0, 1, 0, 0, 0, 2, 1, 1, 0, 2, 2, 1, 1, 1, 0, 0, 2, 1, 2, 0, 1, 0, 1, 0, 0, 0, 0, 1, 2, 0, 2, 1, 0, 0, 1, 2, 0, 0, 0, 2, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 0, 1, 2, 0, 0, 2, 2, 0, 2, 2, 1, 2, 2, 0, 2, 1, 2, 2, 0, 0, 1, 2, 2, 1, 1, 2, 2, 0, 1, 1, 1, 2, 2, 1, 2, 0, 1, 2, 1, 1, 2, 0, 1, 1, 1, 2, 0, 0, 2, 0, 2, 1, 2, 1, 2, 1, 2, 2, 0, 0, 0, 1, 0, 1, 2, 1, 2, 0, 0, 1, 0, 0, 2, 1, 0, 0, 2, 1, 1, 1, 0, 2, 1, 0, 1, 0, 0, 0, 0, 1, 1, 2, 2, 1, 0, 2, 0, 1, 2, 2, 0, 0, 2, 2, 1, 0, 0, 2, 0, 0, 0, 1, 2, 0, 2, 1, 2, 0, 2, 1, 1, 2, 1, 2, 1, 0, 2, 0, 1, 0, 2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 0, 0, 0, 1, 1, 2, 2, 2, 0, 2, 1, 0, 2, 2, 1, 1, 1, 2, 1, 1, 1, 0, 0, 1, 1, 2, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 2, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 1, 2, 1, 0, 0, 1, 2, 1, 0, 0, 2, 2, 2, 1, 1, 0, 2, 2, 2, 0, 2, 0, 1, 1, 2, 1, 1, 1, 0, 1, 2, 2, 2, 2, 0, 2, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 1, 2, 2, 1, 1, 0, 2, 0, 2, 2, 2, 0, 0, 0, 2, 1, 0, 1, 2, 0, 1, 2, 1, 0, 2, 0, 2, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 2, 1, 2, 0, 0, 0, 1, 2, 2, 2, 0, 1, 2, 0, 0, 2, 2, 0, 0, 0, 1, 0, 2, 0, 2, 2, 2, 2, 1, 2, 0, 2, 1, 2, 0, 1, 1, 0, 0, 2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2, 1, 0, 2, 1, 1, 2, 2, 0, 1, 1, 0, 2, 2, 1, 2, 1, 0, 1, 0, 1, 2, 2, 0, 
0, 2, 2, 1, 2, 1, 2, 2, 1, 1, 1, 0, 2, 2, 1, 2, 2, 0, 0, 1, 2, 1, 0, 1, 1, 0, 0, 2, 0, 1, 1, 2, 0, 2, 2, 2, 2, 2, 0, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 0, 1, 0, 0, 0, 1, 2, 1, 2, 1, 0, 2, 1, 2, 2, 0, 2, 0, 1, 0, 1, 0, 0, 2, 2, 0, 0, 1, 0, 2, 1, 1, 1, 0, 1, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 0, 2, 1, 1, 2, 1, 1, 1, 2, 0, 0, 2, 1, 2, 0, 2, 0, 0, 2, 1, 0, 0, 0, 0, 2, 2, 2, 0, 1, 0, 1, 0, 0, 2, 1, 1, 1, 1, 2, 0, 1, 0, 0, 2, 2, 2, 2, 1, 0, 1, 0, 2, 0, 2, 1, 2, 1, 1, 1, 1, 2, 0, 0, 2, 2, 0, 2, 0, 2, 0, 1, 0, 2, 2, 2, 2, 2, 1, 0, 1, 0, 0, 0, 0, 1, 2, 0, 2, 0, 0, 0, 1, 2, 0, 2, 0, 2, 2, 2, 1, 2, 0, 0, 0, 2, 2, 1, 0, 2, 2, 2, 2, 2, 0, 1, 0, 1, 2, 0, 2, 1, 1, 0, 2, 2, 0, 1, 0, 2, 0, 1, 1, 2, 1, 1, 0, 1, 0, 0, 1, 2, 0, 0, 1, 1, 0, 2, 1, 2, 0, 2, 2, 1, 2, 1, 2, 0, 2, 1, 0, 0, 0, 1, 0, 0, 0, 2, 2, 0, 0, 2, 0, 2, 1, 0, 2, 0, 1, 2, 2, 0, 1, 2, 0, 1, 1, 2, 1, 0, 0, 2, 2, 0, 1, 1, 2, 1, 0, 0, 1, 1, 0, 0, 0, 2, 1, 1, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 0, 0, 1, 1, 2, 1, 1, 2, 0, 2, 2, 0, 1, 1, 1, 2, 0, 1, 2, 2, 2, 1, 1, 1, 0, 2, 1, 1, 0, 1, 2, 1, 1, 2, 0, 0, 2, 0, 0, 2, 0, 0, 1, 0, 0, 0, 1, 0, 1, 2, 2, 1, 0, 1, 0, 2, 2, 2, 0, 0, 0, 2, 2, 2, 1, 2, 2, 0, 2, 2, 1, 2, 1, 1, 1, 1, 0, 0, 0, 1, 2, 0, 0, 2, 1, 2, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 2, 2, 2, 2, 0, 0, 2, 0, 1, 1, 1, 2, 0, 2, 2, 2, 1, 1, 2, 2, 0, 0, 2, 1, 1, 2, 1, 0, 0, 0, 1, 2, 0, 2, 0, 2, 1, 0, 1, 0, 0, 1, 2, 0, 0, 0, 1, 0, 1, 2, 1, 1, 2, 0, 1, 2, 0, 0, 1, 2, 0, 1, 0, 0, 1, 1, 2, 0, 0, 2, 0, 2, 1, 1, 0, 1, 2, 0, 1, 2, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 2, 2, 0, 1, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 0, 1, 1, 0, 1, 2, 0, 1, 2, 1, 1, 0, 2, 1, 1, 2, 0, 2, 0, 1, 2, 2, 1, 2, 1, 1, 2, 0, 1, 2, 2, 0, 1, 0, 2, 2, 0, 2, 0, 1, 0, 1, 2, 2, 2, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 0, 2, 1, 1, 2, 0, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 1, 1, 0, 2, 0, 0, 2, 0, 1, 0, 0, 1, 2, 1, 1, 0, 1, 2, 2, 1, 1, 0, 0, 0, 2, 1, 1, 0, 1, 0, 2, 0, 0, 1, 1, 1, 1, 2, 2, 0, 1, 0, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 0, 0, 1, 2, 0, 2, 0, 0, 0, 0, 2, 2, 2, 1, 0, 2, 2, 0, 2, 0, 2, 1, 2, 1, 2, 1, 0, 2, 0, 1, 0, 0, 2, 1, 0, 2, 2, 0, 1, 0, 2, 1, 1, 0, 0, 2, 0, 2, 2, 0, 0, 1, 1, 1, 0, 2, 0, 0, 0, 0, 0, 2, 1, 0, 2, 1, 2, 1, 1, 0, 2, 0, 0, 0, 1, 1, 1, 0, 2, 1, 2, 2, 0, 2, 0, 1, 2, 0, 2, 0, 1, 2, 2, 2, 0, 1, 2, 2, 0, 2, 1, 2, 2, 1, 2, 2, 1, 0, 1, 0, 2, 2, 1, 2, 1, 2, 0, 2, 2, 2, 2, 1, 2, 2, 2, 0, 1, 0, 2, 1, 1, 2, 2, 2, 1, 0, 2, 0, 0, 0, 2, 2, 0, 1, 0, 0, 2, 2, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0, 0, 0, 2, 0, 0, 2, 2, 0, 0, 2, 2, 2, 1, 2, 0, 0, 1, 2, 0, 0, 1, 0, 2, 0, 2, 2, 0, 1, 1, 2, 2, 2, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 2, 0, 1, 1, 0, 2, 1, 2, 1, 0, 0, 1, 1, 2, 2, 0, 0, 0, 1, 1, 0, 1, 2, 0, 0, 0, 1, 0, 1, 1, 0, 0, 2, 1, 2, 2, 0, 1, 2, 2, 1, 1, 2, 0, 1, 1, 0, 0, 0, 2, 2, 0, 1, 1, 2, 0, 2, 2, 0, 0, 2, 1, 0, 0, 0, 1, 1, 2, 2, 2, 2, 1, 2, 1, 0, 2, 2, 2, 2, 2, 1, 2, 2, 1, 1, 0, 0, 1, 2, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 2, 0, 1, 0, 0, 2, 2, 0, 2, 2, 0, 2, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 2, 1, 2, 2, 0, 0, 2, 1, 0, 0, 0, 1, 1, 2, 1, 1, 2, 0, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 0, 2, 1, 1, 0, 1, 1, 1, 0, 1, 2, 2, 1, 1, 0, 2, 0, 0, 1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 0, 0, 2, 1, 0, 2, 0, 2, 0, 2, 1, 0, 0, 2, 0, 1, 1, 0, 2, 1, 2, 2, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 1, 0, 2, 0, 0, 0, 1, 2, 2, 2, 1, 0, 1, 2, 0, 0, 1, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 2, 2, 0, 2, 0, 0, 2, 0, 0, 1, 0, 0, 1, 2, 2, 0, 0, 0, 0, 1, 2, 1, 1, 1, 0, 0, 2, 0, 1, 0, 1, 2, 0, 0, 2, 1, 0, 2, 0, 2, 1, 1, 0, 2, 0, 0, 2, 0, 0, 1, 1, 0, 1, 2, 1, 0, 1, 0, 0, 0, 2, 0, 2, 2, 0, 2, 1, 2, 1, 2, 2, 2, 2, 1, 0, 2, 0, 1, 0, 0, 
2, 2, 1, 2, 0, 0, 1, 2, 2, 0, 2, 1, 2, 1, 2, 2, 1, 1, 1, 0, 2, 0, 2, 2, 2, 2, 2, 0, 1, 1, 2, 2, 1, 0, 1, 1, 0, 2, 2, 0, 1, 1, 2, 2, 1, 0, 0, 2, 1, 1, 2, 1, 0, 0, 0, 0, 1, 0, 0, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 0, 0, 1, 0, 0, 0, 1, 2, 2, 1, 1, 2, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 2, 2, 2, 1, 1, 0, 2, 0, 2, 2, 2, 2, 2, 2, 0, 0, 1, 0, 2, 0, 1, 2, 2, 2, 1, 1, 2, 1, 1, 0, 1, 0, 2, 2, 2, 2, 1, 2, 1, 2, 0, 1, 2, 0, 0, 2, 1, 1, 2, 0, 1, 0, 2, 0, 0, 1, 2, 1, 2, 2, 2, 0, 2, 0, 0, 1, 0, 0, 1, 2, 1, 1, 0, 2, 1, 0, 1, 0, 0, 2, 1, 2, 0, 0, 2, 2, 2, 2, 2, 2, 1, 0, 2, 1, 1, 2, 0, 1, 1, 0, 2, 1, 1, 2, 2, 1, 0, 2, 2, 0, 1, 0, 0, 2, 0, 1, 2, 1, 2, 0, 2, 1, 0, 2, 0, 1, 2, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 2, 0, 1, 0, 0, 1, 2, 1, 2, 2, 2, 1, 1, 1, 2, 1, 0, 2, 2, 2, 0, 2, 1, 0, 1, 0, 1, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 2, 2, 0, 2, 1, 1, 2, 2, 1, 0, 2, 2, 0, 2, 2, 1, 1, 1, 1, 0, 2, 1, 2, 2, 0, 2, 0, 2, 0, 1, 0, 2, 2, 1, 2, 0, 1, 1, 2, 1, 2, 0, 2, 1, 1, 0, 2, 0, 1, 0, 0, 1, 2, 2, 2, 0, 1, 1, 2, 2, 0, 0, 1, 2, 2, 1, 0, 2, 1, 0, 0, 1, 2, 2, 0, 2, 1, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 1, 0, 2, 1, 2, 2, 2, 1, 1, 1, 2, 0, 0, 1, 1, 0, 2, 0, 0, 2, 2, 2, 2, 0, 2, 0, 2, 1, 1, 0, 2, 2, 0, 0, 2, 0, 2, 1, 2, 0, 2, 1, 2, 1, 1, 2, 2, 2, 0, 2, 2, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 2, 2, 1, 2, 0, 0, 0, 1, 1, 1, 0, 0, 1, 2, 0, 0, 0, 0, 1, 0, 1, 1, 2, 1, 1, 2, 0, 0, 2, 1, 1, 0, 0, 1, 1, 2, 2, 2, 1, 2, 0, 0, 2, 2, 0, 2, 1, 2, 1, 1, 1, 0, 0, 2, 2, 0, 0, 1, 2, 2, 2, 1, 1, 0, 0, 2, 1, 0, 2, 0, 0, 2, 0, 1, 1, 0, 1, 0, 2, 0, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 0, 0, 2, 1, 2, 0, 2, 2, 2, 1, 1, 1, 2, 0, 1, 2, 0, 0, 0, 2, 2, 1, 0, 0, 2, 1, 1, 1, 1, 2, 0, 0, 1, 0, 0, 1, 0, 2, 2, 2, 1, 2, 2, 1, 1, 2, 2, 0, 2, 1, 0, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 1, 2, 1, 2, 1, 1, 0, 0, 2, 0, 2, 2, 1, 2, 2, 2, 1, 1, 1, 2, 1, 1, 2, 1, 2, 1, 1, 0, 1, 1, 2, 1, 1, 1, 1, 0, 2, 2, 0, 2, 1, 2, 0, 0, 2, 0, 2, 0, 2, 1, 2, 2, 1, 2, 1, 0, 0, 0, 1, 2, 2, 0, 0, 1, 2, 2, 2, 0, 1, 1, 1, 0, 0, 2, 1, 0, 0, 2, 1, 1, 1, 0, 2, 2, 1, 2, 1, 2, 1, 1, 0, 2, 1, 2, 0, 1, 0, 2, 2, 0, 1, 2, 0, 0, 2, 2, 2, 0, 2, 0, 0, 1, 0, 1, 1, 2, 2, 1, 2, 0, 0, 0, 2, 1, 2, 0, 0, 2, 0, 0, 2, 1, 2, 1, 0, 0, 1, 1, 2, 2, 2, 0, 1, 2, 2, 0, 1, 1, 0, 2, 2, 0, 2, 2, 2, 0, 0, 1, 1, 2, 0, 2, 1, 0, 2, 2, 0, 2, 2, 0, 1, 0, 1, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0, 2, 0, 2, 1, 1, 2, 2, 0, 0, 2, 2, 1, 2, 2, 0, 2, 1, 1, 2, 2, 0, 1, 2, 2, 2, 1, 2, 1, 0, 2, 2, 2, 2, 2, 1, 0, 2, 2, 1, 2, 0, 1, 2, 1, 2, 0, 2, 2, 1, 0, 1, 0, 1, 0, 1, 1, 2, 2, 2, 0, 1, 1, 0, 0, 0, 1, 0, 2, 1, 2, 1, 1, 1, 2, 2, 1, 0, 2, 0, 0, 0, 2, 1, 0, 1, 1, 2, 2, 0, 1, 1, 2, 0, 2, 2, 2, 2, 2, 0, 1, 2, 1, 0, 2, 2, 2, 2, 0, 1, 0, 1, 2, 2, 0, 1, 2, 1, 2, 2, 1, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 2, 0, 0, 0, 1, 1, 1, 0, 1, 2, 1, 0, 2, 1, 2, 1, 1, 1, 0, 1, 1, 1, 2, 1, 1, 2, 1, 2, 0, 0, 0, 2, 1, 0, 1, 0, 2, 0, 1, 0, 0, 2, 2, 0, 2, 1, 2, 2, 0, 1, 1, 2, 1, 2, 2, 0, 2, 2, 0, 0, 2, 0, 1, 0, 2, 2, 0, 2, 0, 2, 1, 0, 2, 1, 2, 0, 0, 1, 0, 0, 1, 2, 0, 2, 2, 2, 1, 0, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 1, 0, 2, 1, 2, 2, 0, 2, 0, 0, 2, 0, 1, 0, 0, 1, 0, 1, 1, 2, 1, 0, 1, 2, 0, 1, 1, 0, 2, 0, 1, 1, 1, 1, 1, 1, 2, 2, 1, 0, 0, 2, 0, 1, 2, 1, 2, 2, 0, 0, 1, 1, 1, 2, 0, 2, 0, 1, 2, 1, 1, 1, 2, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 2, 0, 1, 1, 2, 0, 2, 0, 0, 0, 1, 2, 0, 0, 2, 0, 1, 2, 0, 1, 1, 2, 2, 2, 2, 0, 0, 1, 2, 2, 0, 1, 2, 0, 0, 0, 2, 0, 1, 0, 2, 1, 1, 1, 1, 1, 2, 0, 1, 0, 1, 2, 2, 2, 0, 2, 0, 2, 1, 2, 2, 2, 1, 0, 0, 0, 0, 1, 2, 0, 2, 1, 2, 0, 1, 0, 2, 2, 1, 1, 1, 2, 0, 0, 1, 2, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 2, 1, 
0, 1, 2, 2, 2, 0, 0, 2, 2, 2, 1, 2, 1, 1, 1, 1, 2, 0, 2, 2, 2, 2, 1, 2, 0, 0, 2, 1, 1, 1, 2, 1, 2, 1, 1, 2, 0, 0, 0, 1, 2, 2, 1, 2, 1, 1, 0, 1, 0, 2, 2, 0, 0, 2, 1, 2, 1, 2, 1, 1, 2, 1, 0, 1, 2, 1, 1, 2, 0, 1, 2, 2, 0, 2, 1, 0, 0, 1, 2, 2, 1, 0, 1, 2, 2, 0, 1, 2, 1, 2, 0, 1, 2, 2, 0, 2, 0, 0, 0, 2, 2, 1, 0, 1, 1, 0, 0, 0, 0, 1, 2, 0, 1, 1, 1, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 2, 0, 1, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 2, 0, 1, 1, 2, 1, 2, 0, 1, 1, 2, 2, 0, 2, 1, 0, 2, 2, 0, 1, 2, 2, 1, 0, 1, 0, 1, 0, 2, 1, 2, 0, 2, 1, 1, 2, 2, 2, 2, 0, 0, 0, 2, 0, 2, 2, 2, 0, 2, 1, 1, 0, 1, 2, 2, 0, 1, 1, 0, 2, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 2, 0, 1, 1, 2, 0, 0, 1, 1, 0, 0, 2, 0, 2, 0, 2, 2, 2, 0, 0, 1, 1, 1, 1, 2, 1, 0, 2, 0, 0, 2, 1, 1, 2, 2, 1, 2, 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 0, 0, 0, 1, 2, 0, 0, 1, 1, 0, 0, 1, 0, 2, 0, 1, 1, 2, 1, 1, 0, 2, 1, 0, 2, 1, 1, 0, 2, 2, 1, 0, 1, 2, 2, 2, 0, 0, 2, 0, 2, 0, 2, 1, 0, 2, 2, 0, 0, 2, 2, 0, 0, 0, 2, 2, 1, 0, 1, 2, 2, 1, 2, 1, 2, 1, 2, 2, 0, 2, 0, 1, 2, 1, 0, 0, 0, 0, 1, 0, 0, 1, 2, 2, 0, 0, 2, 0, 0, 1, 1, 0, 1, 0, 1, 2, 2, 0, 0, 2, 1, 1, 1, 1, 2, 2, 1, 0, 2, 1, 0, 1, 0, 2, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 1, 1, 0, 0, 1, 0, 2, 2, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 2, 2, 1, 0, 2, 2, 2, 0, 1, 0, 0, 2, 0, 2, 0, 2, 0, 0, 0, 1, 1, 1, 0, 1, 1, 2, 0, 2, 0, 2, 2, 0, 2, 0, 1, 1]
|
files/fate-dice-statistics/Fate Dice Expectation Regions.ipynb | ###Markdown
Wilson Score Interval
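The cell below relies on `statsmodels`' `proportion_confint` with `method="wilson"`. As background (not part of the original notebook), the Wilson score interval for an observed proportion $\hat{p}$ over $n$ trials at critical value $z$ is
$$\frac{\hat{p} + \frac{z^2}{2n}}{1 + \frac{z^2}{n}} \pm \frac{z}{1 + \frac{z^2}{n}}\sqrt{\frac{\hat{p}(1-\hat{p})}{n} + \frac{z^2}{4n^2}},$$
which the code scales back up to counts by multiplying by `COUNT`.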
###Code
def fate_probability(value):
"""Returns the probability of rolling a value on 4dF.
Args:
value (int): The value rolled on 4dF.
Returns:
float: The probability of rolling the value, that is P(4dF=value).
Raises:
TypeError: If -4 <= value <= 4 and value is not an integer.
"""
# Outside bounds
if not -4 <= value <= 4:
return 0
# The probability of rolling a sum of 0, 1, 2, 3, 4 (negatives are same probability)
probability = [19/81, 16/81, 10/81, 4/81, 1/81]
return probability[abs(value)]
def expected_array(n):
""" Compute the expected values for each bin given a series of 4dF rolls.
Args:
n (int): Number of rolls.
Returns:
list: A list of expected number of rolls seen by value.
"""
result = []
for value in range(-4, 5):
exp = fate_probability(value) * n
result.append(exp)
return result
expected = np.array(expected_array(COUNT))
lower, upper = np.array(proportion_confint(expected, COUNT, alpha=0.01, method="wilson")) * COUNT
wilson_data = [["Outcome", "Lower Bound", "Upper Bound"]]
for i, (low, up) in enumerate(zip(lower, upper)):
value = i - 4
wilson_data.append([value, "{0:.2f}".format(low), "{0:.2f}".format(up)])
print(tabulate(wilson_data, headers="firstrow", tablefmt="pipe"))
###Output
| Outcome | Lower Bound | Upper Bound |
|----------:|--------------:|--------------:|
| -4 | 2.43 | 16.84 |
| -3 | 15.72 | 41.74 |
| -2 | 47.52 | 86.31 |
| -1 | 81.73 | 128.46 |
| 0 | 99.34 | 149.02 |
| 1 | 81.73 | 128.46 |
| 2 | 47.52 | 86.31 |
| 3 | 15.72 | 41.74 |
| 4 | 2.43 | 16.84 |
###Markdown
Normal Approximation
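As background (not from the original notebook), the bounds computed below are the usual normal approximation to the binomial: $np \pm z_{\alpha/2}\sqrt{np(1-p)}$, with $z_{0.005} \approx 2.58$ giving a 99% interval.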
###Code
def variance_expectation(n, sign):
"""Returns the expected range of rolls based on the variance and z score.
Args:
n (int): The number of rolls to consider.
sign (int): -1 returns the lower bounds, +1 returns the upper bounds.
Returns:
list: A list of the lower/upper bounds for the values -4, -3, ..., 4.
"""
weights = expected_array(n)
output = []
for i, weight in enumerate(weights):
value = i - 4
p = fate_probability(value)
sigma = sign * 2.58 * math.sqrt(n * p * (1 - p)) + weight
output.append(sigma)
return output
upper = variance_expectation(COUNT, 1)
lower = variance_expectation(COUNT, -1)
naive_data = [["Outcome", "Lower Bound", "Upper Bound"]]
for i, (low, up) in enumerate(zip(lower, upper)):
value = i - 4
naive_data.append([value, "{0:.2f}".format(low), "{0:.2f}".format(up)])
print(tabulate(naive_data, headers="firstrow", tablefmt="pipe"))
###Output
| Outcome | Lower Bound | Upper Bound |
|----------:|--------------:|--------------:|
| -4 | -0.06 | 12.95 |
| -3 | 13.01 | 38.55 |
| -2 | 45.05 | 83.84 |
| -1 | 79.64 | 126.58 |
| 0 | 97.47 | 147.42 |
| 1 | 79.64 | 126.58 |
| 2 | 45.05 | 83.84 |
| 3 | 13.01 | 38.55 |
| 4 | -0.06 | 12.95 |
###Markdown
Monte Carlo Simulation
###Code
# Values to select, and the weights to do it with
WEIGHTS = [1, 4, 10, 16, 19, 16, 10, 4, 1]
VALUES = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
# Bins edges
BINS = [-4.5, -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5]
def simulate_rolls(n, trials=10000):
""" Simulate rolling 4dF N times and calculate the expectation intervals.
Args:
n (int): The number of times to roll 4dF per trial.
trials (int): The number of trials to run.
Returns:
list: A list of lists, containing the counts for each value for each trial:
[
[ count, count, count ... ], # -4
[ count, count, count ... ], # -3
...
[ count, count, count ... ], # +4
]
"""
# The possible values we can select, the weights for each, and a histogram binning to let us count them quickly
values = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
bins = [-4.5, -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5]
weights = [1, 4, 10, 16, 19, 16, 10, 4, 1]
results = [[], [], [], [], [], [], [], [], []]
# Perform a trial
for _ in range(trials):
# We select all n rolls "at once" using a weighted choice function
rolls = choices(values, weights=weights, k=n)
counts = np.histogram(rolls, bins=bins)[0]
# Add the results to the global result
for i, count in enumerate(counts):
results[i].append(count)
return results
%%time
# Run the simulation
results = simulate_rolls(COUNT, 300000)
# Print the ranges
simulated_data = [["Outcome", "Lower Bound", "Upper Bound"]]
for i, r in enumerate(results):
value = i - 4
p = np.percentile(r, [0.5, 99.5])
simulated_data.append([value, int(p[0]), int(p[1])])
print(tabulate(simulated_data, headers="firstrow", tablefmt="pipe"))
###Output
| Outcome | Lower Bound | Upper Bound |
|----------:|--------------:|--------------:|
| -4 | 1 | 14 |
| -3 | 14 | 39 |
| -2 | 46 | 84 |
| -1 | 80 | 127 |
| 0 | 98 | 148 |
| 1 | 80 | 127 |
| 2 | 46 | 84 |
| 3 | 14 | 39 |
| 4 | 1 | 14 |
###Markdown
Probability
###Code
def configuration_probability(value, n_times, m_tries):
""" Give the probability of rolling a value exactly N times on 4dF, given M
rolls.
The formula for the probability is:
(M choose N) * P(Value)^N * P(Not Value)^{M-N}
Where (M choose N) is the binomial coefficient, P(Value)^N is the
probability of rolling the value exactly N times, and P(Not Value)^{M-N} is
the probability of rolling any other result M-N times.
Args:
value (int): The value rolled on 4dF.
n_times (int): The number of times the value should appear.
m_tries (int): The total number of 4dF rolls.
Returns:
float: The probability of the desired combination.
"""
# Binomial coefficient
coeff = math.factorial(m_tries) // (math.factorial(n_times) * math.factorial(m_tries - n_times))
p = fate_probability(value)
total_probability = coeff * (p**n_times) * ((1-p)**(m_tries-n_times))
return total_probability
def get_interval(m_tries, start=0.005, end=0.995):
"""Find the lower and upper bounds on the number of times 4dF should roll a
certain value given m total rolls.
The lower bound is defined as n such that CP(n) < start < CP(n+1).
The upper bound is defined as n such that CP(n) < end < CP(n+1)
CP(n) is the cumulative probability, such that:
CP(n) = Sum of P(i) for 0 <= i <= n
Args:
m_tries (int): The total number of 4dF rolls.
Returns:
array of tuples: An array of tuples for the lower and upper bounds
expected for rolling values -4, -3, -2, -1, 0, 1, 2, 3, 4 on 4dF
rolled m times. The array has the following form:
[
(lower, upper), # Bounds for number of -4s expected
(lower, upper), # Bounds for number of -3s expected
...
(lower, upper), # Bounds for number of 4s expected
]
"""
output = []
# Check each 4dF value
for val in range(-4, 5):
lower = None
upper = None
total_probability = 0
# Check all possible number of results we could get
for n in range(m_tries+1):
total_probability += configuration_probability(val, n, m_tries)
if total_probability > start and lower is None:
lower = n
elif total_probability > end and upper is None:
upper = n
break
output.append((lower, upper))
return output
intervals = get_interval(COUNT)
# Print the ranges
prob_data = [["Outcome", "Lower Bound", "Upper Bound"]]
for i, (lower, upper) in enumerate(intervals):
value = i - 4
prob_data.append([value, lower, upper])
print(tabulate(prob_data, headers="firstrow", tablefmt="pipe"))
###Output
| Outcome | Lower Bound | Upper Bound |
|----------:|--------------:|--------------:|
| -4 | 1 | 14 |
| -3 | 14 | 39 |
| -2 | 46 | 84 |
| -1 | 80 | 127 |
| 0 | 98 | 148 |
| 1 | 80 | 127 |
| 2 | 46 | 84 |
| 3 | 14 | 39 |
| 4 | 1 | 14 |
###Markdown
Plotting
###Code
import matplotlib.pyplot as plt
import seaborn as sns
# Set plotting style
sns.set_style("white")
%matplotlib inline
###Output
_____no_output_____
###Markdown
Comparison
###Code
# Plot size
WIDTH = 12
HEIGHT = 7
# A map of plot colors to dice colors. This also sets the order in which they are plotted.
COLORS = (
(naive_data,"Normal Approximation"),
(wilson_data, "Wilson Score"),
(simulated_data, "Monte Carlo"),
(prob_data, "Binomial Probability"),
)
# Offsets
OFFSETS = []
STEP = 0.1
for i in range(2):
half = STEP / 2
position = 0 + half + (half * 2 * i)
OFFSETS += [-position, position]
OFFSETS.sort()
# Set the canvas size
fig = plt.figure(figsize=(WIDTH, HEIGHT))
for i, (data, label) in enumerate(COLORS):
# Make the data plotable
cur_data = data[1:] # Remove the title
cur_data = [(v, float(l), float(u)) for v, l, u in cur_data]
values, lower, upper = zip(*cur_data)
# Convert to Numpy arrays for easier operations
lower = np.array(lower)[4:]
upper = np.array(upper)[4:]
# Adjust the values for the different options to prevent overlap
values = np.array(values)[4:]
offset = OFFSETS[i]
values = values + offset
# Plot the observed points
mid = np.array(expected_array(COUNT))[4:]
y_err_low = mid - lower
y_err_up = upper - mid
plt.errorbar(x=values, y=mid, yerr=[y_err_low, y_err_up], fmt="none", linewidth=12, label=label)
ax = plt.gca()
ax.set_xlabel("4dF Result", size=16)
ax.set_ylabel("Count", size=16)
plt.tick_params(axis='both', which='major', labelsize=14)
# Set main title
plt.suptitle("Fate Dice: 99% Intervals for {n} Rolls".format(n=COUNT), y=1.025, fontsize=22)
# Legend
plt.legend(loc="upper right", fontsize=18)
# Make the plots move together
plt.tight_layout()
# Save to disk
for ext in ("png", "svg"):
fig.savefig("/tmp/fate_dice_regions.{ext}".format(ext=ext), bbox_inches="tight", dpi=300)
###Output
_____no_output_____
###Markdown
Probability Distribution
###Code
def outcome_probability_distribution(m_tries, outcome=0):
"""Find the probability of rolling a set amount of 4dF for a specific outcome.
Args:
m_tries (int): The total number of 4dF rolls.
outcome (int): The result of rolling 4dF, from -4 to 4.
Returns:
array of tuples: Where the first entry of the tuple is the number of
times the dice showed the outcome, and the second entry is the
probability.
"""
output = []
# Check all possible number of results we could get
for n in range(m_tries+1):
probability = configuration_probability(outcome, n, m_tries)
output.append((n, probability))
return output
outcome_probability = outcome_probability_distribution(COUNT)
# Set bin sizes
BINS = []
for i in range(0, COUNT):
BINS.append(i - 0.5)
BINS.append(COUNT + 0.5)
# Colors for the plot
BLUE, *_, RED = sns.color_palette(n_colors=4)
# Set the canvas size
fig = plt.figure(figsize=(WIDTH, HEIGHT))
# Make the data plotable
x, y = zip(*outcome_probability)
# Plot the observed points
plt.hist(x, bins=BINS, weights=y, label="Most likely 99%", color=BLUE)
# Plot the red tails
lower = prob_data[5][1]
upper = prob_data[5][2]
left_x = x[:lower]
left_y = y[:lower]
right_x = x[upper+1:]
right_y = y[upper+1:]
plt.hist(left_x, bins=BINS, weights=left_y, color=RED, label="Least likely 1%")
plt.hist(right_x, bins=BINS, weights=right_y, color=RED)
ax = plt.gca()
ax.set_ylabel("Probability", size=16)
plt.yticks((0, 0.015, 0.03, 0.045))
ax.set_xlabel("Number of Zeroes Rolled", size=16)
ax.set_xlim([80, 148+18])
plt.tick_params(axis='both', which='major', labelsize=14)
# Add arrows
count_arrow = {
"arrowstyle": "simple",
"connectionstyle": "arc3",
"color": RED,
"shrinkB": 4.5,
}
ax.annotate(
"Lower 0.5%",
xy=(95, 0.001),
xytext=(-80, 30),
textcoords='offset points',
arrowprops=count_arrow,
size=16,
)
ax.annotate(
"Upper 0.5%",
xy=(152, 0.001),
xytext=(10, 30),
textcoords='offset points',
arrowprops=count_arrow,
size=16,
)
# Plot vertical lines for the lower and upper bound
bottom, top = ax.get_ylim()
center = (top - bottom) / 2
ax.axvline(98-0.5, color='.5', linestyle="--")
plt.text(
97,
center,
"Lower bound at 98",
color='.5',
size=18,
rotation='vertical',
verticalalignment='center',
horizontalalignment='right',
)
ax.axvline(148+0.5, color='.5', linestyle="--")
plt.text(
149.5,
center,
"Upper bound at 148",
color='.5',
size=18,
rotation='vertical',
verticalalignment='center',
horizontalalignment='left',
)
# Add counts
plt.text(81, 0.0425, "Total Rolls: {n}".format(n=COUNT), size=18)
# Set main title
plt.suptitle("Fate Dice: Probability Distribution of Number of Zeros Rolled", y=1.025, fontsize=22)
# Legend
legend = plt.legend(loc="upper right", fontsize=18, frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
frame.set_alpha(1)
# Make the plots move together
plt.tight_layout()
# Save to disk
for ext in ("png", "svg"):
fig.savefig("/tmp/fate_dice_probabilities.{ext}".format(ext=ext), bbox_inches="tight", dpi=300)
###Output
_____no_output_____ |
questions/question_81_90/README.ipynb | ###Markdown
Q. 81 - 90 - 81. Corner detection with the Hessian - 82. Harris corner detection (Step.1) Sobel + Gaussian - 83. Harris corner detection (Step.2) corner detection - 84. Simple image recognition (Step.1) color reduction + histogram - 85. Simple image recognition (Step.2) class prediction - 86. Simple image recognition (Step.3) evaluation (Accuracy) - 87. Simple image recognition (Step.4) k-NN - 88. K-means (Step.1) centroid creation - 89. K-means (Step.2) clustering - 90. K-means (Step.3) changing the initial labels
###Code
import numpy as np
import matplotlib.pyplot as plt
import cv2
from skimage import io
img_orig = io.imread('https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/imori_256x256.png')
img_gray = cv2.cvtColor(img_orig, cv2.COLOR_RGB2GRAY)
img_128 = io.imread('https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/imori_128x128.png')
img_gray_128 = cv2.cvtColor(img_128, cv2.COLOR_RGB2GRAY)
img_noise = io.imread('https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/imori_256x256_noise.png')
img_dark = io.imread('https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/imori_256x256_dark.png')
img_dark_gray = cv2.cvtColor(img_dark, cv2.COLOR_RGB2GRAY)
img_light = io.imread('https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/imori_256x256_light.png')
img_light_gray = cv2.cvtColor(img_light, cv2.COLOR_RGB2GRAY)
img_thorino = io.imread('https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/thorino.jpg')
img_thorino_gray = cv2.cvtColor(img_thorino, cv2.COLOR_RGB2GRAY)
img_eye = io.imread("https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/imori_256x256_eye.png")
img_eye = cv2.cvtColor(img_eye, cv2.COLOR_RGBA2RGB)
img_seg = io.imread("https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/seg_sample.png")
img_connect = io.imread("https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/connect_sample.png")
img_gazo = io.imread("https://yoyoyo-yo.github.io/Gasyori100knock/dataset/images/gazo_sample.png")
###Output
_____no_output_____
###Markdown
Q.81. Corner Detection with the Hessian Apply Hessian corner detection to thorino.jpg. Corner detection means detecting the corner points that lie on edges. A corner is a point where the curvature becomes large. In terms of the Gaussian curvature below, ```bash Gaussian curvature K = det(H) / (1 + Ix^2 + Iy^2)^2 det(H) = Ixx Iyy - IxIy^2 H ... the Hessian matrix, i.e. the second derivatives of the image (obtained by applying Sobel filters to, e.g., the grayscale image); for a point on the image it is defined as follows. Ix ... the image filtered with the x-direction Sobel filter. Iy ... the image filtered with the y-direction Sobel filter. H = [ Ix^2 IxIy] IxIy Iy^2 ``` in Hessian corner detection, points where det(H) is a local maximum are regarded as corners. A local maximum is found by comparing the pixel of interest with its 8-neighborhood: if the pixel's value is the largest, it is treated as a local maximum. In the answer, points where det(H) is a local maximum and exceeds max(det(H)) * 0.1 are taken as corners.
###Code
def hessian_corner(gray):
## Sobel
def sobel_filtering(gray):
# get shape
H, W = gray.shape
# sobel kernel
sobely = np.array(((1, 2, 1),
(0, 0, 0),
(-1, -2, -1)), dtype=np.float32)
sobelx = np.array(((1, 0, -1),
(2, 0, -2),
(1, 0, -1)), dtype=np.float32)
# padding
tmp = np.pad(gray, (1, 1), 'edge')
# prepare
Ix = np.zeros_like(gray, dtype=np.float32)
Iy = np.zeros_like(gray, dtype=np.float32)
# get differential
for y in range(H):
for x in range(W):
Ix[y, x] = np.mean(tmp[y : y + 3, x : x + 3] * sobelx)
Iy[y, x] = np.mean(tmp[y : y + 3, x : x + 3] * sobely)
Ix2 = Ix ** 2
Iy2 = Iy ** 2
Ixy = Ix * Iy
return Ix2, Iy2, Ixy
## Hessian
def corner_detect(gray, Ix2, Iy2, Ixy, pad=2):
# get shape
H, W = gray.shape
# prepare for show detection
out = np.array((gray, gray, gray))
out = np.transpose(out, (1,2,0))
# get Hessian value
Hes = np.zeros((H, W))
for y in range(H):
for x in range(W):
Hes[y,x] = Ix2[y,x] * Iy2[y,x] - Ixy[y,x] ** 2
## Detect Corner and show
for y in range(H):
for x in range(W):
if Hes[y,x] == np.max(Hes[max(y-1, 0) : min(y+2, H), max(x-1, 0) : min(x+2, W)]) and Hes[y, x] > np.max(Hes) * 0.1:
out[y - pad : y + pad, x - pad : x + pad] = [255, 0, 0]
out = out.astype(np.uint8)
return out
# image sobel
Ix2, Iy2, Ixy = sobel_filtering(gray)
# corner detection
out = corner_detect(gray, Ix2, Iy2, Ixy)
return out
img_thorino_hessian = hessian_corner(img_thorino_gray)
fig, ax = plt.subplots(1, 2, figsize=(12, 12))
ax[0].set_title("input")
ax[0].imshow(img_thorino, cmap="gray")
ax[1].set_title("output")
ax[1].imshow(img_thorino_hessian, cmap="gray")
plt.show()
###Output
_____no_output_____
###Markdown
Q.82. Harris Corner Detection (Step.1) Sobel + Gaussian From here through Q.83, apply Harris corner detection to thorino.jpg. The Harris corner detection algorithm is: - For the grayscale image, compute the Hessian matrix using Sobel filters. ```bash H = [ Ix^2 IxIy] IxIy Iy^2 ``` - Apply a Gaussian filter to each of Ix^2, Iy^2 and IxIy. - For every pixel, compute R = det(H) - k (trace(H))^2. (Empirically, k around 0.04 - 0.06 is said to work well.) - Pixels that satisfy R >= max(R) * th become corners. (th is often taken as 0.1.) The parameters used in Q.82-83 are as follows: Gaussian filter (k=3, sigma=3), k = 0.04, th = 0.1. Here, implement steps 1-3.
###Code
def Sobel_filtering(gray):
# get shape
H, W = gray.shape
# sobel kernel
sobely = np.array(((1, 2, 1),
(0, 0, 0),
(-1, -2, -1)), dtype=np.float32)
sobelx = np.array(((1, 0, -1),
(2, 0, -2),
(1, 0, -1)), dtype=np.float32)
# padding
tmp = np.pad(gray, (1, 1), 'edge')
# prepare
Ix = np.zeros_like(gray, dtype=np.float32)
Iy = np.zeros_like(gray, dtype=np.float32)
# get differential
for y in range(H):
for x in range(W):
Ix[y, x] = np.mean(tmp[y : y + 3, x : x + 3] * sobelx)
Iy[y, x] = np.mean(tmp[y : y + 3, x : x + 3] * sobely)
Ix2 = Ix ** 2
Iy2 = Iy ** 2
Ixy = Ix * Iy
return Ix2, Iy2, Ixy
# gaussian filtering
def gaussian_filtering(I, K_size=3, sigma=3):
# get shape
H, W = I.shape
## gaussian
I_t = np.pad(I, (K_size // 2, K_size // 2), 'edge')
# gaussian kernel
K = np.zeros((K_size, K_size), dtype=np.float32)
for x in range(K_size):
for y in range(K_size):
_x = x - K_size // 2
_y = y - K_size // 2
K[y, x] = np.exp( -(_x ** 2 + _y ** 2) / (2 * (sigma ** 2)))
K /= (sigma * np.sqrt(2 * np.pi))
K /= K.sum()
# filtering
for y in range(H):
for x in range(W):
I[y,x] = np.sum(I_t[y : y + K_size, x : x + K_size] * K)
return I
# get difference image
i_x2, i_y2, i_xy = Sobel_filtering(img_thorino_gray)
# gaussian filtering
i_x2 = gaussian_filtering(i_x2, K_size=3, sigma=3)
i_y2 = gaussian_filtering(i_y2, K_size=3, sigma=3)
i_xy = gaussian_filtering(i_xy, K_size=3, sigma=3)
fig, ax = plt.subplots(1, 4, figsize=(15, 3))
ax[0].set_title("input")
ax[0].imshow(img_thorino, cmap="gray")
ax[1].set_title("i_x2")
ax[1].imshow(i_x2, cmap="gray")
ax[2].set_title("i_y2")
ax[2].imshow(i_y2, cmap="gray")
ax[3].set_title("i_xy")
ax[3].imshow(i_xy, cmap="gray")
plt.show()
###Output
_____no_output_____
###Markdown
Q.83. Harris Corner Detection (Step.2) Corner Detection Here, implement algorithm steps 4-5. In step 4, k = 0.04; in step 5, th = 0.1.
###Code
def corner_detect(gray, Ix2, Iy2, Ixy, k=0.04, th=0.1):
# prepare output image
out = np.array((gray, gray, gray))
out = np.transpose(out, (1,2,0))
# get R
R = (Ix2 * Iy2 - Ixy ** 2) - k * ((Ix2 + Iy2) ** 2)
# detect corner
out[R >= np.max(R) * th] = [255, 0, 0]
out = out.astype(np.uint8)
return out
out = corner_detect(img_thorino_gray, i_x2, i_y2, i_xy)
fig, ax = plt.subplots(1, 2, figsize=(12, 12))
ax[0].set_title("input")
ax[0].imshow(img_thorino, cmap="gray")
ax[1].set_title("output")
ax[1].imshow(out, cmap="gray")
plt.show()
###Output
_____no_output_____
###Markdown
Q.84. Simple Image Recognition (Step.1) Color Reduction + Histogram Here we build a simple image recognition system. Image recognition is the task of identifying what is shown in an image (which class it belongs to). It is often referred to as image classification, Classification, Categorization, Clustering, and so on. A common approach is to extract some feature from the image (HOG, SIFT, SURF, etc.) and discriminate the class based on that feature. This approach was widely used before CNNs became popular; a CNN performs feature extraction and classification in one go. Here we do simple image recognition using the color histogram of the image. The algorithm is: - Resize the images (train_@@@.jpg) and apply color reduction (Q.6: 4 levels for each of R, G, B). - Create a histogram of each color-reduced image. The histogram has 4 values for each of R, G and B; to keep them apart, use B=[1,4], G=[5,8], R=[9,12], i.e. bin=12. Note that the histogram corresponding to each image must also be stored, i.e. the training histograms are stored in database = np.zeros((10 (number of training images), 13 (RGB + class)), dtype=np.int). - Use the histograms as the database. - Compute the difference between the histogram of the image to recognize (test_@@@.jpg) and the database, and use it as the feature. - The image whose total histogram difference is smallest gives the predicted class; in other words, the test image is assumed to belong to the same class as the image with the closest colors. Here, implement steps 1-3 and visualize the histograms. As training data use train_akahara_@@@.jpg (class 1) and train_madara_@@@.jpg (class 2) in the dataset folder (10 images in total). akahara is the Japanese fire belly newt and madara is the marbled newt. Storing features in a database like this is a first-generation AI approach: the idea is that memorizing every pattern is enough. However, it consumes a lot of memory, so its use is limited.
###Code
# dicrease color
def dic_color(img):
img = img // 64 * 64 + 32
return img
def get_feature(img):
feat = np.zeros(12, dtype=np.float32)
for i in range(4):
feat[i,] = (img[..., 0] == (64 * i + 32)).sum() #len(np.where(img[..., 0] == (64 * j + 32))[0])
feat[i + 4] = (img[..., 1] == (64 * i + 32)).sum()
feat[i + 8] = (img[..., 2] == (64 * i + 32)).sum()
return feat
# database
def get_db():
train_paths = [
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_1.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_2.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_3.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_1.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_2.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_3.jpg"
]
# prepare database
db = np.zeros((len(train_paths), 13), dtype=np.float32)
# each image
for i, path in enumerate(train_paths):
print(path)
img = io.imread(path)
img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_CUBIC)
img = dic_color(img)
feat = get_feature(img)
db[i, :-1] = feat
# get class
if 'akahara' in path:
cls = 0
elif 'madara' in path:
cls = 1
# store class label
db[i, -1] = cls
return db, train_paths
# get database
db, train_paths = get_db()
fig, ax = plt.subplots(1, 6, figsize=(15, 2))
for i in range(len(db)):
ax[i].set_title(train_paths[i].split("/")[-1])
ax[i].bar(np.arange(12), db[i, :-1])
plt.show()
###Output
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_1.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_2.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_3.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_1.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_2.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_3.jpg
###Markdown
Q.85. Simple Image Recognition (Step.2) Class Prediction Here, implement algorithm steps 4-5. Use akahara_@@@.jpg and madara_@@@.jpg as test data. For each image, output both the name of the training image with the smallest histogram difference and the predicted class. This evaluation method is called Nearest Neighbor.
###Code
test_paths = [
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_1.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_2.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_3.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_1.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_2.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_3.jpg"
]
for path in test_paths:
img = io.imread(path)
img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_CUBIC)
img = dic_color(img)
feat = get_feature(img)
db_diff = np.abs(db[:, :-1] - feat)
distances = db_diff.sum(axis=1)
nearest = distances.argmin()
pred_cls = db[nearest, -1]
label = "akahara" if pred_cls == 0 else "madara"
print(path.split("/")[-1], ", pred >>", label)
###Output
akahara_1.jpg , pred >> akahara
akahara_2.jpg , pred >> akahara
akahara_3.jpg , pred >> akahara
madara_1.jpg , pred >> madara
madara_2.jpg , pred >> madara
madara_3.jpg , pred >> madara
###Markdown
Q.86. Simple Image Recognition (Step.3) Evaluation (Accuracy) Here we evaluate the image recognition result. For image recognition, Accuracy (sometimes also called Precision), which indicates how well the true classes were predicted, is the usual evaluation metric. Accuracy is computed by the following formula; in short, it is the score rate on the test set, shown as a decimal or multiplied by 100 and given as a percentage. ```bash Accuracy = (number of correctly classified images) / (total number of tested images) ``` With the above in mind, compute the Accuracy of Q.85.
###Code
accuracy = 6 / 6
accuracy
###Output
_____no_output_____
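###Markdown
The cell above simply hard-codes the 6 / 6 read off from the Q.85 predictions. As a small illustrative sketch (using only the helpers already defined in Q.84-85, nothing new assumed), the same accuracy can be computed programmatically by re-running the nearest-neighbor prediction and deriving the true class from each file name.
###Code
correct = 0
for path in test_paths:
    img = io.imread(path)
    img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_CUBIC)
    img = dic_color(img)
    feat = get_feature(img)
    # nearest-neighbor prediction, as in Q.85
    pred_cls = db[np.abs(db[:, :-1] - feat).sum(axis=1).argmin(), -1]
    # ground-truth class implied by the file name
    true_cls = 0 if 'akahara' in path else 1
    correct += int(pred_cls == true_cls)
accuracy = correct / len(test_paths)
accuracy
###Output
_____no_output_____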
###Markdown
Q.87. Simple Image Recognition (Step.4) k-NN In Q.85, madara_2.jpg was predicted as the akahara newt. To avoid this, here we select the 3 images with the most similar colors and decide the predicted class by their majority vote. Selecting the k training samples whose features are closest and judging from them is called the k-nearest neighbor method (k-NN: k-Nearest Neighbor). The NN method of Q.85 can be seen as the case k=1.
###Code
k = 3
from collections import Counter
for path in test_paths:
img = io.imread(path)
img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_CUBIC)
img = dic_color(img)
feat = get_feature(img)
db_diff = np.abs(db[:, :-1] - feat)
distances = db_diff.sum(axis=1)
nearest = distances.argsort()[:k]
pred_cls = db[nearest, -1]
counts = Counter(pred_cls).most_common()
label = "akahara" if counts[0][0] == 0 else "madara"
print(path.split("/")[-1], ", pred >>", label)
###Output
akahara_1.jpg , pred >> akahara
akahara_2.jpg , pred >> akahara
akahara_3.jpg , pred >> akahara
madara_1.jpg , pred >> madara
madara_2.jpg , pred >> madara
madara_3.jpg , pred >> madara
###Markdown
Q.88. K-means (Step.1) Centroid Creation The image recognition in Q.84-87 needed labeled data, i.e. it was a very simple example of supervised training. Here we classify the images with unsupervised training, which requires no labels. The simplest such method is K-means clustering. It can be used when the number of classes is known in advance, and it clusters the feature vectors around centroids. The K-means algorithm is: 1. Assign a random class to each data point. 2. Compute the centroid of each class. 3. Compute the distance between each data point and the centroids, and assign the class of the nearest centroid. 4. Repeat 2-3 until no class assignment changes. Here, proceed as follows for the train images: 1. Reduce the colors of each image and create its histogram; use this as the feature vector. 2. Randomly assign class 0 or 1 to each image. (Here, with the number of classes = 2 and np.random.seed(1), assign class 1 when np.random.random() >= th, with th = 0.5.) 3. Take the centroid (mean) of the features with class 0 and class 1 respectively. (Store the centroids in gs = np.zeros((Class, 12), dtype=np.float32).) 4. For each image, compute the distance between its feature vector and each centroid (the Euclidean distance, i.e. the L2 norm: square the differences, sum them and take the sqrt) and assign the class of the nearest centroid. 5. Repeat 3-4 until no class assignment changes. Here, implement steps 1-3 only (you do not need to build the loop with steps 4-5 in mind).
###Code
def assign_label_init(db, paths, class_n=2):
feats = db.copy()
# assign random label
np.random.seed(0)
feats[:, -1] = np.random.randint(0, class_n, (len(db)))
# prepare gravity
gs = np.zeros((class_n, feats.shape[1] - 1), dtype=np.float32)
# get gravity
for i in range(class_n):
gs[i] = np.mean(feats[np.where(feats[..., -1] == i)[0], :12], axis=0)
print("assigned label")
print(feats[:, -1])
    print("Gravity")
print(gs)
assign_label_init(db, train_paths)
###Output
assigned label
[0. 1. 1. 0. 1. 1.]
Gravity
[[3673. 6966. 3607. 2138. 3707. 7019. 3859. 1799. 3996.5
6818. 3551.5 2018. ]
[2611. 7386.75 5079.75 1306.5 2404.75 7201.75 5536.75 1240.75 2751.5
6570.75 5746.5 1315.25]]
###Markdown
Q.89. K-means (Step.2) Clustering Here, also implement algorithm steps 4-5 and perform the clustering. The predicted classes here are 0 and 1, but unlike Q.85-87 the order of the labels is arbitrary. So K-means is strictly a method that groups the data into categories; it cannot tell what each class concretely is. Also, the number of classes has to be known in advance. Note that in K-means clustering the final output can be strongly affected by the initial label assignment. It also tends to fail when the number of data points is small, because with few data it is hard to sample the true data distribution; in other words, the more data there is, the more accurately the distribution can be captured.
###Code
def label_kmeans(db, paths, class_n=2):
feats = db.copy()
feat_n = feats.shape[1] - 1
# assign random label
np.random.seed(0)
feats[:, -1] = np.random.randint(0, class_n, (len(db)))
# prepare gravity
gs = np.zeros((class_n, feat_n), dtype=np.float32)
# get gravity
for i in range(class_n):
gs[i] = np.mean(feats[np.where(feats[..., -1] == i)[0], :feat_n], axis=0)
while True:
        # prepare gravity
gs = np.zeros((class_n, feat_n), dtype=np.float32)
change_count = 0
# compute gravity
for i in range(class_n):
gs[i] = np.mean(feats[np.where(feats[..., -1] == i)[0], :feat_n], axis=0)
# re-labeling
for i in range(len(feats)):
            # get distance to each nearest centroid (gravity)
dis = np.sqrt(np.sum(np.square(np.abs(gs - feats[i, :feat_n])), axis=1))
# get new label
pred = np.argmin(dis, axis=0)
# if label is difference from old label
if int(feats[i, -1]) != pred:
change_count += 1
feats[i, -1] = pred
if change_count < 1:
break
for i in range(db.shape[0]):
print(paths[i].split("/")[-1], " Pred:", feats[i, -1])
label_kmeans(db, train_paths)
###Output
akahara_1.jpg Pred: 1.0
akahara_2.jpg Pred: 1.0
akahara_3.jpg Pred: 0.0
madara_1.jpg Pred: 0.0
madara_2.jpg Pred: 0.0
madara_3.jpg Pred: 1.0
###Markdown
Q.90. K-means: More Data Add the test images as well and run k-means.
###Code
def get_db_all():
train_paths = [
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_1.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_2.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_3.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_1.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_2.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_3.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_1.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_2.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_3.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_1.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_2.jpg",
"https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_3.jpg"
]
# prepare database
db = np.zeros((len(train_paths), 13), dtype=np.float32)
# each image
for i, path in enumerate(train_paths):
print(path)
img = io.imread(path)
img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_CUBIC)
img = dic_color(img)
feat = get_feature(img)
db[i, :-1] = feat
# get class
if 'akahara' in path:
cls = 0
elif 'madara' in path:
cls = 1
# store class label
db[i, -1] = cls
return db, train_paths
db2, train_paths2 = get_db_all()
print("\nkmeans")
label_kmeans(db2, train_paths2)
###Output
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_1.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_2.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/akahara_3.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_1.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_2.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/train/madara_3.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_1.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_2.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/akahara_3.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_1.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_2.jpg
https://yoyoyo-yo.github.io/Gasyori100knock/dataset/test/madara_3.jpg
kmeans
akahara_1.jpg Pred: 1.0
akahara_2.jpg Pred: 1.0
akahara_3.jpg Pred: 0.0
madara_1.jpg Pred: 0.0
madara_2.jpg Pred: 0.0
madara_3.jpg Pred: 1.0
akahara_1.jpg Pred: 1.0
akahara_2.jpg Pred: 1.0
akahara_3.jpg Pred: 1.0
madara_1.jpg Pred: 0.0
madara_2.jpg Pred: 0.0
madara_3.jpg Pred: 0.0
|
Peak declustering and signal crossings detection.ipynb | ###Markdown
Stochastic signal Peak declustering and signal-crossing detection are useful tools when analyzing (stochastic) signals. This notebook demonstrates how these tools can be used. Spectrum to generate the signal from Let's use a spectrum that exhibits a power-law-like tail decay. This kind of spectrum is common in environmental descriptions for offshore applications (https://wikiwaves.org/Ocean-Wave_Spectra).
###Code
# Imports assumed by this notebook (here `signal` refers to the SciPy branch
# that carries the proposed argcross/decluster_peaks functions)
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal
from scipy import stats, signal

ref_scale = 1.
spectrum_mode = 0.20
spectrum_shape = 4.
spectrum_scale = spectrum_mode*((spectrum_shape + 1)/spectrum_shape)**(1./spectrum_shape)
spectrum_dist = stats.invweibull(spectrum_shape, 0., spectrum_scale)
freq_delta = 0.0005
freq = np.arange(0.005, 5., freq_delta)
spectrum_norm = spectrum_dist.pdf(freq)
spectrum = ref_scale*spectrum_norm
plt.figure()
plt.plot(freq, spectrum)
plt.xlabel('Frequency [Hz]')
plt.ylabel('Power spectrum')
plt.xlim(0., 1.)
###Output
_____no_output_____
###Markdown
Realize stochastic signal Let's generate a Gaussian signal based on the spectrum above.
###Code
t = np.arange(0., 3600., step=0.1) # 1 hour signal at 10 Hz
amplitudes = np.sqrt(2.)*stats.norm.rvs(0, np.sqrt(spectrum*freq_delta))
phases = stats.uniform.rvs(-np.pi, np.pi, spectrum.size)
theta = np.zeros(t.size)
for f, amp, phase in zip(freq, amplitudes, phases):
f *= 2.*np.pi
xt_arg = f*t + phase
sin = np.sin(xt_arg)
cos = np.cos(xt_arg)
theta += amp*sin
plt.figure()
plt.plot(t, theta)
plt.axhline(0, linestyle='--', color='k')
plt.xlabel('Seconds [s]')
plt.grid()
###Output
_____no_output_____
###Markdown
Finding crossings The proposed pull-request provides **scipy.signal.argcross**, **scipy.signal.argcrossup**, and **scipy.signal.argcrossdown** to detect "crossings", i.e. sign changes. The API is designed so that these functions behave the same way as **scipy.signal.argrelmax** etc.
###Code
crossings, *_ = signal.argcross(theta, threshold=0.)
plt.figure()
plt.plot(t, theta)
plt.plot(t[crossings], theta[crossings], 'r.')
plt.axhline(0, linestyle='--', color='k')
plt.xlim(1000, 1050)
plt.xlabel('Seconds [s]')
plt.grid()
crossings_up, *_ = signal.argcrossup(theta, threshold=0.)
crossings_down, *_ = signal.argcrossdown(theta, threshold=0.)
plt.figure()
plt.plot(t, theta)
plt.plot(t[crossings_up], theta[crossings_up], 'g^', label='Data point before an up-crossing')
plt.plot(t[crossings_down], theta[crossings_down], 'rv', label='Data point before a down-crossing')
plt.axhline(0, linestyle='--', color='k')
plt.xlim(1000, 1050)
plt.xlabel('Seconds [s]')
plt.legend()
plt.grid()
###Output
_____no_output_____
###Markdown
In signal analysis, there is a metric/statistic called the zero-crossing period. It describes the expected period between each signal upcrossing (i.e. each time the signal goes from negative to positive). The topic is covered in most textbooks on stochastic signals, e.g. ref. *Naess, Arvid, and Torgeir Moan. Stochastic dynamics of marine structures. Cambridge University Press, 2012.* https://www.amazon.com/Stochastic-Dynamics-Marine-Structures-Arvid/dp/0521881552
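###Markdown
For reference (standard definitions, not something introduced in this notebook): with spectral moments $m_n = \int_0^\infty \omega^n S(\omega)\, d\omega$, the mean zero-up-crossing period is $T_z = 2\pi \sqrt{m_0 / m_2}$. The next cells approximate these integrals with a periodogram and trapezoidal integration.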
###Code
freq, power_spec = scipy.signal.periodogram(theta, fs=10.)
omega = 2*np.pi*freq
m0 = np.trapz(power_spec) # 0th spectral moment
m2 = np.trapz(omega**2 * power_spec) # 2nd spectral moment
###Output
_____no_output_____
###Markdown
Estimate zero-crossing period from spectral moments
###Code
tz_ps = 2*np.pi*np.sqrt(m0/m2)
tz_ps
###Output
_____no_output_____
###Markdown
Estimate zero-crossing period empirically from the signal itself.
###Code
tz = 3600/len(crossings_up)
tz
###Output
_____no_output_____
###Markdown
Peak declustering In the analysis of signals, the peaks are perhaps the most interesting part. In particular, they may be used in predictive analysis, such as extreme value analysis. Therefore, being able to describe the statistical distribution of the peaks is essential. However, analytical results are only known for some idealized cases such as Gaussian signals: * narrow-banded -> peaks will be Rayleigh distributed * wide-banded -> peaks will be Rice distributed In the real world, these conditions seldom apply. Therefore, a more pragmatic approach is often adopted: describe the distribution of peaks by means of a Weibull distribution, or a Pareto distribution (often referred to as peak-over-threshold models). However, for these models to be applicable, we should be able to assume statistical independence between the data points. (This is to avoid significant complexity in our analysis.) The statistical independence may be achieved by means of 2 (common) methods, as implemented in **scipy.signal.decluster_peaks**: Mean-upcrossing (``mean``) declustering is as follows: 1. Identify clusters of exceedances, i.e., find all peaks between two upcrossings above the signal mean value. 2. Select only the n-th largest peaks from each cluster (cf. ``order``). 3. Exclude peaks that fall below the threshold (cf. ``x_th``). Runs (``runs``) declustering is as follows: 1. Identify clusters of exceedances, i.e., find all peaks between two upcrossings above the threshold (cf. ``x_th``). 2. Merge clusters that are separated by fewer than k runs (cf. ``runs``). The number of peaks below the threshold separating a down-crossing and the subsequent up-crossing is called runs. 3. Select only the n-th largest peaks from each cluster (cf. ``order``). The difference between the methods is subtle, and they even overlap for ``x_th = x.mean()`` and ``runs = 0``. References ---------- .. [1] Coles S, (2001), An Introduction to Statistical Modelling of Extreme Values. Springer. The point of both methods is to select a subset of all peaks that can be assumed to be statistically independent. This subset is then amenable to simpler, well-established analysis methods (and distribution functions). Mean-upcrossing declustering
###Code
peaks_dc_1 = signal.decluster_peaks(theta, x_th=theta.mean()) # same as argerelmax, then applying a mask
peaks_dc_1 = np.concatenate(peaks_dc_1)
peaks_dc_2 = signal.decluster_peaks(theta, x_th=theta.mean(), order=2) # same as argerelmax, then applying a mask
peaks_dc_2 = np.concatenate(peaks_dc_2)
peaks_dc_3 = signal.decluster_peaks(theta, x_th=theta.mean(), order=3) # same as argerelmax, then applying a mask
peaks_dc_3 = np.concatenate(peaks_dc_3)
plt.figure()
plt.plot(t, theta)
plt.plot(t[peaks_dc_1], theta[peaks_dc_1], 'x', label='mean declustered peaks - order=1')
plt.axhline(0, linestyle='--', color='k')
plt.xlim(1000, 1050)
plt.xlabel('Seconds [s]')
plt.legend()
plt.grid()
plt.figure()
plt.plot(t, theta)
plt.plot(t[peaks_dc_2], theta[peaks_dc_2], 'x', label='mean declustered peaks - order=2')
plt.axhline(0, linestyle='--', color='k')
plt.xlim(1000, 1050)
plt.xlabel('Seconds [s]')
plt.legend()
plt.grid()
plt.figure()
plt.plot(t, theta)
plt.plot(t[peaks_dc_3], theta[peaks_dc_3], 'x', label='mean declustered peaks - order=3')
plt.axhline(0, linestyle='--', color='k')
plt.xlim(1000, 1050)
plt.xlabel('Seconds [s]')
plt.legend()
plt.grid()
###Output
_____no_output_____
###Markdown
Let's look at the histogram of peaks.
###Code
peaks, *_ = signal.argrelmax(theta)
peaks_dc_1 = signal.decluster_peaks(theta, x_th=2.) # same as argerelmax, then applying a mask
peaks_dc_1 = np.concatenate(peaks_dc_1)
plt.figure()
_ = plt.hist(theta[peaks], bins=25, alpha=0.5)
_ = plt.hist(theta[peaks_dc_1], bins=5)
###Output
_____no_output_____
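###Markdown
As a rough sketch of the peak-over-threshold idea mentioned above (assuming `scipy.stats` has been imported as `stats`): once the peaks have been declustered, the exceedances over the threshold can be fitted with, e.g., a generalized Pareto distribution.
###Code
# Illustrative only: fit a generalized Pareto distribution to the exceedances
# of the declustered peaks over the threshold used above (x_th = 2).
threshold = 2.
exceedances = theta[peaks_dc_1] - threshold
c_hat, loc_hat, scale_hat = stats.genpareto.fit(exceedances, floc=0.)
print(c_hat, scale_hat)
###Output
_____no_output_____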
###Markdown
Runs declustering
###Code
# at least 2 peaks must occur below the threshold to count as a cluster, then pick the largest peak
peaks_dc_1 = signal.decluster_peaks(theta, x_th=1., method='runs', runs=2)
peaks_dc_1 = np.concatenate(peaks_dc_1)
# at least 2 peaks must occur below the threshold to count as a cluster, then pick the 2 largest peak
peaks_dc_2 = signal.decluster_peaks(theta, x_th=1., order=2, method='runs', runs=2)
peaks_dc_2 = np.concatenate(peaks_dc_2)
# at least 2 peaks must occur below the threshold to count as a cluster, then pick the 3 largest peak
peaks_dc_3 = signal.decluster_peaks(theta, x_th=1., order=3, method='runs', runs=2)
peaks_dc_3 = np.concatenate(peaks_dc_3)
plt.figure()
plt.plot(t, theta)
plt.plot(t[peaks_dc_1], theta[peaks_dc_1], 'x', label='runs declustered peaks - order=1')
plt.axhline(1., linestyle='--', color='k')
plt.xlim(1000, 1050)
plt.xlabel('Seconds [s]')
plt.legend()
plt.grid()
plt.figure()
plt.plot(t, theta)
plt.plot(t[peaks_dc_2], theta[peaks_dc_2], 'x', label='runs declustered peaks - order=2')
plt.axhline(1., linestyle='--', color='k')
plt.xlim(1000, 1050)
plt.xlabel('Seconds [s]')
plt.legend()
plt.grid()
plt.figure()
plt.plot(t, theta)
plt.plot(t[peaks_dc_3], theta[peaks_dc_3], 'x', label='runs declustered peaks - order=3')
plt.axhline(1., linestyle='--', color='k')
plt.xlim(1000, 1050)
plt.xlabel('Seconds [s]')
plt.legend()
plt.grid()
peaks, *_ = signal.argrelmax(theta)
peaks_dc_1 = signal.decluster_peaks(theta, x_th=2., method='runs', runs=2) # same as argerelmax, then applying a mask
peaks_dc_1 = np.concatenate(peaks_dc_1)
plt.figure()
_ = plt.hist(theta[peaks], bins=25, alpha=0.5)
_ = plt.hist(theta[peaks_dc_1], bins=5)
###Output
_____no_output_____ |
homework 7/REVISED_homework7_reservoir_geomechanics.ipynb | ###Markdown
**Homework 7 Building Geomechanical Model for Barnett Shale** For reference, see Homework 6
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
!git clone https://www.github.com/yohanesnuwara/reservoir-geomechanics
###Output
Cloning into 'reservoir-geomechanics'...
warning: redirecting to https://github.com/yohanesnuwara/reservoir-geomechanics.git/
remote: Enumerating objects: 84, done.[K
remote: Counting objects: 100% (84/84), done.[K
remote: Compressing objects: 100% (84/84), done.[K
remote: Total 219 (delta 36), reused 0 (delta 0), pack-reused 135[K
Receiving objects: 100% (219/219), 13.72 MiB | 33.37 MiB/s, done.
Resolving deltas: 100% (92/92), done.
###Markdown
Data and knowns
###Code
depth = 5725 # ft
Sh_grad = 0.65 # least principal stress gradient, psi/ft
mu = 0.75
Pm_grad = 0.53 # mud weight gradient, psi/ft
Pp_grad = 0.48 # pore pressure gradient, psi/ft
T0 = 0 # tensile strength, psi
Wbo = 0 # wellbore breakout width, degree
# Sv at depth from homework 1
Sv = 6374.8 # psi
# UCS from sonic log at depth from homework 3
C0_sonic = 12659.88 # psi
# Pp at depth
Pp = depth * Pp_grad
# Pm at depth
Pm = depth * Pm_grad
###Output
_____no_output_____
###Markdown
$$\frac{S_1-Pp}{S_3-Pp}=(\sqrt{\mu^2+1}+\mu)^2$$$(\sqrt{\mu^2+1}+\mu)^2$ denoted as `ratio`
###Code
ratio = (np.sqrt((mu**2) + 1) + mu)**2
ratio
###Output
_____no_output_____
###Markdown
Lower bound of least principal stress (in this case `Sh`) is calculated from the above equation$$Sh=(\frac{Sv-Pp}{(\sqrt{\mu^2+1}+\mu)^2})+Pp$$
###Code
Sh_lower = ((Sv - Pp) / ratio) + Pp
print('Lower bound of minimum horizontal stress:', Sh_lower, 'psi')
###Output
Lower bound of minimum horizontal stress: 3654.7 psi
###Markdown
Upper bound of intermediate principal stress (in this case `SH`) is calculated from equation (derived from the very first equation)$$SH = [(\sqrt{\mu^2+1}+\mu)^2]\cdot(Sv-Pp)+Pp$$
###Code
SH_upper = (ratio * (Sv - Pp)) + Pp
print('Upper bound of maximum horizontal stress:', SH_upper, 'psi')
###Output
Upper bound of maximum horizontal stress: 17255.2 psi
###Markdown
Construct stress polygon using `zobackogram` function from Homework 6
###Code
import sys
sys.path.append('/content/reservoir-geomechanics/homework 6')
from zobackogram import zobackogram
zobackogram(Sv, Pp, mu)
###Output
_____no_output_____
###Markdown
Plot tensile fracture and wellbore breakout line
###Code
def breakout(Sv, Pp, mu, Pm, T0, C0, Wbo):
import numpy as np
import matplotlib.pyplot as plt
# plot zobackogram
p1, p2, nf, ss, rf = zobackogram(Sv, Pp, mu)
# ratio of S1-Pp to S3-Pp
ratio = (np.sqrt((mu**2) + 1) + mu)**2
# lower limit of Shmin, from Sv
Sh = ((Sv - Pp) / ratio) + Pp
# upper limit of SHmax, from Sv and Pp
SH = (ratio * (Sv - Pp)) + Pp
# axes of plot
Sv_x = np.arange(0, (SH + 10000), 10)
Sv_y = Sv_x
"Tensile fracture line"
Sh_line = Sv_x
SH_tensile = (3 * Sh_line) - ((2 * Pp) + (Pm - Pp)) - T0
tensile = plt.plot(Sh_line, SH_tensile, '--')
"Wellbore breakout line"
SH_breakout = (C0 + (2 * Pp) + (Pm - Pp) - (Sh_line * (1 + 2 * np.cos(np.deg2rad(180 - Wbo))))) / (1 - 2 * np.cos(np.deg2rad(180 - Wbo)))
breakout = plt.plot(Sh_line, SH_breakout, '--')
plt.legend((p1[0], p2[0], nf[0], ss[0], rf[0], tensile[0], breakout[0]),
['Sv line', 'Sv data', 'Normal Faulting (NF) Polygon',
'Strike Slip (SS) Polygon', 'Reverse Faulting (RF) Polygon',
'Tensile fracture line', 'Wellbore breakout line'])
return(Sh, SH)
Sh, SH = breakout(Sv, Pp, mu, Pm, T0, C0_sonic, Wbo)
###Output
_____no_output_____
###Markdown
The value of minimum stress from LOT on the polygon The purple line is the minimum horizontal stress from ISIP data
###Code
Sh_data = depth * Sh_grad
print('Minimum horizontal stress from Instantaneous Shut-in Pressure (ISIP) data:', Sh_data, 'psi \n')
sh_x = np.array([Sh_data, Sh_data])
sh_y = np.array([0, SH+10000])
breakout(Sv, Pp, mu, Pm, T0, C0_sonic, Wbo)
plt.plot(sh_x, sh_y, '--', color='purple')
###Output
Minimum horizontal stress from Instantaneous Shut-in Pressure (ISIP) data: 3721.25 psi
###Markdown
Zooming in
###Code
breakout(Sv, Pp, mu, Pm, T0, C0_sonic, Wbo)
plt.plot(sh_x, sh_y, '--', color='purple')
plt.xlim(0,11000); plt.ylim(0,11000)
###Output
_____no_output_____
###Markdown
Constraints of stresses Tensile fractureImplicit equation for tensile fracture line is:$$Sh+SH-2(SH-Sh)-2P_p-(P_m-P_p) = T_0$$Solving for $SH$, explicit equation becomes:$$SH=3Sh - (T_0+2P_p+(P_m-P_p))$$Criteria for no tensile fracture occurs when:$$Sh+SH-2(SH-Sh)-2P_p-(P_m-P_p)\geq T_0$$So, $SH$ is explicitly:$$SH\leq 3Sh - (T_0+2P_p+(P_m-P_p))$$Or $SH$ must be below the tensile line Wellbore breakoutImplicit equation for the breakout line is: $$Sh+SH-2(SH-Sh)\cos(\pi-W_{bo})-2P_p-(P_m-P_p)=C_0$$Solving for $SH$, explicit equation becomes:$$SH=\frac{C_0+2P_p+(P_m-P_p)-Sh(1+2\cos(\pi-W_{bo}))}{1-2\cos(\pi-W_{bo})}$$Criteria for no wellbore breakout occurs when:$$Sh+SH-2(SH-Sh)\cos(\pi-W_{bo})-2P_p-(P_m-P_p) \geq C_0$$So, $SH$ is explicitly:$$SH \leq \frac{C_0+2P_p+(P_m-P_p)-Sh(1+2\cos(\pi-W_{bo}))}{1-2\cos(\pi-W_{bo})}$$Or $SH$ must be below the breakout line Corner point 1: lowermost point in NF regime on $SH=Sh$ line
###Code
# minimum stress
Sh1 = Sh_lower
print("Minimum horizontal stress at point 1:", Sh1, "psi")
# maximum stress
SH1 = Sh1
print("Maximum horizontal stress at point 1:", SH1, "psi")
###Output
Minimum horizontal stress at point 1: 3654.7 psi
Maximum horizontal stress at point 1: 3654.7 psi
###Markdown
Corner point 2: intersection between tensile line and lower bound of Shmin in NF regime
###Code
# minimum stress
Sh2 = Sh_lower
print("Minimum horizontal stress at point 2:", Sh2, "psi")
# maximum stress
SH2 = (3 * Sh2) - ((2 * Pp) + (Pm - Pp))
print("Maximum horizontal stress at point 2:", SH2, "psi")
###Output
Minimum horizontal stress at point 2: 3654.7 psi
Maximum horizontal stress at point 2: 5181.8499999999985 psi
###Markdown
Corner point 3: intersection between tensile line and breakout line in SS regime
###Code
# Solving Sh and SH simultaneously
from scipy.optimize import fsolve
def f(y):
Sh, SH = y
# tensile line
f1 = (3 * Sh) - ((2 * Pp) + (Pm - Pp)) - T0 - SH
# breakout line
f2 = ((C0_sonic + (2 * Pp) + (Pm - Pp) - (Sh * (1 + 2 * np.cos(np.deg2rad(180 - Wbo))))) / (1 - 2 * np.cos(np.deg2rad(180 - Wbo)))) - SH
return[f1, f2]
solve = fsolve(f, [1, 1]) # initial guess
# minimum stress
Sh3 = solve[0]
print("Minimum horizontal stress at point 3:", Sh3, "psi")
# maximum stress
SH3 = solve[1]
print("Maximum horizontal stress at point 3:", SH3, "psi")
###Output
Minimum horizontal stress at point 3: 4473.61 psi
Maximum horizontal stress at point 3: 7638.579999999999 psi
###Markdown
Corner point 4: intersection between breakout line and $SH=Sh$ line in RF regime
###Code
# Solving Sh and SH simultaneously
from scipy.optimize import fsolve
def f(y):
Sh, SH = y
# normal line
f1 = SH - Sh
# breakout line
f2 = ((C0_sonic + (2 * Pp) + (Pm - Pp) - (Sh * (1 + 2 * np.cos(np.deg2rad(180 - Wbo))))) / (1 - 2 * np.cos(np.deg2rad(180 - Wbo)))) - SH
return[f1, f2]
solve = fsolve(f, [1, 1]) # initial guess
# minimum stress
Sh4 = solve[0]
print("Minimum horizontal stress at point 4:", Sh4, "psi")
# maximum stress
SH4 = solve[1]
print("Maximum horizontal stress at point 4:", SH4, "psi")
###Output
Minimum horizontal stress at point 4: 9221.064999999999 psi
Maximum horizontal stress at point 4: 9221.064999999999 psi
###Markdown
Plot the constrained area on stress polygon
###Code
Sh_constrain = [Sh1, Sh2, Sh3, Sh4]
SH_constrain = [SH1, SH2, SH3, SH4]
breakout(Sv, Pp, mu, Pm, T0, C0_sonic, Wbo)
plt.plot(sh_x, sh_y, '--', color='purple')
# fill area
plt.fill(Sh_constrain, SH_constrain, color='green', alpha=0.2)
plt.xlim(0,11000); plt.ylim(0,11000)
###Output
_____no_output_____
###Markdown
Questions Question 1a. From the first homework assignment, what is the overburden stress of the site at 5725 feet depth in psi?
###Code
print('The overburden stress at depth 5725 ft is:', Sv, 'psi')
###Output
The overburden stress at depth 5725 ft is: 6374.8 psi
###Markdown
Question 1b. What is the minimum horizontal stress of the site at 5725 feet depth in psi?
###Code
print('The minimum horizontal stress at depth 5725 ft is:', Sh_data, 'psi')
###Output
The minimum horizontal stress at depth 5725 ft is: 3721.25 psi
###Markdown
Question 1c. What is the pore pressure of the site at 5725 feet depth in psi?
###Code
print('The pore pressure at depth 5725 ft is:', Pp, 'psi')
###Output
The pore pressure at depth 5725 ft is: 2748.0 psi
###Markdown
Question 1d. What is the mud pressure of the site at 5725 feet depth in psi?
###Code
print('The mud pressure at depth 5725 ft is:', Pm, 'psi')
###Output
The mud pressure at depth 5725 ft is: 3034.25 psi
###Markdown
Question 1e. From the third homework assignment, what is the unconfined compressive strength estimated from a sonic log for the Barnett formation of the site at 5725 feet depth in psi?
###Code
print('The unconfined compressive strength at depth 5725 ft is:', C0_sonic, 'psi')
###Output
The unconfined compressive strength at depth 5725 ft is: 12659.88 psi
###Markdown
Question 1f. What is the upper bound of the maximum horizontal stress of the site at 5725 feet depth in psi?From the intersection of our $Sh$ and the polygons, we know that we are in Normal Faulting (NF) regime.$Sh$ from ISIP data 3721.25 psi substituted to tensile line$$SH=3Sh - (T_0+2P_p+(P_m-P_p))$$
###Code
SH_upper = (3 * Sh_data) - ((2 * Pp) + (Pm - Pp)) - T0
print('Upper bound of maximum horizontal stress:', SH_upper, 'psi')
###Output
Upper bound of maximum horizontal stress: 5381.5 psi
###Markdown
Question 1g. What is the lower bound of the maximum horizontal stress of the site at 5725 feet depth in psi?$Sh$ from ISIP data 3721.25 psi substituted to $SH=Sh$ line
###Code
SH_lower = Sh_data
print('Lower bound of maximum horizontal stress:', SH_lower, 'psi')
###Output
Lower bound of maximum horizontal stress: 3721.25 psi
###Markdown
Question 1h. What is the value of the upper bound of a ฯ of the site at 5725 feet depth? Enter your answer as a number from 0 to 1$$\phi=\frac{S_2-S_3}{S_1-S_3}$$Regime is NF, the stresses are: $Sh<SH<Sv$Therefore, $S_1=Sv$, $S_2=SH$, and $S_3=Sh$
###Code
phi_upper = (SH_upper - Sh_data) / (Sv - Sh_data)
print('Upper bound of phi:', phi_upper)
###Output
Upper bound of phi: 0.6256712705620772
###Markdown
Question 1i. What is the value of the upper bound of an Aฯ of the site at 5725 feet depth? Enter your answer as a number from 0 to 3.$$A \phi=(n+0.5)+(โ1)^n(\phi โ0.5)$$Where $n=0$ for NF regime, $n=1$ for SS regime, $n=2$ for RF regime
###Code
n = 0
A_phi_upper = (n + 0.5) + (1 * (phi_upper - 0.5))
print('Upper bound of (A*phi):', A_phi_upper)
###Output
Upper bound of (A*phi): 0.6256712705620772
|
HackerNews Challenge.ipynb | ###Markdown
HackerNews data analysis challenge with Spark In this notebook, you will analyse a dataset of (almost) all submitted HackerNews posts with Spark. Let's start by importing some of the libraries you will need.
###Code
import json
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime as dt
%matplotlib inline
###Output
_____no_output_____
###Markdown
The file has one JSON entry per line. In order to make accessing it easier, first turn each entry into a dictionary and use `persist()` to cache the resulting RDD.
###Code
dataset_json = sc.textFile("HNStories.json")
dataset = dataset_json.map(lambda x: json.loads(x))
dataset.persist()
###Output
_____no_output_____
###Markdown
Finally, Spark has many helper functions on top of the ones we have studied which you will find useful. You can view them at [http://spark.apache.org/docs/latest/api/python/pyspark.htmlpyspark.RDD](http://spark.apache.org/docs/latest/api/python/pyspark.htmlpyspark.RDD) Task 1 Lets start with some initial analysis. * How many elements are in your datasets?* What does the first element look like?
###Code
print dataset.count()
print dataset.take(1)
###Output
[{u'objectID': u'7815290', u'author': u'TuxLyn', u'url': u'https://duckduckgo.com/settings', u'num_comments': 0, u'created_at': u'2014-05-29T08:25:40Z', u'title': u'DuckDuckGo Settings', u'points': 1, u'created_at_i': 1401351940}]
###Markdown
Each element is a dictionary of attributes and their values for a post. Can you find the set of all attributes used throughout the RDD? The function `dictionary.keys()` gives you the list of attributes of a dictionary.
###Code
attributes = dataset.flatMap(lambda dictionary: dictionary.keys())
print attributes.take(5)
###Output
[u'objectID', u'title', u'url', u'num_comments', u'created_at']
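###Markdown
The cell above only shows the first few keys; to actually get the set of distinct attributes, one simple option (a sketch, using the same `attributes` RDD) is:
###Code
distinct_attributes = attributes.distinct().collect()
print distinct_attributes
###Output
_____no_output_____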
###Markdown
We see that there are more attributes than just the ones used in the first element. The function `compare_elems` below returns `True` if two elements have exactly the same set of attributes. Can you use it to count the number of elements which have the same set of attributes as the first element?
###Code
# Returns true if two elements have the same schema
def compare_elems(fist, second):
if len(fist) != len(second):
return False
for key in fist.iterkeys():
if key not in second:
return False
return True
first = dataset.take(1)[0]
same = dataset.filter(lambda x: compare_elems(x, first)).persist()
print same.count()
###Output
_____no_output_____
###Markdown
We see that the vast majority of elements hold the same structure. In order to make this analysis easier, redefine `dataset` to only have elements which hold this structure.
###Code
dataset = same
###Output
_____no_output_____
###Markdown
All of the following tasks are optional; if you want to analyse the dataset in your own way using Spark, feel free to do so! The tasks are there as a guide. Task 2: How many posts through time The field `created_at_i` is very useful: it gives you a UNIX timestamp of the time at which the post was created. The following function lets you extract a time from a timestamp.
###Code
def extract_time(timestamp):
return dt.fromtimestamp(timestamp)
###Output
_____no_output_____
###Markdown
Find the minimum and maximum timestamps in the RDD and call them `min_time` and `max_time`. These correspond to the first and last posts; when did they occur?
###Code
times = dataset.map(lambda x: (x, extract_time(x['created_at_i']))).persist()
min_time1 = times.reduce(lambda x, y: x if x[1]<y[1] else y)
max_time1 = times.reduce(lambda x, y: x if x[1]>y[1] else y)
min_time = min_time1[0]['created_at_i']
max_time = max_time1[0]['created_at_i']
print min_time
print max_time
###Output
1160418111
1401351940
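###Markdown
Using `extract_time` defined above, the two timestamps can be turned into dates to answer when the first and last posts occurred:
###Code
print extract_time(min_time)
print extract_time(max_time)
###Output
_____no_output_____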
###Markdown
Now let's analyse how the number of elements evolves through time. The following function assigns a record to one of 200 "buckets" of time. Use it to count the number of elements that fall within each bucket and call the result `buckets_rdd`. The result should be such that `buckets` below generates the corresponding output.
###Code
interval = (max_time - min_time + 1) / 200.0
def get_bucket(rec):
return int((rec['created_at_i'] - min_time)/interval)
buckets_rdd = dataset.map(lambda x: (get_bucket(x), 1)) \
.reduceByKey(lambda x, y: x+y)
# Use this to test your result
buckets = sorted(buckets_rdd.collect())
print buckets
#This is the desired output
buckets = sorted(buckets_rdd.collect())
print buckets
###Output
[(0, 46), (4, 1), (9, 270), (10, 763), (11, 762), (12, 211), (13, 1059), (14, 1078), (15, 957), (16, 749), (17, 721), (18, 685), (19, 664), (20, 831), (21, 1082), (22, 1152), (23, 1232), (24, 1191), (25, 1218), (26, 652), (33, 1134), (34, 1924), (35, 2047), (36, 1982), (37, 2453), (38, 2643), (39, 2487), (40, 2579), (41, 2630), (42, 2652), (43, 2805), (44, 2822), (45, 2975), (46, 3010), (47, 3031), (48, 2865), (49, 3018), (50, 3000), (51, 3301), (52, 3222), (53, 3166), (54, 3056), (55, 2967), (56, 3194), (57, 2660), (58, 2987), (59, 3575), (60, 3883), (61, 3733), (62, 3673), (63, 3901), (64, 3787), (65, 3653), (66, 4135), (67, 3977), (68, 3881), (69, 3998), (70, 3867), (71, 777), (83, 2534), (84, 4009), (85, 5552), (86, 6084), (87, 6297), (88, 6271), (89, 6040), (90, 5742), (91, 5761), (92, 6537), (93, 6656), (94, 6769), (95, 6262), (96, 6258), (97, 6132), (98, 6525), (99, 6606), (100, 6717), (101, 6972), (102, 6887), (103, 7602), (104, 7982), (105, 7979), (106, 8462), (107, 8614), (108, 8275), (109, 8906), (110, 7132), (111, 8911), (112, 9337), (113, 9607), (114, 9600), (115, 9900), (116, 10470), (117, 10119), (118, 9597), (119, 9655), (120, 9712), (121, 9830), (122, 9991), (123, 10204), (124, 10255), (125, 11317), (126, 11781), (127, 11885), (128, 12401), (129, 13721), (130, 14036), (131, 13075), (132, 14150), (133, 14764), (134, 14109), (135, 12479), (136, 10702), (137, 12640), (138, 12814), (139, 12473), (140, 12196), (141, 12648), (142, 13716), (143, 13901), (144, 13744), (145, 13326), (146, 13185), (147, 13154), (148, 12238), (149, 12071), (150, 12332), (151, 12595), (152, 12359), (153, 10498), (154, 10227), (155, 10690), (156, 10448), (157, 10243), (158, 10223), (159, 10833), (160, 10646), (161, 11609), (162, 9083), (163, 10618), (164, 11768), (165, 12578), (166, 12250), (167, 12586), (168, 12649), (169, 12110), (170, 11937), (171, 11748), (172, 11597), (173, 11520), (174, 12664), (175, 11998), (176, 11326), (177, 11441), (178, 11410), (179, 11573), (180, 11123), (181, 11644), (182, 11834), (183, 11538), (184, 12059), (185, 12712), (186, 11263), (187, 10617), (188, 9082), (189, 8761), (190, 10514), (191, 10521), (192, 10236), (193, 10370), (194, 10224), (195, 11424), (196, 10015), (197, 9686), (198, 9617), (199, 8939)]
###Markdown
We can then use this to plot the number of submitted posts over time.
###Code
bs = [dt.fromtimestamp(x[0]*interval + min_time) for x in buckets]
ts = [x[1] for x in buckets]
plt.plot(bs, ts)
###Output
_____no_output_____
###Markdown
Task 3 The following function gets the hour of the day at which a post was submitted. Use it to find the number of posts submitted at each hour of the day. The value of `hours_buckets` should match the one printed below.
###Code
def get_hour(rec):
t = dt.fromtimestamp(rec['created_at_i'])
return t.hour
hours_buckets_rdd = dataset.map(lambda x: (get_hour(x), 1)) \
.reduceByKey(lambda x, y: x+y).persist()
hours_buckets = sorted(hours_buckets_rdd.collect())
print hours_buckets
hrs = [x[0] for x in hours_buckets]
sz = [x[1] for x in hours_buckets]
plt.plot(hrs, sz)
###Output
_____no_output_____
###Markdown
Task 4 The number of points scored by a post is under the attribute `points`. Use it to compute the average score received by submissions for each hour.
###Code
hours_buckets_rdd_total = dataset.map(lambda x: (get_hour(x), (x['points'], 1))) \
.reduceByKey(lambda x, y: (x[0]+y[0], x[1]+y[1])) \
.mapValues(lambda x: x[0]/float(x[1])).persist()
scores_per_hour = sorted(hours_buckets_rdd_total.collect())
print scores_per_hour
hrs = [x[0] for x in scores_per_hour]
sz = [x[1] for x in scores_per_hour]
plt.plot(hrs, sz)
###Output
_____no_output_____
###Markdown
It may be more useful to look at successful posts that get over 200 points. Find the proportion of posts that get above 200 points per hour.
###Code
prop_per_hour_rdd = dataset.map(lambda x: (get_hour(x), (x['points']>200, 1))) \
.reduceByKey(lambda x, y: (x[0]+y[0], x[1]+y[1])) \
.mapValues(lambda x: x[0]/float(x[1])).persist()
prop_per_hour = sorted(prop_per_hour_rdd.collect())
print prop_per_hour
prop_per_hour = sorted(prop_per_hour_rdd.collect())
print prop_per_hour
hrs = [x[0] for x in prop_per_hour]
sz = [x[1] for x in prop_per_hour]
plt.plot(hrs, sz)
###Output
_____no_output_____
###Markdown
Task 5 The following function lists the words in a title. Use it to count the number of words in the title of each post, and look at the proportion of successful posts for each title length.
###Code
import re
def get_words(line):
return re.compile('\w+').findall(line)
prop_per_title_length_rdd = dataset.map(lambda x: (len(get_words(x['title'])), (x['points']>200, 1))) \
.reduceByKey(lambda x, y: (x[0]+y[0], x[1]+y[1])) \
.mapValues(lambda x: x[0]/float(x[1])).persist()
prop_per_title_length = sorted(prop_per_title_length_rdd.collect())
print prop_per_title_length
prop_per_title_length = sorted(prop_per_title_length_rdd.collect())
print prop_per_title_length
hrs = [x[0] for x in prop_per_title_length]
sz = [x[1] for x in prop_per_title_length]
plt.plot(hrs, sz)
###Output
_____no_output_____
###Markdown
Let's compare this with the distribution of the number of words. Count, for each title length, the number of submissions with that length.
###Code
submissions_per_length_rdd = dataset.map(lambda x: (len(get_words(x['title'])), 1)) \
.reduceByKey(lambda x, y: x+y).persist()
submissions_per_length = sorted(submissions_per_length_rdd.collect())
print submissions_per_length
submissions_per_length = sorted(submissions_per_length_rdd.collect())
print submissions_per_length
hrs = [x[0] for x in submissions_per_length]
sz = [x[1] for x in submissions_per_length]
plt.plot(hrs, sz)
###Output
_____no_output_____
###Markdown
Looks like most people are getting it wrong! Task 6 For this task, you will need a new function: `takeOrdered()`. Like `take()` it collects elements from an RDD. However, it can be applied to the smallest elements: for example, `takeOrdered(10)` returns the 10 smallest elements. Furthermore, you can pass it a function to specify the way in which the elements should be ordered. For example, `takeOrdered(10, lambda x: -x)` will return the 10 largest elements. The function below extracts the url domain out of a record. Use it to count the number of distinct domains posted to.
###Code
from urlparse import urlparse
def get_domain(rec):
url = urlparse(rec['url']).netloc
if url[0:4] == 'www.':
return url[4:]
else:
return url
print get_domain(dataset.take(1)[0])
distinctDomains = dataset.map(lambda x: (get_domain(x), 1)).reduceByKey(lambda x, y: x+y).persist()
print distinctDomains.count()
###Output
182515
###Markdown
Using `takeOrdered()` find the 25 most popular domains posted to.
###Code
top25 = distinctDomains.takeOrdered(25, lambda x: -x[1])  # (domain, count) pairs, most popular first
print top25
print top25
index = np.arange(25)
labels = [x[0] for x in top25]
counts = np.array([x[1] for x in top25]) * 100.0/dataset.count()
plt.xticks(index,labels, rotation='vertical')
plt.bar(index, counts, 0.5)
###Output
_____no_output_____
###Markdown
Create a pair RDD with 26 elements mapping each of these 25 popular domains to the average score received by the corresponding submissions, as well as an `other` field for all submissions to other domains.
###Code
def map_to_domain(rec):
domain = get_domain(rec)
if domain in dict(top25):
return domain
else:
return 'other'
# average score per (top-25 or 'other') domain, mirroring the per-hour computation above
domain_av_score_rdd = dataset.map(lambda x: (map_to_domain(x), (x['points'], 1))) \
                             .reduceByKey(lambda x, y: (x[0]+y[0], x[1]+y[1])) \
                             .mapValues(lambda x: x[0]/float(x[1])).persist()
domain_av_score = domain_av_score_rdd.collect()
print domain_av_score
domain_av_score = domain_av_score_rdd.collect()
print domain_av_score
index26 = np.arange(26)
labels = [x[0] for x in top25]
labels.append('other')
vals = np.array([dict(domain_av_score)[x] for x in labels])
plt.xticks(index26, labels, rotation='vertical')
plt.bar(index26, vals, 0.5)
###Output
_____no_output_____
###Markdown
Now compute the proportion of successes for each domain (over 200 points).
###Code
# proportion of submissions with over 200 points per (top-25 or 'other') domain
domain_prop_rdd = dataset.map(lambda x: (map_to_domain(x), (x['points']>200, 1))) \
                         .reduceByKey(lambda x, y: (x[0]+y[0], x[1]+y[1])) \
                         .mapValues(lambda x: x[0]/float(x[1])).persist()
domain_prop = domain_prop_rdd.collect()
print domain_prop
domain_prop = domain_prop_rdd.collect()
print domain_prop
index26 = np.arange(26)
labels = [x[0] for x in top25]
labels.append('other')
vals = np.array([dict(domain_prop)[x] for x in labels])
plt.xticks(index26, labels, rotation='vertical')
plt.bar(index26, vals, 0.5)
###Output
_____no_output_____ |
notebooks/Null model.ipynb | ###Markdown
Null Model
###Code
# auto-reload local modules when they change
%load_ext autoreload
%autoreload 2
# fix system path
import sys
sys.path.append("/home/jovyan/work")
import pandas as pd
import numpy as np
from src.data.sets import load_sets
X_train, y_train, X_val, y_val, X_test, y_test = load_sets()
from src.models.null import NullModel
baseline_model = NullModel(target_type='classification')
y_base = baseline_model.fit_predict(y_train)
from src.models.performance import print_class_perf
print_class_perf(y_base, y_train, set_name='Training', average='weighted')
###Output
Accuracy Training: 0.07430256975352931
F1 Training: 0.01027805764859095
|
model/CalculateDistance.ipynb | ###Markdown
Turn Centroids into a Distance Matrix This script takes the centroids from our model and calculates a distance matrix that our API will use for suggestions Import Libraries
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Convert Centroids CSV to Numpy Array
###Code
df = pd.read_csv('s3://millionsongdataset/centers/centers.csv')
data = df.to_numpy()
###Output
_____no_output_____
###Markdown
Calculate Distance Matrix Function
###Code
def dist_matrix_calc(data):
from scipy.spatial import distance
matrix = []
for i in data:
dist_list = []
for j in data:
dist_list.append(round(distance.euclidean(i, j),2))
matrix.append(dist_list)
return np.array(matrix)
###Output
_____no_output_____
###Markdown
Run Function
###Code
results = dist_matrix_calc(data)
###Output
_____no_output_____
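###Markdown
Side note (a sketch, assuming the same `data` array): the nested Python loops above can be replaced by a vectorised SciPy call that computes the full pairwise Euclidean distance matrix in one step.
###Code
from scipy.spatial.distance import cdist

# Pairwise Euclidean distances between all centroids, rounded like the loop version
results_vectorized = np.round(cdist(data, data), 2)
###Output
_____no_output_____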
###Markdown
Upload to S3
###Code
pd.DataFrame(results).to_csv('s3://millionsongdataset/distances/distances.csv')
###Output
_____no_output_____ |
Big-Data-Clusters/CU6/Public/content/log-files/tsg091-get-azdata-logs.ipynb | ###Markdown
TSG091 - Get the azdata CLI logs================================Steps----- Get the azdata logs from the local machine Gets the contents of the most recent log. There may be old logs in azdata.log.1, azdata.log.2, etc.
###Code
import os
from pathlib import Path
home = str(Path.home())
with open(os.path.join(home, ".azdata", "logs", "azdata.log"), "r") as file:
line = file.readline()
while line:
print(line.replace("\n", ""))
line = file.readline()
print('Notebook execution complete.')
###Output
_____no_output_____ |
notebooks/1 - IPython Notebook Examples/IPython Project Examples/IPython Kernel/Index.ipynb | ###Markdown
Back to the main [Index](../Index.ipynb) IPython Kernel IPython provides extensions to the Python programming language that make working interactively convenient and efficient. These extensions are implemented in the IPython Kernel and are available in all of the IPython Frontends (Notebook, Terminal, Console and Qt Console) when running this kernel. Tutorials * [Rich Output](Rich Output.ipynb)* [Custom Display Logic](Custom Display Logic.ipynb)* [Plotting in the Notebook](Plotting in the Notebook.ipynb) Examples * [Trapezoid Rule](Trapezoid Rule.ipynb)* [SymPy](SymPy.ipynb)* [Raw Input in the Notebook](Raw Input in the Notebook.ipynb) Non-notebook examples This directory also contains examples that are regular Python (`.py`) files.
###Code
%run ../utils/list_pyfiles.ipy
###Output
_____no_output_____
###Markdown
There are also a set of examples that show how to integrate IPython with different GUI event loops:
###Code
%run ../utils/list_subdirs.ipy
###Output
_____no_output_____ |
divisive_7x7_surround_net/analysis.ipynb | ###Markdown
Load data
###Code
results_df = pd.read_csv("results.csv")
# Save a simplified version of the csv file, sorted by validation set performance
df_plain = helpers.simplify_df(results_df)
df_plain.to_csv("results_plain.csv")
data_dict = Dataset.get_clean_data()
data = MonkeySubDataset(data_dict, seed=1000, train_frac=0.8, subsample=args.stim_subsample, crop=args.crop)
###Output
_____no_output_____
###Markdown
Get and save FEV performance on test set Use the 10 best models for analysis. As this operation requires model loading, we do it only if it was not done before.
###Code
try:
df_best = pd.read_csv(args.fname_best_csv)
logging.info("loaded data from " + args.fname_best_csv)
except FileNotFoundError:
df_best = df_plain[0 : args.num_best].copy()
fev_lst = []
for i in range(args.num_best):
run_no = df_best.iloc[i]["run_no"]
logging.info("load run no " + str(run_no))
model = helpers.load_dn_model(run_no, results_df, data, args.train_logs_path)
fev = model.evaluate_fev_testset()
fev_lst.append(fev)
feve = model.evaluate_fev_testset_per_neuron()
helpers.pkl_dump(feve, run_no, "feve.pkl", args.weights_path)
with model.session.as_default():
u = model.u.eval()
helpers.pkl_dump(u, run_no, "u.pkl", args.weights_path)
df_best["fev"] = fev_lst
df_best.to_csv(args.fname_best_csv)
fev = df_best.fev.values * 100
print("Mean FEV", fev.mean())
print("SEM", stats.sem(fev, ddof=1))
print("max FEV", fev.max())
print("FEV of model with max correlation on validation set", fev[0])
###Output
_____no_output_____ |
Car & Pedestrian Detection.ipynb | ###Markdown
Pedestrian Detection
###Code
import cv2  # OpenCV is required for the cascade classifiers and video handling below

body_classifier = cv2.CascadeClassifier('./Haarcascades/haarcascade_fullbody.xml')
cap = cv2.VideoCapture('./Images/Walking.avi')
while cap.isOpened() :
    ret, frame = cap.read()
    if not ret:  # stop when the video ends or a frame cannot be read
        break
frame = cv2.resize(frame, None,fx = 0.5, fy = 0.5, interpolation = cv2.INTER_LINEAR)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
bodies = body_classifier.detectMultiScale(gray, 1.2, 3)
for (x,y,w,h) in bodies :
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
cv2.imshow('Pedestrian Tracking',frame)
if cv2.waitKey(1) == 13 :
break
cap.release()
cv2.destroyAllWindows()
###Output
_____no_output_____
###Markdown
Car Detection
###Code
import time  # used to slow down playback below

car_classifier = cv2.CascadeClassifier('./Haarcascades/haarcascade_car.xml')
cap = cv2.VideoCapture('./Images/Cars.avi')
while cap.isOpened():
time.sleep(.05)
    ret, frame = cap.read()
    if not ret:  # stop when the video ends or a frame cannot be read
        break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cars = car_classifier.detectMultiScale(gray, 1.4, 2)
for (x,y,w,h) in cars :
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
cv2.imshow('Car Detection', frame)
if cv2.waitKey(1) == 13 :
break
cap.release()
cv2.destroyAllWindows()
###Output
_____no_output_____ |
notebooks/tfidf-heatmap.ipynb | ###Markdown
HeatmapHeatmap of terms over time for Euroleaks and Communiques.
###Code
import re
import time
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
import spacy
###Output
_____no_output_____
###Markdown
stopwords and collocations
###Code
# stopwords
with open('../data/euroleaks/stopwords.json', 'r') as f:
stopwords = json.load(f)
# collocations
def apply_trigram_colloc(s, set_colloc):
res = s.lower()
for b1,b2,b3 in set_colloc:
res = res.replace(f'{b1} {b2} {b3}', f'{b1}_{b2}_{b3}')
return res
def apply_bigram_colloc(s, set_colloc):
res = s.lower()
for b1,b2 in set_colloc:
res = res.replace(f'{b1} {b2}', f'{b1}_{b2}')
return res
with open('../data/collocations/trigrams.json', 'r') as f:
trigram_colloc = json.load(f)
with open('../data/collocations/bigrams.json', 'r') as f:
bigram_colloc = json.load(f)
stopwords.keys()
nlp = spacy.load("en_core_web_sm", exclude=["ner"])
###Output
_____no_output_____
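###Markdown
As a quick illustration of what the collocation helpers do, here is a toy call with a made-up sentence and an explicit bigram list (independent of the JSON files loaded above):
###Code
# The bigram ('minimum', 'wage') is joined into a single token
print(apply_bigram_colloc("They discussed the minimum wage at length", [("minimum", "wage")]))
# -> they discussed the minimum_wage at length
###Output
_____no_output_____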
###Markdown
helper functions
###Code
def filter_token(token):
return token.pos_ in {'ADJ', 'ADV', 'NOUN', 'PROPN', 'VERB'}\
and not token.lemma_.lower() in nlp.Defaults.stop_words\
and not token.lower_ in stopwords['names']\
and not token.lower_ in stopwords['disfluency']\
and not token.lemma_.lower() in stopwords['courtesy']\
and len(token.lemma_) > 1
def find_most_prominent_k_words(k, X, vectorizer):
print(f'matrix shape: {X.shape}\n')
tfidf = X.sum(axis =0).A1
sort_ix = np.argsort(tfidf)[::-1]
most_prominent_words = np.array(vectorizer.get_feature_names())[sort_ix][:k]
max_length = pd.Series(most_prominent_words).apply(lambda s: len(s)).max()
for word, score in zip(most_prominent_words, tfidf[sort_ix][:k]):
print(f'{word}{" "*(max_length-len(word))}\t|\t{round(score,3)}')
return most_prominent_words
def find_most_prominent_words_cutoff(cut_off, X, vectorizer, verbose=False):
print(f'matrix shape: {X.shape}\n')
tfidf = X.sum(axis =0).A1
sort_ix = np.argsort(tfidf)[::-1]
mask = tfidf[sort_ix] > cut_off
most_prominent_words = np.array(vectorizer.get_feature_names())[sort_ix][mask]
print(f'number of prominent words: {len(most_prominent_words)}\n')
print(f'ratio of prominent words (relative to vocabulary size): {round(float(len(most_prominent_words)) / len(vectorizer.get_feature_names()) * 100,2)} %\n')
if verbose:
max_length = pd.Series(most_prominent_words).apply(lambda s: len(s)).max()
for word, score in zip(most_prominent_words, tfidf[sort_ix][mask]):
print(f'{word}{" "*(max_length-len(word))}\t|\t{round(score,3)}')
return most_prominent_words
###Output
_____no_output_____
###Markdown
Communiques
###Code
communiques = pd.read_csv('../data/communiques/cleaned.csv')
# group by date
communiques_groupby_date = communiques.drop(columns=['title']).groupby('date').apply(lambda s: ' '.join(s.story))
# preprocess
communique_documents = [
' '.join([token.lemma_ .lower() for sentence in nlp(doc).sents for token in sentence
if filter_token(token)
])
for doc in communiques_groupby_date.values
]
# leave out empty documents
communique_documents = [d for d in communique_documents if len(d)>1]
# apply collocations
communique_documents = [
apply_bigram_colloc(apply_trigram_colloc(doc, trigram_colloc), bigram_colloc)
for doc in communique_documents]
# get labels
communique_dates = communiques_groupby_date.index.to_series().apply(lambda s: pd.to_datetime(s).strftime('%d/%m'))
# tfidf
communique_vectorizer = TfidfVectorizer(analyzer='word',
                                        min_df=1, # word has to be in at least one document
                                        max_df=0.95, # word has to be in less than 95% of documents
smooth_idf=True, # Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions.
sublinear_tf=False) # replace tf with 1 + log(tf).
communique_X = communique_vectorizer.fit_transform(communique_documents)
communique_tfidf = communique_X.sum(axis =0).A1
plt.scatter(range(len(communique_tfidf)), np.sort(communique_tfidf)[::-1], marker='+')
communique_cutoff = 0.2
communique_most_prominent_words = find_most_prominent_words_cutoff(communique_cutoff, communique_X, communique_vectorizer)
_ = find_most_prominent_k_words(30, communique_X, communique_vectorizer)
print(f'Terms that are present in more than {int(len(communique_documents) * 0.95)} documents are disregarded.')
print(f'Communiques vocabulary size: {len(communique_vectorizer.get_feature_names())}')
print(f'Number of documents: {len(communique_documents)}')
###Output
Communiques vocabulary size: 890
Number of documents: 13
###Markdown
Euroleaks
###Code
leaks = pd.read_csv('../data/euroleaks/squeezed.csv')
# group by date
leaks_groupby_date = leaks.drop(columns=['speaker']).groupby('date').apply(lambda s: ' '.join(s.speech))
# preprocess
leaks_documents = [
' '.join([token.lemma_.lower() for sentence in nlp(doc).sents for token in sentence
if filter_token(token)
])
for doc in leaks_groupby_date.values
]
# leave out empty documents
leaks_documents = [d for d in leaks_documents if len(d)>1]
# apply collocations
leaks_documents = [
apply_bigram_colloc(apply_trigram_colloc(doc, trigram_colloc), bigram_colloc)
for doc in leaks_documents]
# get labels
leaks_dates = leaks_groupby_date.index.to_series().apply(lambda s: pd.to_datetime(s).strftime('%d/%m'))
# tfidf
leaks_vectorizer = TfidfVectorizer(analyzer='word',
                                  min_df=1, # word has to be in at least one document
                                  max_df=0.95, # word has to be in less than 95% of documents
smooth_idf=True, # Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions.
sublinear_tf=False) # replace tf with 1 + log(tf).
leaks_X = leaks_vectorizer.fit_transform(leaks_documents)
leaks_tfidf = leaks_X.sum(axis =0).A1
plt.scatter(range(len(leaks_tfidf))[5:], np.sort(leaks_tfidf)[::-1][5:], marker='+')
leaks_cutoff = 0.15
leaks_most_prominent_words = find_most_prominent_words_cutoff(leaks_cutoff, leaks_X, leaks_vectorizer)
_ = find_most_prominent_k_words(30, leaks_X, leaks_vectorizer)
print(f'Terms that are present in more than {int(len(leaks_documents) * 0.95)} documents are disregarded.')
print(f'Communiques vocabulary size: {len(leaks_vectorizer.get_feature_names())}')
print(f'Number of documents: {len(leaks_documents)}')
###Output
Communiques vocabulary size: 3336
Number of documents: 12
###Markdown
make figure for cutoffs
###Code
fig, axes = plt.subplots(1, 2, sharey=True, figsize=(10,4))
axes[0].scatter(range(len(communique_tfidf)), np.sort(communique_tfidf)[::-1], marker='+')
axes[0].axhline(communique_cutoff, 0,len(communique_tfidf), color='black', linewidth=1, linestyle='--')
axes[0].set_title('Communiques')
axes[0].set_yticks([0, .5, 1, 1.5])
axes[0].text(int(len(communique_tfidf)*.7), communique_cutoff*1.2, f'cut off = {communique_cutoff}')
axes[1].scatter(range(len(leaks_tfidf)), np.sort(leaks_tfidf)[::-1], marker='+')
axes[1].axhline(leaks_cutoff, 0,len(leaks_tfidf), color='black', linewidth=1, linestyle='--')
axes[1].set_title('Euroleaks')
axes[1].text(int(len(leaks_tfidf)*.7), leaks_cutoff*1.2, f'cut off = {leaks_cutoff}')
#fig.tight_layout()
fig.savefig('../figures/tfidf-cutoff.png')
###Output
_____no_output_____
###Markdown
find differences
###Code
# prominent words in communiques which are not prominent in euroleaks
comm_but_not_leaks = []
for word in communique_most_prominent_words:
if word not in leaks_most_prominent_words:
comm_but_not_leaks.append(word)
print(f'Number of words: {len(comm_but_not_leaks)}\n')
for word in comm_but_not_leaks:
print(word)
# prominent words in communiques which are not prominent in euroleaks
leaks_but_not_comms = []
for word in leaks_most_prominent_words:
if word not in communique_most_prominent_words:
leaks_but_not_comms.append(word)
print(f'Number of words: {len(leaks_but_not_comms)}\n')
for word in leaks_but_not_comms:
print(word)
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
assert len(intersection(comm_but_not_leaks, leaks_but_not_comms)) == 0
###Output
_____no_output_____
###Markdown
words of interest (collect manually)
###Code
# manually collected words of interest, which highlight differences
highlight_differences = set([
# comms but not leaks
    #'institution', # prominent in both (vectorizer filters it out for leaks because present in every document)
#'ireland',
#'portugal',
#'cyprus',
'structural_reform',
#'national_procedure',
#'spain',
'recovery',
# leaks but not comms
'letter',
'liquidity',
'pension',
'parliament',
'debt',
'tax',
'second_letter',
'bank',
'mou',
'aide_memoire',
'market',
#'esm', # European stability mechanism
'brussels_group',
#'interest_rate',
#'prime_minister',
'mandate',
#'ecb', # European Central Bank
'bill',
'gdp',
'vat', # value-added tax
'privatization',
'sla', # staff level agreement
#'low_interest_rate',
'law',
'primary_surplus',
'labor_market',
'recession',
'budget',
# interesting collocations
'minimum_wage',
'labor_market',
'collective_bargaining',
'structural_reform',
'second_letter',
#'non_performing_loan',
#'capital_control',
'primary_surplus',
'aide_memoire',
'brussels_group',
#'quantitative_easing',
'smp_bond',
# prominent when tfidf trained on joint communiques + euroleaks
'reform',
'extension',
'referendum',
'list',
'agreement',
# domain knowledge (Varoufakis claims they refused to talk debt restructuring)
'restructuring'
])
# convert to list so that ordering is fixed
highlight_differences = list(highlight_differences)
# sort the list
highlight_differences.sort()
print(len(highlight_differences))
###Output
34
###Markdown
words that are completely absent from one vocabulary
###Code
from pprint import pprint
# words in communiques which are not prominent and are absent from euroleaks
my_list = [word for word in communique_vectorizer.get_feature_names() if word not in communique_most_prominent_words and word not in leaks_vectorizer.get_feature_names()]
print(len(my_list),'\n')
#pprint(my_list)
# words in euroleaks which are not prominent and are absent from communiques
my_list = [word for word in leaks_vectorizer.get_feature_names() if word not in leaks_most_prominent_words and word not in communique_vectorizer.get_feature_names()]
print(len(my_list),'\n')
#pprint(my_list)
###Output
2500
###Markdown
inspect collocations
###Code
for word in set(communique_vectorizer.get_feature_names() + leaks_vectorizer.get_feature_names()):
if '_' in word:
print(word)
###Output
track_record
euro_working_group
half_percent
euro_area
central_banking
press_conference
successful_conclusion_review
arm_length
banking_union
financial_stability
safety_net
greek_government
mission_chief
debt_sustainability
collective_bargaining
prior_action
central_bank
govern_council
20th_february
smp_bond
technical_team
european_semester
greek_authority
duration_mffa
aide_memoire
growth_friendly
central_banks
prior_actions
united_states
debt_sustainability_analysis
product_market
common_ground
quantitative_easing
minimum_wage
international_monetary_fund
monetary_union
member_states
capital_control
sign_dotted_line
state_play
real_estate
member_state
labor_market
second_letter
interest_rate
anti_corruption
national_procedure
greek_governmental
conclusion_review
brussels_group
prime_minister
et_cetera
structural_reform
master_financial_assistance
successful_conclusion
greek_people
maximum_flexibility
central_banker
non_performing_loan
low_interest_rate
primary_surplus
uncharted_territory
###Markdown
most prominent words (trained together)
###Code
documents = leaks_documents + communique_documents
dates = pd.concat((leaks_dates, communique_dates)).values
tfidf_vectorizer = TfidfVectorizer(analyzer='word',
                                  min_df=1, # word has to be in at least one document
                                  max_df=0.95, # word has to be in less than 95% of documents
smooth_idf=True, # Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions.
sublinear_tf=False) # replace tf with 1 + log(tf).
X = tfidf_vectorizer.fit_transform(documents)
_ = find_most_prominent_k_words(30, X, tfidf_vectorizer)
# tfidf scores of words of interest, when tfidf is trained on joint text
from sklearn.feature_extraction.text import CountVectorizer
count_vectorizer = CountVectorizer(max_df=0.95)
counts = count_vectorizer.fit_transform(leaks_documents + communique_documents)
tfidf = X.sum(axis =0).A1
print()
print(f'word{" "*(20-4)}\t|\ttfidf\t|\tcount\t| relevant in\t| relevant in')
print(f'{" "*20}\t|\t\t|\t\t| communiques\t| euroleaks')
print('-'*90)
for word in highlight_differences:
index = tfidf_vectorizer.vocabulary_[word]
print(f'{word}{" "*(20-len(word))}\t|\t{round(tfidf[index],3)}\t|\t{counts.sum(axis=0).A1[count_vectorizer.vocabulary_[word]]}\t|\t{word in communique_most_prominent_words}\t|\t{word in leaks_most_prominent_words}')
###Output
word | tfidf | count | relevant in | relevant in
| | | communiques | euroleaks
------------------------------------------------------------------------------------------
agreement | 2.257 | 299 | True | True
aide_memoire | 0.344 | 31 | False | True
bank | 0.514 | 63 | False | True
bill | 0.241 | 13 | False | True
brussels_group | 0.315 | 30 | False | True
budget | 0.143 | 15 | False | True
collective_bargaining | 0.119 | 12 | False | False
debt | 0.559 | 72 | False | True
extension | 0.886 | 65 | True | True
gdp | 0.262 | 32 | False | True
labor_market | 0.173 | 21 | False | True
law | 0.183 | 14 | False | True
letter | 0.975 | 70 | False | True
liquidity | 0.683 | 69 | False | True
list | 1.206 | 126 | True | True
mandate | 0.338 | 37 | False | True
market | 0.485 | 59 | False | True
minimum_wage | 0.066 | 7 | False | False
mou | 0.449 | 49 | False | True
parliament | 0.595 | 86 | False | True
pension | 0.762 | 106 | False | True
primary_surplus | 0.22 | 24 | False | True
privatization | 0.243 | 30 | False | True
recession | 0.153 | 15 | False | True
recovery | 0.557 | 15 | True | False
referendum | 1.379 | 80 | True | True
reform | 1.578 | 181 | True | True
restructuring | 0.108 | 8 | False | False
second_letter | 0.449 | 21 | False | True
sla | 0.22 | 16 | False | True
smp_bond | 0.078 | 8 | False | False
structural_reform | 0.357 | 26 | True | True
tax | 0.529 | 71 | False | True
vat | 0.261 | 29 | False | True
###Markdown
FigureGrid: https://stackoverflow.com/questions/38973868/adjusting-gridlines-and-ticks-in-matplotlib-imshow
###Code
leaks_matshow = np.zeros((len(highlight_differences), len(leaks_dates)))
for i,word in enumerate(highlight_differences):
if word in leaks_vectorizer.get_feature_names():
leaks_matshow[i] = leaks_X.toarray()[:,leaks_vectorizer.get_feature_names().index(word)]
comm_matshow = np.zeros((len(highlight_differences), len(communique_dates)))
for i,word in enumerate(list(highlight_differences)):
if word in communique_vectorizer.get_feature_names():
comm_matshow[i,:] = communique_X.toarray()[:,communique_vectorizer.get_feature_names().index(word)]
keywords = highlight_differences
fig, axes = plt.subplots(1,2,figsize=(10,8),sharey=True)
# leaks
axes[0].matshow(np.log(leaks_matshow), cmap='Blues')
axes[0].set_xticks(np.arange(len(leaks_dates)))
_ = axes[0].set_xticklabels(leaks_dates, rotation=45)
# grids
axes[0].set_xticks(np.arange(-.5, len(leaks_dates), 1), minor=True)
axes[0].set_yticks(np.arange(len(keywords)))
axes[0].set_yticks(np.arange(-.53, len(keywords), 1), minor=True)
axes[0].grid(which='minor', color='black', linestyle='-', linewidth=.5, alpha=.4)
# communiques
axes[1].matshow(np.log(comm_matshow), cmap='Blues')
axes[1].set_xticks(range(len(communique_dates)))
_ = axes[1].set_xticklabels(communique_dates, rotation=45)
# grids
axes[1].set_xticks(np.arange(-.53, len(leaks_dates), 1), minor=True)
axes[1].set_yticks(np.arange(len(keywords)))
axes[1].set_yticks(np.arange(-.53, len(keywords), 1), minor=True)
axes[1].grid(which='minor', color='black', linestyle='-', linewidth=.5, alpha=.4)
# shared y axis
axes[0].set_yticks(range(len(keywords)))
_ = axes[0].set_yticklabels(keywords)
#axes[1].set_yticks(range(len(keywords)))
#_ = axes[1].set_yticklabels(keywords)
axes[0].set_title('Euroleaks')
axes[1].set_title('Communiques')
fig.tight_layout()
fig.savefig('../figures/term_heatmap.png')
###Output
<ipython-input-73-585dff3abe2a>:6: RuntimeWarning: divide by zero encountered in log
axes[0].matshow(np.log(leaks_matshow), cmap='Blues')
<ipython-input-73-585dff3abe2a>:16: RuntimeWarning: divide by zero encountered in log
axes[1].matshow(np.log(comm_matshow), cmap='Blues')
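###Markdown
The divide-by-zero warnings above come from taking `np.log` of exact zeros (terms that do not appear on a given date); one way to avoid them (a sketch) is to mask the zeros before taking the log and plot the masked arrays instead:
###Code
# Masked log: zero entries stay masked instead of becoming -inf, which silences the warning
leaks_log = np.ma.log(np.ma.masked_equal(leaks_matshow, 0))
comm_log = np.ma.log(np.ma.masked_equal(comm_matshow, 0))
# axes[0].matshow(leaks_log, cmap='Blues'); axes[1].matshow(comm_log, cmap='Blues')
###Output
_____no_output_____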
###Markdown
**TODO** investigate terms with text search and read into them:- bank- bill- listNote that I have taken the log of the matrix so that high scores don't dominate the color spectrum. Auxiliary text search
###Code
# count total number of occurences for each corpus
def count_occurences(word, corpus):
from sklearn.feature_extraction.text import CountVectorizer
count_vectorizer = CountVectorizer()
counts = count_vectorizer.fit_transform(corpus)
target_count = 0 if word not in count_vectorizer.get_feature_names() else counts.sum(axis=0).A1[count_vectorizer.vocabulary_[word]]
print(f'Count for word "{word}" is: {target_count}')
return target_count
count_occurences('pension', communique_documents)
count_occurences('pension', leaks_documents)
# leaks
def search_term(term):
for i,row in leaks.iterrows():
if term in row.speech.lower():
date = pd.to_datetime(row.date).strftime('%d/%m')
print(f'{row.speaker} ({date}):')
print(row.speech)
print()
#search_term('pension')
# communiques
def search_term(term):
for i,row in communiques.iterrows():
if term in row.story.lower():
print(f'{row.title} ({row.date}):')
print(row.story)
print()
#search_term('pension')
###Output
_____no_output_____ |
Notebooks/Frequency Domain Methods/Polar (Nyquist) Plots.ipynb | ###Markdown
Polar (Nyquist) PlotsDocument by: Matt Capuano I write this document as I review my Polar plots, primarily through this resource:- System Dynamics (Ogata) | chapter 7.3When you're trying to understand how these Polar plots are obtained, try to convert the transfer function to the complex form $a + jb$. The polar plot is the plot of the frequency response of the transfer function in the complex plane (real part vs. imaginary part) as $\omega$ goes from 0 to $\infty$. So you can take your equation for the TF $G(j\omega)=a+jb$ and think about what happens at $\omega \rightarrow 0$, $\omega \rightarrow \infty$, and maybe some key points in between. The points on the polar plot are just the set of $(a, b)$ as $\omega$ sweeps from $0$ to $\infty$.
###Code
# Imports
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import control.matlab as cmatlab
from scipy import signal
# Plotting settings (to format plots nicely)
plt.rcParams['figure.figsize'] = [8, 8]
plt.rcParams.update({'font.size': 18})
# Define s as being the transfer function Laplace variable
s = cmatlab.tf('s')
###Output
_____no_output_____
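###Markdown
To connect the plots below with the $a + jb$ reading described above, the frequency response can also be evaluated directly at a few frequencies (a quick NumPy sketch; $G(j\omega) = 1/(1+j\omega)$ is just an example transfer function):
###Code
# Evaluate G(jw) = 1/(1 + jw) and print its real (a) and imaginary (b) parts
for w in [0.01, 0.1, 1.0, 10.0, 100.0]:
    G = 1.0 / (1.0 + 1j * w)
    print(f"w = {w:7.2f} -> a = {G.real:7.4f}, b = {G.imag:7.4f}")
###Output
_____no_output_____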
###Markdown
Integral and Derivative Factors $(j\omega)^{\mp1}$Consider this:$$G(j\omega) = \frac{1}{j\omega} = -j\frac{1}{\omega}$$There's no real component here... **so I'm not sure why the plot looks as it does below, it should only be an up arrow along the imaginary axis...?** (One likely explanation: `cmatlab.nyquist` also draws the mirror-image branch for negative frequencies, so both halves of the imaginary axis show up.)
###Code
G1 = 1 / s
G2 = s
real, imag, freq = cmatlab.nyquist(G1, label='G1')
# real, imag, freq = cmatlab.nyquist(G2)
plt.legend()
###Output
_____no_output_____
###Markdown
**Note:** What happens if you multiply by a constant $K$?
###Code
G = 1 / s
for K in range(1, 5):
G1 = K * G;
real, imag, freq = cmatlab.nyquist(G1, label="K = " + str(K))
plt.legend()
###Output
_____no_output_____
###Markdown
Powers of Integral and Derivative Factors $(j\omega)^{\mp n}$**For odd powers:** You're flipping the direction of the arrow from directly up to directly down every increasing odd power; mathematically this is because $\frac{1}{(j\omega)^n} = \frac{(-j)^n}{\omega^n}$, and for odd $n$ the factor $(-j)^n$ alternates between $-j$ ($n = 1, 5, 9, \dots$) and $+j$ ($n = 3, 7, 11, \dots$).**For even powers (I HAVEN'T DOUBLE-CHECKED THIS YET):**The imaginary term goes away. E.g.$$G(j\omega) = \frac{1}{(j\omega)^2} = -\frac{1}{\omega^2}$$So as $\omega \rightarrow 0$, $G(j\omega) \rightarrow -\infty$, and as $\omega \rightarrow \infty$, $G(j\omega) \rightarrow 0^-$.Every other even power, the -1 at the front becomes positive, so this is reflected across the imaginary axis.
###Code
for n in range(1, 6, 2):
G = 1 / (s**n)
real, imag, freq = cmatlab.nyquist(G, label="n = " + str(n))
plt.legend()
for n in range(2, 7, 2):
G = 1 / (s**n)
real, imag, freq = cmatlab.nyquist(G, label="n = " + str(n))
plt.legend()
###Output
_____no_output_____
###Markdown
First-Order Factors $(1+j\omega T)^{\mp1}$As $\omega$ increases from 0 to infinity, the polar plot of $1/(1+j\omega T)$ traces a semicircle from $1 + j0$ (at $\omega = 0$) to the origin (as $\omega \rightarrow \infty$).
###Code
T = 1 # Defining the corner frequency as 1/T
G1 = 1 / (1 + T*s)
G2 = 1 + T*s
real, imag, freq = cmatlab.nyquist(G1, label='G1')
#real, imag, freq = cmatlab.nyquist(G2, label='G2')
plt.legend()
###Output
_____no_output_____
###Markdown
Changing the Corner frequency- From the corner frequency of $1/T = 1/1$ to $1/10$ everything is the same- From the corner frequencies of $1/T = 1/10$ and below, everything shifts
###Code
for T in range(1, 10, 1):
G = 1 / (1 + T*s)
real, imag, freq = cmatlab.nyquist(G, label="T = " + str(T))
plt.legend()
for T in range(10, 16, 1):
G = 1 / (1 + T*s)
real, imag, freq = cmatlab.nyquist(G, label="T = " + str(T))
plt.legend()
###Output
_____no_output_____
###Markdown
Multiplying by a constant $K$
###Code
T = 5
G = 1 / (1 + T*s)
for K in range(1, 6):
G1 = K * G;
real, imag, freq = cmatlab.nyquist(G1, label="K = " + str(K))
plt.legend()
###Output
_____no_output_____
###Markdown
Powers of First-Order Factors $(1+j\omega T)^{\mp n}$
###Code
for n in range(1, 6, 1):
G = (1 / (1 + T*s))**n
real, imag, freq = cmatlab.nyquist(G, label="n = " + str(n))
plt.legend()
###Output
_____no_output_____
###Markdown
Second-Order Factors $\left[1 + 2\zeta \left(\frac{j\omega}{\omega_n}\right) + \left(\frac{j\omega}{\omega_n}\right)^2 \right]^{\mp1}$
###Code
z = 1
omega_n = 1
G1 = 1 / (1 + (2*z*s/omega_n) + (s/omega_n)**2)
#G2 = (1 + (2*z*s/omega_n) + (s/omega_n)**2)
real, imag, freq = cmatlab.nyquist(G1, label='G1')
#real, imag, freq = cmatlab.nyquist(G2, label='G2')
plt.legend()
###Output
_____no_output_____
###Markdown
Varying the Damping Coefficient $\zeta$
###Code
omega_n = 1
for zeta in range(1, 9, 1):
G = 1 / (1 + (2*zeta*s/omega_n) + (s/omega_n)**2)
real, imag, freq = cmatlab.nyquist(G, label='$\zeta$ = ' + str(zeta))
plt.legend()
###Output
_____no_output_____
###Markdown
Varying the Natural Frequency $\omega_n$
###Code
zeta = 1
for omega_n in range(1, 41, 10):
G = 1 / (1 + (2*zeta*s/omega_n) + (s/omega_n)**2)
real, imag, freq = cmatlab.nyquist(G, label='$\omega_n$ = ' + str(omega_n))
plt.legend()
###Output
_____no_output_____
###Markdown
Powers of Second-Order Factors $\left[1 + 2\zeta \left(\frac{j\omega}{\omega_n}\right) + \left(\frac{j\omega}{\omega_n}\right)^2 \right]^{\mp n}$
###Code
z = 1
omega_n = 1
for n in range(1, 6, 1):
G = (1 / (1 + (2*z*s/omega_n) + (s/omega_n)**2))**n
real, imag, freq = cmatlab.nyquist(G, label='n = ' + str(n))
plt.legend()
###Output
_____no_output_____ |
pycalphad/05 Local Equilibrium.ipynb | ###Markdown
T0 ComputationThe T0 (or T-zero) temperature is a thermodynamic quantity defined by an equality of the molar Gibbs energies of two different phases. T0 is relevant to the design of heat treatments for some alloys because, below the T0 temperature, so-called 'massive' transformation kinetics become active and may dominate the observed evolution of the microstructure. Massive transformations are diffusionless and can occur very rapidly, with no transformation barrier.
###Code
comps = ['FE', 'MN', 'VA']
state_variables = {v.N: 1, v.P: 1e5, v.T: 300}
fcc_composition_sets = [
composition_set(dbf, comps, 'FCC_A1',
{**state_variables},
phase_amt=1,
),
]
bcc_composition_sets = [
composition_set(dbf, comps, 'BCC_A2',
{**state_variables},
phase_amt=1,
),
]
x = []
y = defaultdict(lambda: [])
for temperature in np.arange(300., 2000.):
state_variables[v.T] = temperature
for compset in fcc_composition_sets:
compset.dof[2] = temperature
compset.update(compset.dof[2:], compset.NP, compset.dof[:2])
for compset in bcc_composition_sets:
compset.dof[2] = temperature
compset.update(compset.dof[2:], compset.NP, compset.dof[:2])
result_fcc, composition_sets = local_equilibrium(fcc_composition_sets, comps, {**state_variables, v.X('MN'): 0.1})
result_bcc, composition_sets = local_equilibrium(bcc_composition_sets, comps, {**state_variables, v.X('MN'): 0.1})
if not result_fcc.converged:
print(temperature)
raise ValueError('Convergence failure')
if not result_bcc.converged:
print(temperature)
raise ValueError('Convergence failure')
x.append(result_fcc.x[2])
for compset in fcc_composition_sets:
y[compset.phase_record.phase_name].append(float(compset.energy))
for compset in bcc_composition_sets:
y[compset.phase_record.phase_name].append(float(compset.energy))
%matplotlib inline
import matplotlib.pyplot as plt
from pycalphad.plot.utils import phase_legend
stable_phases = ['FCC_A1', 'BCC_A2']
energy_diff = np.array(y['FCC_A1']) - np.array(y['BCC_A2'])
sign_change_idx = np.where(np.diff(np.sign(energy_diff)) != 0)[0] + 1
sign_change_temps = np.array(x)[sign_change_idx]
plt.plot(x, energy_diff)
plt.scatter(sign_change_temps, np.zeros_like(sign_change_temps), c='r', zorder=3)
plt.ylabel('Gibbs Energy Difference (J/mol-atom)')
plt.xlabel('Temperature (K)')
plt.xlim(300, 2000)
print('FCC->BCC T0 Temperature: ', sign_change_temps[0], 'K', f'({sign_change_temps[0]-273.15} deg C)')
###Output
_____no_output_____
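###Markdown
The T0 temperature above sits on the 1 K grid used in the loop; if a finer value were useful, one option (a sketch, assuming the energy difference varies smoothly between grid points) is to interpolate the zero crossing linearly:
###Code
# Linear interpolation of the zero crossing between the two grid points that bracket it
i = sign_change_idx[0]
t_lo, t_hi = x[i - 1], x[i]
d_lo, d_hi = energy_diff[i - 1], energy_diff[i]
t0_refined = t_lo - d_lo * (t_hi - t_lo) / (d_hi - d_lo)
print('Refined FCC->BCC T0 Temperature:', round(float(t0_refined), 2), 'K')
###Output
_____no_output_____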
###Markdown
Precipitation ReactionNote that the onset temperature of the corresponding precipitation reaction will always be greater than (or equal to) the T0 temperature. However, we may not always observe the precipitation reaction due to the slower, diffusion-controlled kinetics of the reaction. We can compute the onset of bcc stability by 'fixing' the bcc phase to be stable with zero amount.
###Code
comps = ['FE', 'MN', 'VA']
state_variables = {v.N: 1, v.P: 1e5}
composition_sets = [
composition_set(dbf, comps, 'BCC_A2',
{**state_variables, v.T: 300},
fixed=True, phase_amt=0,
),
composition_set(dbf, comps, 'FCC_A1',
{**state_variables, v.T: 300},
fixed=False, phase_amt=1,
),
]
result, composition_sets = local_equilibrium(composition_sets, comps, {**state_variables, v.X('MN'): 0.1})
print('Converged: ', result.converged)
print('Final Composition Sets: ', composition_sets)
print('FCC-BCC Transus Temperature', result.x[2], 'K', f'({int(result.x[2]-273.15)} deg C)')
###Output
_____no_output_____ |
10-Information-Based-Learning/HW10/HW07.ipynb | ###Markdown
Homework 7 Use this notebook to work on your answers and check solutions. You can then submit your functions using "HW7_submission.ipynb" or directly write your functions in a file named "hw7_answers.py".You will use the cereal dataset attached with the assignment. Question 1Write a function called "get_corrs" which takes one argument:* df, which is a pandas data frameand returns:* m, a correlation matrix for the numerical variables in df.Hint:* google _df corr_
###Code
#### play with code here #####
###Output
_____no_output_____
###Markdown
Sample output:```In [1]: get_corrs(cer[['name','calories','carbo','sugars']])Out[1]: calories carbo sugars calories 1.000000 0.250681 0.562340 carbo 0.250681 1.000000 -0.331665 sugars 0.562340 -0.331665 1.000000``` Question 2Write a function called "get_corr_pairs" which takes one argument:* df, which is a pandas data frameand returns:* corr_pairs, a dictionary where keys are names of columns of df corresponding to numerical features, and values are arrays of names of columns whose correlation coefficient with the key has magnitude 0.3 or greater. You can use your function from question 1 to get the correlation values.
###Code
#### play with code here #####
###Output
_____no_output_____
###Markdown
Sample output:```In [1]: get_corr_pairs(cer[['name','fat','sugars','rating']])Out[1]: {'fat': ['rating'], 'rating': ['fat', 'sugars'], 'sugars': ['rating']}```Short explanation: the correlation between 'fat' and 'rating' is -0.409, 'sugars' and 'rating' is -0.760; the remaining correlations have magnitude < 0.3. Question 3Write a function called "sample_cereal" which takes two arguments: * df, which is a pandas data frame* kind, which is a string that can take value 'up' or 'down'and returns:* a pandas data frame with balanced target class 'ratingID', using up sampling if kind='up' and downsampling if kind='down'.
###Code
#### play with code here #####
###Output
_____no_output_____
###Markdown
Sample output:```In [1]: sample_cereal(cer.ix[3:5,['name','mfr','type','calories','protein','ratingID']], 'up')Out[1]: name mfr type calories protein ratingID3 All-Bran with Extra Fiber K C 50 4 13 All-Bran with Extra Fiber K C 50 4 14 Almond Delight R C 110 2 05 Apple Cinnamon Cheerios G C 110 2 0```Short explanation: The input has only one positive sample and two negative samples; random sampling from a distribution of 1 can only return one possible result, so our up-sampling of the smaller class merely replicates the row for "All-Bran with Extra Fiber". Question 4Write a function called "find_H" which takes two arguments:* df, which is a pandas data frame* cname, which is the name of the target column (that should correspond to a categorical variable)and returns:* H, the entropy in the column cname (use logarithm base 2)
###Code
#### play with code here #####
###Output
_____no_output_____
###Markdown
Sample output:```In [1]: find_H(cer.iloc[:20], 'ratingID')Out[1]: 0.60984030471640038``` Question 5We talked about using gini score for split in class, but we will focus on using entropy in this practice. We want to know how much information can be gained by splitting on one specific feature. Formally, given response variable $Y$, dataset $D$ and feature $X$, information gain of $Y$ from $X$ is defined as $ IG(Y, X) = H(Y) - H(Y|X) $where $H(Y|X)$ is defined as$H(Y|X) = \sum_{l \in \text{levels}(X)} P(X{=}l) \cdot H(Y \mid X{=}l). $See [here](https://medium.com/@rishabhjain_22692/decision-trees-it-begins-here-93ff54ef134) for an illustration.Write a function called "info_gain" which takes four arguments:* df, which is a pandas data frame* cname, which is the name of the target column (that should correspond to a categorical variable)* csplit, which is the name of a numeric column in df (aka, feature)* threshold, which is a numeric value (aka, split cutoff)and returns:* info_gain, the information gain you get in column cname by splitting the dataset on the threshold value in column csplit.
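For reference (use the scratch cell below to experiment), here is one possible sketch of `find_H` and `info_gain` — not the official solution; it assumes `df[cname]` is categorical and `df[csplit]` is numeric:
###Code
import numpy as np

def find_H(df, cname):
    # Shannon entropy (base 2) of the categorical column cname
    p = df[cname].value_counts(normalize=True).values
    return float(-(p * np.log2(p)).sum())

def info_gain(df, cname, csplit, threshold):
    # IG(Y, X) = H(Y) - sum over the two sides of the split of P(side) * H(Y | side)
    left, right = df[df[csplit] <= threshold], df[df[csplit] > threshold]
    h_cond = (len(left) / len(df)) * find_H(left, cname) \
           + (len(right) / len(df)) * find_H(right, cname)
    return find_H(df, cname) - h_cond
###Output
_____no_output_____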
###Code
#### play with code here #####
###Output
_____no_output_____ |
pHAT/20181028b/.ipynb_checkpoints/20181028b-checkpoint.ipynb | ###Markdown
Mapping US signals from sounds recorded
###Code
import glob, os
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as ssignal

os.chdir("./")
for tmpfile in glob.glob("coca*.beacon"):
FILE = tmpfile
with open(FILE) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
A = []
t = []
for item in content[:-2]:
resp = item.split(" ")
        A.append( 256*(int(resp[0])//8) + int(resp[1]) )  # integer division, as in the original Python 2 code
t.append(float(resp[2]))
fig = plt.figure(figsize=(15, 10))
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1,0), colspan=2)
ax1.plot(ssignal.decimate(t,10)[1:],ssignal.decimate(A,10)[1:])
ax1.set_title("Signal of "+tmpfile)
    ax3.plot(t[0:len(A)//5000],A[0:len(A)//5000])
ax3.set_title("Detail of the signal of "+tmpfile)
F = np.abs(np.fft.fft(A))
    ax2.plot(F[100:len(A)//2-1])
ax2.set_title("FFT of "+tmpfile)
plt.tight_layout()
plt.savefig(tmpfile+"details.jpg")
plt.show()
f, axarr = plt.subplots(1,2,figsize=(15,5))
plt.title("Frequency for "+tmpfile)
FFT = []
    length = 1024//2
for k in range(4*480):
offset = (k+1)*length-1
B = np.fft.fft(A[offset:offset+length])
        SIG = np.abs(B[50:length//2-100])
SIG = np.log(SIG/np.average(SIG[0:10]))
FFT.append(SIG)
if not (k%50):
axarr[0].plot(SIG)
#print (offset+length,len(A))
axarr[1].imshow(FFT,aspect=0.08)
plt.savefig(tmpfile+"spectrum.jpg")
plt.show()
###Output
_____no_output_____ |
slcm-03.ipynb | ###Markdown
taudata AnalyticsSupervised Learning - Classification 03(C) Taufik Sutantohttps://taudata.blogspot.com/2022/04/slcm-03.html Ensemble and Imbalance learning
###Code
print("Detecting environment: ", end=' ')
try:
import google.colab
IN_COLAB = True
print("Running the code in Google Colab. Installing and downloading dependencies.\nPlease wait...")
!pip install --upgrade pandas
except:
IN_COLAB = False
print("Running the code locally.")
# Please visit https://github.com/taudataid/PINN-DCAI for further detail such as requirements.txt file.
# Loading Modules
import warnings; warnings.simplefilter('ignore')
import pickle, time, numpy as np, seaborn as sns
import pandas as pd, matplotlib.pyplot as plt
from sklearn import svm, preprocessing
from sklearn import tree, neighbors
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.model_selection import cross_val_score, RandomizedSearchCV, GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import VotingClassifier
from sklearn import model_selection
from collections import Counter
from tqdm import tqdm
sns.set(style="ticks", color_codes=True)
print(pd.__version__)
"Done"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
file = 'data/diabetes_data.csv'
try:
# Local jupyter notebook, assuming "file" is in the "data" directory
data = pd.read_csv(file, names=names)
except:
# it's a google colab... create folder data and then download the file from github
!mkdir data
!wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/{file}
data = pd.read_csv(file, names=names)
print(data.shape, set(data['class']))
data.sample(5)
# Split Train-Test
X = data.values[:,:8] # Slice data (note that the data structure here is a NumPy array)
Y = data.values[:,8]
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=99)
print(set(Y), x_train.shape, x_test.shape, sep=', ')
###Output
{0.0, 1.0}, (614, 8), (154, 8)
###Markdown
Ensemble Model What? Learning algorithms that construct a set of classifiers and then classify new data points by taking a (weighted) vote of their predictions. Why? Better prediction, more stable models How? Bagging & Boosting "meta-algorithms": Bagging & Boosting* Ensemble https://www.youtube.com/watch?v=Un9zObFjBH0 * Bagging https://www.youtube.com/watch?v=2Mg8QD0F1dQ * Boosting https://www.youtube.com/watch?v=GM3CDQfQ4sw Property of Boosting AdaBoost https://youtu.be/BoGNyWW9-mE?t=70
###Code
# Example of Voting (Bagging) in Python
# Note: Random Forest is a Bagging ensemble (although modified)
# Best practice: every model in the ensemble should use its optimal parameters
kNN = neighbors.KNeighborsClassifier(3)
kNN.fit(x_train, y_train)
Y_kNN = kNN.score(x_test, y_test)
DT = tree.DecisionTreeClassifier(random_state=1)
DT.fit(x_train, y_train)
Y_DT = DT.score(x_test, y_test)
model = VotingClassifier(estimators=[('k-NN', kNN), ('Decision Tree', DT)], voting='hard')
model.fit(x_train,y_train)
Y_Vot = model.score(x_test,y_test)
print('k-NN accuracy', Y_kNN)
print('Decision Tree accuracy', Y_DT)
print('Voting accuracy', Y_Vot)
# Averaging can also be used for classification (not only regression),
# but we use the predicted probability of each class
T = tree.DecisionTreeClassifier()
K = neighbors.KNeighborsClassifier()
R = LogisticRegression()
T.fit(x_train,y_train)
K.fit(x_train,y_train)
R.fit(x_train,y_train)
y_T=T.predict_proba(x_test)
y_K=K.predict_proba(x_test)
y_R=R.predict_proba(x_test)
Ave = (y_T+y_K+y_R)/3
print(Ave[:5]) # Print just first 5
prediction = [v.index(max(v)) for v in Ave.tolist()]
print(prediction[:5]) # Print just first 5
print('Averaging accuracy', accuracy_score(y_test, prediction))
# AdaBoost
num_trees = 100
kfold = model_selection.KFold(n_splits=10)
model = AdaBoostClassifier(n_estimators=num_trees, random_state=33)
results = model_selection.cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
###Output
0.7421565276828435
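###Markdown
Besides voting and boosting, plain bagging is also available directly in scikit-learn; a minimal sketch on the same data (the base estimator and the number of estimators are arbitrary choices here):
###Code
from sklearn.ensemble import BaggingClassifier

bagging = BaggingClassifier(tree.DecisionTreeClassifier(), n_estimators=100, random_state=33)
results = model_selection.cross_val_score(bagging, X, Y, cv=kfold)
print(results.mean())
###Output
_____no_output_____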
###Markdown
Imbalance Data* Metric Trap* Accuracy on certain classes matters more* Example cases of imbalanced learning* Undersampling, Oversampling, Model Based (weight adjustment)* https://www.kaggle.com/rafjaa/resampling-strategies-for-imbalanced-datasets* Comparison plots: https://imbalanced-learn.readthedocs.io/en/stable/auto_examples/combine/plot_comparison_combine.html#sphx-glr-auto-examples-combine-plot-comparison-combine-py
###Code
Counter(Y)
# fit the model and get the separating hyperplane (no class weighting)
svm_ = svm.SVC(kernel='linear')
svm_.fit(x_train, y_train)
y_SVMib = svm_.predict(x_test)
print(confusion_matrix(y_test, y_SVMib))
print(classification_report(y_test, y_SVMib))
# fit the model and get the separating hyperplane using weighted classes
# x_train, x_test, y_train, y_test
svm_balanced = svm.SVC(kernel='linear', class_weight={1: 3}) #WEIGHTED SVM
svm_balanced.fit(x_train, y_train)
y_SVMb = svm_balanced.predict(x_test)
print(confusion_matrix(y_test, y_SVMb))
print(classification_report(y_test, y_SVMb))
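###Markdown
Class weights are one option; the resampling strategies linked above can also be applied directly to the training split. A minimal over-sampling sketch, assuming the imbalanced-learn package is installed:
###Code
from imblearn.over_sampling import RandomOverSampler

ros = RandomOverSampler(random_state=99)
x_res, y_res = ros.fit_resample(x_train, y_train)
print('Class counts before:', Counter(y_train), ' after:', Counter(y_res))

svm_res = svm.SVC(kernel='linear')
svm_res.fit(x_res, y_res)
print(classification_report(y_test, svm_res.predict(x_test)))
###Output
_____no_output_____
###Code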
# Example of model-based imbalance treatment - SVM
from sklearn.datasets import make_blobs
n_samples_1, n_samples_2 = 1000, 100
centers = [[0.0, 0.0], [2.0, 2.0]]
clusters_std = [1.5, 0.5]
X, y = make_blobs(n_samples=[n_samples_1, n_samples_2],centers=centers,cluster_std=clusters_std,random_state=33, shuffle=False)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
# fit the model and get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10}) #WEIGHTED SVM
wclf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')# plot the samples
ax = plt.gca()# plot the decision functions for both classifiers
xlim = ax.get_xlim(); ylim = ax.get_ylim()
xx = np.linspace(xlim[0], xlim[1], 30)# create grid to evaluate model
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = clf.decision_function(xy).reshape(XX.shape)# get the separating hyperplane
a = ax.contour(XX, YY, Z, colors='k', levels=[0], alpha=0.5, linestyles=['-']) # plot decision boundary and margins
Z = wclf.decision_function(xy).reshape(XX.shape)# get the separating hyperplane for weighted classes
b = ax.contour(XX, YY, Z, colors='r', levels=[0], alpha=0.5, linestyles=['-'])# plot decision boundary and margins for weighted classes
plt.legend([a.collections[0], b.collections[0]], ["non weighted", "weighted"], loc="upper right")
plt.show()
###Output
_____no_output_____
###Markdown
Weighted Decision Tree
###Code
T = tree.DecisionTreeClassifier(random_state = 33)
T.fit(x_train,y_train)
y_DT = T.predict(x_test)
print('Accuracy (plain Decision Tree) = ', accuracy_score(y_test, y_DT))
print(classification_report(y_test, y_DT))
T = tree.DecisionTreeClassifier(class_weight = 'balanced', random_state = 33)
T.fit(x_train, y_train)
y_DT = T.predict(x_test)
print('Accuracy (Weighted Decision Tree) = ', accuracy_score(y_test, y_DT))
print(classification_report(y_test, y_DT))
###Output
Accuracy (plain Decision Tree) =  0.6883116883116883
precision recall f1-score support
0.0 0.79 0.73 0.76 105
1.0 0.51 0.59 0.55 49
accuracy 0.69 154
macro avg 0.65 0.66 0.65 154
weighted avg 0.70 0.69 0.69 154
Accuracy (Weighted Decision Tree) =  0.7207792207792207
precision recall f1-score support
0.0 0.83 0.74 0.78 105
1.0 0.55 0.67 0.61 49
accuracy 0.72 154
macro avg 0.69 0.71 0.69 154
weighted avg 0.74 0.72 0.73 154
###Markdown
Case Study (Exercise) ENB2012: Predicting Building Energy Use Task* Filter the EcoTest data and keep only the target-variable categories that appear at least 10 times (heat-cat)* Perform EDA (preprocessing and basic visualization)* Determine the best model (with optimal parameters and cross-validation)* Be careful: Naive Bayes, Decision Tree, and Random Forest do not require one-hot encoding.* Use the micro F1-score metric to determine the best model. Optional* Try comparing the best model above with an ensemble model.* If there is an imbalance problem, try to address it with over/under sampling.
###Code
file_ = "data/building-energy-efficiency-ENB2012_data.csv"
try: # Running locally, make sure "file_" is in the "data" folder
data = pd.read_csv(file_, error_bad_lines=False, low_memory = False, encoding='utf8')
except: # Running in Google Colab
!mkdir data
!wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/{file_}
data = pd.read_csv(file_, error_bad_lines=False, low_memory = False, encoding='utf8')
print(data.shape)
data.sample(5)
# Write your answers to the exercise starting in this cell
###Output
_____no_output_____ |
_notebooks/2021-11-11-loss.ipynb | ###Markdown
Loss function> A review of classification models' loss functions.- toc: true - badges: true- comments: true
###Code
!pip install -Uqq fastbook
import fastbook
fastbook.setup_book()
from fastbook import *
###Output
_____no_output_____
###Markdown
Key loss functions & metrics by fastai Regression: MSE for loss and metric (TODO: add all functions) Classification Models: Binary classification models: Loss and accuracy built by me:
###Code
def Binary_cls_loss(preds,y):
preds = sigmoid(preds)
return torch.where(y==1, y-preds, preds).mean()
def Binary_metric_accuracy(preds, y, threshhold=0.5):
pred_accuracy = (sigmoid(preds)>threshhold)
return (pred_accuracy==y).float().mean()
###Output
_____no_output_____
###Markdown
Loss and accuracy built in fastai chapter 4 (same as mine):
###Code
def mnist_loss(predictions, targets):
predictions = predictions.sigmoid()
return torch.where(targets==1, 1-predictions, predictions).mean()
def batch_accuracy(xb, yb):
preds = xb.sigmoid()
correct = (preds>0.5) == yb
return correct.float().mean()
###Output
_____no_output_____
###Markdown
Comparing both solutions: Create a small test set for binary classification:
###Code
preds = tensor([ 9.4077, -1.3319, 9.3460, 5.9358])
y = tensor([1, 1, 0, 0])
###Output
_____no_output_____
###Markdown
loss functions test:
###Code
Binary_cls_loss(preds, y)==mnist_loss(preds, y)
###Output
_____no_output_____
###Markdown
Accuracy functions test:
###Code
Binary_metric_accuracy(preds, y) == batch_accuracy(preds, y)
###Output
_____no_output_____
###Markdown
No standard functions are used during this chapter, for simplification; chapter 6 later lists all the standard function names. For this case we should use `nn.BCEWithLogitsLoss`, as it is meant for binary classification and includes the sigmoid:
###Code
binary_loss_func = nn.BCEWithLogitsLoss()
binary_loss_func(preds, y.float()) == Binary_cls_loss(preds, y) #added .float() to convert for the correct type
###Output
_____no_output_____
###Markdown
The values are not the same because the standard function applies a negative log to the result, which improves the optimization process. We will add the negative log for verification:
###Code
def Binary_cls_loss_log(preds,y):
preds = sigmoid(preds)
return -np.log(1-torch.where(y==1, y-preds, preds)).mean()
binary_loss_func(preds, y.float()), Binary_cls_loss_log(preds, y)
###Output
_____no_output_____
###Markdown
Further research: In the last example in the chapter, `F.cross_entropy` is used when constructing the `cnn_learner`; it will be interesting to learn why, and what the difference is from the standard binary function. Multiclass classification Loss and accuracy built by me:
###Code
def softmaxp1(preds):
sume = np.exp(preds).sum(1)
return np.exp(preds)/sume.reshape((len(preds),1))
def multi_c_loss(preds, y):
soft = softmaxp1(preds)
softcon = np.log(soft)*-1
idx = range(len(y))
return softcon[idx, y].mean() #choosing for row 'idx' the value in column 'y'
def multi_accuracy(preds, y):
to_C = preds.argmax(1)
return (to_C==y).float().mean()
###Output
_____no_output_____
###Markdown
Loss and accuracy built in fastai chapter 5 (same as mine): In class they build only the softmax and rely on the standard functions for the rest of the development/explanation.
###Code
def softmax(x): return np.exp(x) / np.exp(x).sum(dim=1, keepdim=True)
###Output
_____no_output_____
###Markdown
*The loss function used for this classifier is `F.cross_entropy`, which is actually softmax + negative log likelihood. It uses `F.nll_loss`, which calculates the negative log likelihood; but because PyTorch computes the log and the softmax at the same time, `nll_loss` does not apply the log itself.*Another important point is that by default all loss functions in PyTorch take the mean; we can set `reduction='none'` to show the numbers before the mean. Comparing both solutions:
###Code
y = tensor([0, 1, 1, 2, 3])
preds =tensor([[15.8227, 9.1500, 3.8286, 9.5931],
[3.9045, 10.0090, 2.5657, 7.9364],
[9.4077, 1.3319, 9.3460, 5.9358],
[8.6940, 5.6772, 7.4109, 4.2940],
[8.8544, 5.7390, 2.6658, 6.2745]])
softmax(preds)==softmaxp1(preds)
loss_func_nn = nn.CrossEntropyLoss(reduction='none')
F.cross_entropy(preds, y), multi_c_loss(preds, y), loss_func_nn(preds, y)
###Output
_____no_output_____
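###Markdown
The decomposition mentioned above can also be checked explicitly (a sketch: `F.log_softmax` followed by `F.nll_loss` should reproduce `F.cross_entropy`):
###Code
# cross_entropy == nll_loss applied to log_softmax
manual = F.nll_loss(F.log_softmax(preds, dim=1), y)
print(manual, F.cross_entropy(preds, y))
###Output
_____no_output_____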
###Markdown
The standard `error_rate` was used in this chapter, so to compare, we subtract from 1:
###Code
(1-error_rate(preds, y)), multi_accuracy(preds, y)
###Output
_____no_output_____
###Markdown
Multi label classification Loss and accuracy built by me:
###Code
def multi_l_loss(preds, y):
sig = sigmoid(preds)
dist = (sig-y).abs()
return -np.log(1-dist).mean()
def multi_l_accuracy(preds, y, th=0.5):
sig_preds = sigmoid(preds)
bin_conv = (sig_preds>th).float()
return (bin_conv==y).float().mean()
###Output
_____no_output_____
###Markdown
Loss and accuracy built in fastai chapter 6 (same as mine): In the class they don't build the loss function and use the standard function `nn.BCEWithLogitsLoss`; in fact, fastai knows that the DataLoaders have multiple category labels, so they don't need to specify it.
###Code
def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
"Compute accuracy when `inp` and `targ` are the same size."
    if sigmoid: inp = inp.sigmoid()
targ = targ.bool()
return ((inp>thresh)==targ).float().mean()
###Output
_____no_output_____
###Markdown
Comparing both solutions:
###Code
preds =tensor([[8.8227, 9.1500, 3.8286, 9.5931],
[3.9045, 6.0090, 2.5657, 7.9364],
[9.4077, 1.3319, 9.3460, 5.9358],
[8.6940, 5.6772, 7.4109, 4.2940],
[8.8544, 5.7390, 2.6658, 6.2745]])
y =tensor([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 1, 1]])
lss = nn.BCEWithLogitsLoss()
multi_l_loss(preds,y), lss(preds,y.float()), F.binary_cross_entropy_with_logits(preds,y.float())
accuracy_multi(preds, y)==multi_l_accuracy(preds, y)
###Output
_____no_output_____
###Markdown
An important point in this chapter is the summary of all the standard loss function names and their uses; I have summarized it in the following table:
###Code
tp = Path('/content/2021-12-30_14-02-58.jpg')
im = Image.open(tp)
im
###Output
_____no_output_____ |
lib/spot-2.8.1/tests/python/stutter-inv.ipynb | ###Markdown
Stutter-invariant languagesA language $L$ is said to be _stutter-invariant_ iff $\ell_0\ldots\ell_{i-1}\ell_i\ell_{i+1}\ldots\in L \iff \ell_0\ldots\ell_{i-1}\ell_i\ell_i\ell_{i+1}\ldots\in L$, i.e., if duplicating a letter in a word or removing a duplicated letter does not change the membership of that word to $L$. These languages are also called _stutter-insensitive_. We use the adjective _stutter-sensitive_ to describe a language that is not stutter-invariant. Of course we can extend this vocabulary to LTL formulas or automata that represent stutter-invariant languages.Stutter-invariant languages play an important role in model checking. When verifying a stutter-invariant specification against a system, we know that we have some freedom in how we discretize the time in the model: as long as we do not hide changes of model variables that are observed by the specification, we can merge multiple steps of the model. This, combined with careful analysis of actions of the model that are independent, is the basis for a set of techniques known as _partial-order reductions_ (POR) that postpone the visit of some successors in the model, because we know we can always visit them later. Stutter-invariant formulasWhen the specification is expressed as an LTL formula, a well known way to ensure it is _stutter-invariant_ is to forbid the use of the `X` operator. Testing whether a formula is `X`-free can be done in constant time using the `is_syntactic_stutter_invariant()` method.
###Code
f = spot.formula('a U b')
print(f.is_syntactic_stutter_invariant())
f = spot.formula('a U Xb')
print(f.is_syntactic_stutter_invariant())
###Output
False
###Markdown
However, some formulas are stutter-invariant despite their use of `X`. Spot implements an [automaton-based check](https://www.lrde.epita.fr/~adl/dl/adl/michaud.15.spin.pdf) to detect stutter-invariance reliably and efficiently. This can be tested with the `is_stutter_invariant()` function.
###Code
g = spot.formula('F(a & X(!a & Gb))')
print(g.is_syntactic_stutter_invariant())
print(spot.is_stutter_invariant(g))
###Output
False
True
###Markdown
Of course this `is_stutter_invariant()` function first checks whether the formula is `X`-free before wasting time building automata, so if you want to detect stutter-invariant formulas in your model checker, this is the only function to use. Also, if you happen to already have an automaton `aut_g` for `g`, you should pass it as a second argument to avoid it being recomputed: `spot.is_stutter_invariant(g, aut_g)`. It is also known that any stutter-invariant LTL formula can be converted to an `X`-free LTL formula. Several proofs of that exist. Spot implements the rewriting of [K. Etessami](http://homepages.inf.ed.ac.uk/kousha/note_on_stut_tl_lpi.ps) under the name `remove_x()`. Note that the output of this function is only equivalent to its input if the latter is stutter-invariant.
###Code
spot.remove_x(g)
###Output
_____no_output_____
###Markdown
Stutter-invariant automata Similarly to formulas, automata use a few bits to store some known properties about themselves, like whether they represent a stutter-invariant language. This property can be checked with the `prop_stutter_invariant()` method, but that returns a `trival` instance (i.e., yes, no, or maybe). Some algorithms will update that property whenever that is cheap or explicitly asked for. For instance `spot.translate()` only sets the property if the translated formula is `X`-free.
###Code
aut = spot.translate(g)
print(aut.prop_stutter_invariant())
###Output
maybe
###Markdown
As suggested above, we can call `is_stutter_invariant()` by passing a formula and its automaton, to save on one translation. A second translation is still needed to complement the automaton.
###Code
print(spot.is_stutter_invariant(g, aut))
###Output
True
###Markdown
Note that `prop_stutter_invariant()` was updated as a side-effect so that any further call to `is_stutter_invariant()` with this automaton will be instantaneous.
###Code
print(aut.prop_stutter_invariant())
###Output
yes
###Markdown
You have to be aware of this property being set behind your back: if, while playing with `is_stutter_invariant()`, you pass the incorrect formula for an automaton by mistake, the automaton will have its property set incorrectly, and running `is_stutter_invariant()` with the correct formula will simply return the cached property.If in doubt, you can always reset the property as follows:
###Code
aut.prop_stutter_invariant(spot.trival_maybe())
print(aut.prop_stutter_invariant())
###Output
maybe
###Markdown
In case you have an automaton for which you do not have formula, you can also use `is_stutter_invariant()` by passing this automaton as the first argument. In that case a negated automaton will be constructed by determinization. If you do happen to have a negated automaton handy, you can pass it as a second argument to avoid that.
###Code
a1 = spot.automaton('''HOA: v1
AP: 1 "a"
States: 2
Start: 0
Acceptance: 0 t
--BODY--
State: 0 [0] 1
State: 1 [t] 0
--END--''')
display(a1)
print(spot.is_stutter_invariant(a1))
###Output
_____no_output_____
###Markdown
Explaining why a formula is not stutter-invariant As explained in our [Spin'15 paper](https://www.lrde.epita.fr/~adl/dl/adl/michaud.15.spin.pdf) the stutter-invariance checks are implemented using simple operators such as `spot.closure(aut)`, which augments the language L by adding words that can be obtained by removing duplicated letters, and `spot.sl(aut)` or `spot.sl2(aut)` that both augment the language L by adding words that can be obtained by duplicating letters. The default `is_stutter_invariant()` function is implemented as `spot.product(spot.closure(aut), spot.closure(neg_aut)).is_empty()`, but that is just one possible implementation selected because it was more efficient.Using these bricks, we can modify the original algorithm so it uses a counterexample to explain why a formula is stutter-sensitive.
###Code
def explain_stut(f):
f = spot.formula(f)
pos = spot.translate(f)
neg = spot.translate(spot.formula.Not(f))
word = spot.product(spot.closure(pos), spot.closure(neg)).accepting_word()
if word is None:
print(f, "is stutter invariant")
return
word.simplify()
word.use_all_aps(pos.ap_vars())
waut = word.as_automaton()
if waut.intersects(pos):
acc, rej, aut = "accepted", "rejected", neg
else:
acc, rej, aut = "rejected", "accepted", pos
word2 = spot.sl2(waut).intersecting_word(aut)
word2.simplify()
print("""{} is {} by {}
but if we stutter some of its letters, we get
{} which is {} by {}""".format(word, acc, f, word2, rej, f))
explain_stut('GF(a & Xb)')
###Output
cycle{!a & !b; a & b} is rejected by GF(a & Xb)
but if we stutter some of its letters, we get
cycle{!a & !b; a & b; a & b} which is accepted by GF(a & Xb)
###Markdown
Note that a variant of the above explanation procedure is already integrated in our [on-line LTL translator tool](https://spot.lrde.epita.fr/app/) (use the study tab). Detecting stutter-invariant statesEven if the language of an automaton is not stutter-invariant, some of its states may recognize a stutter-invariant language. (We assume the language of a state is the language the automaton would have when starting from this state.) First exampleFor instance let us build a disjunction of a stutter-invariant formula and a stutter-sensitive one:
###Code
f1 = spot.formula('F(a & X!a & XF(b & X!b & Ga))')
f2 = spot.formula('F(a & Xa & XXa & G!b)')
f = spot.formula.Or([f1, f2])
print(spot.is_stutter_invariant(f1))
print(spot.is_stutter_invariant(f2))
print(spot.is_stutter_invariant(f))
pos = spot.translate(f)
display(pos)
###Output
_____no_output_____
###Markdown
While the automaton as a whole is stutter-sensitive, we can see that eventually we will enter a sub-automaton that is stutter-invariant. The `stutter_invariant_states()` function returns a Boolean vector indexed by the state number. A state is marked as `True` if either its language is stutter-invariant, or if it can only be reached via a stutter-invariant state (see the second example later). As always, the second argument, `f`, can be omitted (pass `None`) if the formula is unknown, or it can be replaced by a negated automaton if it is known.
###Code
spot.stutter_invariant_states(pos, f)
###Output
_____no_output_____
###Markdown
For convenience, the `highlight_...()` version colors the stutter-invariant states of the automaton for display. (That 5 is the color number for red in Spot's hard-coded palette.)
###Code
spot.highlight_stutter_invariant_states(pos, f, 5)
display(pos)
###Output
_____no_output_____
###Markdown
Such a procedure gives us a map of where POR can be enabled when model checking using this automaton. Second example This second example illustrates the fact that a state can be marked if it is not stutter-invariant but appears below a stutter-invariant state. We build our example automaton as the disjunction of the following two stutter-sensitive formulas, whose union is equivalent to the stutter-invariant formula `GF!a`.
###Code
g1 = spot.formula('GF(a & Xa) & GF!a')
g2 = spot.formula('!GF(a & Xa) & GF!a')
g = spot.formula.Or([g1, g2])
print(spot.is_stutter_invariant(g1))
print(spot.is_stutter_invariant(g2))
print(spot.is_stutter_invariant(g))
###Output
False
False
True
###Markdown
Here are the automata for `g1` and `g2`, note that none of the states are stutter-invariant.
###Code
aut1 = spot.translate(g1)
aut1.set_name(str(g1))
spot.highlight_stutter_invariant_states(aut1, g1, 5)
display(aut1)
aut2 = spot.translate(g2)
aut2.set_name(str(g2))
spot.highlight_stutter_invariant_states(aut2, g2, 5)
display(aut2)
###Output
_____no_output_____
###Markdown
Now we build the sum of these two automata. The stutter-invariance check detects that the initial state is stutter-invariant (i.e., the entire language is stutter-invariant) so all states below it are marked despite the fact that the language recognized from these individual states would not be stutter-invariant.
###Code
aut = spot.sum(aut1, aut2)
# At this point it is unknown if AUT is stutter-invariant
assert(aut.prop_stutter_invariant().is_maybe())
spot.highlight_stutter_invariant_states(aut, g, 5)
display(aut)
# The stutter_invariant property is set on AUT as a side effect
# of calling stutter_invariant_states() or any variant of it.
assert(aut.prop_stutter_invariant().is_true())
###Output
_____no_output_____
###Markdown
Third exampleThese procedures work regardless of the acceptance condition. Here is an example with co-Büchi acceptance. In this case we do not even have a formula to pass as second argument, so the check will perform a complementation by determinization.
###Code
aut = spot.automaton('randaut --seed=30 -Q4 -A"Fin(0)" a |')
spot.highlight_stutter_invariant_states(aut, None, 5)
display(aut)
###Output
_____no_output_____
###Markdown
If the negated automaton is already known, it can be passed as second argument (instead of the positive formula) to avoid unnecessary work. Stutter-invariance at the letter levelInstead of marking each state as stuttering or not, we can list the letters that we can stutter in each state.More precisely, a state $q$ is _stutter-invariant for letter $a$_ if the membership to $L(q)$ of any word starting with $a$ is preserved by the operations that duplicate letters or remove duplicates. $(\ell_0\ldots\ell_{i-1}\ell_i\ell_{i+1}\ldots\in L(q) \land \ell_0=a) \iff (\ell_0\ldots\ell_{i-1}\ell_i\ell_i\ell_{i+1}\ldots\in L(q)\land \ell_0=a)$Under this definition, we can also say that $q$ is _stutter-invariant_ iff it is _stutter-invariant for any letter_.For instance consider the following automaton, for which all words that start with $b$ are stutter-invariant.The initial state may not be declared as stutter-invariant because of words that start with $\lnot b$.
###Code
f = spot.formula('(!b&Xa) | Gb')
pos = spot.translate(f)
spot.highlight_stutter_invariant_states(pos, f, 5)
display(pos)
###Output
_____no_output_____
###Markdown
The `stutter_invariant_letters()` function returns a vector of BDDs indexed by state numbers. The BDD at index $q$ specifies all letters $\ell$ for which state $q$ would be stuttering. Note that if $q$ is stutter-invariant or reachable from a stutter-invariant state, the associated BDD will be `bddtrue` (printed as `1` below). This interface is a bit inconvenient to use interactively, because we need a `spot.bdd_dict` object to print a BDD.
###Code
sil_vec = spot.stutter_invariant_letters(pos, f)
for q in range(pos.num_states()):
print("sil_vec[{}] =".format(q), spot.bdd_format_formula(pos.get_dict(), sil_vec[q]))
###Output
sil_vec[0] = 1
sil_vec[1] = 1
sil_vec[2] = 1
sil_vec[3] = b
###Markdown
The set of stutter-invariant states is not always forward closedConsider the following automaton, which is a variant of our second example above. The language accepted from state (2) is `!GF(a & Xa) & GF!a` (this can be simplified to `FG(!a | X!a)`), while the language accepted from state (0) is `GF(a & Xa) & GF!a`. Therefore, the language accepted from state (5) is `a & X(GF!a)`. Since this is equivalent to `a & GF(!a)`, state (5) recognizes a stutter-invariant language, but as we can see, it is not the case that all states below (5) are also marked. In fact, state (0) can also be reached via states (7) and (6), recognizing respectively `(a & X(a & GF!a)) | (!a & X(!a & GF(a & Xa) & GF!a))` and `!a & GF(a & Xa) & GF!a`, i.e., two stutter-sensitive languages.
###Code
ex1 = spot.automaton("""HOA: v1
States: 8 Start: 7 AP: 1 "a" Acceptance: 2 (Inf(0)&Inf(1))
--BODY--
State: 0 [!0] 0 {1} [0] 0 [0] 1 {0}
State: 1 [0] 0 [0] 1 {0}
State: 2 [t] 2 [!0] 3 [0] 4
State: 3 [!0] 3 {0 1} [0] 4 {0 1}
State: 4 [!0] 3 {0 1}
State: 5 [0] 0 [0] 2
State: 6 [!0] 0
State: 7 [!0] 6 [0] 5
--END--""")
spot.highlight_stutter_invariant_states(ex1, None, 5)
display(ex1)
###Output
_____no_output_____
###Markdown
This situation can be tested with `spot.is_stutter_invariant_forward_closed()`. The function returns `-1` if every successor of a stutter-invariant state is also a stutter-invariant state; otherwise it returns the number of one stutter-sensitive state that has a stutter-invariant state as predecessor.
###Code
sistates = spot.stutter_invariant_states(ex1)
spot.is_stutter_invariant_forward_closed(ex1, sistates)
###Output
_____no_output_____
###Markdown
In cases where we prefer to have a forward-closed set of stutter-invariant states, it is always possible to duplicate the problematic states. The `make_stutter_invariant_forward_closed_inplace()` function modifies the automaton in place, and also returns an updated copy of the vector of stutter-invariant states.
###Code
sistates2 = spot.make_stutter_invariant_forward_closed_inplace(ex1, sistates)
spot.highlight_stutter_invariant_states(ex1, None, 5)
display(ex1)
print(sistates2)
###Output
_____no_output_____
###Markdown
Now, state 0 is no longer a problem.
###Code
spot.is_stutter_invariant_forward_closed(ex1, sistates2)
###Output
_____no_output_____
###Markdown
Let's see how infrequently the set of stutter-invariant states is not forward closed.
###Code
import spot.gen as gen
# Let's consider the LTL formulas from the following 5 sources,
# and restrict ourselves to formulas that are not stutter-invariant.
formulas = [ f for f in gen.ltl_patterns(gen.LTL_DAC_PATTERNS,
gen.LTL_EH_PATTERNS,
gen.LTL_HKRSS_PATTERNS,
gen.LTL_P_PATTERNS,
gen.LTL_SB_PATTERNS)
if not f.is_syntactic_stutter_invariant() ]
aut_size = []
sistates_size = []
fwd_closed = []
fmt = "{:40.40} {:>6} {:>8} {:>10}"
print(fmt.format("formula", "states", "SIstates", "fwd_closed"))
for f in formulas:
s = f.to_str()
aut = spot.translate(f)
aut_size.append(aut.num_states())
sistates = spot.stutter_invariant_states(aut, f)
sisz = sum(sistates)
sistates_size.append(sisz)
fc = spot.is_stutter_invariant_forward_closed(aut, sistates) == -1
fwd_closed.append(fc)
print(fmt.format(s, aut.num_states(), sisz, fc))
###Output
formula states SIstates fwd_closed
Fp0 -> (!p0 U (!p0 & p1 & X(!p0 U p2))) 3 2 1
Fp0 -> (!p1 U (p0 | (!p1 & p2 & X(!p1 U 4 3 1
G!p0 | (!p0 U ((p0 & Fp1) -> (!p1 U (!p1 4 2 1
G((p0 & Fp1) -> (!p2 U (p1 | (!p2 & p3 & 4 1 1
G(p0 -> (Fp1 -> (!p1 U (p2 | (!p1 & p3 & 3 0 1
F(p0 & XFp1) -> (!p0 U p2) 3 2 1
Fp0 -> (!(!p0 & p1 & X(!p0 U (!p0 & p2)) 4 3 1
G!p0 | (!p0 U (p0 & (F(p1 & XFp2) -> (!p 4 2 1
G((p0 & Fp1) -> (!(!p1 & p2 & X(!p1 U (! 4 1 1
G(p0 -> ((!(!p1 & p2 & X(!p1 U (!p1 & p3 3 0 1
G((p0 & XFp1) -> XF(p1 & Fp2)) 4 0 1
Fp0 -> (((p1 & X(!p0 U p2)) -> X(!p0 U ( 6 2 1
G(p0 -> G((p1 & XFp2) -> X(!p2 U (p2 & F 5 0 1
G((p0 & Fp1) -> (((p2 & X(!p1 U p3)) -> 10 2 1
G(p0 -> (((p1 & X(!p2 U p3)) -> X(!p2 U 10 0 1
G(p0 -> F(p1 & XFp2)) 4 0 1
Fp0 -> ((p1 -> (!p0 U (!p0 & p2 & X(!p0 4 1 1
G(p0 -> G(p1 -> (p2 & XFp3))) 3 3 1
G((p0 & Fp1) -> ((p2 -> (!p1 U (!p1 & p3 4 0 1
G(p0 -> ((p1 -> (!p2 U (!p2 & p3 & X(!p2 6 2 1
G(p0 -> F(p1 & !p2 & X(!p2 U p3))) 4 0 1
Fp0 -> ((p1 -> (!p0 U (!p0 & p2 & !p3 & 4 1 1
G(p0 -> G(p1 -> (p2 & !p3 & X(!p3 U p4)) 3 3 1
G((p0 & Fp1) -> ((p2 -> (!p1 U (!p1 & p3 4 0 1
G(p0 -> ((p1 -> (!p2 U (!p2 & p3 & !p4 & 6 2 1
p0 U (p1 & X(p2 U p3)) 3 2 1
p0 U (p1 & X(p2 & F(p3 & XF(p4 & XF(p5 & 7 2 1
F(p0 & XGp1) 2 2 1
F(p0 & X(p1 & XFp2)) 4 2 1
F(p0 & X(p1 U p2)) 3 1 1
G(p0 & XF(p1 & XF(p2 & XFp3))) 1 1 1
GF(!(p1 <-> Xp1) | !(p0 <-> Xp0)) 4 4 1
GF(!(p1 <-> Xp1) | !(p0 <-> Xp0) | !(p2 9 9 1
G((p0 & p1 & !p2 & Xp2) -> X(p3 | X(!p1 3 0 1
G((p0 & p1 & !p2 & Xp2) -> X(X!p1 | (p2 5 5 1
G(p0 & p1 & !p2 & Xp2) -> X(X!p1 | (p2 U 1 1 1
G((!p0 & p1) -> Xp2) 2 0 1
G(p0 -> X(p0 | p1)) 2 2 1
G((!(p1 <-> Xp1) | !(p0 <-> Xp0) | !(p2 34 34 1
G((p0 & !p1 & Xp1 & Xp0) -> (p2 -> Xp3)) 2 2 1
G(p0 -> X(!p0 U p1)) 2 0 1
G((!p0 & Xp0) -> X((p0 U p1) | Gp0)) 3 3 1
G((!p0 & Xp0) -> X(p0 U (p0 & !p1 & X(p0 4 4 1
G((!p0 & Xp0) -> X(p0 U (p0 & !p1 & X(p0 6 6 1
G((p0 & X!p0) -> X(!p0 U (!p0 & !p1 & X( 6 6 1
G((p0 & X!p0) -> X(!p0 U (!p0 & !p1 & X( 8 8 1
G((!p0 & Xp0) -> X(!(!p0 & Xp0) U (!p1 & 6 6 1
G(!p0 | X(!p0 | X(!p0 | X(!p0 | X(!p0 | 12 0 1
G((Xp0 -> p0) -> (p1 <-> Xp1)) 4 4 1
G((Xp0 -> p0) -> ((p1 -> Xp1) & (!p1 -> 4 4 1
p0 & XG!p0 2 1 1
XG(p0 -> (G!p1 | (!Xp1 U p2))) 4 1 1
XG((p0 & !p1) -> (G!p1 | (!p1 U p2))) 3 2 1
XG((p0 & p1) -> (Gp1 | (p1 U p2))) 3 2 1
Xp0 & G((!p0 & Xp0) -> XXp0) 5 0 1
(Xp0 U Xp1) | !X(p0 U p1) 1 1 1
(Xp0 U p1) | !X(p0 U (p0 & p1)) 1 1 1
((Xp0 U p1) | !X(p0 U (p0 & p1))) & G(p0 2 2 1
((Xp0 U Xp1) | !X(p0 U p1)) & G(p0 -> Fp 2 2 1
!G(p0 -> X(p1 R p2)) 3 1 1
(p0 & Xp1) R X(((p2 U p3) R p0) U (p2 R 5 3 1
G(p0 | XGp1) & G(p2 | XG!p1) 3 2 1
G(p0 | (Xp1 & X!p1)) 1 1 1
###Markdown
There is no instance of a set of stutter-invariant states that is not forward closed among these example formulas.
###Code
sum(fwd_closed), len(fwd_closed)
###Output
_____no_output_____
###Markdown
Here is the percentage of stutter-invariant states.
###Code
100*sum(sistates_size)/sum(aut_size)
###Output
_____no_output_____ |
notebooks/Nuclear_Features.ipynb | ###Markdown
Nuclear Morphology and Chromatin Organization FeaturesHere we aim to compute a library of features that exhaustively describe the nuclear morphology and chromatin organization for each segmented nucleus in a given image.
###Code
from tifffile import imread
import pandas as pd
from skimage import measure
import numpy as np
import matplotlib.pyplot as plt
import cv2 as cv
from nmco.nuclear_features import (
Boundary_global as BG,
Img_texture as IT,
Int_dist_features as IDF,
Boundary_local_curvature as BLC,
)
import os
# initialising paths
labelled_image_path = os.path.join(os.path.dirname(os.getcwd()),'example_data/nuc_labels.tif')
raw_image_path = os.path.join(os.path.dirname(os.getcwd()),'example_data/raw_image.tif')
feature_path = os.path.join(os.path.dirname(os.getcwd()),'example_data/')
###Output
_____no_output_____
###Markdown
Below is an example of the data that can be used.
###Code
#Read in Images
labelled_image = imread(labelled_image_path)
raw_image = imread(raw_image_path)
labelled_image = labelled_image.astype(int)
raw_image = raw_image.astype(int)
# normalize images
raw_image = cv.normalize(raw_image, None, alpha=0, beta=150, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
#Visualise the data
#save plots to show clusters
fig = plt.figure(figsize=(8, 4))
ax0 = fig.add_subplot(121)
ax1 = fig.add_subplot(122)
#show raw image
ax0.imshow(raw_image,aspect='auto',cmap='inferno')
ax0.axis('off')
ax0.title.set_text('Image')
#show segmented image
ax1.imshow(labelled_image,aspect='auto',cmap='viridis')
ax1.axis('off')
ax1.title.set_text('Nuclear Labels')
###Output
_____no_output_____
###Markdown
One can now access each nucleus in the labelled image as well as the raw image.
###Code
#Get indexing for the individual nuclei in the image
props = measure.regionprops(labelled_image,raw_image)
fig = plt.figure(figsize=(12, 6))
ax0 = fig.add_subplot(241)
ax1 = fig.add_subplot(242)
ax2 = fig.add_subplot(243)
ax3 = fig.add_subplot(244)
ax4 = fig.add_subplot(245)
ax5 = fig.add_subplot(246)
ax6 = fig.add_subplot(247)
ax7 = fig.add_subplot(248)
# Selecting a few nuclei
nuc_of_interest = [90,112,216,301]
#show raw image
ax0.imshow(props[nuc_of_interest[0]].intensity_image,aspect='auto',cmap='inferno')
ax0.title.set_text('Nucleus 1')
ax0.axis('off')
ax1.imshow(props[nuc_of_interest[1]].intensity_image,aspect='auto',cmap='inferno')
ax1.title.set_text('Nucleus 2')
ax1.axis('off')
ax2.imshow(props[nuc_of_interest[2]].intensity_image,aspect='auto',cmap='inferno')
ax2.title.set_text('Nucleus 3')
ax2.axis('off')
ax3.imshow(props[nuc_of_interest[3]].intensity_image,aspect='auto',cmap='inferno')
ax3.title.set_text('Nucleus 4')
ax3.axis('off')
#show segmented image
ax4.imshow(props[nuc_of_interest[0]].image,aspect='auto',cmap='viridis')
ax4.title.set_text('Label 1')
ax4.axis('off')
ax5.imshow(props[nuc_of_interest[1]].image,aspect='auto',cmap='viridis')
ax5.title.set_text('Label 2')
ax5.axis('off')
ax6.imshow(props[nuc_of_interest[2]].image,aspect='auto',cmap='viridis')
ax6.title.set_text('Label 3')
ax6.axis('off')
ax7.imshow(props[nuc_of_interest[3]].image,aspect='auto',cmap='viridis')
ax7.title.set_text('Label 4')
ax7.axis('off')
###Output
_____no_output_____
###Markdown
Basic FeaturesScikit provides several informative features that describe "region properties". First we extract such built-in features. For more information on how the features were computed, check out the documentation (https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops_table).
###Code
#Measure scikit's built in features
propstable = pd.DataFrame(measure.regionprops_table(labelled_image,raw_image,cache=True,
properties=['label', 'area','perimeter','bbox_area','convex_area',
'equivalent_diameter','major_axis_length','minor_axis_length',
'eccentricity','orientation',
'centroid','weighted_centroid',
'weighted_moments','weighted_moments_normalized',
'weighted_moments_central','weighted_moments_hu',
'moments','moments_normalized','moments_central','moments_hu']))
propstable.iloc[nuc_of_interest]
###Output
_____no_output_____
###Markdown
Global Boundary featuresHere we compute the features that describe the morphology of a given object. These include 1. Calliper distances 2. Distribution features of radii (centroid to boundary) distances. Below are features computed for 4 nuclei
###Code
BG_feat = pd.concat([BG.boundary_features(props[nuc_of_interest[0]].image,centroids=props[nuc_of_interest[0]].local_centroid),
BG.boundary_features(props[nuc_of_interest[1]].image,centroids=props[nuc_of_interest[1]].local_centroid),
BG.boundary_features(props[nuc_of_interest[2]].image,centroids=props[nuc_of_interest[2]].local_centroid),
BG.boundary_features(props[nuc_of_interest[3]].image,centroids=props[nuc_of_interest[3]].local_centroid)])
BG_feat
###Output
_____no_output_____
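###Markdown
To illustrate what a calliper (Feret) distance captures, here is a rough sketch (not the package's exact implementation): take all pixel coordinates of a nucleus and compute the largest pairwise distance between them, which approximates the maximum calliper distance.
###Code
# Sketch only: approximate the maximum calliper distance of Nucleus 1
# as the largest pairwise distance between its foreground pixel coordinates.
from scipy.spatial.distance import pdist
coords = np.argwhere(props[nuc_of_interest[0]].image)
print(pdist(coords).max())
###Output
_____no_output_____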
###Markdown
Local Boundary FeaturesHere we compute the features that describe the local curvature of a given object. Approach: For a given object we obtain the edge pixels and compute the local curvature of each point on the curve +/- a given stepsize. Larger steps give a smoother curvature. We define the local curvature between 3 points as the inverse of the radius of their circumcircle, and if the circumcenter is inside the object then the sign of curvature is positive. Below is the radius of curvature for Nucleus 1
###Code
r_c= BLC.local_radius_curvature(props[nuc_of_interest[0]].image,step=5,show_boundary=True)
#calculate local curvature features
local_curvature=[np.divide(1,r_c[x]) if r_c[x]!=0 else 0 for x in range(len(r_c))]
###Output
_____no_output_____
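###Markdown
As a worked illustration of the idea (a sketch, independent of the package's own implementation), the circumradius of three points can be computed from the side lengths a, b, c and the triangle area A as R = abc/(4A), and the unsigned local curvature is then 1/R.
###Code
# Sketch: unsigned curvature defined by three points.
def circumradius(p1, p2, p3):
    p1, p2, p3 = map(np.asarray, (p1, p2, p3))
    a = np.linalg.norm(p2 - p3)
    b = np.linalg.norm(p1 - p3)
    c = np.linalg.norm(p1 - p2)
    area = 0.5 * abs(np.cross(p2 - p1, p3 - p1))
    return np.inf if area == 0 else (a * b * c) / (4 * area)
# Three points on a unit circle centred at (1, 0): curvature should be 1.
print(1 / circumradius((0, 0), (1, 1), (2, 0)))
###Output
_____no_output_____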
###Markdown
Now that we have the local curvature for all points on the boundary, we compute features that describe it, such as the average and standard deviation of curvature (positive and negative), the number of times the polarity changes, etc. Feature names are self-descriptive.
###Code
#compute local and global features
global_features = [BLC.global_curvature_features(np.array(local_curvature))]
global_features = pd.DataFrame([o.__dict__ for o in global_features])
global_features
###Output
_____no_output_____
###Markdown
We also check to see if there are any prominent jumps in curvature along the boundary.
###Code
prominant_features = [BLC.prominant_curvature_features(local_curvature,show_plot=True)]
prominant_features = pd.DataFrame([o.__dict__ for o in prominant_features])
prominant_features
###Output
_____no_output_____
###Markdown
Below are the features computed for 4 nuclei.
###Code
BLC_feat= pd.concat([BLC.curvature_features(props[nuc_of_interest[0]].image,step=5),
BLC.curvature_features(props[nuc_of_interest[1]].image,step=5),
BLC.curvature_features(props[nuc_of_interest[2]].image,step=5),
BLC.curvature_features(props[nuc_of_interest[3]].image,step=5)])
BLC_feat
###Output
_____no_output_____
###Markdown
Intensity FeaturesHere we compute features that describe the intensity distribution. These include distribution statistics, entropy and heterochromatin ratios. Below are the features computed for 4 nuclei.
###Code
Int_feat= pd.concat([IDF.intensity_features(props[nuc_of_interest[0]].image,props[nuc_of_interest[0]].intensity_image),
IDF.intensity_features(props[nuc_of_interest[1]].image,props[nuc_of_interest[1]].intensity_image),
IDF.intensity_features(props[nuc_of_interest[2]].image,props[nuc_of_interest[2]].intensity_image),
IDF.intensity_features(props[nuc_of_interest[3]].image,props[nuc_of_interest[3]].intensity_image)])
Int_feat
###Output
_____no_output_____
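###Markdown
As a rough sketch of two of these measures (not necessarily the package's exact definitions): the Shannon entropy of the intensity histogram, and a heterochromatin ratio estimated as the fraction of nuclear pixels brighter than an Otsu threshold.
###Code
# Sketch: histogram entropy and a simple heterochromatin ratio for Nucleus 1.
from skimage.filters import threshold_otsu
pixels = props[nuc_of_interest[0]].intensity_image[props[nuc_of_interest[0]].image]
counts, _ = np.histogram(pixels, bins=256)
p = counts[counts > 0] / counts.sum()
entropy = -np.sum(p * np.log2(p))
hc_ratio = np.mean(pixels > threshold_otsu(pixels))
print(entropy, hc_ratio)
###Output
_____no_output_____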
###Markdown
Image TexturesHere we compute features that describe the texture of the image. These include the GLCM features. Below are the features computed for 4 nuclei.
###Code
Int_Text= pd.concat([IT.texture_features(props[nuc_of_interest[0]].image,props[nuc_of_interest[0]].intensity_image),
IT.texture_features(props[nuc_of_interest[1]].image,props[nuc_of_interest[1]].intensity_image),
IT.texture_features(props[nuc_of_interest[2]].image,props[nuc_of_interest[2]].intensity_image),
IT.texture_features(props[nuc_of_interest[3]].image,props[nuc_of_interest[3]].intensity_image)])
Int_Text
###Output
_____no_output_____
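###Markdown
For reference, GLCM (gray-level co-occurrence matrix) features of this kind can also be computed directly with scikit-image. A sketch, assuming a recent scikit-image (older releases spell these functions `greycomatrix`/`greycoprops`):
###Code
# Sketch: contrast and homogeneity of Nucleus 1 from a gray-level co-occurrence matrix.
from skimage.feature import graycomatrix, graycoprops
img = props[nuc_of_interest[0]].intensity_image
img8 = np.uint8(255 * (img - img.min()) / (img.max() - img.min() + 1e-9))
glcm = graycomatrix(img8, distances=[1], angles=[0], levels=256, symmetric=True, normed=True)
print(graycoprops(glcm, 'contrast'), graycoprops(glcm, 'homogeneity'))
###Output
_____no_output_____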
###Markdown
Misc. featuresWe merge all features and compute some related features.
###Code
features = pd.concat([propstable.iloc[nuc_of_interest].reset_index(drop=True),
BG_feat.reset_index(drop=True),
BLC_feat.reset_index(drop=True),
Int_feat.reset_index(drop=True),
Int_Text.reset_index(drop=True)], axis=1)
# Add in other related features for good measure
features["concavity"] = (features["convex_area"] - features["area"]) / features["convex_area"]
features["solidity"] = features["area"] / features["convex_area"]
features["a_r"] = features["minor_axis_length"] / features["major_axis_length"]
features["shape_factor"] = (features["perimeter"] ** 2) / (4 * np.pi * features["area"])
features["area_bbarea"] = features["area"] / features["bbox_area"]
features["center_mismatch"] = np.sqrt((features["weighted_centroid-0"] - features["centroid-0"]) ** 2+ (features["weighted_centroid-1"] - features["centroid-1"]) ** 2)
features["smallest_largest_calliper"] = ( features["min_calliper"] / features["max_calliper"])
features["frac_peri_w_posi_curvature"] = (features["len_posi_curvature"] / features["perimeter"])
features["frac_peri_w_neg_curvature"] = (features["len_neg_curvature"].replace(to_replace="NA", value=0)/ features["perimeter"])
features["frac_peri_w_polarity_changes"] = (features["npolarity_changes"] / features["perimeter"])
features
###Output
_____no_output_____
###Markdown
For a quick extraction of all features given a segmented image use the following code:
###Code
from nmco.utils.Run_nuclear_feature_extraction import run_nuclear_chromatin_feat_ext
features = run_nuclear_chromatin_feat_ext(raw_image_path,labelled_image_path,feature_path)
###Output
_____no_output_____
###Markdown
Tissue level summary:In order to characterise the nuclear features in a given image, we compute the distribution characteristics of each of the computed NMCO features. The measures available are: Median, Min, Max, Standard Deviation (SD), Coefficient of Variation (CV), Coefficient of Dispersion (CD), Inter-Quartile Range (IQR) and Quartile Coefficient of Dispersion (QCD).
###Code
from nmco.utils.summarising_features import summarise_feature_table
features_1 = features.replace('NA',0, regex=True)
features_1 = features_1.replace('NaN',0, regex=True)
summarise_feature_table(features_1)
###Output
/home/pathy_s/anaconda3/lib/python3.8/site-packages/numpy/lib/nanfunctions.py:1115: RuntimeWarning: All-NaN slice encountered
r, k = function_base._ureduce(a, func=_nanmedian, axis=axis, out=out,
/home/pathy_s/anaconda3/lib/python3.8/site-packages/numpy/lib/nanfunctions.py:1391: RuntimeWarning: All-NaN slice encountered
result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
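###Markdown
For reference, a minimal sketch of how such per-feature summaries could be computed with pandas (the exact definitions used by `summarise_feature_table` may differ):
###Code
# Sketch: a few distribution summaries for one numeric feature column.
col = pd.to_numeric(features_1['area'], errors='coerce')
q1, q3 = col.quantile(0.25), col.quantile(0.75)
print({
    'median': col.median(),
    'SD': col.std(),
    'CV': col.std() / col.mean(),       # coefficient of variation
    'IQR': q3 - q1,
    'QCD': (q3 - q1) / (q3 + q1),       # quartile coefficient of dispersion
})
###Output
_____no_output_____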
###Markdown
Nuclear Morphology and Chromatin Organization FeaturesHere we aim to compute a library of features that exhaustively describe the nuclear morphology and chromatin organization for each segmented nucleus in a given image.
###Code
# import libraries
%load_ext autoreload
import sys
sys.path.append("../")
from tifffile import imread
import pandas as pd
from skimage import measure
import numpy as np
import matplotlib.pyplot as plt
import cv2 as cv
import os
from nmco.nuclear_features import (
global_morphology as BG,
img_texture as IT,
int_dist_features as IDF,
boundary_local_curvature as BLC
)
# initialising paths
labelled_image_path = os.path.join(os.path.dirname(os.getcwd()),'example_data/nuc_labels.tif')
raw_image_path = os.path.join(os.path.dirname(os.getcwd()),'example_data/raw_image.tif')
feature_path = os.path.join(os.path.dirname(os.getcwd()),'example_data/')
###Output
_____no_output_____
###Markdown
Below is an example of the data that can be used.
###Code
#Read in Images
labelled_image = imread(labelled_image_path)
raw_image = imread(raw_image_path)
labelled_image = labelled_image.astype(int)
raw_image = raw_image.astype(int)
# normalize images
raw_image = cv.normalize(raw_image, None, alpha=0, beta=150, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
#Visualise the data
#save plots to show clusters
fig = plt.figure(figsize=(8, 4))
ax0 = fig.add_subplot(121)
ax1 = fig.add_subplot(122)
#show raw image
ax0.imshow(raw_image,aspect='auto',cmap='inferno')
ax0.axis('off')
ax0.title.set_text('Image')
#show segmented image
ax1.imshow(labelled_image,aspect='auto',cmap='viridis')
ax1.axis('off')
ax1.title.set_text('Nuclear Labels')
###Output
_____no_output_____
###Markdown
One can now access each nucleus in the labelled image as well as the raw image.
###Code
#Get indexing for the individual nuclei in the image
props = measure.regionprops(labelled_image,raw_image)
fig = plt.figure(figsize=(12, 6))
ax0 = fig.add_subplot(241)
ax1 = fig.add_subplot(242)
ax2 = fig.add_subplot(243)
ax3 = fig.add_subplot(244)
ax4 = fig.add_subplot(245)
ax5 = fig.add_subplot(246)
ax6 = fig.add_subplot(247)
ax7 = fig.add_subplot(248)
# Selecting a few nuclei
nuc_of_interest = [90,112,216,301]
#show raw image
ax0.imshow(props[nuc_of_interest[0]].intensity_image,aspect='auto',cmap='inferno')
ax0.title.set_text('Nucleus 1')
ax0.axis('off')
ax1.imshow(props[nuc_of_interest[1]].intensity_image,aspect='auto',cmap='inferno')
ax1.title.set_text('Nucleus 2')
ax1.axis('off')
ax2.imshow(props[nuc_of_interest[2]].intensity_image,aspect='auto',cmap='inferno')
ax2.title.set_text('Nucleus 3')
ax2.axis('off')
ax3.imshow(props[nuc_of_interest[3]].intensity_image,aspect='auto',cmap='inferno')
ax3.title.set_text('Nucleus 4')
ax3.axis('off')
#show segmented image
ax4.imshow(props[nuc_of_interest[0]].image,aspect='auto',cmap='viridis')
ax4.title.set_text('Label 1')
ax4.axis('off')
ax5.imshow(props[nuc_of_interest[1]].image,aspect='auto',cmap='viridis')
ax5.title.set_text('Label 2')
ax5.axis('off')
ax6.imshow(props[nuc_of_interest[2]].image,aspect='auto',cmap='viridis')
ax6.title.set_text('Label 3')
ax6.axis('off')
ax7.imshow(props[nuc_of_interest[3]].image,aspect='auto',cmap='viridis')
ax7.title.set_text('Label 4')
ax7.axis('off')
###Output
_____no_output_____
###Markdown
Basic FeaturesScikit provides several informative features that describe "region properties". One can extract such built-in features if required. For more information on how the features were computed, check out the documentation (https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops_table).
###Code
#Measure scikit's built in features
propstable = pd.DataFrame(measure.regionprops_table(labelled_image,raw_image,cache=True,
properties=['label', 'area','perimeter','bbox_area','convex_area',
'equivalent_diameter','major_axis_length','minor_axis_length',
'eccentricity','orientation',
'centroid','weighted_centroid',
'weighted_moments','weighted_moments_normalized',
'weighted_moments_central','weighted_moments_hu',
'moments','moments_normalized','moments_central','moments_hu']))
propstable.iloc[nuc_of_interest]
###Output
_____no_output_____
###Markdown
Global Morphology featuresWe compute the features that describe the global morphology of a given object. In addition to basic features in Scikit's measure module, we introduce the following two types of measures: 1. Calliper distances 2. Distribution features of radii (centroid to boundary) distances. Below are features computed for 4 nuclei
###Code
BG_feat = pd.concat([BG.measure_global_morphometrics(props[nuc_of_interest[0]].image),
BG.measure_global_morphometrics(props[nuc_of_interest[1]].image),
BG.measure_global_morphometrics(props[nuc_of_interest[2]].image),
BG.measure_global_morphometrics(props[nuc_of_interest[3]].image)])
BG_feat
###Output
_____no_output_____
###Markdown
Local Boundary FeaturesWe also compute features that describe the local curvature of a given object. Approach: For a given object we obtain the edge pixels and compute the local curvature of each point on the curve +/- a given stepsize. Larger steps give a smoother curvature. We define the local curvature between 3 points as the inverse of the radius of their circumcircle, and if the circumcenter is inside the object then the sign of curvature is positive. Below is the radius of curvature for Nucleus 1
###Code
r_c= BLC.local_radius_curvature(props[nuc_of_interest[0]].image,step=5,show_boundary=True)
#calculate local curvature features
local_curvature=[np.divide(1,r_c[x]) if r_c[x]!=0 else 0 for x in range(len(r_c))]
###Output
_____no_output_____
###Markdown
Now that we have the local curvature for all points on the boundary, we compute features that describe it, such as the average and standard deviation of curvature (positive and negative), the number of times the polarity changes, etc. Feature names are self-descriptive.
###Code
#compute local and global features
global_features = BLC.global_curvature_features(np.array(local_curvature))
pd.DataFrame([global_features])
###Output
_____no_output_____
###Markdown
We also check to see if there are any prominent jumps in local curvature along the boundary. To do so we identify the presence of prominent peaks.
###Code
prominant_features = BLC.prominant_curvature_features(local_curvature,show_plot=True)
pd.DataFrame([prominant_features])
###Output
_____no_output_____
###Markdown
Below are the features computed for 4 nuclei.
###Code
BLC_feat= pd.concat([BLC.measure_curvature_features(props[nuc_of_interest[0]].image,step=5),
BLC.measure_curvature_features(props[nuc_of_interest[1]].image,step=5),
BLC.measure_curvature_features(props[nuc_of_interest[2]].image,step=5),
BLC.measure_curvature_features(props[nuc_of_interest[3]].image,step=5)])
BLC_feat
###Output
_____no_output_____
###Markdown
Intensity FeaturesDNA packing levels are encoded in their fluorescent intensity. Therefore we compute features that describe the intensity distribution of pixels within the nucleus. These include distribution statistics, entropy and heterochromatin ratios. Below are the features computed for 4 nuclei.
###Code
Int_feat= pd.concat([IDF.measure_intensity_features(props[nuc_of_interest[0]].image,props[nuc_of_interest[0]].intensity_image),
IDF.measure_intensity_features(props[nuc_of_interest[1]].image,props[nuc_of_interest[1]].intensity_image),
IDF.measure_intensity_features(props[nuc_of_interest[2]].image,props[nuc_of_interest[2]].intensity_image),
IDF.measure_intensity_features(props[nuc_of_interest[3]].image,props[nuc_of_interest[3]].intensity_image)])
Int_feat
###Output
_____no_output_____
###Markdown
Image TexturesHere we compute features that describe the texture of the image.These include the GLCM features at various lengths as well as image moments. Below are the features computed for 4 nuclei.
###Code
Int_Text= pd.concat([IT.measure_texture_features(props[nuc_of_interest[0]].image,props[nuc_of_interest[0]].intensity_image),
IT.measure_texture_features(props[nuc_of_interest[1]].image,props[nuc_of_interest[1]].intensity_image),
IT.measure_texture_features(props[nuc_of_interest[2]].image,props[nuc_of_interest[2]].intensity_image),
IT.measure_texture_features(props[nuc_of_interest[3]].image,props[nuc_of_interest[3]].intensity_image)])
Int_Text
###Output
_____no_output_____
###Markdown
CompilationIf interested, one can combine all the different categories of features for a consolidated feature set.
###Code
features = pd.concat([pd.DataFrame(nuc_of_interest, columns=["label"]),
BG_feat.reset_index(drop=True),
BLC_feat.reset_index(drop=True),
Int_feat.reset_index(drop=True),
Int_Text.reset_index(drop=True)], axis=1)
features
all_features = pd.DataFrame()
pd.concat([all_features, BG_feat.reset_index(drop=True)], axis=0)
###Output
_____no_output_____
###Markdown
For a quick extraction of all features given a segmented image use the following code:
###Code
from nmco.utils.Run_nuclear_feature_extraction import run_nuclear_chromatin_feat_ext
features = run_nuclear_chromatin_feat_ext(raw_image_path,labelled_image_path,feature_path)
features
###Output
_____no_output_____
###Markdown
Tissue level summary:In order to characterise the nuclear features in a given image, we compute the distribution characteristics of each of the computed NMCO features. The measures available are: Median, Min, Max, Standard Deviation (SD), Coefficient of Variation (CV), Coefficient of Dispersion (CD), Inter-Quartile Range (IQR) and Quartile Coefficient of Dispersion (QCD).
###Code
from nmco.utils.summarising_features import summarise_feature_table
summarise_feature_table(features)
###Output
/home/pathy_s/anaconda3/lib/python3.8/site-packages/numpy/lib/nanfunctions.py:1115: RuntimeWarning: All-NaN slice encountered
r, k = function_base._ureduce(a, func=_nanmedian, axis=axis, out=out,
/home/pathy_s/anaconda3/lib/python3.8/site-packages/numpy/lib/nanfunctions.py:1391: RuntimeWarning: All-NaN slice encountered
result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
|
workshops/decision-trees/DecisionTreeRegressor.ipynb | ###Markdown
save cleaned dataset as another .csv file: automotive.to_csv('automotive.csv', index=False)
###Code
automotive.head()
# define features and target
X = automotive.drop(columns=['mpg', 'car name'], axis=1)
y = automotive['mpg']
# split data into train-test set
from sklearn.model_selection import train_test_split
# train-test split 80-20
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
# instantiate DecisionTreeRegressor (dtr) with max_depth of 8 and min_samples_leaf of 0.13
dtr = DecisionTreeRegressor(max_depth=8, min_samples_leaf=0.13)
# fit training data into model
dtr.fit(X_train, y_train)
# predict test set labels
y_pred = dtr.predict(X_test)
print(y_pred[0:5])
# metric used for evaluation: mean squared error
# import mean squared error
from sklearn.metrics import mean_squared_error as MSE
# compute mean squared error of model with test set
mse_dtr = MSE(y_test, y_pred)
# compute root mean squared error of dtr
rmse_dtr = mse_dtr**(1/2)
# print output of rmse_dtr
print(f"Test set RMSE of dtr: {rmse_dtr:.2f}")
###Output
Test set RMSE of dtr: 4.18
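###Markdown
As a side note, recent scikit-learn versions can return the RMSE directly. A sketch, assuming scikit-learn 0.22 or newer (the `squared` keyword was later superseded by `root_mean_squared_error`):
###Code
# sketch: let scikit-learn compute the root mean squared error directly
rmse_direct = MSE(y_test, y_pred, squared=False)
print(f"Test set RMSE of dtr: {rmse_direct:.2f}")
###Output
_____no_output_____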
|
Part 2 - Python Notebook/recipes/Keras/Keras-GPU/Keras-GPU.ipynb | ###Markdown
Keras GPU IntroductionThis recipe shows how to run Keras using Batch AI. Keras supports the tensorflow, cntk and theano backends. Currently only the tensorflow and cntk backends support running on GPU. Batch AI will automatically set up the backend when the toolkit is specified. Details- Keras can run with CNTK or Tensorflow backend.- Standard output of the job will be stored on Azure File Share. Instructions Install Dependencies and Create Configuration file.Follow [instructions](/recipes) to install all dependencies and create configuration file. Read Configuration and Create Batch AI client
###Code
from __future__ import print_function
from datetime import datetime
import sys
from azure.storage.file import FileService
import azure.mgmt.batchai.models as models
# The BatchAI/utilities folder contains helper functions used by different notebooks
sys.path.append('../../../')
import utilities as utils
cfg = utils.config.Configuration('../../configuration.json')
client = utils.config.create_batchai_client(cfg)
###Output
_____no_output_____
###Markdown
Create Resource Group and Batch AI workspace if they do not exist:
###Code
utils.config.create_resource_group(cfg)
_ = client.workspaces.create(cfg.resource_group, cfg.workspace, cfg.location).result()
###Output
_____no_output_____
###Markdown
1. Prepare Training Dataset and Script in Azure Storage Create File ShareFor this example we will create a new File Share with name `batchaisample` under your storage account.**Note** You don't need to create a new file share for every cluster. We are doing this in this sample to simplify resource management for you.
###Code
azure_file_share_name = 'batchaisample'
service = FileService(cfg.storage_account_name, cfg.storage_account_key)
service.create_share(azure_file_share_name, fail_on_exist=False)
print('Done')
###Output
_____no_output_____
###Markdown
Deploy Sample Script and Configure the Input Directories- Download original sample script:
###Code
sample_script_url = 'https://raw.githubusercontent.com/dhuynh/kerasnlpduy/master/kerastest.py'
utils.dataset.download_file(sample_script_url, 'kerastest.py')
###Output
_____no_output_____
###Markdown
- For each job we will create a folder containing a copy of [kerastest.py](https://raw.githubusercontent.com/dhuynh/kerasnlpduy/master/kerastest.py). This allows each job to have its own copy of the sample script (in case you would like to change it).
###Code
keras_sample_dir = "KerasSamples"
service = FileService(cfg.storage_account_name, cfg.storage_account_key)
service.create_directory(
azure_file_share_name, keras_sample_dir, fail_on_exist=False)
service.create_file_from_path(
azure_file_share_name, keras_sample_dir, 'kerastest.py', 'kerastest.py')
print('Done')
###Output
_____no_output_____
###Markdown
Configure Compute Cluster- For this example we will use a GPU cluster of `STANDARD_NC6` nodes. Number of nodes in the cluster is configured with `nodes_count` variable;- We will mount file share at folder with name `afs`. Full path of this folder on a computer node will be `$AZ_BATCHAI_MOUNT_ROOT/afs`;- We will call the cluster `nc6`;So, the cluster will have the following parameters:
###Code
nodes_count = 1
cluster_name = 'nc6'
parameters = models.ClusterCreateParameters(
location=cfg.location,
vm_size='STANDARD_NC6',
scale_settings=models.ScaleSettings(
manual=models.ManualScaleSettings(target_node_count=nodes_count)
),
user_account_settings=models.UserAccountSettings(
admin_user_name=cfg.admin,
admin_user_password=cfg.admin_password or None,
admin_user_ssh_public_key=cfg.admin_ssh_key or None,
)
)
###Output
_____no_output_____
###Markdown
Create Compute Cluster
###Code
_ = client.clusters.create(cfg.resource_group, cfg.workspace, cluster_name, parameters).result()
###Output
_____no_output_____
###Markdown
Monitor Cluster CreationThe `utilities` module contains a helper function allowing to wait for the cluster to become available - all nodes are allocated and finished preparation.
###Code
cluster = client.clusters.get(cfg.resource_group, cfg.workspace, cluster_name)
utils.cluster.print_cluster_status(cluster)
###Output
_____no_output_____
###Markdown
Configure Job- Will use the previously configured input and output directories;- Will run the standard `kerastest.py` from the SCRIPT input directory using a custom framework;- Will output standard output and error streams to the file share. Please choose which Keras backend will be used: `'cntk'` or `'tensorflow'`
###Code
backend = 'tensorflow'
###Output
_____no_output_____
###Markdown
If `'cntk'` backend is used:- The job will use `microsoft/2.5.1-gpu-python2.7-cuda9.0-cudnn7.0` container.- Keras framework has been preinstalled in the container- The job needs to have `cntk_settings` to be configured.
###Code
if backend == 'cntk':
parameters = models.JobCreateParameters(
location=cfg.location,
cluster=models.ResourceId(id=cluster.id),
node_count=1,
container_settings=models.ContainerSettings(
image_source_registry=models.ImageSourceRegistry(image='microsoft/cntk:2.5.1-gpu-python2.7-cuda9.0-cudnn7.0')),
mount_volumes=models.MountVolumes(
azure_file_shares=[
models.AzureFileShareReference(
account_name=cfg.storage_account_name,
credentials=models.AzureStorageCredentialsInfo(
account_key=cfg.storage_account_key),
azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
cfg.storage_account_name, azure_file_share_name),
relative_mount_path='afs')
]
),
std_out_err_path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}'.format('afs'),
cntk_settings = models.CNTKsettings(
python_script_file_path='$AZ_BATCHAI_JOB_MOUNT_ROOT/afs/{0}/kerastest.py'.format(keras_sample_dir)))
###Output
_____no_output_____
###Markdown
If `'tensorflow'` backend is used:- The job will use `tensorflow/tensorflow:1.8.0-gpu` container.- Keras framework will be installed by job preparation command line.- The job needs to have `tensor_flow_settings` to be configured.
###Code
if backend == 'tensorflow':
parameters = models.JobCreateParameters(
location=cfg.location,
cluster=models.ResourceId(id=cluster.id),
node_count=1,
job_preparation=models.JobPreparation(command_line='pip install keras'),
container_settings=models.ContainerSettings(
image_source_registry=models.ImageSourceRegistry(image='tensorflow/tensorflow:1.8.0-gpu')),
mount_volumes=models.MountVolumes(
azure_file_shares=[
models.AzureFileShareReference(
account_name=cfg.storage_account_name,
credentials=models.AzureStorageCredentialsInfo(
account_key=cfg.storage_account_key),
azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
cfg.storage_account_name, azure_file_share_name),
relative_mount_path='afs')
]
),
std_out_err_path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}'.format('afs'),
tensor_flow_settings=models.TensorFlowSettings(
python_script_file_path='$AZ_BATCHAI_JOB_MOUNT_ROOT/afs/{0}/kerastest.py'.format(keras_sample_dir)))
###Output
_____no_output_____
###Markdown
Create a training Job and wait for Job completion
###Code
experiment_name = 'keras_experiment'
experiment = client.experiments.create(cfg.resource_group, cfg.workspace, experiment_name).result()
job_name = datetime.utcnow().strftime('keras_{}_%m_%d_%Y_%H%M%S'.format(backend))
job = client.jobs.create(cfg.resource_group, cfg.workspace, experiment_name, job_name, parameters).result()
print('Created Job {0} in Experiment {1}'.format(job.name, experiment.name))
###Output
_____no_output_____
###Markdown
Wait for Job to FinishThe job will start running when the cluster has enough idle nodes. The following code waits for the job to start running, printing the cluster state. During the job run, the code prints the current content of the stdout file.**Note** Execution may take several minutes to complete.
###Code
if backend == 'tensorflow':
read_file = 'stdout-wk-0.txt'
elif backend == 'cntk':
read_file = 'stdout.txt'
utils.job.wait_for_job_completion(client, cfg.resource_group, cfg.workspace,
experiment_name, job_name, cluster_name, 'stdouterr', read_file)
###Output
_____no_output_____
###Markdown
List log files for the Job
###Code
files = client.jobs.list_output_files(cfg.resource_group, cfg.workspace, experiment_name, job_name,
models.JobsListOutputFilesOptions(outputdirectoryid='stdouterr'))
for f in list(files):
print(f.name, f.download_url or 'directory')
###Output
_____no_output_____
###Markdown
4. Clean Up (Optional) Delete the Job
###Code
_ = client.jobs.delete(cfg.resource_group, cfg.workspace, experiment_name, job_name)
###Output
_____no_output_____
###Markdown
Delete the ClusterWhen you are finished with the sample and don't want to submit any more jobs you can delete the cluster using the following code.
###Code
_ = client.clusters.delete(cfg.resource_group, cfg.workspace, cluster_name)
###Output
_____no_output_____
###Markdown
Delete File ShareWhen you are finished with the sample and don't want to submit any more jobs you can delete the file share completely with all files using the following code.
###Code
service = FileService(cfg.storage_account_name, cfg.storage_account_key)
service.delete_share(azure_file_share_name)
###Output
_____no_output_____ |
notebooks/GraphTheory.ipynb | ###Markdown
Graph theory with PowerShell Table of contents* [Directed graphs](directed)* [Undirected graphs](undirected)* [Connected graphs](connected) * [Complete graphs](complete) * [Random graphs](random) * [Probability of connectivity](connectivity) When I try to learn a new technical topic it is easier for me to do/experience the new topic through a technology I already know about. That's why I prefer to play around with things in PowerShell even though there might be better ways of doing the same using another technology. In this post I'm going to explore a bit of graph theory based on chapter 2 of the excellent book "[Think Complexity 2e](https://greenteapress.com/wp/think-complexity-2e/)" by Allen B. Downey (in fact I'd highly recommend any of the books in the "Think..." series, some of which I might cover in future posts). The book and the book's source (in Python) are available for free through the book's webpage.[Graph theory](https://en.wikipedia.org/wiki/Graph_theory) is the study of graphs, which are mathematical structures used to model pairwise relations between objects. A graph is made up of vertices (also called nodes or points) which are connected by edges (also called links or lines). Since networks are everywhere, graph theory is everywhere, too. Graph theory is used to model and study all kinds of things that affect our daily lives: from traffic routes to social networks or integrated circuits.We will use the excellent [PSGraph](https://github.com/KevinMarquette/PSGraph) module developed by Kevin Marquette for visualizing the graphs. PSGraph is a wrapper around [Graphviz](https://graphviz.org/) which is a commandline utility for displaying graphs. We will need to install both PSGraph and Graphviz.
###Code
# install Chocolatey
[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor
[System.Net.SecurityProtocolType]::Tls12
Invoke-RestMethod -UseBasicParsing -Uri 'https://chocolatey.org/install.ps1' | Invoke-Expression
# use chocolatey to install graphviz
cinst graphviz -
# install PSgraph
Install-Module PSGraph
###Output
_____no_output_____
###Markdown
For some reason the command to export and show the Graph (Show-PSGraph) did not work on my machine. Therefore I've written my own version of the command which will overwrite the version that comes with the PSGraph module after importing it.
###Code
Import-Module PSGraph
function Show-PSGraph ([ValidateSet('dot', 'circular', 'Hierarchical', 'Radial', 'fdp', 'neato', 'sfdp', 'SpringModelLarge', 'SpringModelSmall', 'SpringModelMedium', 'twopi')]$LayoutEngine = 'circular') {
$all = @($Input)
$tempPath = [System.IO.Path]::GetTempFileName()
$all | Out-File $tempPath
$new = Get-Content $tempPath -raw | ForEach-Object { $_ -replace "`r", "" }
$new | Set-Content -NoNewline $tempPath
Export-PSGraph -Source $tempPath -ShowGraph -LayoutEngine $LayoutEngine
Invoke-Item ($tempPath + '.png')
Remove-Item $tempPath
}
###Output
_____no_output_____
###Markdown
To work through the examples we need some way to visualize the graph, which PSGraph will take care of. But we also need a way to represent a graph as an object. Let's set up some helper functions in order to do that.
###Code
function New-Edge ($From, $To, $Attributes, [switch]$AsObject) {
$null = $PSBoundParameters.Remove('AsObject')
$ht = [Hashtable]$PSBoundParameters
if ($AsObject) {
return [PSCustomObject]$ht
}
return $ht
}
function New-Node ($Name, $Attributes) {
[Hashtable]$PSBoundParameters
}
function Get-GraphVisual ($Name, $Nodes, $Edges, [switch]$Undirected) {
$sb = {
if ($Undirected) { inline 'edge [arrowsize=0]' }
foreach ($node in $Nodes) {
node @node
}
foreach ($edge in $Edges) {
edge @edge
}
}
graph $sb
}
###Output
_____no_output_____
###Markdown
The logic of the above functions will get much clearer as we go. Graphs are usually drawn with squares or circles for nodes (the things in the graph) and lines for edges (the connections between the things). Edges may be directed or undirected, depending on whether the relationships they represent are asymmetric or symmetric. Directed graphs A directed graph might represent three people who follow each other on Twitter. The arrow indicates the direction of the relationship. Let's create and draw our first graph using the helper functions. Graphviz/PSGraph takes care of the creation of nodes based on edges. This works fine if all nodes in a graph are connected.
###Code
$edges = & {
New-Edge Alice Bob
New-Edge Alice Chuck
New-Edge Bob Alice
New-Edge Bob Chuck
}
Get-GraphVisual Friends -Edges $edges | Show-PSGraph
###Output
_____no_output_____
###Markdown
Undirected graphs As an example of an undirected graph the below graph shows the connections between four cities in the United States. The labels on the edges indicate driving time in hours. In this example the placement of the nodes corresponds roughly to the geography of the cities, but in general the layout of a graph is arbitrary.This time we will create node and edge objects since the position of the nodes is an attribute of a node rather than an edge. Edges can also have labels assigned through the label attribute. We also need to switch the LayoutEngine since not every one of them honors the position attribute (see [here](https://observablehq.com/@magjac/placing-graphviz-nodes-in-fixed-positions)).
###Code
$nodes = & {
New-Node Albany @{ pos = '-74,43!' }
New-Node Boston @{ pos = '-71,42!' }
New-Node NYC @{ pos = '-74,41!' }
New-Node Philly @{ pos = '-75,40!' }
}
$edges = & {
New-Edge Albany Boston @{label = 3 }
New-Edge Albany NYC @{label = 4 }
New-Edge Boston NYC @{label = 4 }
New-Edge NYC Philly @{label = 2 }
}
Get-GraphVisual Cities $nodes $edges -Undirected | Show-PSGraph -LayoutEngine neato
###Output
_____no_output_____
###Markdown
Connected graphs Complete graphs A complete graph is a graph where every node is connected to every other node. Let's create a function that draws a complete graph based on a list of Nodes. With this function we'll also start returning a Graph object with Nodes, Edges, and Visual properties.
###Code
function Get-CompleteGraph($Nodes) {
$ht = [ordered]@{}
$ht.Nodes = $Nodes
$ht.Edges = for ($i = 0; $i -lt $Nodes.Count; $i++) {
for ($j = 0; $j -lt $nodes.Count; $j++) {
if ($i -lt $j) {
New-Edge $Nodes[$i] $Nodes[$j] -AsObject
}
}
}
$ht.Visual = graph {
inline 'edge [arrowsize=0]'
edge $ht.Edges -FromScript { $_.To } -ToScript { $_.From }
}
[PSCustomObject]$ht
}
###Output
_____no_output_____
###Markdown
Let's just draw a complete graph with 10 nodes.
###Code
$completeGraph = Get-CompleteGraph (0..9)
$completeGraph.Visual | Show-PSGraph
###Output
_____no_output_____
###Markdown
Random graphs A random graph is just what it sounds like: a graph with nodes and edges generated at random. One of the more interesting kinds of random graphs is the Erdős-Rényi model, studied by Paul Erdős and Alfréd Rényi in the 1960s. An Erdős-Rényi graph (ER graph) is characterized by two parameters:1. The number of nodes2. The probability that there is an edge between any two nodes. Erdős and Rényi studied the properties of these random graphs; one of their surprising results is the existence of abrupt changes in the properties of random graphs as random edges are added. One of the properties that displays this kind of transition is connectivity (an undirected graph is connected if there is a path from every node to every other node). In an ER graph, the probability (p) that the graph is connected is very low when p is small and nearly 1 when p is large. Let's create a function that creates ER random graphs where the probability of an edge between each pair of nodes can be controlled via the Probability parameter.
###Code
function Get-RandomGraph ($NodeCount, $Probability, [switch]$NoVisual) {
$ht = [ordered]@{}
$ht.Edges = for ($i = 0; $i -le $NodeCount - 1; $i++) {
for ($j = 0; $j -le $NodeCount - 1; $j++) {
if ($i -lt $j) {
$rand = (Get-Random -Minimum 0 -Maximum 10000) / 10000
if ($rand -lt $Probability) {
New-Edge $i $j -AsObject
}
}
}
}
$ht.Nodes = 0..($NodeCount - 1)
if (-not $NoVisual) {
$ht.Visual = graph -Name Random {
inline 'edge [arrowsize=0]'
node $ht.Nodes
edge $ht.Edges -FromScript { $_.From } -ToScript { $_.To }
}
}
[PSCustomObject]$ht
}
###Output
_____no_output_____
###Markdown
Next we use the function to create a random Graph with 10 nodes and a Probability of edges between them of 30%.
###Code
$randomGraph = Get-RandomGraph 10 .3
$randomGraph.Visual | Show-PSGraph
$randomGraph.Edges.Count
###Output
13
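###Markdown
As a quick sanity check: a complete graph with n nodes has n(n-1)/2 possible edges, i.e. 45 for 10 nodes, so with a probability of .3 we expect roughly 0.3 * 45 = 13.5 edges on average, which is close to the 13 edges we got above.
###Code
# expected number of edges for 10 nodes and p = 0.3
$completeGraph.Edges.Count
0.3 * (10 * 9 / 2)
###Output
_____no_output_____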
###Markdown
Remember? A graph is connected if there is a path from every node to every other nodeFor many applications involving graphs, it is useful to check whether a graph is connected. An algorithm that does this starts at any node and checks whether you can reach all other nodes. If a node can be reached, it implies that you can also reach any of its neighbour nodes.Below is a function that returns the neighbours of a given node by its name.
###Code
function Get-Neighbours ($Edges, $Name, [switch]$Undirected) {
$edgeObjects = @($Edges)
if (@($Edges)[0].GetType().FullName -ne 'System.Management.Automation.PSCustomObject') {
$edgeObjects = foreach ($edge in $Edges) {
[PSCustomObject]$edge
}
}
(& {
($edgeObjects.where{ $_.From -eq $Name }).To
if ($Undirected) {
($edgeObjects.where{ $_.To -eq $Name }).From
}
}).where{ ![String]::IsNullOrEmpty($_) }
}
###Output
_____no_output_____
###Markdown
Let's use it to find the neighbours of some nodes within the random- and completeGraph we created earlier:
###Code
Get-Neighbours $completeGraph.Edges 0
Get-Neighbours $randomGraph.Edges 2
###Output
7
8
###Markdown
With the Get-Neighbours function we can also create a function that iterates through a graph's nodes connected by edges and returns the nodes that can be reached from a given start node. This is implemented through Get-ReachableNodes, where we use a Stack to collect the neighbours and a HashSet to keep track of "visited" nodes.
###Code
function Get-ReachableNodes ($Edges, $StartNode, [switch]$Undirected) {
$seen = New-Object System.Collections.Generic.HashSet[string]
$stack = New-Object System.Collections.Generic.Stack[string]
$null = $stack.Push($StartNode)
while ($stack.Count -gt 0) {
$node = $stack.Pop()
if (-not $seen.Contains($node)) {
$null = $seen.Add($node)
Get-Neighbours $Edges $node -Undirected:$Undirected | ForEach-Object {
$null = $stack.Push( $_ )
}
}
}
return $seen
}
###Output
_____no_output_____
###Markdown
The last piece of the puzzle to check whether a graph is connected is to use the Get-ReachableNodes function to get the set of nodes that can be reached from a given starting point. If the size of this set equals the number of nodes in the graph, that means we can reach all nodes, which means the graph is connected.
###Code
function Get-IsConnected($Graph, [switch]$Undirected) {
if ($Graph.Edges.Count -eq 0) { return $false }
$startNode = $Graph.Edges[0].From
$reachable = Get-ReachableNodes $Graph.Edges $startNode -Undirected:$Undirected
$reachable.Count -eq $Graph.Nodes.Count
}
###Output
_____no_output_____
###Markdown
Our complete graph is connected and our random graph happened to be not connected:
###Code
Get-IsConnected $completeGraph -Undirected
Get-IsConnected $randomGraph -Undirected
###Output
False
###Markdown
Probability of connectivity For given values of the number of nodes and the probability, we would like to know the probability of the graph being connected. We can estimate this by generating a number of random graphs and counting how many are connected.
###Code
function Get-ProbabilityConnected($NodeCount, $Probability, $Iterations = 100) {
$count = 0
for ($i = 0; $i -le $Iterations; $i++) {
$randomGraph = Get-RandomGraph $NodeCount $Probability -Undirected -NoVisual
if ((Get-IsConnected $randomGraph -Undirected )) { $count++ }
}
$count / $Iterations
}
$nodeCount = 10
Get-ProbabilityConnected $nodeCount .23 100
###Output
0.29
###Markdown
23% was chosen because it is close to the critical value where the probability of connectivity goes from near 0 to near 1. According to Erdős and Rényi, this critical value is:
###Code
[System.Math]::Log($nodeCount) / $nodeCount
###Output
0.230258509299405
###Markdown
We can try to replicate this transition by estimating the probability of connectivity for a range of probabilities. We implement Python's [numpy.logspace](https://numpy.org/doc/stable/reference/generated/numpy.logspace.html) function to get an evenly spread list of probabilities within a range on a log scale:
###Code
function Get-LogSpace([Double]$Minimum, [Double]$Maximum, $Count) {
$increment = ($Maximum - $Minimum) / ($Count - 1)
for ( $i = 0; $i -lt $Count; $i++ ) {
[Math]::Pow( 10, ($Minimum + $increment * $i))
}
}
#So let's plot the probability of connectivity for a range of values for p
$probabilities = Get-LogSpace -1.3 0 11
$probabilities
foreach ($probability in $probabilities) {
[PSCustomObject][ordered]@{
Probability = $probability
ProbabilityConnected = (Get-ProbabilityConnected $nodeCount $probability 100)
}
}
###Output
Probability ProbabilityConnected
----------- --------------------
0.0501187233627272 0
0.0676082975391982 0.01
0.091201083935591 0.01
0.123026877081238 0.02
0.165958690743756 0.16
0.223872113856834 0.35
0.301995172040202 0.56
0.407380277804113 0.97
0.549540873857625 1.01
0.741310241300917 1.01
1 1.01
|
getting-started/gpu-cudf-vs-pd.ipynb | ###Markdown
Benchmarks Comparison — pandas Versus RAPIDS cuDFThis tutorial uses `timeit` to compare performance benchmarks with pandas and RAPIDS cuDF. System Details GPU
###Code
!nvidia-smi -q
###Output
==============NVSMI LOG==============
Timestamp : Thu Jan 9 11:56:26 2020
Driver Version : 440.31
CUDA Version : 10.2
Attached GPUs : 1
GPU 00000000:81:00.0
Product Name : Tesla T4
Product Brand : Tesla
Display Mode : Enabled
Display Active : Disabled
Persistence Mode : Enabled
Accounting Mode : Disabled
Accounting Mode Buffer Size : 4000
Driver Model
Current : N/A
Pending : N/A
Serial Number : 0561119011981
GPU UUID : GPU-8b4068b3-1bcf-8dbe-978e-8eacb3c22801
Minor Number : 0
VBIOS Version : 90.04.38.00.03
MultiGPU Board : No
Board ID : 0x8100
GPU Part Number : 900-2G183-0000-000
Inforom Version
Image Version : G183.0200.00.02
OEM Object : 1.1
ECC Object : 5.0
Power Management Object : N/A
GPU Operation Mode
Current : N/A
Pending : N/A
GPU Virtualization Mode
Virtualization Mode : None
Host VGPU Mode : N/A
IBMNPU
Relaxed Ordering Mode : N/A
PCI
Bus : 0x81
Device : 0x00
Domain : 0x0000
Device Id : 0x1EB810DE
Bus Id : 00000000:81:00.0
Sub System Id : 0x12A210DE
GPU Link Info
PCIe Generation
Max : 3
Current : 1
Link Width
Max : 16x
Current : 16x
Bridge Chip
Type : N/A
Firmware : N/A
Replays Since Reset : 0
Replay Number Rollovers : 0
Tx Throughput : 0 KB/s
Rx Throughput : 0 KB/s
Fan Speed : N/A
Performance State : P8
Clocks Throttle Reasons
Idle : Active
Applications Clocks Setting : Not Active
SW Power Cap : Not Active
HW Slowdown : Not Active
HW Thermal Slowdown : Not Active
HW Power Brake Slowdown : Not Active
Sync Boost : Not Active
SW Thermal Slowdown : Not Active
Display Clock Setting : Not Active
FB Memory Usage
Total : 15109 MiB
Used : 0 MiB
Free : 15109 MiB
BAR1 Memory Usage
Total : 256 MiB
Used : 2 MiB
Free : 254 MiB
Compute Mode : Default
Utilization
Gpu : 0 %
Memory : 0 %
Encoder : 0 %
Decoder : 0 %
Encoder Stats
Active Sessions : 0
Average FPS : 0
Average Latency : 0
FBC Stats
Active Sessions : 0
Average FPS : 0
Average Latency : 0
Ecc Mode
Current : Enabled
Pending : Enabled
ECC Errors
Volatile
SRAM Correctable : 0
SRAM Uncorrectable : 0
DRAM Correctable : 0
DRAM Uncorrectable : 0
Aggregate
SRAM Correctable : 0
SRAM Uncorrectable : 0
DRAM Correctable : 0
DRAM Uncorrectable : 0
Retired Pages
Single Bit ECC : 0
Double Bit ECC : 0
Pending Page Blacklist : No
Temperature
GPU Current Temp : 44 C
GPU Shutdown Temp : 96 C
GPU Slowdown Temp : 93 C
GPU Max Operating Temp : 85 C
Memory Current Temp : N/A
Memory Max Operating Temp : N/A
Power Readings
Power Management : Supported
Power Draw : 13.08 W
Power Limit : 70.00 W
Default Power Limit : 70.00 W
Enforced Power Limit : 70.00 W
Min Power Limit : 60.00 W
Max Power Limit : 70.00 W
Clocks
Graphics : 300 MHz
SM : 300 MHz
Memory : 405 MHz
Video : 540 MHz
Applications Clocks
Graphics : 585 MHz
Memory : 5001 MHz
Default Applications Clocks
Graphics : 585 MHz
Memory : 5001 MHz
Max Clocks
Graphics : 1590 MHz
SM : 1590 MHz
Memory : 5001 MHz
Video : 1470 MHz
Max Customer Boost Clocks
Graphics : 1590 MHz
Clock Policy
Auto Boost : N/A
Auto Boost Default : N/A
Processes : None
###Markdown
CPU
###Code
!less /proc/cpuinfo
###Output
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2630 v4 @ 2.20GHz
stepping : 1
microcode : 0xb000012
cpu MHz : 2200.000
cache size : 25600 KB
physical id : 0
siblings : 10
core id : 0
cpu cores : 10
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg
###Markdown
Benchmark Setup InstallationsInstall v3io-generator to create a 1 GB data set for the benchmark.You only need to run the generator once, and then you can reuse the generated data set.
###Code
!pip install pytimeparse
!pip install -i https://test.pypi.org/simple/ v3io-generator --upgrade
!pip install faker
###Output
Requirement already satisfied: pytimeparse in /User/.pythonlibs/jupyter/lib/python3.6/site-packages (1.1.8)
Looking in indexes: https://test.pypi.org/simple/
Requirement already up-to-date: v3io-generator in /User/.pythonlibs/jupyter/lib/python3.6/site-packages (0.0.27.dev0)
/bin/sh: pin: command not found
###Markdown
Imports
###Code
import os
import yaml
import time
import datetime
import json
import itertools
# Generator
from v3io_generator import metrics_generator, deployment_generator
# Dataframes
import cudf
import pandas as pd
###Output
_____no_output_____
###Markdown
Configurations
###Code
# Benchmark configurations
metric_names = ['cpu_utilization', 'latency', 'packet_loss', 'throughput']
nlargest = 10
source_file = os.path.join(os.getcwd(), 'data', 'ops.logs') # Use full path
os.environ['SOURCE_PATH'] = source_file # Expose for display
os.environ['SOURCE_DIR'] = os.path.dirname(source_file) # Expose for display
os.environ['SOURCE_FILE'] = os.path.basename(source_file) # Expose for display
###Output
_____no_output_____
###Markdown
Create the Data SourceUse v3io-generator to create a time-series network-operations dataset for 100 companies, including 4 metrics (CPU utilization, latency, throughput, and packet loss).Then, write the dataset to a JSON file to be used as the data source.
###Code
# Create a metadata factory
dep_gen = deployment_generator.deployment_generator()
faker=dep_gen.get_faker()
# Design the metadata
dep_gen.add_level(name='company',number=100,level_type=faker.company)
# Generate a deployment structure
deployment_df = dep_gen.generate_deployment()
# Initialize the metric values
for metric in metric_names:
deployment_df[metric] = 0
deployment_df.head()
###Output
_____no_output_____
###Markdown
Specify metrics configuration for the generator.
###Code
metrics_configuration = yaml.safe_load("""
errors: {length_in_ticks: 50, rate_in_ticks: 150}
timestamps: {interval: 5s, stochastic_interval: false}
metrics:
cpu_utilization:
accuracy: 2
distribution: normal
distribution_params: {mu: 70, noise: 0, sigma: 10}
is_threshold_below: true
past_based_value: false
produce_max: false
produce_min: false
validation:
distribution: {max: 1, min: -1, validate: false}
metric: {max: 100, min: 0, validate: true}
latency:
accuracy: 2
distribution: normal
distribution_params: {mu: 0, noise: 0, sigma: 5}
is_threshold_below: true
past_based_value: false
produce_max: false
produce_min: false
validation:
distribution: {max: 1, min: -1, validate: false}
metric: {max: 100, min: 0, validate: true}
packet_loss:
accuracy: 0
distribution: normal
distribution_params: {mu: 0, noise: 0, sigma: 2}
is_threshold_below: true
past_based_value: false
produce_max: false
produce_min: false
validation:
distribution: {max: 1, min: -1, validate: false}
metric: {max: 50, min: 0, validate: true}
throughput:
accuracy: 2
distribution: normal
distribution_params: {mu: 250, noise: 0, sigma: 20}
is_threshold_below: false
past_based_value: false
produce_max: false
produce_min: false
validation:
distribution: {max: 1, min: -1, validate: false}
metric: {max: 300, min: 0, validate: true}
""")
###Output
_____no_output_____
###Markdown
Create the data according to the given hierarchy and metrics configuration.
###Code
met_gen = metrics_generator.Generator_df(metrics_configuration,
user_hierarchy=deployment_df,
initial_timestamp=time.time())
metrics = met_gen.generate_range(start_time=datetime.datetime.now(),
end_time=datetime.datetime.now()+datetime.timedelta(hours=62),
as_df=True,
as_iterator=False)
# Verify that the source-file parent directory exists.
os.makedirs(os.path.dirname(source_file), exist_ok=1)
# Generate file from metrics
with open(source_file, 'w') as f:
metrics_batch = metrics
metrics_batch.to_json(f,
orient='records',
lines=True)
###Output
_____no_output_____
###Markdown
Validate the Target File SizeCheck that the generated source file is roughly the intended size (about 1 GB) and preview its first records.
###Code
!ls -lah ${SOURCE_DIR} | grep ${SOURCE_FILE}
!head ${SOURCE_PATH}
###Output
{"company":"Schaefer__Jones_and_Sanchez","cpu_utilization":60.7249169402,"cpu_utilization_is_error":false,"latency":0.0,"latency_is_error":false,"packet_loss":1.8576310021,"packet_loss_is_error":false,"throughput":266.1555833373,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Odom-Sutton","cpu_utilization":76.4322140086,"cpu_utilization_is_error":false,"latency":7.8381013211,"latency_is_error":false,"packet_loss":0.0,"packet_loss_is_error":false,"throughput":250.0232627126,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Estrada-Grimes","cpu_utilization":79.5602560259,"cpu_utilization_is_error":false,"latency":3.8517916739,"latency_is_error":false,"packet_loss":0.2517241329,"packet_loss_is_error":false,"throughput":267.5772519228,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Gardner-Smith","cpu_utilization":72.8406272809,"cpu_utilization_is_error":false,"latency":0.0,"latency_is_error":false,"packet_loss":2.1089029723,"packet_loss_is_error":false,"throughput":211.3463458109,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Smith_LLC","cpu_utilization":85.5744891531,"cpu_utilization_is_error":false,"latency":4.5970114772,"latency_is_error":false,"packet_loss":0.0,"packet_loss_is_error":false,"throughput":268.1042996066,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Howard_and_Sons","cpu_utilization":64.1149382337,"cpu_utilization_is_error":false,"latency":9.1568349918,"latency_is_error":false,"packet_loss":4.7365985431,"packet_loss_is_error":false,"throughput":250.2406810474,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Elliott_Group","cpu_utilization":73.4833873479,"cpu_utilization_is_error":false,"latency":0.0,"latency_is_error":false,"packet_loss":0.0,"packet_loss_is_error":false,"throughput":261.3493534464,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Bauer-James","cpu_utilization":61.4003401021,"cpu_utilization_is_error":false,"latency":4.7796943482,"latency_is_error":false,"packet_loss":0.0,"packet_loss_is_error":false,"throughput":240.4203662699,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Simpson__Bell_and_Paul","cpu_utilization":70.8616777102,"cpu_utilization_is_error":false,"latency":0.0,"latency_is_error":false,"packet_loss":1.2919076238,"packet_loss_is_error":false,"throughput":219.188093835,"throughput_is_error":false,"timestamp":1578571120848}
{"company":"Thompson-Olsen","cpu_utilization":65.8762961093,"cpu_utilization_is_error":false,"latency":6.5207690585,"latency_is_error":false,"packet_loss":2.3890867667,"packet_loss_is_error":false,"throughput":268.6674519478,"throughput_is_error":false,"timestamp":1578571120848}
###Markdown
BenchmarkThe benchmark tests use the following flow:- Read file- Compute aggregations- Get the n-largest values
###Code
benchmark_file = source_file
###Output
_____no_output_____
###Markdown
In the following examples, `timeit` is executed in a loop.You can change the number of runs and loops:```%%timeit -n 1 -r 1``` cuDF Benchmark
###Code
%%timeit
# Read file
gdf = cudf.read_json(benchmark_file, lines=True)
# Perform aggregation
ggdf = gdf.groupby(['company']).\
agg({k: ['min', 'max', 'mean'] for k in metric_names})
# Get the n-largest values (from the original DataFrame)
raw_nlargest = gdf.nlargest(nlargest, 'cpu_utilization')
###Output
4.97 s ยฑ 47.1 ms per loop (mean ยฑ std. dev. of 7 runs, 1 loop each)
###Markdown
pandas Benchmark
###Code
%%timeit
# Read file
pdf = pd.read_json(benchmark_file, lines=True)
# Perform aggregation
gpdf = pdf.groupby(['company']).\
agg({k: ['min', 'max', 'mean'] for k in metric_names})
# Get the n-largest values (from the original DataFrame)
raw_nlargest = pdf.nlargest(nlargest, 'cpu_utilization')
###Output
47.9 s ยฑ 2.52 s per loop (mean ยฑ std. dev. of 7 runs, 1 loop each)
###Markdown
Test Load Times cuDF
###Code
%%timeit -r 2
gdf = cudf.read_json(benchmark_file, lines=True)
###Output
5.95 s ยฑ 77.3 ms per loop (mean ยฑ std. dev. of 7 runs, 1 loop each)
###Markdown
pandas
###Code
%%timeit
pdf = pd.read_json(benchmark_file, lines=True)
###Output
41.1 s ยฑ 651 ms per loop (mean ยฑ std. dev. of 7 runs, 1 loop each)
###Markdown
Test AggregationLoad the files to memory to allow applying `timeit` only to the aggregations.
###Code
gdf = cudf.read_json(benchmark_file, lines=True)
pdf = pd.read_json(benchmark_file, lines=True)
###Output
_____no_output_____
###Markdown
cuDF
###Code
%%timeit
ggdf = gdf.groupby(['company']).\
agg({k: ['min', 'max', 'mean'] for k in metric_names})
raw_nlargest = gdf.nlargest(nlargest, 'cpu_utilization')
###Output
212 ms ยฑ 14.9 ms per loop (mean ยฑ std. dev. of 7 runs, 1 loop each)
###Markdown
pandas
###Code
%%timeit
gpdf = pdf.groupby(['company']).\
agg({k: ['min', 'max', 'mean'] for k in metric_names})
raw_nlargest = pdf.nlargest(nlargest, 'cpu_utilization')
###Output
2.17 s ยฑ 72.8 ms per loop (mean ยฑ std. dev. of 7 runs, 1 loop each)
|
notebooks/eda/kaggle_datasets_EDA.ipynb | ###Markdown
Fantasy Football EDA (Exploratory Data Analysis)EDA is the process of examining the available dataset to discover patterns, spot anomalies, test hypotheses, and check assumptions using statistical measures.Author: Ermina MujanDate: May 11, 2021This dataset is from Kaggle user mur418. It contains data on NFL players: 2019 actuals and 2020 projections.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
sns.set(rc={'figure.figsize':(10,6)})
###Output
_____no_output_____
###Markdown
Gather Data
###Code
qbs = pd.read_csv('../../fixtures/raw_data/qb_stats_and_projections_kaggle.csv')
wrs = pd.read_csv('../../fixtures/raw_data/wr_stats_and_projections_kaggle.csv')
rbs = pd.read_csv('../../fixtures/raw_data/rb_stats_and_projections_kaggle.csv')
te = pd.read_csv('../../fixtures/raw_data/te_stats_and_projections_kaggle.csv')
dst = pd.read_csv('../../fixtures/raw_data/defense_stats_and_projections_kaggle.csv')
kickers = pd.read_csv('../../fixtures/raw_data/kicker_stats_and_projections_kaggle.csv')
###Output
_____no_output_____
###Markdown
Begin cleaning the dataset by improving readability of column names and dropping unnecessary columns and rows
###Code
dst.head()
#for now, lets rename defense team name to player name
temp_dst = dst.rename(columns={"Teamname": "Playername"})
temp_dst['Playerposition'] = 'DST'
full_df = pd.concat([qbs, wrs, rbs, te, temp_dst, kickers])
full_df.head()
full_df.tail()
shape_info = full_df.shape
index = full_df.index
number_of_rows = len(index)
print(number_of_rows)
print('This dataset evaluates {} players based on data from {} columns' # print this phrase with variables added in
.format(shape_info[0], # using the .format method, insert the number of rows in the dataframe
shape_info[1])) # using the .format method, insert the number of columns in the dataframe
list(full_df)
# check if length of dataframe is 0 by calling len on Dataframe
if len(full_df) == 0:
print('DataFrame is empty')
else:
print('DataFrame is not empty')
# Dropping the columns we do not need
full_df = full_df.drop(['Unnamed: 0', '2020 OUTLOOK', 'Player OUTLOOK', '2020 PLAYER OUTLOOK',], axis=1)
# shows how many many columns have been deleted
new_shape = full_df.shape[1]
print('{} columns have been removed from the dataset'
.format( abs(new_shape-shape_info[1])))
# removing spaces from old column names
df_old_cols = list(full_df) # instantiate a new list with old column names in it
df_new_cols = [x.replace(" ", "").capitalize() for x in df_old_cols] # remove the spaces and capitalize the column names
df_new_cols[0] # make sure it worked by checking the first name
full_df.columns = df_new_cols # make df_new_cols the column names
full_df.head() # check to make sure that this change stuck
list(full_df)
#rename position to player position, and team to teamname like the rest of the tables
full_df = full_df.rename(columns={"Position": "Playerposition"})
full_df = full_df.rename(columns={"Team": "Teamname"})
list(full_df)
###Output
_____no_output_____
###Markdown
Find and determine what to do with values that are "NaN" (Not a Number)
###Code
import missingno as msno # we are using missingno to visualize the distribution of NaN(Not a Number) values
msno.matrix(full_df) # print a missing numbers matrix to show where missing numbers are
# note: fillna returns a new DataFrame; assign the result (full_df = full_df.fillna(0)) if the change should persist
full_df.fillna(0)
# Create an empty list
Row_list =[]
# Iterate over each row
for index, rows in full_df.iterrows():
# Create list for the current row
my_list =[rows.Playername]
# append the list to the final list
Row_list.append(my_list)
print(Row_list)
### How to remove nan values from playername and '--'???
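# One possible answer to the question above (a sketch added for illustration; `cleaned_df` is a
# hypothetical name, not part of the original notebook): keep only rows whose Playername is
# present and is not the '--' placeholder.
cleaned_df = full_df[full_df['Playername'].notna() & (full_df['Playername'] != '--')]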
###Output
_____no_output_____
###Markdown
Export the dataframe to a csv
###Code
full_df.to_csv('../../fixtures/cleaned_data/cleaned_kaggle_dataset')
kaggle_df = pd.read_csv('../../fixtures/cleaned_data/cleaned_kaggle_dataset')
names_2019_2020 = kaggle_df[['Playername', 'Playerposition','2019fpts', '2020fpts']]
names_2019_2020
names_2019_2020.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1002 entries, 0 to 1001
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Playername 970 non-null object
1 Playerposition 620 non-null object
2 2019fpts 681 non-null float64
3 2020fpts 1001 non-null float64
dtypes: float64(2), object(2)
memory usage: 31.4+ KB
###Markdown
Plot 2020 vs 2019 points
###Code
plt.figure(figsize=(10,6))
plt.scatter(names_2019_2020['2019fpts'], names_2019_2020['2020fpts'], alpha = 0.5, color="brown")
plt.title('2020 Fantasy Points vs 2019 Fantasy Points')
plt.show()
plt.scatter(names_2019_2020["2019fpts"], names_2019_2020["2020fpts"], alpha = 0.5, color="black")
plt.title("2020 Fantasy Points vs 2019 Fantasy Points")
plt.xlabel("2020fpts")
plt.ylabel("2019fpts")
###Output
_____no_output_____
###Markdown
Number of Players per Position
###Code
players_per_pos = kaggle_df.groupby('Playerposition').size()
players_per_pos
kaggle_df.loc[kaggle_df['Playerposition'] == 'K']
players_per_pos.values
plt.figure(figsize=(10,6))
plt.bar(players_per_pos.index,players_per_pos.values, color="brown")
plt.title('Number of players per position')
plt.xlabel('Position')
plt.ylabel('Number of players')
plt.show()
###Output
_____no_output_____
###Markdown
Which position scores the most, on average?
###Code
avg_points_per_pos = kaggle_df.groupby('Playerposition')['2019fpts'].mean()
avg_points_per_pos
plt.figure(figsize=(10,6))
plt.bar(avg_points_per_pos.index,avg_points_per_pos.values, color="brown")
plt.title('Average Number of points by position')
plt.xlabel('Position')
plt.ylabel('Average number of points')
plt.show()
#boxplot to visualize how different positions scored in 2020"
sns.boxplot(x="Playerposition", y="2020fpts",data=names_2019_2020)
#boxplot to visualize how different positions scored in 2019"
sns.boxplot(x="Playerposition", y="2019fpts",data=names_2019_2020)
###Output
_____no_output_____
###Markdown
How many players are expected to score more than 15 fpts per game in 2020?Each team plays 16 games.
###Code
#15 points per game = 16 * 15 = 240
players_scoring_more_than_15_2020 = names_2019_2020.loc[names_2019_2020['2020fpts'] >
(15*16)].sort_values('2020fpts').groupby('Playerposition').count()
players_scoring_more_than_15_2020
###Output
_____no_output_____
###Markdown
What does the distribution of projected points look like?
###Code
sorted_by_proj = names_2019_2020.sort_values('2020fpts', ascending = False)
sorted_by_proj
# drop all players who are expected zero points in 2020
sorted_by_proj = sorted_by_proj[sorted_by_proj['2020fpts'] != 0]
sorted_by_proj
plt.figure(figsize=(10,6))
plt.scatter(list(range(len(sorted_by_proj))), sorted_by_proj['2020fpts'], s =1, color="black")
plt.show()
###Output
_____no_output_____ |
Data/CoMoFoD-CMFD/BusterNetOnCoMoFoD.ipynb | ###Markdown
BusterNet Performance On the CoMoFoD Dataset 1. Load the pretrained BusterNet model
###Code
import os, sys
import numpy as np
from matplotlib import pyplot
model_dir = '../../Model'
sys.path.insert( 0, model_dir )
from BusterNetCore import create_BusterNet_testing_model
busterNetModel = create_BusterNet_testing_model( os.path.join( model_dir, 'pretrained_busterNet.hd5' ) )
###Output
INFO: successfully load pretrained weights from ../../Model/pretrained_busterNet.hd5
###Markdown
2. Load the CoMoFoD-CMFD Dataset
###Code
from keras.utils.io_utils import HDF5Matrix
CoMoFoD_hd5 = 'CoMoFoD-CMFD.hd5'
# 1. load HDF5 data
X = HDF5Matrix(CoMoFoD_hd5, 'X' )
XN = HDF5Matrix(CoMoFoD_hd5, 'XN' )
Y = HDF5Matrix(CoMoFoD_hd5, 'Y' )
YN = HDF5Matrix(CoMoFoD_hd5, 'YN' )
Z = busterNetModel.predict(X, verbose=1 )
###Output
5000/5000 [==============================] - 70s 14ms/step
###Markdown
3. BusterNet Performance on CoMoFoD Dataset (Table 3)
###Code
from parse import parse
from sklearn.metrics import precision_recall_fscore_support
def get_target_idx( xn, ynames ) :
fmt = '{}_F_{}'
try :
img_id, postproc = parse( fmt, xn )
except :
img_id = xn.rsplit('_')[0]
postproc = 'BASE'
idx = ynames.index( img_id )
return idx, img_id, postproc
def evaluate_CoMoFoD_performance( Z, XN, Y, YN ) :
# 1. prepare target names
ynames = []
for yn in YN :
ynames.append(yn)
# 2. evaluate performance for each sample
plut = {'mapping':{}}
for xidx, (xn, z) in enumerate( zip( XN, Z ) ) :
# 3. get corresponding target file
idx, img_id, postproc = get_target_idx( xn, ynames )
y = Y[idx]
# 4. evaluate performance
if postproc not in plut :
plut[postproc] = []
ref = y[...,2].ravel() == 0
hyp = z[...,2].ravel() <= 0.5
precision, recall, fscore, _ = precision_recall_fscore_support( ref, hyp,
pos_label=1,
average='binary' )
plut[postproc].append( [precision, recall, fscore] )
if postproc == 'BASE' :
plut['mapping'][xidx] = [idx, fscore]
# 5. show results
print( "INFO: BusterNet Performance on CoMoFoD-CMFD Dataset using the number of correct detections" )
print("-" * 100)
for key, res in sorted( plut.items() ) :
if key == 'mapping' :
continue
# a sample is correct if its F1 score is above 0.5
nb_correct = np.sum( np.row_stack(res)[:,-1] > .5 )
print ("{:>4s}: {:>3}".format( key, nb_correct ) )
return plut
plut = evaluate_CoMoFoD_performance( Z, XN, Y, YN )
###Output
INFO: BusterNet Performance on CoMoFoD-CMFD Dataset using the number of correct detections
----------------------------------------------------------------------------------------------------
BASE: 126
BC1: 125
BC2: 123
BC3: 115
CA1: 125
CA2: 124
CA3: 123
CR1: 126
CR2: 125
CR3: 125
IB1: 122
IB2: 108
IB3: 100
JC1: 65
JC2: 82
JC3: 93
JC4: 110
JC5: 105
JC6: 108
JC7: 114
JC8: 119
JC9: 112
NA1: 110
NA2: 111
NA3: 124
###Markdown
4. Plot Performance Curves
###Code
llut = { 'Bright Change(BC)' : range(1,4), 'Contrast Adjustment(CA)' : range(4,7),
'Color Reduction(CR)' : range(7,10), 'Image Blurring(IB)' : range(10,13),
'JPEG Compression(JC)' : range(13,22), 'Noise Adding(NA)' : range(22,25) }
pyplot.figure(figsize=(12,8))
ii = 1
for key, vals in llut.items() :
ys = []
xnames = []
for idx, val in enumerate(vals) :
_, prefix = parse( '{}({})', key)
tkey = prefix + str(idx+1)
ys.append( np.mean( np.row_stack( plut[tkey] ), axis=0) )
xnames.append( tkey )
pyplot.subplot(2,3,ii)
pyplot.plot( np.array(ys) )
pyplot.xticks( range(len(vals)), xnames, fontsize=12 )
pyplot.legend(['Precision', 'Recall', 'F1 Score'], fontsize=12 )
pyplot.ylim([0,.7])
ii += 1
###Output
_____no_output_____
###Markdown
5. Generate qualitative results
###Code
def visualize_random_samples( X, Y, Z, prf_lut, batch_size=4, figsize=(12,4), thresh=0.3 ) :
nb_samples = X.shape[0]
ynames = []
for yn in YN :
ynames.append(yn)
    if thresh is None :
        print("INFO: show random results")
        indices = np.random.choice( list( prf_lut['mapping'].keys() ), size=(batch_size,))
else :
print("INFO: show random results with F1 score > {}".format( thresh ) )
candi = list( filter(None, [ xidx if f1>thresh else None for xidx, ( yidx, f1 ) in prf_lut['mapping'].items() ] ) )
indices = np.random.choice( candi, size=(batch_size,))
for idx in indices :
# 1. add back imageNet BGR means
x = np.array(X[idx]) + np.array([103.939, 116.779, 123.68]).reshape([1,1,3])
# 2. restore image dtype and BGR->RGB
x = np.round(x).astype('uint8')[...,::-1]
# 3. set gt to float
yidx, f1 = prf_lut['mapping'][idx]
y = np.array(Y[yidx]).astype('float32')
z = np.array(Z[idx])
# 4. display
pyplot.figure(figsize=figsize)
pyplot.subplot(131)
pyplot.imshow( x )
pyplot.title('test image')
pyplot.subplot(132)
pyplot.imshow( y )
pyplot.title('ground truth')
pyplot.subplot(133)
pyplot.imshow( z )
pyplot.title('BusterNet predicted')
return
visualize_random_samples( X, Y, Z, prf_lut=plut, thresh=0.5 )
###Output
INFO: show random results with F1 score > 0.5
|
notebooks/Add length to metatada.ipynb | ###Markdown
WITH OVERPASS
###Code
import geojson
import requests
import pandas as pd
import matplotlib.pyplot as plt
from copy import deepcopy
from multiprocessing import Pool
from shapely.geometry import box, Polygon, MultiPolygon, GeometryCollection, shape
from shapely import wkt
from shapely.ops import transform
import shapely
from functools import partial
import pyproj
import time
import json
n_processes = 10
# NOTE: RAW_PATH (a pathlib.Path to the project's raw-data directory) is assumed to be defined elsewhere in the project
def simplify(s, delta=0.05):
while not check_length(s):
s = s.simplify(delta, False)
delta = delta + 0.05
return s
def check_length(s, threshold=3000):
return len(str(s)) < threshold
def get_area(s):
s = shape(s)
proj = partial(pyproj.transform,
pyproj.Proj(init='epsg:4326'),
pyproj.Proj(init='epsg:3857'))
return transform(proj, s).area / 1e6 # km
def threshold_func(g, value):
return get_area(g) < value
def katana(geometry, threshold_func, threshold_value, count=0):
"""Split a Polygon into two parts across it's shortest dimension"""
bounds = geometry.bounds
width = bounds[2] - bounds[0]
height = bounds[3] - bounds[1]
if threshold_func(geometry, threshold_value) or count == 250:
# either the polygon is smaller than the threshold, or the maximum
# number of recursions has been reached
return [geometry]
if height >= width:
# split left to right
a = box(bounds[0], bounds[1], bounds[2], bounds[1]+height/2)
b = box(bounds[0], bounds[1]+height/2, bounds[2], bounds[3])
else:
# split top to bottom
a = box(bounds[0], bounds[1], bounds[0]+width/2, bounds[3])
b = box(bounds[0]+width/2, bounds[1], bounds[2], bounds[3])
result = []
for d in (a, b,):
c = geometry.intersection(d)
if not isinstance(c, GeometryCollection):
c = [c]
for e in c:
if isinstance(e, (Polygon, MultiPolygon)):
result.extend(katana(e, threshold_func, threshold_value, count+1))
if count > 0:
return result
# convert multipart into singlepart
final_result = []
for g in result:
if isinstance(g, MultiPolygon):
final_result.extend(g)
else:
final_result.append(g)
return final_result
def to_geojson(x, full=False):
g = geojson.Feature(geometry=x, properties={}).geometry
if full:
return g
if g['type'] == 'MultiPolygon':
return g['coordinates'][0][0]
else:
return g['coordinates'][0]
def swipe(x):
return [[c[1], c[0]] for c in x]
def flatten(l):
return [str(round(item, 4)) for sublist in l for item in sublist]
def to_overpass_coords(x):
coords = to_geojson(x)
coords = swipe(coords)
coords = flatten(coords)
coords = ' '.join(coords)
return coords
def overpass_request(s):
print(s['region_slug'])
print(s['partition'])
overpass_url = "http://overpass-api.de/api/interpreter"
overpass_query = """
[out:json];
way["highway"]["highway"!~"^(construction|cycleway|footway|path|proposed|service|track|pedestrian|living_street|platform|steps)$"]["highway"](poly:"%s");
for (t["highway"])
{
make stat highway=_.val,
count=count(ways),length=sum(length());
out;
}""" % s['overpass_coords']
for i in range(3):
try:
response = requests.get(overpass_url,
params={'data': overpass_query})
json.dump(response.json(), open(RAW_PATH / 'osm_region_length' / (s['region_slug'] + '_' + str(s['partition'])), 'w'))
break
except Exception as e:
if i == 2:
print(e)
print(response.text)
print('retrying')
continue
# return response
url = 'https://docs.google.com/spreadsheets/d/197fccIfjwlsT-oeisHHLn2SuOvbtJjC0inoNuxfqrwI/gviz/tq?tqx=out:csv&sheet=metadata'
metadata = pd.read_csv(url)
metadata = metadata[[c for c in metadata.columns if 'Unnamed' not in c]]
metadata['region_shapefile_wkt'] = metadata['region_shapefile_wkt'].apply(lambda x: wkt.loads(x.replace('"', '')))
# From MultiPolygon to Polygon
metadata['region_shapefile_wkt'] = metadata['region_shapefile_wkt']\
.apply(lambda x: max(x, key=lambda a: a.area) if isinstance(x, shapely.geometry.multipolygon.MultiPolygon) else x)
# metadata['region_shapefile_wkt'].apply(simplify)
metadata
threshold_value = 1000000
def partition_geometry(_df, threshold_value):
geos = katana(_df['region_shapefile_wkt'], threshold_func, threshold_value)
final = []
for i, g in enumerate(geos):
_df['partition'] = i
_df['shapefile_partition'] = g
final.append(deepcopy(_df.to_frame().T))
return pd.concat(final)
metadata_part = pd.DataFrame()
for i, row in metadata.iterrows():
print(row['region_slug'])
metadata_part = pd.concat([metadata_part , partition_geometry(row, threshold_value)])
metadata_part = metadata_part[metadata_part['region_slug'].isin(missing)]
metadata_part['shapefile_partition'] = metadata_part['shapefile_partition'].apply(simplify)
metadata_part['overpass_coords'] = metadata_part['shapefile_partition'].apply(to_overpass_coords)
rows = [row for i, row in metadata_part.iterrows()]
with Pool(1) as p:
p.map(overpass_request, rows)
regions = set(metadata['region_slug'])
def get_length(j):
a = pd.DataFrame([r['tags'] for r in j['elements']])
if len(a):
return round(a['length'].astype(float).sum(), 2)
paths = (RAW_PATH / 'osm_region_length')
names = [p.name for p in paths.glob('*')]
length = [get_length(json.load(open(s, 'r'))) for s in paths.glob('*')]
length = pd.DataFrame(list(zip(names, length)), columns=['region_slug', 'total_length'])
length['region_slug'] = length['region_slug'].apply(lambda x: '_'.join(x.split('_')[:-1]))
length = length.groupby('region_slug').sum().sort_values(by='total_length')
missing = regions.difference(set(length.index))
missing
!rm -r /home/joaom/projects/waze_coronavirus/data/raw/osm_region_length/.ipynb_checkpoints
# No partition
length.sum()
# threshold_value = 1000000
length.sum()
# threshold_value = 100000
length.sum()
a = set(metadata['region_slug'].unique())
b = set(length.dropna()['region_slug'].unique())
length.dropna().sort_values(by='total_length')
geometry = metadata[metadata['region_slug'] == 'country_brazil']['region_shapefile_wkt'].iloc[0]
daily = pd.read_csv('https://bit.ly/idb-traffic-daily')
daily
import folium
m = folium.Map(zoom_start=1, tiles='cartodbpositron')
for i in a:
m.add_children(folium.GeoJson(i))
m
paths = (RAW_PATH / 'osm_region_length')
names = [p.name for p in paths.glob('country_brazil_*')]
pd.DataFrame([], columns=['region_shapefile_wkt'])
import osm_road_length
a = metadata['region_shapefile_wkt'].iloc[0]
import pyarrow
import awswrangler as wr
wr.s3.to_parquet(
df=pd.DataFrame({
'col': [1, 2, 3],
'col2': ['A', 'A', 'B']
}),
path='s3://bucket/prefix',
dataset=True,
partition_cols=['col2']
)
import geobr
year = 2010
types = {
'States': geobr.read_state(code_state='all', year=year),
'Meso Regions': geobr.read_meso_region(code_meso="all", year=year),
'Micro Regions': geobr.read_micro_region(code_micro='all', year=2017),
'Municipalities': geobr.read_municipality(code_muni='all', year=year)
}
brasilio = pd.read_csv('https://data.brasil.io/dataset/covid19/caso.csv.gz')
last = brasilio[brasilio['is_last']]
last.head(1)
df.merge(last, left_on='code_muni', right_on='city_ibge_code')
fig, axis = plt.subplots(nrows=1, ncols=1,
figsize=(15, 15))
df.merge(last, left_on='code_muni', right_on='city_ibge_code')\
.plot(facecolor='#2D3E50', edgecolor='#FEBF57', ax=axis)
axis.set_title("Municรญpios com casos de Covid19", fontsize=20)
axis.axis('off')
fig.tight_layout()
nrows = 2
ncols = 2
fontsize = 20
fig, ax = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(15, 15), dpi=300)
types_iter = iter(types.items())
for r in range(nrows):
for c in range(ncols):
axis = ax[r][c]
name, df = next(types_iter)
df.plot(facecolor='#2D3E50', edgecolor='#FEBF57', ax=axis)
axis.set_title(name, fontsize=fontsize)
axis.axis('off')
# General
fig.tight_layout()
fig.savefig(f'imgs/1.geobr_logo.png')
###Output
_____no_output_____ |
docs/docs/colab-notebook/chronos/chronos_experimental_autots_nyc_taxi.ipynb | ###Markdown
--- Copyright 2018 Analytics Zoo Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###Output
_____no_output_____
###Markdown
**Environment Preparation** **Install Java 8**Run the cell on the **Google Colab** to install jdk 1.8.**Note:** if you run this notebook on your computer, root permission is required when running the cell to install Java 8. (You may ignore this cell if Java 8 has already been set up in your computer).
###Code
# Install jdk8
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
import os
# Set environment variable JAVA_HOME.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
!update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
!java -version
###Output
_____no_output_____
###Markdown
**Install Analytics Zoo**You can install the latest pre-release version with automl support using `pip install --pre --upgrade analytics-zoo[automl]`.
###Code
# Install latest pre-release version of Analytics Zoo
# Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies.
!pip install --pre --upgrade analytics-zoo[automl]
exit() # restart the runtime to refresh installed pkg
###Output
_____no_output_____
###Markdown
**Distributed automl for time series forecasting using Chronos Experimental AutoTS**In this guide we will demonstrate how to use Chronos Experimental AutoTS for automated time series forecasting in 5 simple steps. **Step 0: Prepare dataset**We use the NYC taxi passengers dataset from the [Numenta Anomaly Benchmark (NAB)](https://github.com/numenta/NAB) for this demo; it contains 10320 records, each indicating the total number of taxi passengers in NYC at a corresponding time spot.
###Code
# download the dataset
!wget https://raw.githubusercontent.com/numenta/NAB/v1.0/data/realKnownCause/nyc_taxi.csv
# load the dataset. The downloaded dataframe contains two columns, "timestamp" and "value".
import pandas as pd
df = pd.read_csv("nyc_taxi.csv", parse_dates=["timestamp"])
###Output
_____no_output_____
###Markdown
**Step 1: Init Orca Context**
###Code
# import necesary libraries and modules
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
###Output
_____no_output_____
###Markdown
This is the only place where you need to specify local or distributed mode. View [Orca Context](https://analytics-zoo.readthedocs.io/en/latest/doc/Orca/Overview/orca-context.html) for more details. Note that argument ```init_ray_on_spark``` must be ```True``` for Chronos.
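For example, a distributed run on YARN might be initialized as in the sketch below; the resource arguments (`num_nodes`, `cores`, `memory`) are illustrative assumptions and require an already-configured YARN environment:
```python
init_orca_context(cluster_mode="yarn-client", num_nodes=2, cores=4,
                  memory="10g", init_ray_on_spark=True)
```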
###Code
# recommended to set it to True when running Analytics Zoo in Jupyter notebook
OrcaContext.log_output = True # (this will display terminal's stdout and stderr in the Jupyter notebook).
init_orca_context(cluster_mode="local", cores=4, init_ray_on_spark=True)
###Output
_____no_output_____
###Markdown
**Step 2: Data transformation and feature engineering using Chronos TSDataset**[TSDataset](https://analytics-zoo.readthedocs.io/en/latest/doc/PythonAPI/Chronos/tsdataset.html) is our abstract of time series dataset for data transformation and feature engineering. Here we use it to preprocess the data.
###Code
from zoo.chronos.data import TSDataset
from sklearn.preprocessing import StandardScaler
tsdata_train, tsdata_val, tsdata_test = TSDataset.from_pandas(df, # the dataframe to load
dt_col="timestamp", # the column name specifying datetime
target_col="value", # the column name to predict
with_split=True, # split the dataset into 3 parts
val_ratio=0.1, # validation set ratio
test_ratio=0.1) # test set ratio
# for each tsdataset, we
# 1. generate datetime feature columns.
# 2. impute the dataset with last occured value.
# 3. scale the dataset with standard scaler, fit = true for train data.
standard_scaler = StandardScaler()
for tsdata in [tsdata_train, tsdata_val, tsdata_test]:
tsdata.gen_dt_feature()\
.impute(mode="last")\
.scale(standard_scaler, fit=(tsdata is tsdata_train))
###Output
_____no_output_____
###Markdown
**Step 3: Create an AutoTSEstimator** [AutoTSEstimator](https://analytics-zoo.readthedocs.io/en/latest/doc/PythonAPI/Chronos/autotsestimator.html) is our Automated TimeSeries Estimator for time series forecasting task.
###Code
import zoo.orca.automl.hp as hp
from zoo.chronos.autots.experimental import AutoTSEstimator
auto_estimator = AutoTSEstimator(model='lstm', # the model name used for training
search_space='normal', # a default hyper parameter search space
past_seq_len=hp.randint(1, 10)) # hp sampling function of past_seq_len for auto-tuning
###Output
_____no_output_____
###Markdown
**Step 4: Fit with AutoTSEstimator**
###Code
# fit with AutoTSEstimator for a returned TSPipeline
ts_pipeline = auto_estimator.fit(data=tsdata_train, # train dataset
validation_data=tsdata_val, # validation dataset
epochs=5) # number of epochs to train in each trial
###Output
_____no_output_____
###Markdown
**Step 5: Further deployment with TSPipeline** [TSPipeline](https://analytics-zoo.readthedocs.io/en/latest/doc/PythonAPI/Chronos/autotsestimator.htmltspipeline-experimental) is our E2E solution for time series forecasting task.
###Code
# predict with the best trial
y_pred = ts_pipeline.predict(tsdata_test)
# evaluate the result pipeline
mse, smape = ts_pipeline.evaluate(tsdata_test, metrics=["mse", "smape"])
print("Evaluate: the mean square error is", mse)
print("Evaluate: the smape value is", smape)
# plot the result
import matplotlib.pyplot as plt
lookback = auto_estimator.get_best_config()['past_seq_len']
groundtruth_unscale = tsdata_test.unscale().to_pandas()[lookback - 1:]
plt.figure(figsize=(16,6))
plt.plot(groundtruth_unscale["timestamp"], y_pred[:,0,0])
plt.plot(groundtruth_unscale["timestamp"], groundtruth_unscale["value"])
plt.legend(["prediction", "ground truth"])
# save the pipeline
my_ppl_file_path = "/tmp/saved_pipeline"
ts_pipeline.save(my_ppl_file_path)
# restore the pipeline for further deployment
from zoo.chronos.autots.experimental import TSPipeline
loaded_ppl = TSPipeline.load(my_ppl_file_path)
# Stop orca context when your program finishes
stop_orca_context()
# show a tensorboard view
%load_ext tensorboard
%tensorboard --logdir /tmp/autots_estimator/autots_estimator_leaderboard/
###Output
_____no_output_____ |
5. Tensorflow ecosystem.ipynb | ###Markdown
`tf.contrib.learn``tf.contrib.learn` is a simplified interface that provides readily available models. It was originally developed as *Scikit Flow*.`tf.contrib.learn` allows you:+ Load in data,+ Build a model,+ Fit a model to data,+ Evaluate its accuracy,all in (almost) one line. This is why it is called a *one-liner module*.Here is the full code for the neural network classifier (Source: https://www.tensorflow.org/get_started/tflearn):
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
tf.logging.set_verbosity(tf.logging.ERROR)
# Data sets
# The Iris data set contains 150 rows of data, comprising 50 samples from each
# of three related Iris species: Iris setosa, Iris virginica, and Iris versicolor.
# Each row contains the following data for each flower sample: sepal length, sepal
# width, petal length, petal width, and flower species.
# Flower species are represented as integers, with 0 denoting Iris setosa,
# 1 denoting Iris versicolor, and 2 denoting Iris virginica.
IRIS_TRAINING = "data/iris_training.csv"
IRIS_TEST = "data/iris_test.csv"
# Load datasets.
# Datasets in tf.contrib.learn are named tuples; you can access feature data
# and target values via the data and target fields.
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(filename=IRIS_TRAINING,
target_dtype=np.int,
features_dtype=np.float,
target_column=-1)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(filename=IRIS_TEST,
target_dtype=np.int,
features_dtype=np.float,
target_column=-1)
# tf.contrib.learn offers a variety of predefined models, called Estimators,
# which you can use "out of the box" to run training and evaluation operations on
# your data.
# Here, we'll configure a Deep Neural Network Classifier model to fit
# the Iris data.
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir="/tmp/iris_model",
                                             enable_centered_bias=True)
# Fit model.
classifier.fit(x=training_set.data,
y=training_set.target,
steps=2000)
# Evaluate accuracy.
accuracy_score = classifier.evaluate(x=test_set.data,
y=test_set.target)["accuracy"]
print('Accuracy: {0:f}'.format(accuracy_score))
print('\n')
# Classify two new flower samples.
new_samples = np.array(
[[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)
y = list(classifier.predict(new_samples, as_iterable=True))
print('Predictions: {} {}'.format(str(y[0]), str(y[1])))
###Output
_____no_output_____
###Markdown
Simple linear classifier
###Code
import tensorflow.contrib.learn.python.learn as learn
# sklearn integration
from sklearn import datasets, metrics
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.LinearClassifier(n_classes=3, feature_columns=feature_columns)
classifier.fit(iris.data, iris.target, steps=200, batch_size=32)
iris_predictions = list(classifier.predict(iris.data, as_iterable=True))
score = metrics.accuracy_score(iris.target, iris_predictions)
print("Accuracy: %f" % score)
###Output
_____no_output_____
###Markdown
Simple linear regression.
###Code
import tensorflow.contrib.learn.python.learn as learn
from sklearn import datasets, metrics, preprocessing
boston = datasets.load_boston()
x = preprocessing.StandardScaler().fit_transform(boston.data)
feature_columns = learn.infer_real_valued_columns_from_input(x)
regressor = learn.LinearRegressor(feature_columns=feature_columns)
regressor.fit(x, boston.target, steps=200, batch_size=32)
boston_predictions = list(regressor.predict(x, as_iterable=True))
score = metrics.mean_squared_error(boston_predictions, boston.target)
print ("MSE: %f" % score)
###Output
_____no_output_____
###Markdown
Custom model
###Code
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
import tensorflow.contrib.layers.python.layers as layers
import tensorflow.contrib.learn.python.learn as learn
iris = datasets.load_iris()
def my_model(features, labels):
"""
DNN with three hidden layers.
"""
# Convert the labels to a one-hot tensor of shape (length of features, 3) and
# with a on-value of 1 for each one-hot vector of length 3.
labels = tf.one_hot(labels, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10.
features = layers.stack(features, layers.fully_connected, [10, 20, 10])
# Create two tensors respectively for prediction and loss.
prediction, loss = (
tf.contrib.learn.models.logistic_regression(features, labels)
)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(iris.data, iris.target, steps=1000)
y_predicted = [p['class'] for p in classifier.predict(iris.data, as_iterable=True)]
score = metrics.accuracy_score(iris.target, y_predicted)
print('Accuracy: {0:f}'.format(score))
labels = [0,1,3,1,1,0,2,2]
sess = tf.Session()
print(sess.run(tf.one_hot(labels, 4, 1, 0)))
sess.close()
###Output
_____no_output_____
###Markdown
`keras`> Keras is a high-level neural networks library, written in Python and capable of running on top of either TensorFlow or Theano. It was developed with a focus on enabling fast experimentation. The core data structure of Keras is a model, a way to organize layers. The main type of model is the ``Sequential model``, a linear stack of layers. ```Pythonfrom keras.models import Sequentialmodel = Sequential()``` Stacking layers is as easy as ``.add()``:```Pythonfrom keras.layers import Dense, Activationmodel.add(Dense(output_dim=64, input_dim=100))model.add(Activation("relu"))model.add(Dense(output_dim=10))model.add(Activation("softmax"))``` Once your model looks good, configure its learning process with ``.compile()``:```Pythonmodel.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])``` If you need to, you can further configure your optimizer.```Pythonfrom keras.optimizers import SGDmodel.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True))``` You can now iterate on your training data in batches:```Pythonmodel.fit(X_train, Y_train, nb_epoch=5, batch_size=32)```Evaluate your performance in one line:```Pythonloss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)``` Or generate predictions on new data:```Pythonclasses = model.predict_classes(X_test, batch_size=32)proba = model.predict_proba(X_test, batch_size=32)``` Example: MNIST MLP
###Code
'''
Trains a simple deep NN on the MNIST dataset.
You can get to 98.40% test accuracy after 20 epochs.
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
tf.reset_default_graph()
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
# print model characteristics
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train,
Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
verbose=1,
validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('\n')
print('Test score:', score[0])
print('Test accuracy:', score[1])
###Output
_____no_output_____
###Markdown
``keras`` sequential modeThe ``Sequential`` model is a linear stack of layers.You can create a ``Sequential`` model by passing a list of layer instances to the constructor:```pythonfrom keras.models import Sequentialfrom keras.layers import Dense, Activationmodel = Sequential([ Dense(32, input_dim=784), Activation('relu'), Dense(10), Activation('softmax'),])```You can also simply add layers via the ``.add()`` method:```pythonmodel = Sequential()model.add(Dense(32, input_dim=784))model.add(Activation('relu'))...``` Specifying the input shapeThe model needs to know what input shape it should expect. For this reason, the first layer in a ``Sequentia``l model (and only the first, because following layers can do automatic shape inference) needs to receive information about its input shape. There are several possible ways to do this:+ pass an ``input_shape`` argument to the first layer. This is a shape tuple (a tuple of integers or ``None`` entries, where ``None`` indicates that any positive integer may be expected). In ``input_shape``, the batch dimension is not included.+ pass instead a ``batch_input_shape`` argument, where the batch dimension is included. This is useful for specifying a fixed batch size (e.g. with stateful RNNs).+ some 2D layers, such as ``Dense``, support the specification of their input shape via the argument ``input_dim``, and some 3D temporal layers support the arguments ``input_dim`` and ``input_length```.As such, the following three snippets are strictly equivalent:```pythonmodel = Sequential()model.add(Dense(32, input_shape=(784,)))model = Sequential()model.add(Dense(32, batch_input_shape=(None, 784))) note that batch dimension is "None" here, so the model will be able to process batches of any size.model = Sequential()model.add(Dense(32, input_dim=784))``` The ``merge`` layerMultiple Sequential instances can be merged into a single output via a ``Merge`` layer. The output is a layer that can be added as first layer in a new ``Sequential`` model. For instance, here's a model with two separate input branches getting merged:```pythonfrom keras.layers import Mergeleft_branch = Sequential()left_branch.add(Dense(32, input_dim=784))right_branch = Sequential()right_branch.add(Dense(32, input_dim=784))merged = Merge([left_branch, right_branch], mode='concat')final_model = Sequential()final_model.add(merged)final_model.add(Dense(10, activation='softmax'))``` Such a two-branch model can then be trained via e.g.:```pythonfinal_model.compile(optimizer='rmsprop', loss='categorical_crossentropy')final_model.fit([input_data_1, input_data_2], targets) we pass one data array per model input```The ``Merge`` layer supports a number of pre-defined modes:+ ``sum`` (default): element-wise sum+ ``concat``: tensor concatenation. You can specify the concatenation axis via the argument concat_axis.+ ``mul``: element-wise multiplication+ ``ave``: tensor average+ ``dot``: dot product. You can specify which axes to reduce along via the argument dot_axes.+ ``cos``: cosine proximity between vectors in 2D tensors.You can also pass a function as the mode argument, allowing for arbitrary transformations:```pythonmerged = Merge([left_branch, right_branch], mode=lambda x: x[0] - x[1])``` CompilationBefore training a model, you need to configure the learning process, which is done via the compile method. It receives three arguments:+ an optimizer. This could be the string identifier of an existing optimizer (such as ``rmsprop`` or ``adagrad``), or an instance of the ``Optimizer`` class. + a loss function. 
This is the objective that the model will try to minimize. It can be the string identifier of an existing loss function (such as ``categorical_crossentropy`` or ``mse``), or it can be an objective function. + a list of metrics. For any classification problem you will want to set this to ``metrics=['accuracy']``. A metric could be the string identifier of an existing metric or a custom metric function. Custom metric function should return either a single tensor value or a dict ``metric_name`` -> ``metric_value``. ```python for a multi-class classification problemmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) for a binary classification problemmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) for a mean squared error regression problemmodel.compile(optimizer='rmsprop', loss='mse')``` Training``Keras`` models are trained on Numpy arrays of input data and labels. For training a model, you will typically use the ``fit`` function.For a single-input model with 2 classes (binary):
###Code
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential()
model.add(Dense(1, input_dim=784, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# generate dummy data
import numpy as np
data = np.random.random((1000, 784))
labels = np.random.randint(2, size=(1000, 1))
# train the model, iterating on the data in batches
# of 32 samples
model.fit(data, labels, nb_epoch=10, batch_size=32)
model.summary()
###Output
_____no_output_____
###Markdown
For a multi-input model with 10 classes:
###Code
from keras.layers import Merge
left_branch = Sequential()
left_branch.add(Dense(32, input_dim=784))
right_branch = Sequential()
right_branch.add(Dense(32, input_dim=784))
merged = Merge([left_branch, right_branch], mode='concat')
model = Sequential()
model.add(merged)
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# generate dummy data
import numpy as np
from keras.utils.np_utils import to_categorical
data_1 = np.random.random((1000, 784))
data_2 = np.random.random((1000, 784))
# these are integers between 0 and 9
labels = np.random.randint(10, size=(1000, 1))
# we convert the labels to a binary matrix of size (1000, 10)
# for use with categorical_crossentropy
labels = to_categorical(labels, 10)
# train the model
# note that we are passing a list of Numpy arrays as training data
# since the model has 2 inputs
model.fit([data_1, data_2], labels, nb_epoch=10, batch_size=32)
model.summary()
###Output
_____no_output_____
###Markdown
Keras functional APIThe Keras functional API is the way to go for defining complex models, such as multi-output models, directed acyclic graphs, or models with shared layers.The ``Sequential`` model is probably a better choice to implement such a network, but it helps to start with something really simple.Using ``Model`` class:+ A layer instance is callable (on a tensor), and it returns a tensor+ ``Input`` tensor(s) and output tensor(s) can then be used to define a ``Model``+ Such a model can be trained just like Keras Sequential models.```pythonfrom keras.layers import Input, Densefrom keras.models import Model this returns a tensorinputs = Input(shape=(784,)) a layer instance is callable on a tensor, and returns a tensorx = Dense(64, activation='relu')(inputs)x = Dense(64, activation='relu')(x)predictions = Dense(10, activation='softmax')(x) this creates a model that includes the Input layer and three Dense layersmodel = Model(input=inputs, output=predictions)model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels) starts training``` All models are callable, just like layers.With the functional API, it is easy to re-use trained models: you can treat any model as if it were a layer, by calling it on a tensor. Note that by calling a model you aren't just re-using the architecture of the model, you are also re-using its weights.```pythonx = Input(shape=(784,)) this works, and returns the 10-way softmax we defined above.y = model(x)``` Siamese MLP on pairs of digits from the MNIST (Source: https://github.com/fchollet/keras/blob/master/examples/mnist_siamese_graph.py)Siamese networks are commonly used in image comparison applications such as face or signature verification. They can also be used in language processing, times series analysis, etc.In a typical Siamese network a large part of the network is duplicated at the base to allow multiple inputs to go through identical layers. This example shows how to teach a neural network to map an image from the MNIST dataset to a 2D point, while trying to minimize the distance between points of the same class and maximize the distance between points of different classes. Siamese network architecture is a way of learning how to embed samples into lower-dimensions based on similarity computed with features learned by a feature network.The feature network is the architecture we intend to fine-tune in this setting. Let's suppose we want to embed images. Given two images $X_1$ and $X_2$, we feed into the feature network $G_W$ and compute corresponding feature vectors $G_W(X_1)$ and $G_W(X_2)$. The final layer computes pair-wise distance between computed features $E_W = || G_W(X_1) - G_W(X_2) ||_{1}$ and final loss layer $L$ considers whether these two images are from the same class (label $1$) or not (label $0$). In the original [paper](http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf) it was proposed the **Contrastive Loss Function**: $$ L(W,(Y,X_1,X_2)^i) = (1 - Y) \times L_S(E_W(X_1,X_2)^i) + Y \times L_D(E_W(X_1,X_2)^i) $$where $L_S$ is the partial loss function for a "same-class" pair and $L_D$ is the partial loss function for a "different-class" pair.$L_S$ and $L_D$ should be designed in such a way that the minimization of $L$ will decrease the distance in the embedding space of "same-class" pairs and increase it in the case of "different-class" pairs:$$ L_S = \frac{1}{2} E_W^2 $$$$ L_D = \frac{1}{2} \{ \mbox{max }(0,1-E_W) \}^2 $$
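Note that the linked paper uses the convention $Y = 0$ for a "same-class" pair and $Y = 1$ for a "different-class" pair, which is why the formula above pairs $(1 - Y)$ with $L_S$; the Keras code below labels same-class pairs with 1 instead, so the two terms appear swapped in its `contrastive_loss`. As a quick numeric illustration of the two partial losses (a small sketch with made-up distances, not part of the original example):
```python
import numpy as np

margin = 1.0
distances = np.array([0.1, 0.5, 1.0, 2.0])             # hypothetical embedding distances E_W
L_S = 0.5 * distances ** 2                             # same-class term: grows with distance, pulling pairs together
L_D = 0.5 * np.maximum(0.0, margin - distances) ** 2   # different-class term: zero once the distance exceeds the margin
print(np.round(L_S, 3))   # [0.005 0.125 0.5   2.   ]
print(np.round(L_D, 3))   # [0.405 0.125 0.    0.   ]
```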
###Code
'''Train a Siamese MLP on pairs of digits from the MNIST dataset.
It follows Hadsell-et-al.'06 [1] by computing the Euclidean distance on the
output of the shared network and by optimizing the contrastive loss (see paper
for more details).
[1] "Dimensionality Reduction by Learning an Invariant Mapping"
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
Gets to 99.5% test accuracy after 20 epochs.
3 seconds per epoch on a Titan X GPU
'''
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import tensorflow as tf
np.random.seed(1337) # for reproducibility
tf.reset_default_graph()
import random
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, Lambda
from keras.optimizers import RMSprop
from keras import backend as K
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def contrastive_loss(y_true, y_pred):
'''Contrastive loss from Hadsell-et-al.'06
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
'''
margin = 1
return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
def create_pairs(x, digit_indices):
'''Positive and negative pair creation.
Alternates between positive and negative pairs.
'''
pairs = []
labels = []
n = min([len(digit_indices[d]) for d in range(10)]) - 1
for d in range(10):
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[x[z1], x[z2]]]
inc = random.randrange(1, 10)
dn = (d + inc) % 10
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[x[z1], x[z2]]]
labels += [1, 0]
return np.array(pairs), np.array(labels)
def create_base_network(input_dim):
'''
Base network to be shared (eq. to feature extraction).
'''
seq = Sequential()
seq.add(Dense(128, input_shape=(input_dim,), activation='relu'))
seq.add(Dropout(0.1))
seq.add(Dense(128, activation='relu'))
seq.add(Dropout(0.1))
seq.add(Dense(2, activation=None,name='emb'))
return seq
def compute_accuracy(predictions, labels):
'''Compute classification accuracy with a fixed threshold on distances.
'''
return labels[predictions.ravel() < 0.5].mean()
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
input_dim = 784
nb_epoch = 10
# create training+test positive and negative pairs
digit_indices = [np.where(y_train == i)[0] for i in range(10)]
tr_pairs, tr_y = create_pairs(X_train, digit_indices)
digit_indices = [np.where(y_test == i)[0] for i in range(10)]
te_pairs, te_y = create_pairs(X_test, digit_indices)
# network definition
base_network = create_base_network(input_dim)
input_a = Input(shape=(input_dim,))
input_b = Input(shape=(input_dim,))
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model(input=[input_a, input_b], output=distance)
# train
rms = RMSprop()
model.compile(loss=contrastive_loss, optimizer=rms)
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y),
batch_size=128,
nb_epoch=nb_epoch)
# compute final accuracy on training and test sets
pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(pred, tr_y)
pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
te_acc = compute_accuracy(pred, te_y)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
###Output
_____no_output_____
###Markdown
 Multi-input and multi-output modelsHere's a good use case for the functional API: models with multiple inputs and outputs. The functional API makes it easy to manipulate a large number of intertwined datastreams.Let's consider the following model. We seek to predict how many retweets and likes a news headline will receive on Twitter. The main input to the model will be the headline itself, as a sequence of words, but to spice things up, our model will also have an auxiliary input, receiving extra data such as the time of day when the headline was posted, etc. The model will also be supervised via two loss functions. Using the main loss function earlier in a model is a good regularization mechanism for deep models.Here's what our model looks like: Let's implement it with the functional API.The main input will receive the headline, as a sequence of integers (each integer encodes a word). The integers will be between 1 and 10,000 (a vocabulary of 10,000 words) and the sequences will be 100 words long.
###Code
from keras.layers import Input, Embedding, LSTM, Dense, merge
from keras.models import Model
# headline input: meant to receive sequences of 100 integers, between 1 and 10000.
# note that we can name any layer by passing it a "name" argument.
main_input = Input(shape=(100,), dtype='int32', name='main_input')
# this embedding layer will encode the input sequence
# into a sequence of dense 512-dimensional vectors.
x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input)
# a LSTM will transform the vector sequence into a single vector,
# containing information about the entire sequence
lstm_out = LSTM(32)(x)
###Output
_____no_output_____
###Markdown
Here we insert the auxiliary loss, allowing the LSTM and Embedding layer to be trained smoothly even though the main loss will be much higher in the model.
###Code
auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out)
###Output
_____no_output_____
###Markdown
At this point, we feed into the model our auxiliary input data by concatenating it with the LSTM output:
###Code
auxiliary_input = Input(shape=(5,), name='aux_input')
x = merge([lstm_out, auxiliary_input], mode='concat')
# we stack a deep fully-connected network on top
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
# and finally we add the main logistic regression layer
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
###Output
_____no_output_____
###Markdown
This defines a model with two inputs and two outputs:
###Code
model = Model(input=[main_input, auxiliary_input], output=[main_output, auxiliary_output])
###Output
_____no_output_____
###Markdown
We compile the model and assign a weight of 0.2 to the auxiliary loss. To specify different loss_weights or loss for each different output, you can use a list or a dictionary. Here we pass a single loss as the loss argument, so the same loss will be used on all outputs.
###Code
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
loss_weights=[1., 0.2])
###Output
_____no_output_____
###Markdown
We can train the model by passing it lists of input arrays and target arrays:```pythonmodel.fit([headline_data, additional_data], [labels, labels], nb_epoch=50, batch_size=32)```Since our inputs and outputs are named (we passed them a "name" argument), We could also have compiled the model via:```pythonmodel.compile(optimizer='rmsprop', loss={'main_output': 'binary_crossentropy', 'aux_output': 'binary_crossentropy'}, loss_weights={'main_output': 1., 'aux_output': 0.2}) and trained it via:model.fit({'main_input': headline_data, 'aux_input': additional_data}, {'main_output': labels, 'aux_output': labels}, nb_epoch=50, batch_size=32)``` Shared layersAnother good use for the functional API are models that use shared layers. Let's take a look at shared layers.Let's consider a dataset of tweets. We want to build a model that can tell whether two tweets are from the same person or not (this can allow us to compare users by the similarity of their tweets, for instance).One way to achieve this is to build a model that encodes two tweets into two vectors, concatenates the vectors and adds a logistic regression of top, outputting a probability that the two tweets share the same author. The model would then be trained on positive tweet pairs and negative tweet pairs.Because the problem is symmetric, the mechanism that encodes the first tweet should be reused (weights and all) to encode the second tweet. Here we use a shared LSTM layer to encode the tweets.Let's build this with the functional API. We will take as input for a tweet a binary matrix of shape (140, 256), i.e. a sequence of 140 vectors of size 256, where each dimension in the 256-dimensional vector encodes the presence/absence of a character (out of an alphabet of 256 frequent characters).
###Code
from keras.layers import Input, LSTM, Dense, merge
from keras.models import Model
tweet_a = Input(shape=(140, 256))
tweet_b = Input(shape=(140, 256))
###Output
_____no_output_____
###Markdown
To share a layer across different inputs, simply instantiate the layer once, then call it on as many inputs as you want:
###Code
# this layer can take as input a matrix
# and will return a vector of size 64
shared_lstm = LSTM(64)
# when we reuse the same layer instance
# multiple times, the weights of the layer
# are also being reused
# (it is effectively *the same* layer)
encoded_a = shared_lstm(tweet_a)
encoded_b = shared_lstm(tweet_b)
# we can then concatenate the two vectors:
merged_vector = merge([encoded_a, encoded_b], mode='concat', concat_axis=-1)
# and add a logistic regression on top
predictions = Dense(1, activation='sigmoid')(merged_vector)
# we define a trainable model linking the
# tweet inputs to the predictions
model = Model(input=[tweet_a, tweet_b], output=predictions)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit([data_a, data_b], labels, nb_epoch=10)
###Output
_____no_output_____ |
Plot3D_02_add_plot_metadata.ipynb | ###Markdown
Modify metadata - add plot metadata Set up
###Code
import os
from irods.session import iRODSSession
cred_path = "D:/iRods_credentials.txt" #file containing iRODS credentials
irods_home = "/tempZone/home/garys" #iRODS home collection
sourceTree = "Y:/garys" #same iRODS home collection, but mapped as a disc drive with davrods
###Output
_____no_output_____
###Markdown
Get an iRODS session
###Code
creds = [line.rstrip('\n') for line in open(cred_path, "r")]
with iRODSSession(host=creds[0], port=creds[1], user=creds[2], password=creds[3], zone=creds[4], \
#client_user=creds[5], \
client_server_negotiation = "request_server_negotiation", client_server_policy = "CS_NEG_REQUIRE", \
encryption_algorithm = "AES-256-CBC", encryption_key_size = 32, encryption_num_hash_rounds = 16, \
encryption_salt_size = 8) as session:
print ('Got A Session', session)
pass
collection = session.collections.get(irods_home)
print(collection)
class Data:
def __init__(self, filename):
# initialise the experimental data by reading the header from the data (.dat) file
self.process = True # should this experiment be processed?
self.axes = [] # axes labels
self.columns = {} # column labels and index numbers
        with open(filename) as f:
            lines = [line.rstrip('\n') for line in f]
line = 3
col_no = 1
while lines[line].find("Column") != -1:
if lines[line+3].find("type: coordinate") > 0:
axis_text = lines[line+1][lines[line+1].find(": ")+2:]
self.axes.append(axis_text)
line += 4
elif lines[line+2].find("type: value") > 0:
column_text = lines[line+1][lines[line+1].find(": ")+2:]
self.columns[column_text]=col_no
line += 3
col_no += 1
if len(self.axes) < 2: self.process = False
###Output
_____no_output_____
###Markdown
Loop over Collections
###Code
for sourceDir, subdirList, fileList in os.walk(sourceTree):
for fname in fileList:
if fname.endswith(".dat"):
try:
d = Data(os.path.join(sourceDir,fname))
sourceColl = irods_home+sourceDir[len(sourceTree):]
sourceColl = sourceColl.replace('\\', '/')
#print(sourceColl)
collection = session.collections.get(sourceColl)
#print(sourceDir+" number of axes is "+str(len(d.axes))+" number of columns is "+str(len(d.columns)))
try:
collection.metadata.add("no_of_axes",str(len(d.axes)))
except:
print(sourceDir+" could not add metadata number_of_axes "+str(len(d.axes)))
try:
collection.metadata.add("no_of_data_columns",str(len(d.columns)))
except:
print(sourceDir+" could not add metadata no_of_data_columns "+str(len(d.axes)))
try:
metadata = collection.metadata.get_one("number_of_plots")
collection.metadata.remove("number_of_plots",metadata.value)
except:
print(sourceDir+" could not remove metadata number_of_plots")
for col_text, col_no in d.columns.items():
gfile = irods_home+sourceDir[len(sourceTree):]+"/"+fname+"_plot_"+str(col_no)+".jpg"
gfile = gfile.replace('\\', '/')
try:
data_object = session.data_objects.get(gfile)
data_object.metadata.add("plot_label",col_text)
except:
print("SOMETHING ODD "+gfile)
except:
print(sourceDir+" something odd")
###Output
_____no_output_____ |
notebooks/mc-higlass.ipynb | ###Markdown
Multicontact Data in HiGlass
###Code
%load_ext autoreload
%autoreload 2
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
###Output
_____no_output_____
###Markdown
Download Test Data
###Code
from utils import download_file
filepath_100k = download_file(
'https://4dn-dcic-public.s3.amazonaws.com/multi_contact_files/100k_test3.hdf5',
'100k_test3.hdf5',
base='..'
)
filepath_10k = download_file(
'https://4dn-dcic-public.s3.amazonaws.com/multi_contact_files/10k_test3.h5',
'10k_test3.h5',
base='..'
)
###Output
File already exist. To overwrite pass `overwrite=True`
File already exist. To overwrite pass `overwrite=True`
###Markdown
Tileset Implementation
###Code
import h5py
import higlass as hg
import math
import numpy as np
import pandas as pd
from clodius.tiles.format import format_dense_tile
def mc_1d(filepath, anchors=[], **kwargs):
"""
Tileset for multicontact 1D data
"""
tile_size = 1024
def filter_data(filepath, anchors=[]):
with h5py.File(filepath, 'r') as f:
data = pd.DataFrame({
'bin': f['clusters']['bin'],
'cluster': f['clusters']['cluster_name']
})
min_pos = int(data['bin'].values.min())
max_pos = int(data['bin'].values.max())
counts_by_bin = np.zeros(max_pos - min_pos + 1)
max_cluster_name = data['cluster'].max() + 1
cluster_idx = np.zeros(max_cluster_name, dtype='bool')
for anchor in anchors:
clusters = data['cluster'][data['bin'] == anchor]
cluster_idx[clusters] = True
data = data.iloc[cluster_idx[data['cluster'].values]]
cluster_idx[clusters] = False
counts = data.groupby('bin').count()
counts_by_bin[counts.index.values] = counts['cluster'].values
return counts_by_bin, min_pos, max_pos
data, min_pos, max_pos = filter_data(filepath, anchors)
data_size = data.size
not_nan_data = ~np.isnan(data)
max_zoom = math.ceil(math.log(max_pos / tile_size) / math.log(2))
max_zoom = 0 if max_zoom < 0 else max_zoom
tsinfo = {
'tile_size': tile_size,
'bins_per_dimension': tile_size,
'min_pos': [min_pos],
'max_pos': [max_pos],
'max_zoom': max_zoom,
'max_width': 2 ** max_zoom * 1024,
}
def generate_tile(z, x):
'''
Return tiles at the given positions.
Parameters
-----------
z: int
The zoom level (0 corresponds to most zoomed out)
x: int
The x tile position
'''
tile_width = 2 ** (max_zoom - z) * tile_size
x_start = x * tile_width
x_end = min(max_pos, x_start + tile_width)
tile_data = data[x_start:x_end]
tile_data
num_to_sum = 2 ** (max_zoom - z)
# add some data so that the data can be divided into squares
divisible_x_width = num_to_sum * math.ceil(tile_data.shape[0] / num_to_sum)
divisible_x_pad = divisible_x_width - tile_data.shape[0]
padded_data = np.pad(
tile_data, ((0, divisible_x_pad),), 'constant', constant_values=(np.nan,)
)
out_data = np.nansum(padded_data.reshape((-1, num_to_sum)), axis=1)
not_nan_out_data = not_nan_data[x_start:x_end]
# we want to calculate the means of the data points
na = np.pad(
not_nan_out_data,
((0, divisible_x_pad)),
'constant',
constant_values=(np.nan,)
)
norm_out_data = np.nansum(na.reshape((-1, num_to_sum)), axis=1)
out_data = out_data / (norm_out_data + 1)
# determine how much to pad the array
x_pad = tile_size - out_data.shape[0]
return np.pad(
out_data, ((0, x_pad)), 'constant', constant_values=(np.nan, )
)
def tileset_info():
return tsinfo
def tiles(tile_ids):
tiles = []
for tile_id in tile_ids:
# decompose the tile zoom and location
_, z, x = tile_id.split('.')
# generate the tile
data = generate_tile(int(z), int(x))
# format the tile response
tiles.append((tile_id, format_dense_tile(data)))
return tiles
return hg.Tileset(
tileset_info=tileset_info,
tiles=tiles,
**kwargs
)
###Output
_____no_output_____
###Markdown
Code for enabling selections in HiGlass
###Code
import ipywidgets as widgets
def enable_selection(widget):
select_mode = widgets.ToggleButton(value=False, description='Select Mode')
x_from = widgets.IntText(value=None, description='From:')
x_to = widgets.IntText(value=None, description='To:')
def handle_selection(event, widget):
try:
# `event['data']` is the event data
# `[0]` is the first argument, which is typically a dict
x_from.value = event['data'][0]['dataRange'][0][0]
x_to.value = event['data'][0]['dataRange'][0][1]
except:
print('Oh no...')
widget.on('selection', handle_selection)
widgets.jslink((widget, 'select_mode'), (select_mode, 'value'))
return select_mode, x_from, x_to
###Output
_____no_output_____
###Markdown
Global Track Config
###Code
track_config = {
'track_type': 'horizontal-line',
'position': 'top',
'height': 128,
'options': {
'colorRange': ['#ffbb33', '#e5001c', 'black'],
'labelColor': 'red',
'backgroundColor': 'white',
},
}
axis = hg.Track('top-axis')
###Output
_____no_output_____
###Markdown
--- 0 Anchors (Coverage)
###Code
from IPython.display import display
# ts_100k_3a = mc_1d(filepath_100k, anchors=[10885, 10892, 10814], name='100k Test Data')
ts_100k_0a = mc_1d(filepath_100k, name='100k Test Data: 0 Anchors')
widget_0a, _, _ = hg.display([hg.View([axis, hg.Track(tileset=ts_100k_0a, **track_config)])])
select_mode_0a, x_from_0a, x_to_0a = enable_selection(widget_0a)
display(select_mode_0a, widget_0a, x_from_0a, x_to_0a)
display(widget_0a)
###Output
_____no_output_____
###Markdown
1. Anchor
###Code
# For now just the midpoint
anchor_1 = int(x_from_0a.value + ((x_to_0a.value - x_from_0a.value) / 2))
print('Anchor at {}'.format(anchor_1))
ts_100k_1a = mc_1d(filepath_100k, anchors=[anchor_1], name='100k Test Data: 1 Anchor')
widget_1a, _, _ = hg.display([hg.View([axis, hg.Track(tileset=ts_100k_1a, **track_config)])])
select_mode_1a, x_from_1a, x_to_1a = enable_selection(widget_1a)
display(select_mode_1a, widget_1a, x_from_1a, x_to_1a)
# For now just the midpoint
anchor_2 = int(x_from_1a.value + ((x_to_1a.value - x_from_1a.value) / 2))
print('Anchor at {}'.format(anchor_2))
ts_100k_2a = mc_1d(filepath_100k, anchors=[anchor_1, anchor_2], name='100k Test Data: 2 Anchor')
widget_2a, _, _ = hg.display([hg.View([axis, hg.Track(tileset=ts_100k_2a, **track_config)])])
select_mode_2a, x_from_2a, x_to_2a = enable_selection(widget_2a)
display(select_mode_2a, widget_2a, x_from_2a, x_to_2a)
###Output
Anchor at 17394
|
.ipynb_checkpoints/EIVHE Test-checkpoint.ipynb | ###Markdown
Building Efficient Integer Vector HEThis is a test build. The general equation is$$Sc = wx + e$$for encryption, and for decryption,$$x = \lceil{\frac{Sc}{w}}\rfloor$$That is all pretty straightforward. Below I will put the full code for the full system with key switching.
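A tiny worked example of the decryption identity above (added, with made-up numbers): take a single component with secret $S = 1$, weight $w = 16$, plaintext $x = 5$ and noise $e = 3$. Then $Sc = wx + e = 83$, and $\lceil{\frac{83}{16}}\rfloor = \lceil{5.1875}\rfloor = 5 = x$, so the plaintext is recovered as long as the noise stays small relative to $w$ (roughly $|e| < w/2$).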
###Code
import numpy as np
def generate_key(w,m,n):
    S = (np.random.rand(m,n) * w / (2 ** 16)) # ensures max(S) < w
return S
def encrypt(x,S,m,n,w):
assert len(x) == len(S)
    e = (np.random.rand(m)) # ensures max(e) < w / 2, so the noise stays small
c = np.linalg.inv(S).dot((w * x) + e)
return c
def decrypt(c,S,w):
return (S.dot(c) / w).astype('int')
def get_c_star(c,m,l):
c_star = np.zeros(l * m,dtype='int')
for i in range(m):
b = np.array(list(np.binary_repr(np.abs(c[i]))),dtype='int')
if(c[i] < 0):
b *= -1
c_star[(i * l) + (l-len(b)): (i+1) * l] += b
return c_star
def switch_key(c,S,m,n,T):
l = int(np.ceil(np.log2(np.max(np.abs(c))))) # returns an integer
c_star = get_c_star(c,m,l)
S_star = get_S_star(S,m,n,l)
n_prime = n + 1
S_prime = np.concatenate((np.eye(m),T.T),0).T
A = (np.random.rand(n_prime - m, n*l) * 10).astype('int')
E = (1 * np.random.rand(S_star.shape[0],S_star.shape[1])).astype('int')
M = np.concatenate(((S_star - T.dot(A) + E),A),0)
c_prime = M.dot(c_star)
return c_prime,S_prime
def get_S_star(S,m,n,l):
S_star = list()
for i in range(l):
S_star.append(S*2**(l-i-1))
S_star = np.array(S_star).transpose(1,2,0).reshape(m,n*l)
return S_star
def get_T(n):
n_prime = n + 1
T = (10 * np.random.rand(n,n_prime - n)).astype('int')
return T # is a vector of integers
def encrypt_via_switch(x,w,m,n,T):
c,S = switch_key(x*w,np.eye(m),m,n,T)
return c,S
x = np.array([0,1,2,5])
m = len(x)
n = m
w = 16
S = generate_key(w,m,n)
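# Added sketch (not part of the original cell): a quick round trip through the
# key-switching helpers above. T plays the role of the new secret; the switch
# returns the new ciphertext together with S_prime = [I | T], which decrypts it.
T = get_T(n)
c, S_prime = encrypt_via_switch(x, w, m, n, T)
assert (decrypt(c, S_prime, w) == x).all()  # should recover [0, 1, 2, 5]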
###Output
_____no_output_____ |
figure_2/notebooks/Supp1a_peaks_overlap.ipynb | ###Markdown
COMPUTE OVERLAPPING PEAKS BETWEEN AUTOMATED AND MANUAL IMPLEMENTATION AND PLOT H3K4me3
###Code
%%bash
wc -l ../intermediate_files/Merged_Peaks_H3K4me3_manual.bed
bedtools intersect -a ../intermediate_files/Merged_Peaks_H3K4me3_manual.bed \
-b ../intermediate_files/Merged_Peaks_H3K4me3_Biomek.bed \
-u | wc -l
wc -l ../intermediate_files/Merged_Peaks_H3K4me3_Biomek.bed
bedtools intersect -a ../intermediate_files/Merged_Peaks_H3K4me3_Biomek.bed \
-b ../intermediate_files/Merged_Peaks_H3K4me3_manual.bed \
-u | wc -l
import numpy as np
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
# The two reciprocal bedtools intersections differ slightly, so their average is used as the shared count.
only_manual = 5205 - int(np.floor(np.mean([4708,4997])))
only_biomek = 5010 - int(np.floor(np.mean([4708,4997])))
intersect = int(np.floor(np.mean([4708,4997])))
tot = only_manual + only_biomek + intersect
only_manual = np.round(((only_manual / tot) * 100),2)
only_biomek = np.round(((only_biomek / tot) * 100),2)
intersect = np.round(((intersect / tot) * 100),2)
v = venn2(subsets=(only_biomek,only_manual,intersect), set_labels = ('Automated','Manual'), set_colors=('darkred','coral'), alpha = 0.7)
v.get_patch_by_id('11').set_color('gainsboro')
plt.savefig("../figures/H3K4me3_intersect_Biomek-Manual.pdf")
###Output
_____no_output_____
###Markdown
H3K27ac
###Code
%%bash
wc -l ../intermediate_files/Merged_Peaks_H3K27ac_manual.bed
bedtools intersect -a ../intermediate_files/Merged_Peaks_H3K27ac_manual.bed \
-b ../intermediate_files/Merged_Peaks_H3K27ac_Biomek.bed \
-u | wc -l
wc -l ../intermediate_files/Merged_Peaks_H3K27ac_Biomek.bed
bedtools intersect -a ../intermediate_files/Merged_Peaks_H3K27ac_Biomek.bed \
-b ../intermediate_files/Merged_Peaks_H3K27ac_manual.bed \
-u | wc -l
only_manual = 7429 - int(np.floor(np.mean([5677,7005])))
only_biomek = 7061 - int(np.floor(np.mean([5677,7005])))
intersect = int(np.floor(np.mean([5677,7005])))
tot = only_manual + only_biomek + intersect
only_manual = np.round(((only_manual / tot) * 100),2)
only_biomek = np.round(((only_biomek / tot) * 100),2)
intersect = np.round(((intersect / tot) * 100),2)
print(only_manual+only_biomek+intersect)
v = venn2(subsets=(only_biomek,only_manual,intersect), set_labels = ('Automated','Manual'), set_colors=('darkgreen','limegreen'), alpha = 0.7)
v.get_patch_by_id('11').set_color('gainsboro')
plt.savefig("../figures/H3K27ac_intersect_Biomek-Manual.pdf")
###Output
100.0
###Markdown
H3K27me3
###Code
%%bash
wc -l ../intermediate_files/Merged_Peaks_H3K27me3_manual.bed
bedtools intersect -a ../intermediate_files/Merged_Peaks_H3K27me3_manual.bed \
-b ../intermediate_files/Merged_Peaks_H3K27me3_Biomek.bed \
-u | wc -l
wc -l ../intermediate_files/Merged_Peaks_H3K27me3_Biomek.bed
bedtools intersect -a ../intermediate_files/Merged_Peaks_H3K27me3_Biomek.bed \
-b ../intermediate_files/Merged_Peaks_H3K27me3_manual.bed \
-u | wc -l
only_manual = 9331 - int(np.floor(np.mean([7651,10203])))
only_biomek = 10423 - int(np.floor(np.mean([7651,10203])))
intersect = int(np.floor(np.mean([7651,10203])))
tot = only_manual + only_biomek + intersect
only_manual = np.round(((only_manual / tot) * 100),2)
only_biomek = np.round(((only_biomek / tot) * 100),2)
intersect = np.round(((intersect / tot) * 100),2)
print(only_manual+only_biomek+intersect)
v = venn2(subsets=(only_biomek,only_manual,intersect), set_labels = ('Automated','Manual'), set_colors=('k','dimgrey'), alpha = 0.7)
v.get_patch_by_id('11').set_color('gainsboro')
plt.savefig("../figures/H3K27me3_intersect_Biomek-Manual.pdf")
###Output
100.0
|
simple_indexes/04_indexes/3.1.make_news_indexes.ipynb | ###Markdown
News index
###Code
import numpy as np
import pandas as pd
import datetime
import os
import pickle
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
matplotlib.style.use('ggplot')
from tqdm import tqdm as tqdm_notebook
from collections import defaultdict
%matplotlib inline
krizis = set(['ะฑะฐะฝะบ', 'ะบััั', 'ะดะพะปะปะฐั', 'ะตะฒัะพ', 'ัะฑ', 'ััั', 'ะผะผะฒะฑ', 'ะธะฟะพัะตะบะฐ', 'ะฐะบัะธั',
'ะบัะตะดะธั', 'ะฟะธั', 'ะฑะฐะฝะบัะพัััะฒะพ', 'ะทะฐะปะพะณ', 'ะดะตัะพะปั', 'ะดะตะฒะฐะปัะฒะฐัะธั','ัะธะฝะฐะฝัะพะฒัะน', 'ะบัะธะทะธั'])
###Output
_____no_output_____
###Markdown
[Sentiment dictionary built from a corpus](https://github.com/dkulagin/kartaslov)
###Code
url = "https://raw.githubusercontent.com/dkulagin/kartaslov/master/dataset/emo_dict/emo_dict.csv"
df = pd.read_csv(url, sep=';')
# dictionary of negative words
terms = df[df.tag == 'NGTV'][['term','value']]
negative_dict_corpora_v1 = dict(zip(terms.term.values, terms.value.values))
# dictionary of positive words
terms = df[df.tag == 'PSTV'][['term','value']]
positive_dict_corpora_v1 = dict(zip(terms.term.values, terms.value.values))
negative_dict = negative_dict_corpora_v1
positive_dict = positive_dict_corpora_v1
drop = """ะฒะธััั
ะธะฝัะตะบัะธั
ะทะฐัะฐะทะธัััั
ะทะฐะฑะพะปะตะฒะฐะฝะธะต
ะทะฐัะฐะถะตะฝะธะต
ัะผะตัะตัั
ะบะฐัะฐะฝัะธะฝ
ะทะฐะฑะพะปะตัั
ะทะฐัะฐะทะธัั
ัะฟะธะดะตะผะธั
ะฟะฝะตะฒะผะพะฝะธั
ะฑะพะปะตะทะฝั
ะธะฝัะตะบัะธะพะฝะฝัะน
ะบะฐัะฐะฝัะธะฝะฝัะน
ะธะทะพะปััะธั
ะณัะธะฟะฟ
ะพัะปะพะถะฝะตะฝะธะต
ะณะพัะฟะธัะฐะปะธะทะฐัะธั
ะฒะธัััะฝัะน
ะพะฑะพัััะตะฝะธะต
ะทะฐะฑะพะปะตะฒะฐะตะผะพััั"""
for item in drop.split():
del negative_dict[item]
def neg_ind(w):
scores = [negative_dict.get(item.split("_")[0], 0) for item in w]
return sum(scores)
def pos_ind(w):
scores = [positive_dict.get(item.split("_")[0], 0) for item in w]
return np.mean(scores)
###Output
_____no_output_____
###Markdown
Counting descriptors by date and source```{(date, category, source) : { total articles: count, token: count }}```
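To make that structure concrete, here is an illustrative entry of the `words_dct` dictionary built below (all dates, counts and keywords are invented for the example; the real entries also accumulate the sentiment aggregates such as `title_neg` and `text_pos`):

```python
# Hypothetical example entry -- values are made up.
example_entry = {
    ("2015-03-02", "economics", "lenta"): {
        "total_cnt": 241,   # all articles for that date/category/source
        "krzis_cnt": 37,    # articles containing at least one crisis keyword
        "банк": 21,         # per-keyword article counts
        "кризис": 9,
    }
}
```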
###Code
def zero_add(obj):
obj = str(obj)
if len(obj) == 1:
return '0' + obj
else:
return obj
words_dct = dict( )
for i,source in enumerate(['rbc', 'lenta', 'interfax', 'tass', 'ria']):
print(source)
path = f"../01_ะกะฑะพััะธะบ_ะฝะพะฒะพััะตะธฬ/news_data/{i + 1}.{source}/lemm/"
files = os.listdir(path)
for jtem in files:
with open(path + jtem, 'rb') as f:
data = pickle.load(f)
for item in tqdm_notebook(data):
if source in ['rbc', 'tass', 'ria']:
dt = item['date'][:10]
category = item.get('category', 'not_category')
elif source in ['lenta']:
dt = str(item['year']) + '-' + zero_add(item['month']) + '-' + zero_add(item['day'])
category = item.get('rubrics', 'not_category')
elif source in ['interfax']:
dt = str(item['year']) + '-' + zero_add(item['month']) + '-' + zero_add(item['day'])
category = item['href'].split("/")[1]
obj = (dt, category, source)
if obj not in words_dct:
words_dct[obj] = defaultdict(lambda:0)
tokens = set([w.split("_")[0] for w in item['title']] + [w.split("_")[0] for w in item['text']])
exist_token = tokens & krizis
words_dct[obj]['total_cnt'] += 1
words_dct[obj]['krzis_cnt'] += (len(exist_token) > 0)
# Sentiment block
words_dct[obj]['title_neg'] += neg_ind(item['title'])
words_dct[obj]['text_neg'] += neg_ind(item['text'])
words_dct[obj]['title_neg_len'] += neg_ind(item['title'])/(len(item['title']) + 1e-4)
words_dct[obj]['text_neg_len'] += neg_ind(item['text'])/(len(item['text']) + 1e-4)
words_dct[obj]['title_pos'] += pos_ind(item['title'])
words_dct[obj]['text_pos'] += pos_ind(item['text'])
words_dct[obj]['title_pos_len'] += pos_ind(item['title'])/(len(item['title']) + 1e-4)
words_dct[obj]['text_pos_len'] += pos_ind(item['text'])/(len(item['text']) + 1e-4)
for tok in exist_token:
words_dct[obj][tok] += 1
len(words_dct)
words_dct = {k : dict(v) for k,v in words_dct.items()}
len(words_dct)
with open("news_tokens_cnt.pickle", 'wb') as f:
pickle.dump(words_dct, f)
###Output
_____no_output_____
###Markdown
Aggregation by day
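A short note (added) on how `index_make` below weights the keyword series — this is simply a reading of the code, not a change to it: each keyword count series $x_j$ receives a weight proportional to its total correlation with all the series, normalised to sum to one, and the index is the weighted sum:

$$ w_j = \frac{\sum_k \rho_{jk}}{\sum_{j,k} \rho_{jk}}, \qquad \text{index}_t = \sum_j w_j \, x_{t,j} $$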
###Code
def index_make(df_term):
corr_matrix = df_term.corr()
w = np.array(corr_matrix.sum()/corr_matrix.sum().sum())
print(w)
index = (np.array(df_term).T*w.reshape(len(w),1)).sum(axis = 0)
return index
def min_max_scaler(df, col):
mx = df[col].max()
mn = df[col].min()
df[col] = 100*(df[col] - mn)/(mx - mn)
pass
with open("news_tokens_cnt.pickle", 'rb') as f:
words_dct = pickle.load(f)
df = pd.DataFrame(words_dct)
df = df.T
df.reset_index(inplace=True)
df.columns = ['date', 'category', 'source'] + list(df.columns[3:])
df.head()
df.source.unique()
df[df.source == 'lenta'].category.value_counts()[:50]
#usl = (df.source == 'rbc')
#usl = (df.source == 'ria')&( df.category.apply(lambda w: w in ['ะญะบะพะฝะพะผะธะบะฐ', 'ะะพะปะธัะธะบะฐ']))
#usl = (df.source == 'ria')
# usl = (df.source == 'tass')&( df.category.apply(lambda w: w in ['ะญะบะพะฝะพะผะธะบะฐ ะธ ะฑะธะทะฝะตั','ะะฐะปัะน ะฑะธะทะฝะตั', 'ะะพะปะธัะธะบะฐ']))
# usl = (df.source == 'tass')
# usl = (df.source == 'interfax')&( df.category.apply(lambda w: w in ['russia','business']))
# usl = (df.source == 'interfax')
# usl = (df.source == 'lenta')&( df.category.apply(lambda w: w in ['russia', 'economics']))
usl = (df.source == 'lenta')
df_cur = df[usl]
df_cur.head()
df_cur.total_cnt.sum()
df_cur = df[usl]
df_cur.loc[:,'date'] = df_cur['date'].apply(lambda w: w[:7]).values
df_cur = df_cur[df_cur['date'].apply(lambda w: '-' in w)]
df_cur = df_cur[df_cur['date'].apply(lambda w: w not in ['2020-06', '2020-07', '2020-08', '2020-09', \
'2020-10', '2020-11', '2020-12'])]
neg = ['title_neg', 'text_neg', 'title_neg_len', 'text_neg_len',
'title_pos', 'text_pos', 'title_pos_len', 'text_pos_len']
col = ['krzis_cnt', 'total_cnt'] + list(krizis)
df_agg = df_cur.groupby('date')[col].sum().sort_index()
df_agg2 = df_cur.groupby('date')[neg].mean().sort_index()
df_agg = df_agg.join(df_agg2)
df_agg.head()
df_index = pd.DataFrame( )
# Share of crisis-related articles per day
ind = (df_agg['krzis_cnt']/(df_agg['total_cnt'] + 1e-4))
df_index['date'] = ind.index
df_index['krizis_share'] = ind.values
df_index['total_cnt'] = df_agg['total_cnt'].values
df_index.set_index('date', inplace=True)
min_max_scaler(df_index, 'krizis_share')
ind = index_make(df_agg[list(krizis)])/1000
df_index['krizis_corr'] = ind
min_max_scaler(df_index, 'krizis_corr')
# Negative and positive sentiment scores, summed in several ways :)
df_index['negative'] = df_agg['title_neg'] + df_agg['text_neg']
min_max_scaler(df_index, 'negative')
df_index['negative_len'] = df_agg['title_neg_len'] + df_agg['text_neg_len']
min_max_scaler(df_index, 'negative_len')
df_index['diff'] = df_agg['title_neg'] + df_agg['text_neg'] + \
df_agg['title_pos'] + df_agg['text_pos']
min_max_scaler(df_index, 'diff')
df_index['diff_len'] = df_agg['title_neg_len'] + df_agg['text_neg_len'] + \
df_agg['title_pos_len'] + df_agg['text_pos_len']
min_max_scaler(df_index, 'diff_len')
df_index.head()
df_index[1:][[
#'krizis_share',
'krizis_corr',
#'negative',
#'negative_len',
#'diff'
]].plot(legend=True, figsize=(15,6));
df_index[1:][[
'krizis_share',
'krizis_corr',
'negative',
'negative_len',
'diff'
]].plot(legend=True, figsize=(15,6));
df_index.reset_index(inplace=True)
df_index.columns = ['fielddate', 'newsInd_krizis_share', 'total_cnt', 'newsInd_krizis_corr',
'newsInd_krizis_neg', 'newsInd_krizis_neg_len',
'newsInd_krizis_diff', 'newsInd_krizis_diff_len']
# df_index.to_csv('../ะะฝะดะตะบัั/data_simple_index_v2/news_krizis_index_month.tsv', sep="\t", index=None)
df_index.to_csv('data_simple_index_v2/news_krizis_index_month_lenta_all.tsv', sep="\t", index=None)
df_index
df_index.total_cnt.sum()
###Output
_____no_output_____ |
nbs/1.2_exp.csnc.ipynb | ###Markdown
Data exploration (taken from CodeSearchNet challenge)
###Code
import json
import pandas as pd
from pathlib import Path
pd.set_option('max_colwidth',300)
from pprint import pprint
import re
###Output
_____no_output_____
###Markdown
Preview dataset Download specific java dataset
###Code
!wget https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/java.zip
!unzip java.zip
!gzip -d java/final/jsonl/test/java_test_0.jsonl.gz
with open('test_data/java/final/jsonl/test/java_test_0.jsonl', 'r') as f:
sample_file = f.readlines()
sample_file[0]
print(type(sample_file))
print(len(sample_file))
pprint(json.loads(sample_file[0]))
###Output
Pretty printing has been turned OFF
###Markdown
Exploring the full DataSet
###Code
!ls test_data/java/
!ls test_data/java/final/jsonl
java_files = sorted(Path('test_data/java/final/jsonl/').glob('**/*.gz'))
java_files
print('Total of related java files: {}'.format(len(java_files)))
pprint(java_files)
columns_long_list = ['repo', 'path', 'url', 'code',
'code_tokens', 'docstring', 'docstring_tokens',
'language', 'partition']
columns_short_list = ['code_tokens', 'docstring_tokens',
'language', 'partition']
# export
def jsonl_list_to_dataframe(file_list, columns=columns_long_list):
"""Load a list of jsonl.gz files into a pandas DataFrame."""
return pd.concat([pd.read_json(f,
orient='records',
compression='gzip',
lines=True)[columns]
for f in file_list], sort=False)
# export
def plain_json_list_to_dataframe(file_list, columns):
    '''Load a list of jsonl files into a pandas DataFrame.'''
    return pd.concat([pd.read_json(f,
                                   orient='records',
compression=None,
lines=True)[columns]
for f in file_list], sort=False)
java_df = jsonl_list_to_dataframe(java_files)
java_df.head()
java_df.columns
java_df['partition'].unique()
###Output
_____no_output_____
###Markdown
Summary stats.
###Code
java_df.partition.value_counts()
java_df.groupby(['partition', 'language'])['code_tokens'].count()
java_df['code_len'] = java_df.code_tokens.apply(lambda x: len(x))
java_df['query_len'] = java_df.docstring_tokens.apply(lambda x: len(x))
###Output
_____no_output_____
###Markdown
Tokens Length Percentile
###Code
code_len_summary = java_df.groupby('language')['code_len'].quantile([.5, .7, .8, .9, .95])
display(pd.DataFrame(code_len_summary))
###Output
_____no_output_____
###Markdown
Query length percentile by language
###Code
query_len_summary = java_df.groupby('language')['query_len'].quantile([.5, .7, .8, .9, .95])
display(pd.DataFrame(query_len_summary))
java_df.shape
###Output
_____no_output_____
###Markdown
EDA using sparkBasic word countings of millions of files (case study) by @danaderp
###Code
from pyspark import SparkContext, SparkConf
appName = "csn-java-eda"   # placeholder application name (illustrative)
master = "local[*]"        # local mode; replace with a cluster master URL if available
# `spark.read.text(...)` needs an active SparkSession (`spark`); kept commented out
# here because only a SparkContext is created in the next cell.
# textFile = spark.read.text("Readme.txt")
###Output
_____no_output_____
###Markdown
The first thing a Spark program must do is to create a SparkContext object, which tells Spark how to access a cluster. To create a SparkContext you first need to build a SparkConf object that contains information about your application.
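Before building the context by hand in the next cells, here is a minimal, self-contained word-count sketch of the idea (an addition — the app name and the 1,000-row sample are illustrative, and it assumes a local PySpark installation):

```python
from pyspark import SparkConf, SparkContext

conf = SparkConf().setAppName("csn-wordcount-sketch").setMaster("local[*]")
sc = SparkContext(conf=conf)

# Count the most frequent code tokens in a small sample of java_df (built earlier).
token_lists = sc.parallelize(java_df['code_tokens'].head(1000).tolist())
top_tokens = (token_lists
              .flatMap(lambda tokens: tokens)
              .map(lambda token: (token, 1))
              .reduceByKey(lambda a, b: a + b)
              .takeOrdered(10, key=lambda kv: -kv[1]))
print(top_tokens)
sc.stop()  # release the context so the next cell can create its own
```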
###Code
conf = SparkConf().setAppName(appName).setMaster(master)
sc = SparkContext(conf=conf)
###Output
_____no_output_____
###Markdown
Data transformation
###Code
pprint(java_df.columns)
src_code_columns = ['code', 'code_tokens', 'code_len','partition']
java_src_code_df = java_df[src_code_columns]
java_src_code_df.columns
java_src_code_df.shape
###Output
_____no_output_____
###Markdown
Visualizing examples
###Code
java_src_code_df[:10]['code']
data_type_new_column = ['src' for x in range(java_src_code_df.shape[0])]
len(data_type_new_column)
java_src_code_df.loc[:,'data_type'] = data_type_new_column
java_src_code_df.head()
###Output
_____no_output_____
###Markdown
Data cleaning Remove functions with syntax errors
###Code
!pip install radon
java_code_df.shape
type(java_code_df['code'][9071])
java_code_df['code'][9071]
###Output
_____no_output_____
###Markdown
Exploratory analysis
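The entropy helpers used below come from `ds4se` and operate on the SentencePiece subword model trained above. As a rough, self-contained sketch of what a per-document entropy means here (my reading, stated as an assumption about those helpers), it is the Shannon entropy of a document's token distribution:

```python
import math
from collections import Counter

def token_entropy(tokens):
    """Shannon entropy (in bits) of a token frequency distribution."""
    counts = Counter(tokens)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

# Toy example on whitespace tokens (the real pipeline uses subword tokens).
print(token_entropy("public static void main ( String [ ] args )".split()))
```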
###Code
# export
# Imports
import dit
import math
import os
import logging
import matplotlib.pyplot as plt
import pandas as pd
import sentencepiece as sp
from collections import Counter
from pathlib import Path
from scipy.stats import sem, t
from statistics import mean, median, stdev
from tqdm.notebook import tqdm
# ds4se
from ds4se.mgmnt.prep.bpe import *
from ds4se.exp.info import *
from ds4se.desc.stats import *
java_path = Path('test_data/java/')
n_sample = int(len(code_df)*0.01)
sample_code_df = code_df.sample(n=n_sample)
sample_code_df.shape
sp_model_from_df(sample_code_df, output=java_path, model_name='_sp_bpe_modal', cols=['code'])
sp_processor = sp.SentencePieceProcessor()
sp_processor.Load(f"{java_path/'_sp_bpe_modal'}.model")
java_src_code_df.shape
n_sample_4_sp = int(java_src_code_df.shape[0]*0.01)
print(n_sample_4_sp)
java_code_df = java_src_code_df.sample(n=n_sample_4_sp)
java_code_df.shape
code_df.shape
# Use the model to compute each file's entropy
java_doc_entropies = get_doc_entropies_from_df(code_df, 'code', java_path/'_sp_bpe_modal', ['src'])
len(java_doc_entropies)
# Use the model to compute each file's entropy
java_corpus_entropies = get_corpus_entropies_from_df(code_df, 'code', java_path/'_sp_bpe_modal', ['src'])
java_corpus_entropies
# Use the model to compute each file's entropy
java_system_entropy = get_system_entropy_from_df(code_df, 'code', java_path/'_sp_bpe_modal')
java_system_entropy
flatten = lambda l: [item for sublist in l for item in sublist]
report_stats(flatten(java_doc_entropies))
java_doc_entropies
# Create a histogram of the entropy distribution
plt.hist(java_doc_entropies,bins = 20, color="blue", alpha=0.5, edgecolor="black", linewidth=1.0)
plt.title('Entropy histogram')
plt.ylabel("Num records")
plt.xlabel("Entropy score")
plt.show()
fig1, ax1 = plt.subplots()
ax1.set_title('Entropy box plot')
ax1.boxplot(java_doc_entropies, vert=False)
###Output
_____no_output_____
###Markdown
Descriptive metrics
###Code
#Libraries used in ds4se.desc.metrics.java nb
!pip install lizard
!pip install tree_sitter
!pip install bs4
from ds4se.desc.metrics import *
from ds4se.desc.metrics.java import *
import lizard
import chardet
java_src_code_df.head(1)
test_src_code = java_src_code_df['code'].values[0]
print(test_src_code)
###Output
protected final void fastPathOrderedEmit(U value, boolean delayError, Disposable disposable) {
final Observer<? super V> observer = downstream;
final SimplePlainQueue<U> q = queue;
if (wip.get() == 0 && wip.compareAndSet(0, 1)) {
if (q.isEmpty()) {
accept(observer, value);
if (leave(-1) == 0) {
return;
}
} else {
q.offer(value);
}
} else {
q.offer(value);
if (!enter()) {
return;
}
}
QueueDrainHelper.drainLoop(q, observer, delayError, disposable, this);
}
###Markdown
Sample of available metrics (for method level)
###Code
metrics = lizard.analyze_file.analyze_source_code('test.java', test_src_code)
func = metrics.function_list[0]
print('cyclomatic_complexity: {}'.format(func.cyclomatic_complexity))
print('nloc (length): {}'.format(func.length))
print('nloc: {}'.format(func.nloc))
print('parameter_count: {}'.format(func.parameter_count))
print('name: {}'.format(func.name))
print('token_count {}'.format(func.token_count))
print('long_name: {}'.format(func.long_name))
def add_method_mccabe_metrics_to_code_df(src_code_df, code_column):
"""Computes method level McCabe metrics and adds it as columns in the specified dataframe"""
#result_df = src_code_df.copy()
result_df = pd.DataFrame([])
for index, row in src_code_df.iterrows():
'''print('index{}'.format(index))
print('type:{}'.format(type(row[code_column])))'''
metrics = lizard.analyze_file.analyze_source_code('java_file.java', row[code_column])
metrics_obj = metrics.function_list
''' print('matrics_length', len(metrics_obj))'''
if(len(metrics_obj) == 0):
continue
row['cyclomatic_complexity'] = metrics_obj[0].cyclomatic_complexity
row['nloc'] = metrics_obj[0].nloc
row['parameter_count'] = metrics_obj[0].parameter_count
row['method_name'] = metrics_obj[0].name
row['token_count'] = metrics_obj[0].token_count
result_df = result_df.append(row)
'''
valid_indices.append(index)
cyclomatic_complexity.append(metrics_obj[0].cyclomatic_complexity)
nloc.append(metrics_obj[0].nloc)
parameter_count.append(metrics_obj[0].parameter_count)
method_name.append(metrics_obj[0].name)
token_count.append(metrics_obj[0].token_count)'''
'''src_code_df['cyclomatic_complexity'] = cyclomatic_complexity
src_code_df['nloc'] = nloc
src_code_df['parameter_count'] = parameter_count
src_code_df['method_name'] = method_name
src_code_df['token_count'] = token_count'''
return result_df
code_df = add_method_mccabe_metrics_to_code_df(java_src_code_df, 'code')
code_df.shape
code_df.head()
code_df.to_csv('test_data/clean_java.csv')
code_df.shape
java_code_df.shape
code_df.head()
code_df.describe()
display_numeric_col_hist(code_df['cyclomatic_complexity'], 'Cyclomatic complexity')
fig1, ax1 = plt.subplots()
ax1.set_title('Cyclomatic complexity box plot')
ax1.boxplot(code_df['cyclomatic_complexity'], vert=False)
display_numeric_col_hist(code_df['nloc'], 'Nloc')
fig1, ax1 = plt.subplots()
ax1.set_title('Nloc box plot')
ax1.boxplot(code_df['nloc'], vert=False)
display_numeric_col_hist(code_df['parameter_count'], 'Parameter count')
fig1, ax1 = plt.subplots()
ax1.set_title('Param. count box plot')
ax1.boxplot(code_df['parameter_count'], vert=False)
display_numeric_col_hist(code_df['token_count'], 'Token count')
fig1, ax1 = plt.subplots()
ax1.set_title('Token count box plot')
ax1.boxplot(code_df['token_count'], vert=False)
fig1, ax1 = plt.subplots()
ax1.set_title('Code len box plot')
ax1.boxplot(code_df['code_len'], vert=False)
code_df.shape
code_df[['cyclomatic_complexity', 'nloc', 'token_count', 'parameter_count']].corr()
import seaborn as sns
import numpy as np
def heatmap(x, y, **kwargs):
if 'color' in kwargs:
color = kwargs['color']
else:
color = [1]*len(x)
if 'palette' in kwargs:
palette = kwargs['palette']
n_colors = len(palette)
else:
n_colors = 256 # Use 256 colors for the diverging color palette
palette = sns.color_palette("Blues", n_colors)
if 'color_range' in kwargs:
color_min, color_max = kwargs['color_range']
else:
color_min, color_max = min(color), max(color) # Range of values that will be mapped to the palette, i.e. min and max possible correlation
def value_to_color(val):
if color_min == color_max:
return palette[-1]
else:
val_position = float((val - color_min)) / (color_max - color_min) # position of value in the input range, relative to the length of the input range
val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1
ind = int(val_position * (n_colors - 1)) # target index in the color palette
return palette[ind]
if 'size' in kwargs:
size = kwargs['size']
else:
size = [1]*len(x)
if 'size_range' in kwargs:
size_min, size_max = kwargs['size_range'][0], kwargs['size_range'][1]
else:
size_min, size_max = min(size), max(size)
size_scale = kwargs.get('size_scale', 500)
def value_to_size(val):
if size_min == size_max:
return 1 * size_scale
else:
val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01 # position of value in the input range, relative to the length of the input range
val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1
return val_position * size_scale
if 'x_order' in kwargs:
x_names = [t for t in kwargs['x_order']]
else:
x_names = [t for t in sorted(set([v for v in x]))]
x_to_num = {p[1]:p[0] for p in enumerate(x_names)}
if 'y_order' in kwargs:
y_names = [t for t in kwargs['y_order']]
else:
y_names = [t for t in sorted(set([v for v in y]))]
y_to_num = {p[1]:p[0] for p in enumerate(y_names)}
plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=0.1) # Setup a 1x10 grid
ax = plt.subplot(plot_grid[:,:-1]) # Use the left 14/15ths of the grid for the main plot
marker = kwargs.get('marker', 's')
kwargs_pass_on = {k:v for k,v in kwargs.items() if k not in [
'color', 'palette', 'color_range', 'size', 'size_range', 'size_scale', 'marker', 'x_order', 'y_order', 'xlabel', 'ylabel'
]}
ax.scatter(
x=[x_to_num[v] for v in x],
y=[y_to_num[v] for v in y],
marker=marker,
s=[value_to_size(v) for v in size],
c=[value_to_color(v) for v in color],
**kwargs_pass_on
)
ax.set_xticks([v for k,v in x_to_num.items()])
ax.set_xticklabels([k for k in x_to_num], rotation=45, horizontalalignment='right')
ax.set_yticks([v for k,v in y_to_num.items()])
ax.set_yticklabels([k for k in y_to_num])
ax.grid(False, 'major')
ax.grid(True, 'minor')
ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])
ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])
ax.set_facecolor('#F1F1F1')
ax.set_xlabel(kwargs.get('xlabel', ''))
ax.set_ylabel(kwargs.get('ylabel', ''))
# Add color legend on the right side of the plot
if color_min < color_max:
ax = plt.subplot(plot_grid[:,-1]) # Use the rightmost column of the plot
col_x = [0]*len(palette) # Fixed x coordinate for the bars
bar_y=np.linspace(color_min, color_max, n_colors) # y coordinates for each of the n_colors bars
bar_height = bar_y[1] - bar_y[0]
ax.barh(
y=bar_y,
width=[5]*len(palette), # Make bars 5 units wide
left=col_x, # Make bars start at 0
height=bar_height,
color=palette,
linewidth=0
)
ax.set_xlim(1, 2) # Bars are going from 0 to 5, so lets crop the plot somewhere in the middle
ax.grid(False) # Hide grid
ax.set_facecolor('white') # Make background white
ax.set_xticks([]) # Remove horizontal ticks
ax.set_yticks(np.linspace(min(bar_y), max(bar_y), 3)) # Show vertical ticks for min, middle and max
ax.yaxis.tick_right() # Show vertical ticks on the right
columns = ['cyclomatic_complexity', 'nloc', 'token_count', 'parameter_count']
corr = code_df[columns].corr()
corr = pd.melt(corr.reset_index(), id_vars='index') # Unpivot the dataframe, so we can get pair of arrays for x and y
corr.columns = ['x', 'y', 'value']
heatmap(
x=corr['x'],
y=corr['y'],
size=corr['value'].abs()
)
def corrplot(data, size_scale=500, marker='s'):
corr = pd.melt(data.reset_index(), id_vars='index').replace(np.nan, 0)
corr.columns = ['x', 'y', 'value']
heatmap(
corr['x'], corr['y'],
color=corr['value'], color_range=[-1, 1],
palette=sns.diverging_palette(20, 220, n=256),
size=corr['value'].abs(), size_range=[0,1],
marker=marker,
x_order=data.columns,
y_order=data.columns[::-1],
size_scale=size_scale
)
corrplot(code_df[columns].corr(), size_scale=300);
###Output
_____no_output_____ |
solutions/03_exploring_data_solutions.ipynb | ###Markdown
***EXERCISE 3.1***Get a DataFrame with just `count` and `mean` (first 2 rows) of the numeric columns
###Code
df.describe().head(2)
###Output
_____no_output_____
###Markdown
***EXERCISE 3.2***Show the 6 least common countries (in absolute value) and the 3 most common wine varieties (in percentage)
###Code
df['country'].value_counts().tail(6)
(df['variety'].value_counts(normalize=True) * 100).head(3)
###Output
_____no_output_____ |
silver/D00_Cirq_Introduction.ipynb | ###Markdown
prepared by Özlem Salehi (QTurkey) This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. $ \newcommand{\bra}[1]{\langle #1|} $$ \newcommand{\ket}[1]{|#1\rangle} $$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $$ \newcommand{\dot}[2]{ #1 \cdot #2} $$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $$ \newcommand{\mypar}[1]{\left( #1 \right)} $$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $$ \newcommand{\onehalf}{\frac{1}{2}} $$ \newcommand{\donehalf}{\dfrac{1}{2}} $$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $$ \newcommand{\vzero}{\myvector{1\\0}} $$ \newcommand{\vone}{\myvector{0\\1}} $$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ Introduction to Cirq You can import Cirq using the following command:
###Code
import cirq
###Output
_____no_output_____
###Markdown
Creating qubits There are different ways to create qubits in Cirq. Here, we will introduce two of them. Named qubits Named qubit is the simplest way to create qubits. The qubits are identified by their name.
###Code
#Let's create two qubits named source and target
s = cirq.NamedQubit('source')
t = cirq.NamedQubit('target')
print(s)
print(t)
###Output
source
target
###Markdown
Lined qubits Lined qubit creates a qubit located on a 1-D line and each qubit is identified by its $x$ coordinate.
###Code
# Returns the 4th qubit on the line
q4 = cirq.LineQubit(4)
print(q4)
#Note that 4 is not the number of qubits.
###Output
4
###Markdown
To create a list of qubits, range function should be used.
###Code
#Returns a list of 4 qubits, starting at index 0 and ending at index 3
qlist = cirq.LineQubit.range(4)
#List can be subscriptable
print(qlist)
#Qubits in the list can be stored individually as well
qubit0, qubit1, qubit2 = cirq.LineQubit.range(3)
qubit0
###Output
[cirq.LineQubit(0), cirq.LineQubit(1), cirq.LineQubit(2), cirq.LineQubit(3)]
###Markdown
Creating Circuits A quantum circuit is created by the following command.
###Code
#Creates a circuit object
circuit = cirq.Circuit()
###Output
_____no_output_____
###Markdown
Gates Here are some gates and how we apply them in Cirq. Gates are applied to qubits forming _operations_ and operations are appended to quantum circuits. Qubits do not belong to circuits.
###Code
#Let's import the gates.
from cirq import X, Z, H, CX, CZ, SWAP, CCX
#Apply NOT gate to qubit 2
circuit.append(X(qlist[2]))
#Apply Z gate to qubit 2
circuit.append(Z(qlist[2]))
#Apply H gate to qubit 3
circuit.append(H(qlist[3]))
#Apply CNOT gate where qubit 2 is control and qubit 0 is target
circuit.append(CX(qlist[2], qlist[0]))
#Apply CZ gate where qubit 0 is control and qubit 1 is target
circuit.append(CZ(qlist[0], qlist[1]))
#Apply SWAP gates to qubits 1 and 3
circuit.append(SWAP(qlist[1], qlist[3]))
#Apply CCNOT gate where qubit 0 and 1 are the control and qubit 2 is the target
circuit.append(CCX(qlist[0], qlist[1], qlist[2]))
###Output
_____no_output_____
###Markdown
Let's draw our circuit to visualize the operations.
###Code
print(circuit)
###Output
0: โโโโโโโโโโโXโโโ@โโโโโโโ@โโโ
โ โ โ
1: โโโโโโโโโโโโผโโโ@โโโรโโโ@โโโ
โ โ โ
2: โโโXโโโZโโโ@โโโโโโโโผโโโXโโโ
โ
3: โโโHโโโโโโโโโโโโโโโรโโโโโโโ
###Markdown
Task 1 Create a quantum circuit with 10 qubits. 1. Apply $H$ gate to qubit 0 2. Apply nine $CNOT$ gates where qubit $0$ is the control and qubit $i$ is the target for $i=1\cdots9$.Draw your circuit.
###Code
import cirq
from cirq import H, CX
qbit = cirq.LineQubit.range(10)
circuit = cirq.Circuit()
circuit.append(H(qbit[0]))
for i in range(1, 10):
circuit.append(CX(qbit[0], qbit[i]))
print(circuit)
###Output
0: โโโHโโโ@โโโ@โโโ@โโโ@โโโ@โโโ@โโโ@โโโ@โโโ@โโโ
โ โ โ โ โ โ โ โ โ
1: โโโโโโโXโโโโผโโโโผโโโโผโโโโผโโโโผโโโโผโโโโผโโโโผโโโ
โ โ โ โ โ โ โ โ
2: โโโโโโโโโโโXโโโโผโโโโผโโโโผโโโโผโโโโผโโโโผโโโโผโโโ
โ โ โ โ โ โ โ
3: โโโโโโโโโโโโโโโXโโโโผโโโโผโโโโผโโโโผโโโโผโโโโผโโโ
โ โ โ โ โ โ
4: โโโโโโโโโโโโโโโโโโโXโโโโผโโโโผโโโโผโโโโผโโโโผโโโ
โ โ โ โ โ
5: โโโโโโโโโโโโโโโโโโโโโโโXโโโโผโโโโผโโโโผโโโโผโโโ
โ โ โ โ
6: โโโโโโโโโโโโโโโโโโโโโโโโโโโXโโโโผโโโโผโโโโผโโโ
โ โ โ
7: โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโXโโโโผโโโโผโโโ
โ โ
8: โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโXโโโโผโโโ
โ
9: โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโXโโโ
###Markdown
[click for our solution](D00_Cirq_Introduction_Solutions.ipynbtask1) More about gates It is possible to apply a gate to multiple qubits at once by using the keyword on_each and using `*` before the qubits. (`*` is used in Python to unpack a list)
###Code
circuit2 = cirq.Circuit()
qlist = cirq.LineQubit.range(4)
circuit2.append(H.on_each(*qlist))
print(circuit2)
###Output
0: โโโHโโโ
1: โโโHโโโ
2: โโโHโโโ
3: โโโHโโโ
###Markdown
controlled function creates the controlled version of a gate.
###Code
CCCH = H(qlist[2]).controlled_by(qlist[0],qlist[1],qlist[3])
circuit2.append(CCCH)
print(circuit2)
###Output
0: โโโHโโโ@โโโ
โ
1: โโโHโโโ@โโโ
โ
2: โโโHโโโHโโโ
โ
3: โโโHโโโ@โโโ
###Markdown
It is also possible to first define the operation and then specify the qubits.
###Code
CCCZ = Z.controlled(3)
circuit2.append(CCCZ(*qlist[0:4]))
print(circuit2)
###Output
0: โโโHโโโ@โโโ
โ
1: โโโHโโโ@โโโ
โ
2: โโโHโโโ@โโโ
โ
3: โโโHโโโ@โโโ
###Markdown
One can define new gates by arithmetic operations as well
###Code
ROOTX = X**0.5
circuit2.append(ROOTX(qlist[1]))
print(circuit2)
###Output
0: โโโHโโโ@โโโ@โโโโโโโโโโโ
โ โ
1: โโโHโโโ@โโโ@โโโX^0.5โโโ
โ โ
2: โโโHโโโHโโโ@โโโโโโโโโโโ
โ โ
3: โโโHโโโ@โโโ@โโโโโโโโโโโ
###Markdown
Task 2 Create a quantum circuit with 10 qubits. 1. Apply $H$ gates to all qubits.2. Apply $X$ gate to qubit 0 controlled by qubits 1-92. Apply $H$ gates to all qubits.Draw your circuit.
###Code
import cirq
from cirq import H, X
qbit = cirq.LineQubit.range(10)
circuit = cirq.Circuit()
circuit.append(H.on_each(qbit[:]))
circuit.append(X(qbit[0]).controlled_by(*qbit[1:10]))
circuit.append(measure(*qbit))
print(circuit)
###Output
0: โโโHโโโXโโโMโโโ
โ โ
1: โโโHโโโ@โโโMโโโ
โ โ
2: โโโHโโโ@โโโMโโโ
โ โ
3: โโโHโโโ@โโโMโโโ
โ โ
4: โโโHโโโ@โโโMโโโ
โ โ
5: โโโHโโโ@โโโMโโโ
โ โ
6: โโโHโโโ@โโโMโโโ
โ โ
7: โโโHโโโ@โโโMโโโ
โ โ
8: โโโHโโโ@โโโMโโโ
โ โ
9: โโโHโโโ@โโโMโโโ
###Markdown
[click for our solution](D00_Cirq_Introduction_Solutions.ipynbtask2) Running Circuits One way to get results from your circuit is to measure and run it for multiple times. Let's first create a simple circuit and measure it.
###Code
import cirq
from cirq import H, measure
# Create quantum and classical registers with 2 qubits
qlist = cirq.LineQubit.range(2)
# Create a new circuit
circuit = cirq.Circuit()
# Apply H gate to qubit 0
circuit.append(H(qlist[0]))
# Measure both qubits, result is the label
circuit.append(measure(*qlist, key='result'))
###Output
_____no_output_____
###Markdown
Cirq can simulate circuits with up to 20 qubits. We initialize the simulator and run our circuit multiple times to take samples.
###Code
# This is the local simulator
s = cirq.Simulator()
# circuit is the circuit to be simulated
# shots is the how many times we want to run the circuit
samples=s.run(circuit, repetitions=1000)
# Get the results as a dictionary
print(samples.histogram(key='result'))
###Output
Counter({0: 521, 2: 479})
###Markdown
Note that the outputs are in decimal form (i.e. 2 instead of 10). We can obtain the state representation as follows.
###Code
def bitstring(bits):
return "".join(str(int(b)) for b in bits)
counts = samples.histogram(key="result",fold_func=bitstring)
print(counts)
###Output
Counter({'00': 521, '10': 479})
###Markdown
Cirq also provides the list of all individual measurement outcomes.
###Code
result = samples.measurements["result"]
print(result)
###Output
[[1 0]
[0 0]
[0 0]
...
[1 0]
[1 0]
[0 0]]
###Markdown
It is also possible to measure only some of the qubits. Let's only measure qlist[0] this time.
###Code
import cirq
from cirq import H, measure
# Create quantum and classical registers with 2 qubits
qlist = cirq.LineQubit.range(2)
# Create a new circuit
circuit = cirq.Circuit()
# Apply H gate to qubit 0
circuit.append(H(qlist[0]))
# Measure both qubits, result is the label
circuit.append(measure(qlist[0], key='result'))
# This is the local simulator
s = cirq.Simulator()
samples=s.run(circuit, repetitions=1000)
def bitstring(bits):
return "".join(str(int(b)) for b in bits)
counts = samples.histogram(key="result",fold_func=bitstring)
print(counts)
###Output
Counter({'1': 515, '0': 485})
###Markdown
Task 3 Implement the circuit in Task 1. Measure all the qubits and simulate your circuit for 1000 times.
###Code
import cirq
from cirq import H, CX, measure, Simulator
qbit = cirq.LineQubit.range(10)
circuit = cirq.Circuit()
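# Build a GHZ-style state: H on qubit 0, then CNOTs from qubit 0 to every other qubit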
circuit.append(H(qbit[0]))
for i in range(1, 10):
circuit.append(CX(qbit[0], qbit[i]))
circuit.append(measure(*qbit, key='result'))
s = Simulator()
def bitstring(bits):
return "".join(str(int(b)) for b in bits)
samples = s.run(circuit,repetitions=1000)
#vect = s.simulate(circuit)
print(samples.histogram(key='result', fold_func=bitstring))
#print(vect)
###Output
Counter({'0000000000': 513, '1111111111': 487})
###Markdown
[click for our solution](D00_Cirq_Introduction_Solutions.ipynb#task3) Debugging the Circuits - State Representation It is possible to get the exact quantum state from the simulator. You shouldn't measure your circuit before getting the state. We will use the _simulate_ function in Cirq to obtain the exact quantum state.
###Code
import cirq
from cirq import H, measure
# Create quantum and classical registers with 2 qubits
qlist = cirq.LineQubit.range(2)
# Create a new circuit
circuit = cirq.Circuit()
# Apply H gate to qubit 0
circuit.append(H(qlist[0]))
# Simulate the circuit
results=s.simulate(circuit)
print(results)
###Output
measurements: (no measurements)
output vector: 0.707|0โฉ + 0.707|1โฉ
###Markdown
Note that since we did not apply any gate on qlist[1], it is not visible in the output. Let's check the following circuit.
###Code
import cirq
from cirq import H, I, measure
# Create quantum and classical registers with 2 qubits
qlist = cirq.LineQubit.range(2)
# Create a new circuit
circuit = cirq.Circuit()
# Apply H gate to qubit 0
circuit.append(H(qlist[0]))
# Apply Identity to qubit 1
circuit.append(I(qlist[1]))
# Simulate the circuit
results=s.simulate(circuit)
print(results)
###Output
measurements: (no measurements)
output vector: 0.707|00โฉ + 0.707|10โฉ
###Markdown
If you use the simulator after the measurement, you will observe that the quantum state has collapsed to one of the states.
###Code
circuit.append(measure(*qlist))
results=s.simulate(circuit)
print(results)
###Output
measurements: 0,1=00
output vector: |00โฉ
###Markdown
If we use the simulate method when there are four or more qubits, then the quantum state is represented in vector form instead of Dirac notation.
###Code
import cirq
from cirq import H, I, measure
# Create quantum and classical registers with 4 qubits
qlist = cirq.LineQubit.range(4)
# Create a new circuit
circuit = cirq.Circuit()
# Apply H gate to all qubits
circuit.append(H.on_each(*qlist))
# Simulate the circuit
results=s.simulate(circuit)
print(results)
###Output
measurements: (no measurements)
output vector: [0.24999997+0.j 0.24999997+0.j 0.24999997+0.j 0.24999997+0.j
0.24999997+0.j 0.24999997+0.j 0.24999997+0.j 0.24999997+0.j
0.24999997+0.j 0.24999997+0.j 0.24999997+0.j 0.24999997+0.j
0.24999997+0.j 0.24999997+0.j 0.24999997+0.j 0.24999997+0.j]
###Markdown
Task 4 Create a quantum circuit with 4 qubits. Apply a Hadamard gate to each qubit and a $CZ$ gate to qubits 0 and 1. Use the simulator without measuring the circuit. Check which entries have a negative sign.
###Code
import cirq
from cirq import H, CZ
qlist = cirq.LineQubit.range(4)
# Create a new circuit
circuit = cirq.Circuit()
# Apply H gate to all qubits
circuit.append(H.on_each(*qlist))
circuit.append(CZ.on(*qlist[0:2]))
# Simulate the circuit
results=s.simulate(circuit)
print(results, '\n')
print(circuit)
###Output
measurements: (no measurements)
output vector: [ 0.24999997+0.j 0.24999997+0.j 0.24999997+0.j 0.24999997+0.j
0.24999997+0.j 0.24999997+0.j 0.24999997+0.j 0.24999997+0.j
0.24999997+0.j 0.24999997+0.j 0.24999997+0.j 0.24999997+0.j
-0.24999997+0.j -0.24999997+0.j -0.24999997+0.j -0.24999997+0.j]
0: โโโHโโโ@โโโ
โ
1: โโโHโโโ@โโโ
2: โโโHโโโโโโโ
3: โโโHโโโโโโโ
###Markdown
[click for our solution](D00_Cirq_Introduction_Solutions.ipynb#task4) Unitary Matrix Representation It is possible to obtain the unitary matrix representation of gates and circuits.
###Code
from cirq import CX, X
ROOTX = X**0.5
print('Unitary matrix representation of the CNOT gate')
print(cirq.unitary(CX))
print('Unitary matrix representation of the ROOTX gate we have created')
print(cirq.unitary(ROOTX))
print('Unitary matrix representation of H operator on 2 qubits.')
qlist= cirq.LineQubit.range(2)
circuit = cirq.Circuit()
circuit.append(cirq.H.on_each(*qlist))
print(cirq.unitary(circuit))
import cirq
from cirq import H, CZ, Z
qlist = cirq.LineQubit.range(5)
# Create a new circuit
circuit = cirq.Circuit()
# Apply Z gate to qubit 0
circuit.append(Z(qlist[0]))
# Simulate the circuit
results=s.simulate(circuit)
print(results)
print(circuit)
###Output
measurements: (no measurements)
output vector: |0โฉ
0: โโโZโโโ
|
03_Visualizations_solutions/Visualization.ipynb | ###Markdown
Website Data Visualization in Python Introduction In this module, you will learn to quickly and flexibly generate a range of visualizations to explore data and communicate with your audience. This module contains a practical introduction to data visualization in Python and covers important rules to follow when creating visualizations. Learning Objectives * Learn critical rules about data visualization (selecting graph types; labeling visual encodings; referencing data sources). * Become familiar with two core Python data visualization tools, Matplotlib and seaborn. * Start to develop the ability to conceptualize which visualizations can best reveal various types of patterns in your data. Choosing a Data Visualization Package <!-- Matplotlib is always capitalized, like a typical proper noun. Seaborn is capitalized like an ordinary word, so it's lowercase if "seaborn" appears in the middle of a sentence. --> There are many excellent data visualization modules available in Python. You can read more about different options for data visualization in Python in the [More Resources](More-Resources:) section at the bottom of this notebook. For this tutorial we will stick to a tried and true combination of 2-D plotting libraries: Matplotlib and seaborn. Matplotlib is very expressive, meaning that it has functionality to allow extensive and fine-tuned creation of figures. It makes no assumptions about data, so it can be used to make historical timelines and fractals as well as bar charts and scatter plots. Matplotlib's flexibility comes at the cost of additional complexity in its use. Seaborn is a higher-level module, trading some of the expressiveness and flexibility of Matplotlib for more concise and easier syntax. For our purposes, seaborn improves on Matplotlib in several ways, making it easier to create small multiples, improving the color and aesthetics, and including direct support for some visualizations such as regression model results. Seaborn's creator, Michael Waskom, has compared the two: > If Matplotlib "tries to make easy things easy and hard things possible", seaborn tries to make a well-defined set of hard things easy too. Seaborn and Matplotlib together It may seem like we need to choose between these two approaches, but happily this is not the case. Seaborn is itself written in Matplotlib (and you will sometimes see seaborn called a "wrapper" around Matplotlib). We can use seaborn to make graphs quickly, then Matplotlib for specific adjustments. Whenever you see `plt` referenced in the code below, we are using a submodule of `matplotlib`. Import Packages and Set Up
###Code
# These abbreviations (pandas -> pd; seaborn -> sns) may seem arbitrary,
# but they are community conventions that will help keep your work easy
# to read and compare with that of other Python users.
import pandas as pd
from sqlalchemy import create_engine
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Jupyter-specific "magic command" to plot images directly in the notebook.
%matplotlib inline
# Engine to connect to SQL database
# We'll create this once and provide to pandas whenever we use read_sql()
engine = create_engine("postgresql://stuffed.adrf.info/appliedda")
###Output
_____no_output_____
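###Markdown
Before loading the course data, here is a minimal sketch of the "seaborn for the quick plot, Matplotlib for the fine-tuning" workflow described above. The data in this example are synthetic (random draws), not the course datasets.
###Code
# A minimal sketch of combining seaborn and Matplotlib (synthetic data only)
import numpy as np
rng = np.random.RandomState(0)
demo_x = rng.normal(size=200) # hypothetical predictor
demo_y = 2 * demo_x + rng.normal(size=200) # hypothetical outcome
# seaborn: one call gives a reasonable-looking plot
sns.scatterplot(x=demo_x, y=demo_y, color="xkcd:sea blue")
# Matplotlib: fine-grained adjustments to the same figure
plt.title("Seaborn draws the plot; Matplotlib tweaks it")
plt.xlabel("Synthetic x")
plt.ylabel("Synthetic y")
plt.show()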
###Markdown
Motivation In this notebook, we are going to tackle a series of questions. To answer them, we will introduce you to various visualizations which will provide a clearer view of the data than just using summary statistics, and help you create powerful graphics that better convey the point you want to make. The questions we will focus on in this notebook are: - About how old are graduate students when they finish their dissertations? That is, what is the distribution of age at dissertation? How does this differ by field of study? - What are the differences in starting salary by field of PhD? How has starting salary changed over the years, both overall and by field? - What are primary sources of funding for students in various fields of study? - What are the funding histories of graduate students in the three years leading up to their dissertation? How do the funding histories differ, and what are the most frequent funding sequences? Load DataFrames We've separated out the SQL queries constructing the Data Frames that we will use for this notebook, and read them in from `.sql` files. Since they are not the focus of this notebook, we won't go into detail on how we've built up the queries, but we suggest you take a look at the `joined_person.sql` and `joined_semester.sql` files to make sure you understand how they're created. `person_df` is at the individual level across the entire range and has individual-level statistics on characteristics such as their demographics, academic achievements, and debt levels. `semester_df` is at the person-semester level and has information about their funders, team size, and semester.
###Code
from pathlib import Path
person_level_query = Path('./joined_person.sql').read_text()
person_df = pd.read_sql(person_level_query, engine)
person_df
from pathlib import Path
semester_level_query = Path('./joined_semester.sql').read_text()
semester_df = pd.read_sql(semester_level_query, engine)
semester_df
###Output
_____no_output_____
###Markdown
Matplotlib We'll begin with some straightforward Matplotlib functions. We'll start with some motivation questions, then use the appropriate Matplotlib commands to create a visualization that helps answer that question. Prepare the data When designing visualizations, it can help to just draw a sketch on paper first. Once you have an idea of what type of graph is best suited to illustrate the fact that you want to show, consider how to prepare the data you need for the graph. We can provide Matplotlib a `pd.DataFrame` or `pd.Series` that we've created using Pandas. We'll want to ensure that the DataFrame includes exactly the information we want to plot, because Matplotlib won't be doing much more than simple aggregation. Histogram**Motivating Question: About how old are graduate students when they finish their dissertations? That is, what is the distribution of age at dissertation? How does this differ by field of study?** Since age is a numerical variable, we'll want to use a visualization such as a histogram. We'll start by plotting a histogram of a single variable, then customizing the figure. For a histogram, we'll want to consider the scale -- whether we should plot everything or a subset of values. Plotting our data as a histogram makes it easier to quickly observe some features, such as the overall shape of the distribution and its skewness and kurtosis.
###Code
# For now, we'll just take a single series: age at dissertation
ages = person_df.age_at_diss.dropna()
ages.describe()
###Output
_____no_output_____
###Markdown
An easy way to get started with Matplotlib is to use its state-based interface, `matplotlib.pyplot`, which we have already imported above as `plt`. We can create a graph, then adjust its current state a bit at a time using `plt` functions. To create a new histogram, we'll simply pass our ages series into `plt.hist()`.
###Code
plt.hist(ages)
# The show() function outputs the current state of `pyplot`: our current fig.
plt.show()
###Output
_____no_output_____
###Markdown
The `.describe()` above already suggested a strong right skew, but this visualization shows us the distribution in much greater detail.But this is bare: let's at least add some labels.
###Code
plt.hist(ages)
plt.ylabel('Dissertators', fontsize='medium', labelpad=10)
plt.xlabel('Age', fontsize='medium', labelpad=10)
# In the notebook environment, the figure will automatically be
# displayed if the Python code cell ends with an update to the plot,
# so we can skip plt.show() in many cases.
###Output
_____no_output_____
###Markdown
Built-in styles Now let's see how we can improve the style of this visualization. Every part of this figure can be customized in several ways, and Matplotlib includes several popular styles built-in.
###Code
print('Built-in style names:', ', '.join(sorted(plt.style.available)))
# Change the default style (affects font, color, positioning, and more)
plt.style.use('fivethirtyeight')
plt.xlabel('Age', fontsize='medium', labelpad=10)
plt.ylabel('Dissertators', fontsize='medium', labelpad=10)
# We need to replot the data in each new notebook cell.
plt.hist(ages, bins=30)
plt.show()
###Output
_____no_output_____
###Markdown
Style customization That's a bit better, but Matplotlib allows us to customize every individual component on the fly.> *How can we reset customizations?* In a notebook with multiple figures, we may want to reset everything before our next visualization. Or, having explored several options, we might want to undo all the stylistic tweaks without having to rerun the entire notebook. `matplotlib.rc_file_defaults()` will return just about everything to default settings.
###Code
mpl.rc_file_defaults()
# Change the figure size -- let's make it big.
plt.rc('figure', figsize=(8, 5))
# Because `pyplot` works by incrementally updating the state of `plt`,
# some changes must be made prior to creating those elements in the figure.
# We'll make the axes spines (the box around the plot) invisible
mpl.rc('axes', edgecolor='white', titlepad=20)
# These will remove the axes ticks
mpl.rc('xtick', bottom=False)
mpl.rc('ytick', left=False)
# Now we'll replot the data. With such a large sample, let's make a bin for each year.
n_bins = int(ages.max() - ages.min())
plt.hist(
ages,
bins=n_bins,
align='left',
color='xkcd:sage'
)
# Just after adding the data is a good time to remember to source it.
plt.annotate(
'Sources: NCSES SED',
fontsize='x-small',
xycoords="figure fraction", # specify x and y positions as % of the overall figure
xy=(1, 0.01), # 100% to the right (x) and 1% to the top (y) means bottom right
horizontalalignment='right', # the text will align appropriately for bottom right
)
# Add a title to the top of the figure
plt.title("Dissertations Typically Completed Near Age Thirty", fontsize='large')
# Add axis labels, with a bit more padding than default between the label and the axes
plt.xlabel('Age of Dissertator', fontsize='medium', labelpad=10)
plt.ylabel('Number of Dissertations', fontsize='medium', labelpad=10)
# Reduce the size of the axis labels
plt.xticks(fontsize=9)
plt.yticks(fontsize=9)
# Add horizontal gridlines using negative space across the bars
plt.grid(
color='white',
linewidth=1,
axis='y'
)
###Output
_____no_output_____
###Markdown
Data sourcing A critical aspect of any data visualization intended for release is a reference to the source of the data being used. In these examples, we simply reference the agencies and names of the datasets. Whenever possible, we would provide a direct path so that our audience can find the data we used to build the figure. When this is not feasible -- as with these restricted-access data -- be sure to direct the reader to documentation describing the data. Either way, providing clear sourcing for the underlying data is an absolute requirement of responsible dissemination. Transparent communication of sources and references builds trust between analyst and audience and helps enable the reproducibility of analyses.
###Code
# If we're repeatedly doing the same kind of annotation,
# it helps a lot to turn that into a function.
def add_sourcing(plt, source_string, fontsize='x-small'):
"""Add small sourcing note to lower-right of current plot
We would be using the same arguments over and over to do this.
So a quick function will make it simpler. Now we can simply:
add_sourcing(plt, 'Sources: IRIS UMETRICS, NCSES SED')
"""
return plt.annotate(
source_string,
fontsize=fontsize,
xycoords="figure fraction", # specify x and y positions as % of the overall figure
xy=(1, 0.01), # 100% to the right (x) and 1% to the top (y) means bottom right
horizontalalignment='right', # the text will align appropriately for bottom right
)
print("Now we can simply run:\n add_sourcing(plt, 'Text goes here')")
###Output
_____no_output_____
###Markdown
Multiple plots in one figure Matplotlib allows us to make consecutive changes to the same plot, then display it whenever we're ready. The same process allows us to layer on multiple plots. By default, the first graph you create will be at the lowest layer, with each successive graph layered on top. Below, we observe a difference in mean age at dissertation by field. Let's overlay a field with one of the higher means and one with one of the lower to visualize the difference in their distributions.
###Code
person_df.groupby('phd_major_field')['age_at_diss'].agg(['mean', 'count']).sort_values('mean')
fields_of_interest = ['Physical Sciences', 'Education']
# Create a subset of person_df, plotting a histogram of age for each
for major_field in fields_of_interest:
field_ages = person_df[person_df.phd_major_field == major_field].age_at_diss.dropna()
plt.hist(field_ages, bins='doane', alpha=0.5)
# We'll definitely need a label to keep these apart...
plt.legend(
labels=fields_of_interest, #ensure that labels are in the same order as above
loc='center right', # the default is upper right, so move a little closer
frameon=False, # remove the box around the legend
)
add_sourcing(plt, 'Sources: NCSES SED')
###Output
_____no_output_____
###Markdown
Checkpoint 1: Histogram Try customizing your own histogram. If you want to try something other than age, another continuous variable in `person_df` is `salary_k`, the PhD graduate's anticipated salary (in thousands). Or `semester_df` includes the `team_size` variable, measuring the size of the federally-funded teams that a student is working with in each semester. You'll definitely want to include: - A title (`plt.title`) - Axis labels (`plt.xlabel` and `plt.ylabel`) - Data sourcing (`plt.annotate` or the `add_sourcing` function defined above) If you use multiple colors, you'll want to add a legend as well (`plt.legend`). Here we will change the variable from `age_at_diss` to `salary_k`. We add `plt.title`, `plt.xlabel`, `plt.ylabel`, and the `add_sourcing` function. If you want to change the range of the bins on the x-axis, you can use `plt.xlim` and define the needed range (e.g. from 0 to 200K). To change the range on the y-axis, you can use `plt.ylim`.
###Code
fields_of_interest = ['Physical Sciences', 'Education']
# Create a subset of person_df, plotting a histogram of anticipated salary for each
for major_field in fields_of_interest:
field_salary = person_df[person_df.phd_major_field == major_field].salary_k.dropna()
plt.hist(field_salary, bins='doane', alpha=0.5)
plt.title('Most students expect salary to be between $35-75K')
plt.xlabel('Anticipated salary')
plt.ylabel('Number of students')
plt.xlim(0, 200) # limit the x-axis range to 0-200 (thousands)
# We'll definitely need a label to keep these apart...
plt.legend(
labels=fields_of_interest, #ensure that labels are in the same order as above
loc='center right', # the default is upper right, so move a little closer
frameon=False, # remove the box around the legend
)
add_sourcing(plt, 'Sources: NCSES SED')
###Output
_____no_output_____
###Markdown
Seaborn Seaborn provides a high-level interface to Matplotlib, which is powerful but sometimes unwieldy. Seaborn provides many useful defaults, so that we can quickly have: - More aesthetically pleasing defaults - A better range of color palettes - More complex graphs with less code - Small multiples (a sequence of small graphs in one figure) As you'll see, these libraries are complementary. Some tweaks will still require reaching back into Matplotlib. Bar chart For this section, consider the following question: **What are the differences in starting salary by field of PhD? How has starting salary changed over the years, both overall and by field?** A bar plot presents categorical data with rectangular bars proportional to the values that they represent. In this case, we plot a horizontal bar plot. A bar plot represents an estimate of central tendency for a numeric variable with the length of each rectangle, and the seaborn `barplot()` function also includes an indication of the uncertainty around the estimate using error bars.
###Code
mpl.rc_file_defaults() # reset most Matplotlib features to defaults
# By convention, a returned Axes object is often called `ax`
ax = sns.barplot(
y="phd_major_field", # seaborn is clever enough to create a horizontal chart
x="salary_k",
data=person_df.sort_values('phd_major_field'), # order in data to order in figure
)
# We can use either `ax` or `plt` here; either will work
add_sourcing(ax, 'Sources: NCSES SED')
ax.set_title('Anticipated Salary Varies Considerably Across Fields of PhD')
###Output
_____no_output_____
###Markdown
Line chart We can use a line plot (seaborn `lineplot()` function) for tracking change in a value over time (a time series graph). Here we look at trend in salary expectations over time. > Note: The title of a visualization occupies the most valuable real estate on the page. If nothing else, you can be reasonably sure a viewer will at least read the title and glance at your visualization. This is why you want to put thought into making a clear and effective title that acts as a **narrative** for your chart. It is best to avoid _explanatory_ titles, such as: "Average Expected Salary over Time (2008-2017)". This title is correct, yes -- but it isn't very useful. It is likely to be redundant, since "salary" and "year" are probably labels on the axes already. Instead, use the title to reinforce and explain the core point of the visualization. It should answer the question **"Why is this graph important?"** and focus the viewer onto the most critical take-away.
###Code
mpl.rc_file_defaults() # reset most settings to defaults
# A `with` statement (context manager) can be used to temporarily set figure styles
with sns.axes_style('darkgrid'):
axes = sns.lineplot(data=person_df, x='phd_year', y='salary_k', color="#229900")
axes.set_title('Anticipated Salary Has Been Increasing')
add_sourcing(plt, 'Sources: IRIS UMETRICS, NCSES SED')
###Output
_____no_output_____
###Markdown
Small multiples Small multiples can be a great way to compare across categories, so that we can see several similarly plotted versions in the same overall figure. Seaborn offers an easy interface for combining multiple plots into a single figure using the `FacetGrid` class. Because `FacetGrid` was designed for exactly this use, seaborn has helpful defaults such as automatically synchronized axes.We've looked at salary expectations over time and across field; here we consider all three variables at once:
###Code
# Prepare our grid, which will share axes across multiple plots (wrapping after 5 columns)
g = sns.FacetGrid(person_df, col='phd_major_field', ylim=(0, 140), col_wrap=5)
# Create a lineplot for each cell of the grid
g = g.map(sns.lineplot, "phd_year", "salary_k")
add_sourcing(plt, 'Source: NCSES SED', fontsize='medium')
# Simplify the titles inside each cell
g.set_titles("{col_name}")
# Remove the spine (vertical line) along the y axis
sns.despine(left=True)
###Output
_____no_output_____
###Markdown
Colors The colors used in figures in both Matplotlib and seaborn can be represented in code in many ways, but here are two that Matplotlib, seaborn, and many other modern visualization packages handle: Hex triplets The hex triplet is a specification for the RGB color model commonly used for website and browser-rendered colors. These are formatted as a string with a pound sign `#` followed by a series of six hexadecimal digits. Each pair of hexadecimal digits (i.e., two of 0-9 and A-F) represents one byte of color information for red, green, and blue, in that order: `"#RRGGBB"`. A low value (minimum 00) contributes less of that primary color, a high value (maximum FF) a larger amount. Together, these can specify over 16 million colors. An additional two hex digits can be added to indicate alpha (transparency) where 00 is completely transparent and FF is completely opaque. Hex triplets are very common across many platforms and packages well beyond data and visualization. XKCD names A relatively new standard, XKCD names were the result of an online study created by Randall Munroe where volunteers entered free-form names of colors displayed on screen. Following the input of tens of thousands of participants, 954 common and distinguishing names were codified. Behind the scenes, these are still equivalent to specific hex triplets, but they can be more convenient. The result is a list of color names that many English speakers will find intuitive, from basics such as "gold," "green," and "light grey" to rarely used terms. In Matplotlib and seaborn these are written as a string prefixed by `xkcd:`, for example: `"xkcd:cement"` (#a5a391), `"xkcd:pale magenta"` (#d767ad), `"xkcd:sage"` (#87ae73), and `"xkcd:green/blue"` (#01c08d). Checkpoint 2: Small Multiples Try using `sns.FacetGrid` in combination with a histogram, bar, or line chart of your choice. Separating simple charts into several categories with small multiples can be a big improvement over trying to graph several things on the same chart. Try experimenting with color choices. Remember to add sourcing and use a title that highlights the main take-away. If you would like to include another variable in a line plot, for example, to look at differences by gender, you can use the `hue = 'sex'` argument in the `FacetGrid` function.
###Code
# Prepare our grid, which will share axes across multiple plots (wrapping after 5 columns)
g = sns.FacetGrid(person_df, col='phd_major_field', hue='sex', ylim=(0, 140), col_wrap=5)
# Create a lineplot for each cell of the grid
g = g.map(sns.lineplot, "phd_year", "salary_k")
add_sourcing(plt, 'Source: NCSES SED', fontsize='medium')
# Simplify the titles inside each cell
g.set_titles("{col_name}")
# Remove the spine (vertical line) along the y axis
sns.despine(left=True)
###Output
_____no_output_____
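###Markdown
As a quick illustration of the two color formats described above, the sketch below sets colors explicitly by hex triplet and by XKCD name. The specific bars and values here are arbitrary examples.
###Code
# A minimal sketch of specifying colors by hex triplet and by XKCD name
plt.bar([0, 1, 2], [3, 5, 4], color="#d767ad") # hex triplet (pale magenta)
plt.bar([3, 4, 5], [4, 2, 5], color="xkcd:sage") # XKCD color name
plt.title("Hex triplet vs. XKCD name")
plt.show()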
###Markdown
More visualization methods and motivating examples Scatter plot We can represent a relationship between the age of doctorate recipients and their expected salaries using a scatter plot and seaborn `scatterplot()` function.
###Code
scatter_df = person_df[person_df.salary_k < 300]
ax = sns.scatterplot(
x='age_at_diss',
y='salary_k',
color="xkcd:sea blue",
data=scatter_df,
alpha=.1, # we have a LOT of points, so make each nearly transparent
linewidth=0, # and remove the default (white) circle around each
)
add_sourcing(plt, 'Sources: IRIS UMETRICS, NCSES SED')
###Output
_____no_output_____
###Markdown
Heat mapConsider the following question:**What are primary sources of funding for students in various fields of study?**For something like this, we might want to use a heatmap. This can give a sort of visual summary similar to a crosstab.
###Code
MAJOR_FUNDERS = 'NIH NSF DOD DOE USDA NASA ED'.split()
pre_heatmap_df = pd.merge(semester_df, person_df, how='left', on='drf_id')[['modal_funder', 'phd_major_field']]
heatmap_df = pd.crosstab(pre_heatmap_df.phd_major_field, pre_heatmap_df.modal_funder)[MAJOR_FUNDERS]
ax = sns.heatmap(heatmap_df, cmap=sns.cubehelix_palette(light=1, as_cmap=True))
add_sourcing(ax, 'Sources: IRIS UMETRICS, NCSES SED')
# This heatmap fix is only necessary for 3.1.0 < Matplotlib <= 3.1.1; see https://stackoverflow.com/questions/56948670
ax.set_ylim(len(heatmap_df), 0)
###Output
_____no_output_____
###Markdown
Funding sequence chart Consider the following question: **What are the funding histories of graduate students in the three years leading up to their dissertation? How do the funding histories differ, and what are the most frequent funding sequences?** To create a graphic that lets us answer this question, we need both semester-level funding information and time of dissertation. In other words, we need to use a linked dataset with UMETRICS and SED. The UMETRICS data allows us to get the funding history of students, which we can use in conjunction with SED data to see what the funding histories look like leading up to the dissertation. In the following, we use the flexibility of pandas and these visualization libraries to create an unusual kind of chart. We will display the top ten most common patterns of federal funding in the time before and during the year that a student receives the PhD. Conceptual design We have the idea, so we'll first want to think about what it will look like in the end, then work backwards to determine how we need to handle the data to create the table we'll need. It really helps to get concrete, particularly if you aren't doing a standard kind of figure. The final visualization we're aiming for will be organized something like this:
```
funding pattern
- - - X X X X X X | 11%
X X X X X X X X X | 10%
- - - X X - X X - | 9%
- - - X X X X X - | 8%
- - - X X X X - - | 7%    percent
- - X X X X - - - | 6%    of sample
X X - - - - - - - | 5%
- - - X X - - - - | 4%
X X X - - - - - - | 4%
- - - - X X X - - | 4%
__________________|
 -2    -1    0   year
```
We'll put the percentages on the right, to help reinforce that we are looking back in time from the PhD award. Each row is a pattern where an `X` indicates federal funding and a `-` is no funding. If these were the real data, the first row would tell us that 11% of the PhD awardees had federal funding only during the last two years before their degree was awarded. The second row shows 10% with federal funding every single semester, nine in a row. The numbers here are arbitrary -- the point is to get a sense of what we're aiming for. Data preparation Before we get there, though, we'll have a fair amount of data preparation. We'll plan to use a heatmap to present the pattern of yes/no funding, which means an aggregated and simplified dataset to pass into seaborn. From end to beginning: - Top ten rows by % of total, nine columns of yes/no semester funding - ...will need to be counted from a unique student-level dataset that has nine columns of yes/no funding - ...pivoted from the full student X semester-level dataset we have as `semester_df` - ...created from those students we have covered in UMETRICS for the entire time period (we'll cut by institution)
###Code
# 1. DATA CLEANING
# Sixteen schools have UMETRICS coverage 2012-2015, and we only want them for this chart
sequence_sample = semester_df[semester_df['sequence_data_coverage'] == 1]
# 2. PIVOT FROM STUDENTxSEMESTER TO STUDENT
# W pivot to unique rows by person (drf_id) and a column for each semester
pivoted = sequence_sample.pivot(index='drf_id', columns='semester', values='any_federal')
# convert team_size to 1 if federal funding (True); 0 if not (NaN)
pivoted = pivoted.applymap(lambda x: int(x == 1))
# We have two different phd_year so we'll need to adjust which semesters are relative to which
pivoted = pivoted.merge(right=person_df[['drf_id', 'phd_year']], how='inner', on='drf_id')
# 3. A WRINKLE IN THE DATA
# Oops -- our data are recorded by calendar semester, but not everyone graduated in the same year:
# some 2014, some 2015. We want relative semesters, so we need to account for that.
# If we were doing to do this more than once, we'd probably want to create a function to do it automatically...
phd2014 = pivoted[pivoted.phd_year == 2014][['12spr', '12sum', '12fal', '13spr', '13sum', '13fal', '14spr', '14sum', '14fal']]
phd2015 = pivoted[pivoted.phd_year == 2015][['13spr', '13sum', '13fal', '14spr', '14sum', '14fal', '15spr', '15sum', '15fal']]
RELATIVE_COLUMNS = ['-2 Spr', '-2 Sum', '-2 Fal', '-1 Spr', '-1 Sum', '-1 Fal', 'Spr', 'Sum', 'Fal']
phd2015.columns = phd2014.columns = RELATIVE_COLUMNS
# pd.concat() stacks them back together, now that we have re-synchronized their rows
funding_df = pd.concat((phd2014, phd2015))
# To clarify our sample, because our data are not independent of having received federal funding,
# we'll just keep those that show any federal funding during these nine semesters.
funding_df = funding_df[funding_df.sum(axis=1) > 0]
# 4. AGGREGATING TOGETHER BY PATTERNS
# We cluster the data by these semesters (equivalent to the list of columns)
# using size() to count the number of dissertators with each pattern
aggregated = funding_df.groupby(list(funding_df.columns)).size().reset_index()
# Rename the not-so-helpful 0 column from groupby().size() into 'size'
aggregated = aggregated.rename(columns={0: 'size'})
# Keep only the most common ten patterns (10 rows with the highest value in the size column)
aggregated = aggregated.nlargest(10, 'size')
# 5. MINOR BITS OF FORMATTING
# Convert the count size into percentage of the total students
aggregated['size'] = aggregated['size'] / len(pivoted)
# And format the new field so that it will be a nice percentage in our chart
aggregated['size'] = aggregated['size'].apply(lambda x: '{:,.1%}'.format(float(x)))
# Conveniently, seaborn will automatically use the index as the y-axis
aggregated = aggregated.set_index('size')
aggregated
###Output
_____no_output_____
###Markdown
That's exactly the table we want. In reality, putting this together took several iterations to understand exactly how the data should be combined and to minimize errors in sample construction and aggregation.The `aggregated` df is now ready to pass into seaborn.
###Code
mpl.rc_file_defaults() # reset to defaults
ax = sns.heatmap(
aggregated,
cbar=False, # We don't need the heatmap's color bar
cmap='Greens',
linewidth=.5,
)
add_sourcing(plt, 'Sources: IRIS UMETRICS, NCSES SED')
# Move the y axis labels over to the right side to help guide the chronology
ax.tick_params(left=False, bottom=False, labelleft=False, labelright=True, labelrotation=1)
ax.set_title('Most Common Funding Patterns of Federally Funded PhD Recipients')
ax.set_xlabel('Semesters of funding, relative to year of PhD')
ax.set_ylabel('')
# This heatmap fix is only necessary for 3.1.0 < Matplotlib <= 3.1.1; see https://stackoverflow.com/questions/56948670
ax.set_ylim(len(aggregated), 0)
###Output
_____no_output_____
###Markdown
Saving visualizations When you are satisfied with your visualization, you will likely want a copy outside of your notebook. This can be done directly from the code using the `savefig` function of Matplotlib. When using `savefig`, the extension of the filename you choose is important. Image formats such as like PNG and JPEG are **not ideal**. Instead, save visualizations instead as a vector image via PDF or SVG: `plt.savefig("yourfilename.pdf")`> *Why not PNG or JPEG?* Raster image formats such as PNG and JPEG store pictures as compressed information on the colors of pixels. They are great for cases like photographs, where we want to minimize the perceived loss of visual quality while saving storage space.> But with visualizations, we care about *semantic* components: selected fonts, precise curves, and exact distances. Vector images are recorded as coded paths with specific characteristics. A PDF or SVG saved from Matplotlib can be later opened in a program such as Inkscape or Adobe Illustrator to make useful changes. You can shrink the size of a label, change the font used in a title, adjust the position of legends, or scale the entire visualization to the size of a large poster, all with no loss in quality.
###Code
mpl.rc_file_defaults() # reset to defaults
# Scale all font sizes slightly smaller
sns.set_context('paper')
grid = sns.catplot(
data=semester_df,
x='team_size',
y='modal_suborg',
kind='strip',
marker='.',
jitter=0.2,
palette=sns.color_palette('deep'),
order=semester_df['modal_suborg'].value_counts(ascending=True).index
)
sns.color_palette()
grid.set_axis_labels("Size of Student's Team(s) during Semester", "Modal Suborganization of Federal Funding")
add_sourcing(plt, 'Sources: IRIS UMETRICS, NCSES SED')
# Save the current state of the plot to PDF
plt.savefig("example.pdf")
plt.show()
###Output
_____no_output_____ |
exemplo_aula_03.ipynb | ###Markdown
Grouping
###Code
import pandas as pd
data = pd.read_csv('data/kc_house_data.csv')
data.head()
df_grouped = data[['id','bedrooms']].groupby('bedrooms')
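# Iterating over a GroupBy yields (group key, sub-DataFrame) pairs; 'numero de quartos' means 'number of bedrooms'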
for bedrooms, frame in df_grouped:
print('numero de quartos: {}'.format( bedrooms))
print(frame.head(), end='\n\n')
###Output
numero de quartos: 0
id bedrooms
875 6306400140 0
3119 3918400017 0
3467 1453602309 0
4868 6896300380 0
6994 2954400190 0
numero de quartos: 1
id bedrooms
154 7920100045 1
264 2123039032 1
350 7325600160 1
465 8658300340 1
648 922049078 1
numero de quartos: 2
id bedrooms
2 5631500400 2
11 9212900260 2
18 16000397 2
23 8091400200 2
31 2426039314 2
numero de quartos: 3
id bedrooms
0 7129300520 3
1 6414100192 3
4 1954400510 3
6 1321400060 3
7 2008000270 3
numero de quartos: 4
id bedrooms
3 2487200875 4
5 7237550310 4
15 9297300055 4
17 6865200140 4
20 6300500875 4
numero de quartos: 5
id bedrooms
14 1175000570 5
22 7137970340 5
42 7203220400 5
51 7231300125 5
54 4217401195 5
numero de quartos: 6
id bedrooms
209 6300000550 6
232 9264902050 6
239 5152100060 6
331 2946001550 6
486 9286000240 6
numero de quartos: 7
id bedrooms
556 5486800070 7
1135 4024100951 7
1241 7227802030 7
3721 5451100490 7
4024 9175600025 7
numero de quartos: 8
id bedrooms
1660 9126101740 8
4035 685000115 8
4067 7226500100 8
6174 1873400020 8
9077 6746700615 8
numero de quartos: 9
id bedrooms
4096 1997200215 9
4235 2902200015 9
6079 9822700190 9
8546 424049043 9
16844 8823900290 9
numero de quartos: 10
id bedrooms
13314 627300145 10
15161 5566100170 10
19254 8812401450 10
numero de quartos: 11
id bedrooms
8757 1773100755 11
numero de quartos: 33
id bedrooms
15870 2402100895 33
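###Markdown
Besides iterating over the groups as above, `groupby` can also aggregate directly. A minimal sketch using the same dataset:
###Code
# Count how many houses fall into each bedroom group, without an explicit loop
bedroom_counts = data.groupby('bedrooms')['id'].count()
print(bedroom_counts)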
|
Getting Started With Notebooks/3.8.0 Earth Sciences.ipynb | ###Markdown
Earth Sciences
###Code
!conda install -y -c conda-forge cartopy
#iris also installs this as a dependency
!conda install -y -c conda-forge iris
!conda install -y -c conda-forge iris-sample-data
!conda install -y -c conda-forge metpy
# Maybe see also https://annefou.github.io/metos_python/04-plotting/
# https://unidata.github.io/python-gallery/index.html
# https://github.com/dgketchum/Landsat578
# https://satpy.readthedocs.io/en/latest/quickstart.html#loading-data
###Output
_____no_output_____ |
1. Introduction to TensorFlow/Udacity/02_simple_model.ipynb | ###Markdown
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
#for i,c in enumerate(celsius_q):
#print(str(c) + " degrees Celsius = degrees Fahrenheit " + str(fahrenheit_a[i]))
layer_0 = Dense(units=4, input_shape=[1])
layer_1 = Dense(units=4)
layer_2 = Dense(units=1)
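# Network structure: 1 input feature -> 4 units -> 4 units -> 1 output (predicted Fahrenheit)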
model = Sequential()
model.add(layer_0)
model.add(layer_1)
model.add(layer_2)
model.compile(loss='mean_squared_error',
optimizer=tf.keras.optimizers.Adam(0.1))
model.summary()
# Layer 0: weights 1 x 4 and bias 4; Layer 1: weights 4 x 4 and bias 4; Layer 2: weights 4 x 1 and bias 1
history = model.fit(x=celsius_q, y=fahrenheit_a, epochs=500, verbose=False)
plt.xlabel('Epoch')
plt.ylabel("Loss")
plt.plot(history.history['loss'])
print(model.predict([100.0]))
# Note: the true formula is F = 1.8 * C + 32; with a single layer of one unit, the learned weight would be ~1.8 and the bias ~32
print("These are the layer_0 variables: " + str(layer_0.get_weights()) + "\n")
print("These are the layer_1 variables: " + str(layer_1.get_weights()) + "\n")
print("These are the layer_2 variables: " + str(layer_2.get_weights()) + "\n")
###Output
These are the layer_0 variables: [array([[-0.3849956 , 0.06945714, 0.8305622 , -0.13198055]],
dtype=float32), array([-3.2937386, -3.2016199, 3.3788104, 2.7487729], dtype=float32)]
These are the layer_1 variables: [array([[ 0.03176934, -0.8822142 , 0.5039331 , -0.80846506],
[-0.4344406 , -0.9153131 , -0.625423 , -1.2447749 ],
[-0.10629681, 0.8674469 , 0.2953448 , 0.9945485 ],
[-0.13630953, -0.13857402, -0.21220978, 1.0797594 ]],
dtype=float32), array([-1.9123148, 3.4233828, -2.7091942, 3.3911142], dtype=float32)]
These are the layer_2 variables: [array([[-0.22691898],
[ 0.6531665 ],
[-0.23812342],
[ 1.2309039 ]], dtype=float32), array([3.318378], dtype=float32)]
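###Markdown
To connect the comment above about F = 1.8 * C + 32 with the learned parameters, here is a minimal sketch of the single-layer, single-unit version of the same model: after training, its one weight and one bias should land near 1.8 and 32 (exact values vary from run to run).
###Code
# Single Dense(1) layer: one weight and one bias, directly comparable to F = 1.8 * C + 32
single_layer = Dense(units=1, input_shape=[1])
simple_model = Sequential([single_layer])
simple_model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.1))
simple_model.fit(x=celsius_q, y=fahrenheit_a, epochs=500, verbose=False)
print("Learned [weight, bias]: " + str(single_layer.get_weights()))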
|