path | concatenated_notebook
---|---
notebooks/vae-importance_sampling/vae-mnist-importance-sampling.ipynb | ###Markdown
Variational Autoencoder Parameters
###Code
img_rows, img_cols, img_chns = 28, 28, 1
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
batch_size = 100
latent_dim = 32
intermediate_dim = 128
epsilon_std = 1.0
epochs = 200
activation = 'relu'
dropout = 0.0
learning_rate = 0.001
decay = 0.0
###Output
_____no_output_____
###Markdown
Load MNIST dataset
###Code
# Hugo Larochelle's Binarized MNIST
from mlio import *
def load_binarized_mnist(dir_path,load_to_memory=False):
"""
Loads a binarized version of MNIST.
The data is given by a dictionary mapping from strings
``'train'``, ``'valid'`` and ``'test'`` to the associated pair of data and metadata.
**Defined metadata:**
* ``'input_size'``
* ``'length'``
"""
input_size=784
dir_path = os.path.expanduser(dir_path)
def load_line(line):
tokens = line.split()
return np.array([int(i) for i in tokens])
train_file,valid_file,test_file = [os.path.join(dir_path, 'binarized_mnist_' + ds + '.amat') for ds in ['train','valid','test']]
# Get data
train,valid,test = [load_from_file(f,load_line) for f in [train_file,valid_file,test_file]]
lengths = [50000,10000,10000]
if load_to_memory:
train,valid,test = [MemoryDataset(d,[(input_size,)],[np.float64],l) for d,l in zip([train,valid,test],lengths)]
# Get metadata
train_meta,valid_meta,test_meta = [{'input_size':input_size,
'length':l} for l in lengths]
return {'train':(train,train_meta),'valid':(valid,valid_meta),'test':(test,test_meta)}
values = load_binarized_mnist('../../data/binarized_mnist', load_to_memory=True)
# Binarized MNIST
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
# X_train = np.round(X_train.reshape(X_train.shape[0], img_rows, img_cols, 1) / 255.)
# X_test = np.round(X_test.reshape(X_test.shape[0], img_rows, img_cols, 1) / 255.)
def reshape_data(data):
return data.reshape(data.shape[0], img_rows, img_cols, 1)
X_train = reshape_data(values['train'][0].mem_data[0])
X_valid = reshape_data(values['valid'][0].mem_data[0])
X_test = reshape_data(values['test'][0].mem_data[0])
print(X_train.shape)
print(X_valid.shape)
print(X_test.shape)
def display_digits(X, digit_size=28, n=10):
figure = np.zeros((digit_size * n, digit_size * n))
for i in range(n):
for j in range(n):
index = np.random.randint(0, X.shape[0])
digit = X[index].reshape(digit_size, digit_size)
x = i * digit_size
y = j * digit_size
figure[x:x + digit_size, y:y + digit_size] = digit
plt.figure(figsize=(n, n))
plt.imshow(figure, cmap='Greys_r')
plt.show()
display_digits(X_train)
###Output
_____no_output_____
###Markdown
Helper Functions
###Code
def create_dense_layers(stage, width, activation=activation, dropout=True):
dense_name = '_'.join(['enc_dense', str(stage)])
bn_name = '_'.join(['enc_bn', str(stage)])
layers = [
Dense(width, name=dense_name),
Activation(activation),
]
if dropout:
layers.append(Dropout(dropout))
return layers
def inst_layers(layers, in_layer):
x = in_layer
for layer in layers:
if isinstance(layer, list):
x = inst_layers(layer, x)
else:
x = layer(x)
return x
def epsilon(args, batch_size=batch_size, latent_dim=latent_dim, epsilon_std=epsilon_std):
return K.random_normal(shape=(batch_size, latent_dim), mean=0., stddev=epsilon_std)
def combine_z_0(args, batch_size=batch_size, latent_dim=latent_dim, epsilon_std=epsilon_std):
eps, z_mean, z_log_sigma = args
return z_mean + K.exp(z_log_sigma) * eps
def create_enc_conv_layers(stage, **kwargs):
conv_name = '_'.join(['enc_conv', str(stage)])
bn_name = '_'.join(['enc_bn', str(stage)])
layers = [
Conv2D(name=conv_name, **kwargs),
Activation(activation),
]
return layers
def create_dense_layers(stage, width):
dense_name = '_'.join(['enc_dense', str(stage)])
bn_name = '_'.join(['enc_bn', str(stage)])
layers = [
Dense(width, name=dense_name),
BatchNormalization(name=bn_name),
Activation(activation),
Dropout(dropout),
]
return layers
def inst_layers(layers, in_layer):
x = in_layer
for layer in layers:
if isinstance(layer, list):
x = inst_layers(layer, x)
else:
x = layer(x)
return x
def create_dec_trans_conv_layers(stage, **kwargs):
conv_name = '_'.join(['dec_trans_conv', str(stage)])
bn_name = '_'.join(['dec_bn', str(stage)])
layers = [
Conv2DTranspose(name=conv_name, **kwargs),
Activation(activation),
]
return layers
###Output
_____no_output_____
###Markdown
Loss Function
###Code
def logpz(x, x_decoded_mean):
return -K.sum(0.5 * np.log(2*math.pi) + 0.5 * z ** 2, axis=-1)
def logqz_x(x, x_decoded_mean):
return -K.sum(0.5 * np.log(2*math.pi) + 0.5 * e_0 ** 2 + z_log_sigma, axis=-1)
def logpx_z(x, x_decoded_mean):
x = K.flatten(x)
x_decoded_mean = K.flatten(x_decoded_mean)
xent_loss = img_rows * img_cols * img_chns * metrics.binary_crossentropy(x, x_decoded_mean)
return -xent_loss
def vae_loss(x, x_decoded_mean):
# Loss = negative ELBO: -[logp(x|z) + logp(z) - logq(z|x)] = -logp(x|z) - logp(z) + logq(z|x)
return K.mean(-logpx_z(x, x_decoded_mean) - logpz(x, x_decoded_mean) + logqz_x(x, x_decoded_mean), axis=-1)
###Output
_____no_output_____
###Markdown
Basic VAE
###Code
# Encoder
enc_filters=32
enc_layers = [
create_enc_conv_layers(stage=1, filters=enc_filters, kernel_size=3, strides=1, padding='same'),
create_enc_conv_layers(stage=2, filters=enc_filters, kernel_size=3, strides=1, padding='same'),
create_enc_conv_layers(stage=3, filters=enc_filters, kernel_size=3, strides=2, padding='same'),
Flatten(),
create_dense_layers(stage=4, width=intermediate_dim),
create_dense_layers(stage=5, width=intermediate_dim),
]
x_input = Input(shape=original_img_size)
_enc_dense = inst_layers(enc_layers, x_input)
# Original z_0
_z_mean_0 = Dense(latent_dim)(_enc_dense)
_z_log_sigma_0 = Dense(latent_dim)(_enc_dense)
encoder = Model(inputs=x_input, outputs=[_z_mean_0, _z_log_sigma_0])
# Decoder
dec_filters = 32
decoder_layers = [
create_dense_layers(stage=10, width=intermediate_dim),
create_dense_layers(stage=11, width=14 * 14 * 64),
Reshape((14, 14, 64)),
create_dec_trans_conv_layers(12, filters=dec_filters, kernel_size=3, strides=1, padding='same'),
create_dec_trans_conv_layers(13, filters=dec_filters, kernel_size=3, strides=1, padding='same'),
create_dec_trans_conv_layers(14, filters=dec_filters, kernel_size=3, strides=2, padding='same'),
Conv2DTranspose(name='x_decoded', filters=1, kernel_size=1, strides=1, activation='sigmoid'),
]
z_input = Input(shape=(latent_dim,))
_dec_dense = inst_layers(decoder_layers, z_input)
decoder_output = _dec_dense
decoder = Model(inputs=z_input, outputs=decoder_output)
g_input = Input(shape=original_img_size)
z_mean, z_log_sigma = encoder(g_input)
e_0 = Lambda(epsilon)(z_mean)
z = Lambda(combine_z_0, output_shape=(latent_dim,))([e_0, z_mean, z_log_sigma])
g_output = decoder(z)
vae = Model(inputs=g_input, outputs=g_output)
optimizer = Adam(lr=learning_rate, decay=decay)
vae.compile(optimizer=optimizer, loss=vae_loss)
vae.summary()
start = time.time()
early_stopping = keras.callbacks.EarlyStopping('val_loss', min_delta=0.1, patience=10)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.01 * learning_rate)
tensorboard = keras.callbacks.TensorBoard(histogram_freq=10, batch_size=32, write_graph=True, write_grads=True)
callbacks=[early_stopping, reduce_lr]
if 'CMDLINE' not in os.environ:
callbacks += [TQDMNotebookCallback()]
history = vae.fit(
X_train, X_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_data=(X_valid, X_valid),
verbose=0
)
done = time.time()
elapsed = done - start
print("Elapsed: ", elapsed)
df = pd.DataFrame(history.history)
display(df.describe(percentiles=[0.25 * i for i in range(4)] + [0.95, 0.99]))
df.plot(figsize=(8, 6))
# Eval logpx loss
eval_result = vae.evaluate(x=X_test, y=X_test, batch_size=batch_size)
print("VLB = %.2f" % -eval_result)
def compute_samples(data, num_samples, debug=False):
""" Sample from importance distribution z_samples ~ q(z|X) and
compute p(z_samples), q(z_samples) for importance sampling
"""
z_mean, z_log_sigma = encoder.predict(data)
z_samples = []
qz = []
for m, s in zip(z_mean, z_log_sigma):
z_vals = [np.random.normal(m[i], np.exp(s[i]), num_samples)
for i in range(len(m))]
qz_vals = [norm.pdf(z_vals[i], loc=m[i], scale=np.exp(s[i]))
for i in range(len(m))]
z_samples.append(z_vals)
qz.append(qz_vals)
z_samples = np.array(z_samples)
pz = norm.pdf(z_samples)
qz = np.array(qz)
z_samples = np.swapaxes(z_samples, 1, 2)
pz = np.swapaxes(pz, 1, 2)
qz = np.swapaxes(qz, 1, 2)
if debug:
print(z_mean.shape, z_log_sigma.shape)
print('m, s', m[0], s[0])
print('samples', z_samples[-1][0])
print('pvals', pz[-1][0])
print('qvals', qz[-1][0])
print(z_samples.shape)
print(pz.shape)
print(qz.shape)
return z_samples, pz, qz
#compute_samples(X_test[:10], 4, debug=True);
def estimate_logpx_batch(data, num_samples, debug=False):
z_samples, pz, qz = compute_samples(data, num_samples)
assert len(z_samples) == len(data)
assert len(z_samples) == len(pz)
assert len(z_samples) == len(qz)
# Calculate importance sample
# \log p(x) = \log E_{p(z)}[p(x|z)]
#           = \log(\int p(x|z) p(z) dz)
#           = \log(\int p(x|z) p(z) / q(z|x) q(z|x) dz)
#           = \log E_{q(z|x)}[p(x|z) p(z) / q(z|x)]
#          ~= \log(1/n * \sum_i p(x|z_i) p(z_i) / q(z_i))
#           = \log(1/n * \sum_i e^{\log p(x|z_i) + \log p(z_i) - \log q(z_i)})
#           = -\log n + \logsumexp_i(\log p(x|z_i) + \log p(z_i) - \log q(z_i))
# See: scipy.special.logsumexp
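# (Illustrative aside with hypothetical numbers: for summands a = [-10., -12., -11.],
#  np.log(np.mean(np.exp(a))) equals -np.log(3) + logsumexp(a); working in log space
#  keeps the estimate finite when the summands are strongly negative, as the
#  784-dimensional log-likelihoods below typically are.)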
result = []
for i in range(len(data)):
datum = data[i].reshape(784)
x_predict = decoder.predict(z_samples[i]).reshape(-1, 784)
x_predict = np.clip(x_predict, np.finfo(float).eps, 1. - np.finfo(float).eps)
p_vals = pz[i]
q_vals = qz[i]
# \log p(x|z) = Binary cross entropy
logp_xz = np.sum(datum * np.log(x_predict) + (1. - datum) * np.log(1.0 - x_predict), axis=-1)
logpz = np.sum(np.log(p_vals), axis=-1)
logqz = np.sum(np.log(q_vals), axis=-1)
argsum = logp_xz + logpz - logqz
logpx = -np.log(num_samples) + logsumexp(argsum)
result.append(logpx)
if debug:
print(x_predict.shape)
print(p_vals.shape)
print(q_vals.shape)
print(logp_xz.shape)
print(logpz.shape)
print(logqz.shape)
print("logp_xz", logp_xz)
print("logpz", logpz)
print("logqz", logqz)
print(argsum.shape)
print("logpx", logpx)
return np.array(result)
#estimate_logpx_batch(X_test[:2], num_samples=128, debug=True)
#pass
def estimate_logpx(data, num_samples, verbosity=0):
batches = []
iterations = int(np.ceil(1. * len(data) / batch_size))
for b in range(iterations):
batch_data = data[b * batch_size:(b+1) * batch_size]
batches.append(estimate_logpx_batch(batch_data, num_samples))
if verbosity and b % max(11 - verbosity, 1) == 0:
print("Batch %d [%d, %d): %.2f" % (b, b*batch_size, (b+1) * batch_size,
np.mean(np.concatenate(batches))))
return np.mean(np.concatenate(batches))
logpx = estimate_logpx(X_test, num_samples=128, verbosity=1)
print("log p(x) = %.2f" % logpx)
import matplotlib.pyplot as plt
n = 10
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
for j in range(n):
z_sample = np.random.normal(size=latent_dim * batch_size).reshape(batch_size, latent_dim)
x_decoded = decoder.predict(z_sample, batch_size=batch_size)
digit = x_decoded.reshape(batch_size, digit_size, digit_size, img_chns)
for i in range(n):
d_x = i * digit_size
d_y = j * digit_size
figure[d_x:d_x + digit_size, d_y:d_y + digit_size] = digit[i, :, :, 0]
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
###Output
_____no_output_____ |
Prepare_for_yolo.ipynb | ###Markdown
with open('data/Anno/list_attr_img.txt', "r") as f:
    data = []
    for itr, line in tqdm_notebook(enumerate(f)):
        if itr >= 2:
            line = line.split()
            # convert labels to np.int8 (will help to store them all)
            tmp = [np.int8(line[i]) for i in range(1, 1001)]
            # append Path
            tmp.append(line[0])
            data.append(tmp)
pd.DataFrame(data).to_csv('data/Anno/Atr_img.csv', index=False)
del data
###Code
bbox_img = pd.read_csv('data/Anno/bbox_img.csv')
import cv2
def convert_labels(path, x1, y1, x2, y2):
"""
Definition: Parses label files to extract label and bounding box
coordinates. Converts (x1, y1, x1, y2) KITTI format to
(x, y, width, height) normalized YOLO format.
"""
if x2 > x1:
tmp1, tmp2 = x1, y1
x1, y1 = x2, y2
x2, y2 = tmp1, tmp2
size = get_img_shape(path)
# note: cv2 shapes are (height, width, channels), so width is size[1] and height is size[0]
dw = 1./size[1]
dh = 1./size[0]
x = (x1 + x2)/2.0
y = (y1 + y2)/2.0
w = x1 - x2
h = y1 - y2
x = x*dw
w = w*dw
y = y*dh
h = h*dh
return (x,y,w,h)
def get_img_shape(path):
path = 'data/'+path
img = cv2.imread(path)
try:
return img.shape
except AttributeError:
print('error! ', path)
return (None, None, None)
bbox_img['x'], bbox_img['y'], bbox_img['width'], bbox_img['height'] = zip(*bbox_img.progress_apply(lambda row: convert_labels(row['Path'], row['x1'], row['y1'],
row['x2'], row['y2']), axis=1))
df = bbox_img.merge(train_test_valid_anot).merge(categories_img)
df.to_csv('data/Anno/annotation_w-o_atr.csv', index=False)
dtypes = {str(i): np.int8 for i in range(0, 1000)}
mem = pd.read_csv('data/Anno/Atr_img.csv', dtype=dtypes)
mem['Path'] = mem['1000']
del mem['1000']
with open('data/Anno/list_attr_cloth.txt', "r") as f:
data = []
for itr, line in tqdm_notebook(enumerate(f)):
if itr >= 2:
line = line.split()
data.append(line[0])
# Because last column is 'Path'
data.append('Path')
mem.columns = data
mem.to_csv('data/Anno/annotation_atr.csv', index=False)
###Output
_____no_output_____
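As a quick, standalone sanity check of the YOLO normalization performed above (an illustrative aside with made-up numbers; it skips the image read that `convert_labels` does via `get_img_shape`):
img_w, img_h = 300, 200                 # hypothetical image width and height
x1, y1, x2, y2 = 30, 40, 130, 140       # hypothetical box corners
x_c = ((x1 + x2) / 2.0) / img_w         # normalized box center x -> 0.2667
y_c = ((y1 + y2) / 2.0) / img_h         # normalized box center y -> 0.45
w = abs(x2 - x1) / img_w                # normalized box width    -> 0.3333
h = abs(y2 - y1) / img_h                # normalized box height   -> 0.5
print(x_c, y_c, w, h)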
###Markdown
In attribute labels, "1" represents positive, "-1" represents negative, and "0" represents unknown. Landmarks
###Code
list_ladmarks = pd.DataFrame(extr_data_txt('data/Anno/list_landmarks.txt'))
land_columns = 'image_name clothes_type variation_type landmark_visibility_1 landmark_location_x_1 landmark_location_y_1 landmark_visibility_2 landmark_location_x_2 landmark_location_y_2 landmark_visibility_3 landmark_location_x_3 landmark_location_y_3 landmark_visibility_4 landmark_location_x_4 landmark_location_y_4 landmark_visibility_5 landmark_location_x_5 landmark_location_y_5 landmark_visibility_6 landmark_location_x_6 landmark_location_y_6 landmark_visibility_7 landmark_location_x_7 landmark_location_y_7 landmark_visibility_8 landmark_location_x_8 landmark_location_y_8'.split()
list_ladmarks.rename(columns={i: land_columns[i] for i in range(0, 26)}, inplace=True)
list_ladmarks.to_csv('data/Anno/list_landmarks.csv', index=False)
list_ladmarks = pd.read_csv('data/Anno/list_landmarks.csv')
###Output
_____no_output_____ |
TD3/Twin_Delayed_Deep_Deterministic_Policy_Gradients.ipynb | ###Markdown
Setup
###Code
!pip install git+https://github.com/benelot/pybullet-gym.git
!pip install tensorboardX
!pip install gym
#!pip install roboschool
#!pip install pybullet
###Output
Collecting git+https://github.com/benelot/pybullet-gym.git
Cloning https://github.com/benelot/pybullet-gym.git to /tmp/pip-req-build-a876zo_5
Running command git clone -q https://github.com/benelot/pybullet-gym.git /tmp/pip-req-build-a876zo_5
Collecting pybullet>=1.7.8
Downloading https://files.pythonhosted.org/packages/0a/1c/26640b59ab18deb59104ed03ee4c26d1d998076cdf4a89c5ef1486831172/pybullet-2.6.1.tar.gz (82.8MB)
|████████████████████████████████| 82.8MB 36kB/s
Building wheels for collected packages: pybulletgym, pybullet
Building wheel for pybulletgym (setup.py) ... done
Created wheel for pybulletgym: filename=pybulletgym-0.1-cp36-none-any.whl size=1513918 sha256=bf1edf6f5f13421d15122b38b1ee202a7c650c51e8d861d1ec4ccf618e5ccca6
Stored in directory: /tmp/pip-ephem-wheel-cache-99tjs2wj/wheels/ea/34/2e/1a4b77e473ea01bc931d1863c73abf7e4d1cc703904d7c74ea
Building wheel for pybullet (setup.py) ... done
Created wheel for pybullet: filename=pybullet-2.6.1-cp36-cp36m-linux_x86_64.whl size=94540402 sha256=afd5b2d31cb50bc2de770143574b04857972e336cfa30f4b523f13e78bf884c4
Stored in directory: /root/.cache/pip/wheels/6c/85/95/de15ebf350270f905e8ac5b060e9668642ff251d3a3e7f65ad
Successfully built pybulletgym pybullet
Installing collected packages: pybullet, pybulletgym
Successfully installed pybullet-2.6.1 pybulletgym-0.1
Requirement already satisfied: tensorboardX in /usr/local/lib/python3.6/dist-packages (1.9)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from tensorboardX) (1.17.4)
Requirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.6/dist-packages (from tensorboardX) (3.10.0)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from tensorboardX) (1.12.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.8.0->tensorboardX) (42.0.2)
Requirement already satisfied: gym in /usr/local/lib/python3.6/dist-packages (0.15.4)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from gym) (1.12.0)
Requirement already satisfied: numpy>=1.10.4 in /usr/local/lib/python3.6/dist-packages (from gym) (1.17.4)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from gym) (1.3.3)
Requirement already satisfied: pyglet<=1.3.2,>=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym) (1.3.2)
Requirement already satisfied: cloudpickle~=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym) (1.2.2)
Requirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (from gym) (4.1.2.30)
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from pyglet<=1.3.2,>=1.2.0->gym) (0.16.0)
###Markdown
Imports
###Code
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from tensorboardX import SummaryWriter
import gym
import pybulletgym
#import roboschool
import sys
import os
if not os.path.exists('saves'):
os.mkdir('saves')
###Output
_____no_output_____
###Markdown
Networks
###Code
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""Initialize parameters and build model.
Args:
state_size (int): Dimension of each state
action_size (int): Dimension of each action
max_action (float): highest action to take
seed (int): Random seed
h1_units (int): Number of nodes in first hidden layer
h2_units (int): Number of nodes in second hidden layer
Return:
action output of network with tanh activation
"""
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, action_dim)
self.max_action = max_action
def forward(self, x):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = self.max_action * torch.tanh(self.l3(x))
return x
class Critic(nn.Module):
"""Initialize parameters and build model.
Args:
state_size (int): Dimension of each state
action_size (int): Dimension of each action
max_action (float): highest action to take
seed (int): Random seed
h1_units (int): Number of nodes in first hidden layer
h2_units (int): Number of nodes in second hidden layer
Return:
value output of network
"""
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 400)
self.l5 = nn.Linear(400, 300)
self.l6 = nn.Linear(300, 1)
def forward(self, x, u):
xu = torch.cat([x, u], 1)
x1 = F.relu(self.l1(xu))
x1 = F.relu(self.l2(x1))
x1 = self.l3(x1)
x2 = F.relu(self.l4(xu))
x2 = F.relu(self.l5(x2))
x2 = self.l6(x2)
return x1, x2
def Q1(self, x, u):
xu = torch.cat([x, u], 1)
x1 = F.relu(self.l1(xu))
x1 = F.relu(self.l2(x1))
x1 = self.l3(x1)
return x1
###Output
_____no_output_____
###Markdown
Memory
###Code
# Code based on:
# https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
# Expects tuples of (state, next_state, action, reward, done)
class ReplayBuffer(object):
"""Buffer to store tuples of experience replay"""
def __init__(self, max_size=1000000):
"""
Args:
max_size (int): total amount of tuples to store
"""
self.storage = []
self.max_size = max_size
self.ptr = 0
def add(self, data):
"""Add experience tuples to buffer
Args:
data (tuple): experience replay tuple
"""
if len(self.storage) == self.max_size:
self.storage[int(self.ptr)] = data
self.ptr = (self.ptr + 1) % self.max_size
else:
self.storage.append(data)
def sample(self, batch_size):
"""Samples a random amount of experiences from buffer of batch size
Args:
batch_size (int): size of sample
"""
ind = np.random.randint(0, len(self.storage), size=batch_size)
states, actions, next_states, rewards, dones = [], [], [], [], []
for i in ind:
s, a, s_, r, d = self.storage[i]  # tuples are stored as (state, next_state, action, reward, done); despite the local names, this ordering is consistent with how train() unpacks sample()
states.append(np.array(s, copy=False))
actions.append(np.array(a, copy=False))
next_states.append(np.array(s_, copy=False))
rewards.append(np.array(r, copy=False))
dones.append(np.array(d, copy=False))
return np.array(states), np.array(actions), np.array(next_states), np.array(rewards).reshape(-1, 1), np.array(dones).reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Agent
###Code
class TD3(object):
"""Agent class that handles the training of the networks and provides outputs as actions
Args:
state_dim (int): state size
action_dim (int): action size
max_action (float): highest action to take
device (device): cuda or cpu to process tensors
env (env): gym environment to use
"""
def __init__(self, state_dim, action_dim, max_action, env):
self.actor = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target.load_state_dict(self.actor.state_dict())
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=1e-3)
self.critic = Critic(state_dim, action_dim).to(device)
self.critic_target = Critic(state_dim, action_dim).to(device)
self.critic_target.load_state_dict(self.critic.state_dict())
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-3)
self.max_action = max_action
self.env = env
def select_action(self, state, noise=0.1):
"""Select an appropriate action from the agent policy
Args:
state (array): current state of environment
noise (float): how much noise to add to acitons
Returns:
action (float): action clipped within action range
"""
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
action = self.actor(state).cpu().data.numpy().flatten()
if noise != 0:
action = (action + np.random.normal(0, noise, size=self.env.action_space.shape[0]))
return action.clip(self.env.action_space.low, self.env.action_space.high)
def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):
"""Train and update actor and critic networks
Args:
replay_buffer (ReplayBuffer): buffer for experience replay
iterations (int): how many times to run training
batch_size(int): batch size to sample from replay buffer
discount (float): discount factor
tau (float): soft update for main networks to target networks
Return:
actor_loss (float): loss from actor network
critic_loss (float): loss from critic network
"""
for it in range(iterations):
# Sample replay buffer
x, y, u, r, d = replay_buffer.sample(batch_size)  # x holds states, y next states, u actions (see the note in ReplayBuffer.sample)
state = torch.FloatTensor(x).to(device)
action = torch.FloatTensor(u).to(device)
next_state = torch.FloatTensor(y).to(device)
done = torch.FloatTensor(1 - d).to(device)
reward = torch.FloatTensor(r).to(device)
# Select action according to policy and add clipped noise
noise = torch.FloatTensor(u).data.normal_(0, policy_noise).to(device)
noise = noise.clamp(-noise_clip, noise_clip)
next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
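# (taking the element-wise minimum of the two target critics -- "clipped double-Q" -- counteracts Q-value overestimation)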
target_Q = reward + (done * discount * target_Q).detach()
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if it % policy_freq == 0:
# Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
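# (soft / Polyak update: target_param <- tau * param + (1 - tau) * target_param, with tau = 0.005 here)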
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
def save(self, filename, directory):
torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename))
torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename))
def load(self, filename="best_avg", directory="./saves"):
self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, filename)))
self.critic.load_state_dict(torch.load('%s/%s_critic.pth' % (directory, filename)))
###Output
_____no_output_____
###Markdown
Runner
###Code
class Runner():
"""Carries out the environment steps and adds experiences to memory"""
def __init__(self, env, agent, replay_buffer):
self.env = env
self.agent = agent
self.replay_buffer = replay_buffer
self.obs = env.reset()
self.done = False
def next_step(self, episode_timesteps, noise=0.1):
action = self.agent.select_action(np.array(self.obs), noise=noise)  # use the noise level passed in rather than a hard-coded 0.1
# Perform action
new_obs, reward, done, _ = self.env.step(action)
done_bool = 0 if episode_timesteps + 1 == 200 else float(done)
# Store data in replay buffer
replay_buffer.add((self.obs, new_obs, action, reward, done_bool))
self.obs = new_obs
if done:
self.obs = self.env.reset()
done = False
return reward, True
return reward, done
###Output
_____no_output_____
###Markdown
Evaluate
###Code
def evaluate_policy(policy, env, eval_episodes=100,render=False):
"""run several episodes using the best agent policy
Args:
policy (agent): agent to evaluate
env (env): gym environment
eval_episodes (int): how many test episodes to run
render (bool): show training
Returns:
avg_reward (float): average reward over the number of evaluations
"""
avg_reward = 0.
for i in range(eval_episodes):
obs = env.reset()
done = False
while not done:
if render:
env.render()
action = policy.select_action(np.array(obs), noise=0)
obs, reward, done, _ = env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("\n---------------------------------------")
print("Evaluation over {:d} episodes: {:f}" .format(eval_episodes, avg_reward))
print("---------------------------------------")
return avg_reward
###Output
_____no_output_____
###Markdown
Observation
###Code
def observe(env,replay_buffer, observation_steps):
"""run episodes while taking random actions and filling replay_buffer
Args:
env (env): gym environment
replay_buffer(ReplayBuffer): buffer to store experience replay
observation_steps (int): how many steps to observe for
"""
time_steps = 0
obs = env.reset()
done = False
while time_steps < observation_steps:
action = env.action_space.sample()
new_obs, reward, done, _ = env.step(action)
replay_buffer.add((obs, new_obs, action, reward, done))
obs = new_obs
time_steps += 1
if done:
obs = env.reset()
done = False
print("\rPopulating Buffer {}/{}.".format(time_steps, observation_steps), end="")
sys.stdout.flush()
###Output
_____no_output_____
###Markdown
Train
###Code
def train(agent, test_env):
"""Train the agent for exploration steps
Args:
agent (Agent): agent to use
env (environment): gym environment
writer (SummaryWriter): tensorboard writer
exploration (int): how many training steps to run
"""
total_timesteps = 0
timesteps_since_eval = 0
episode_num = 0
episode_reward = 0
episode_timesteps = 0
done = False
obs = env.reset()
evaluations = []
rewards = []
best_avg = -2000
writer = SummaryWriter(comment="-TD3_Baseline_HalfCheetah")
while total_timesteps < EXPLORATION:
if done:
if total_timesteps != 0:
rewards.append(episode_reward)
avg_reward = np.mean(rewards[-100:])
writer.add_scalar("avg_reward", avg_reward, total_timesteps)
writer.add_scalar("reward_step", reward, total_timesteps)
writer.add_scalar("episode_reward", episode_reward, total_timesteps)
if best_avg < avg_reward:
best_avg = avg_reward
print("saving best model....\n")
agent.save("best_avg","saves")
print("\rTotal T: {:d} Episode Num: {:d} Reward: {:f} Avg Reward: {:f}".format(
total_timesteps, episode_num, episode_reward, avg_reward), end="")
sys.stdout.flush()
if avg_reward >= REWARD_THRESH:
break
agent.train(replay_buffer, episode_timesteps, BATCH_SIZE, GAMMA, TAU, NOISE, NOISE_CLIP, POLICY_FREQUENCY)
# Evaluate episode
if timesteps_since_eval >= EVAL_FREQUENCY:
timesteps_since_eval %= EVAL_FREQUENCY
eval_reward = evaluate_policy(agent, test_env)
evaluations.append(avg_reward)
writer.add_scalar("eval_reward", eval_reward, total_timesteps)
if best_avg < eval_reward:
best_avg = eval_reward
print("saving best model....\n")
agent.save("best_avg","saves")
episode_reward = 0
episode_timesteps = 0
episode_num += 1
reward, done = runner.next_step(episode_timesteps)
episode_reward += reward
episode_timesteps += 1
total_timesteps += 1
timesteps_since_eval += 1
###Output
_____no_output_____
###Markdown
Config
###Code
ENV = "Pendulum-v0"#"Pendulum-v0" #HalfCheetahMuJoCoEnv-v0
SEED = 0
OBSERVATION = 10000
EXPLORATION = 5000000
BATCH_SIZE = 100
GAMMA = 0.99
TAU = 0.005
NOISE = 0.2
NOISE_CLIP = 0.5
EXPLORE_NOISE = 0.1
POLICY_FREQUENCY = 2
EVAL_FREQUENCY = 5000
REWARD_THRESH = 8000
###Output
_____no_output_____
###Markdown
Main
###Code
env = gym.make(ENV)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Set seeds
env.seed(SEED)
torch.manual_seed(SEED)
np.random.seed(SEED)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
policy = TD3(state_dim, action_dim, max_action, env)
replay_buffer = ReplayBuffer()
runner = Runner(env, policy, replay_buffer)
total_timesteps = 0
timesteps_since_eval = 0
episode_num = 0
done = True
# Populate replay buffer
observe(env, replay_buffer, OBSERVATION)
# Train agent
train(policy, env)
policy.load()
for i in range(100):
evaluate_policy(policy, env, render=True)
env.close()
###Output
_____no_output_____ |
notebooks/ml-basics.ipynb | ###Markdown
Basic setupHere we will import the `pyspark` module and set up a `SparkSession`. By default, we'll use a `SparkSession` running locally, with one Spark executor; we're dealing with small data, so it doesn't make sense to run against a cluster.
###Code
import pyspark
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, SQLContext
spark = SparkSession.builder.master("local[1]").getOrCreate()
sc = spark.sparkContext
###Output
_____no_output_____
###Markdown
Creating random dataWe'll start by creating some uniformly-distributed random data with which to demonstrate various machine-learning techniques. Note that we're choosing a data set size that will be possible to plot, not that will make for an interesting problem.
###Code
from pyspark.sql.functions import array, column, rand, udf
from pyspark.ml.linalg import Vectors, VectorUDT
as_vector = udf(lambda l: Vectors.dense(l), VectorUDT())
randomDF = spark.range(0, 2048).select((rand() * 2 - 1).alias("x"), (rand() * 2 - 1).alias("y")).select(column("x"), column("y"), as_vector(array(column("x"), column("y"))).alias("features"))
###Output
_____no_output_____
###Markdown
Setting up plottingWe'll now set up the `seaborn` library to plot into our notebook and do a scatter plot of our random data.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import seaborn as sns
import numpy as np
sns.set(color_codes=True)
_ = sns.lmplot("x", "y", randomDF.toPandas(), fit_reg=False, scatter=True)
###Output
_____no_output_____
###Markdown
ClusteringMachine learning practitioners often speak of two types of learning: _supervised_ learning, in which training data are _labeled_, and _unsupervised_ learning, in which training data are unlabeled. We will examine some supervised learning tasks later in this tutorial, but first we will look at a simple unsupervised learning task: _clustering_, or finding groupings of similar objects.There are several algorithms for clustering, but one of the easiest to understand is [_k-means clustering_](https://en.wikipedia.org/wiki/K-means_clustering), which identifies _k_ cluster centers and learns a placement of these centers so as to minimize the variance from samples to their nearest cluster centers. While we certainly don't have enough data to justify parallel or distributed execution, we'll use Spark's k-means implementation so that you can become familiar with it.
###Code
from pyspark.ml.clustering import KMeans
K = 7
SEED = 0xdea110c8
kmeans = KMeans().setK(K).setSeed(SEED).setFeaturesCol("features")
model = kmeans.fit(randomDF)
###Output
_____no_output_____
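To make the k-means objective more concrete, here is a minimal NumPy sketch of a single Lloyd iteration (assign each point to its nearest center, then move each center to the mean of its points). The toy points are made up for illustration and are independent of the Spark model fitted above:
import numpy as np
pts = np.array([[0., 0.], [0., 1.], [5., 5.], [6., 5.]])    # toy 2-D points
centers = np.array([[0., 0.], [5., 5.]])                     # current cluster centers
d2 = ((pts[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)  # squared Euclidean distances
assign = d2.argmin(axis=1)                                   # nearest-center assignment
new_centers = np.array([pts[assign == k].mean(axis=0) for k in range(len(centers))])
print(assign, new_centers)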
###Markdown
That's all we need to do to find `K` cluster centers! Now we'll use the model to add a column to `randomDF` with the number of the closest cluster center for each row.
###Code
withPredictions = model.transform(randomDF).select("x", "y", "prediction")
_ = sns.lmplot("x", "y", withPredictions.toPandas(), fit_reg=False, scatter=True, hue="prediction")
###Output
_____no_output_____
###Markdown
We can see how many observations are in each cluster by using data frame aggregate functions:
###Code
from pyspark.sql.functions import count
withPredictions.groupBy("prediction").agg(count("prediction")).show()
###Output
_____no_output_____
###Markdown
Try it out yourselfTry running the code with a different number of cluster centers and seeing how the plot changes!
###Code
def cluster_and_plot(df, k, seed=0xdea110c8):
kmeans = KMeans().setK(k).setSeed(seed).setFeaturesCol("features")
withPredictions = kmeans.fit(df).transform(df).select("x", "y", "prediction")
return sns.lmplot("x", "y", withPredictions.toPandas(), fit_reg=False, scatter=True, hue="prediction")
_ = cluster_and_plot(randomDF, 11)
###Output
_____no_output_____
###Markdown
Sidebar: parameters and hyperparametersYou may have noticed that when we trained the k-means model above, we supplied some parameters to Spark's `KMeans` object. Some of these simply told Spark which columns of our data frame contained the feature data to train on, but others, like _k_, affected the training process and the output of the model independently of the input data. We call these settings _hyperparameters_, which we contrast with the _parameters_ of a given model, which are learned from the data (e.g., in this case, the coordinates of cluster centers). Sidebar: dimensionality, similarity, and distanceYou may have noticed that our input data is two-dimensional, and you may suspect that many machine learning problems involve higher-dimensional data. You're right! (Although there are interesting two-dimensional applications for clustering, like deciding where to put new retail locations given a database of potential customers.) We're using two dimensions here because it is easy to visualize and most people have pretty good intuitions about two-dimensional data. However, it is important to note that most machine learning techniques generalize to arbitrarily many dimensions. Put another way, you can compare objects with the same number of dimensions, no matter how large that number is.k-means clustering in particular uses [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) (which is defined for objects of arbitrary dimensionality) to determine how similar two objects are, but for some problems, it might make sense to use a different distance metric. Euclidean distance makes sense for many, but not all applications, and it may be less useful for very high-dimensional data than other metrics (since points in high-dimensional space are generally sparser than points on the plane). Some other metrics that are interesting include:* [Manhattan distance](https://en.wikipedia.org/wiki/Taxicab_geometry)* [Cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity), which compares angles between points and is especially useful for comparing large sparse vectors* The [Jaccard coefficient](https://en.wikipedia.org/wiki/Jaccard_index), which characterizes the similarity of sets with finite domainsWe'll discuss dimensionality more later in the notebook, so stay tuned! ClassificationThe second family of techniques we'll look at is are supervised learning techniques that are all types of _classification_. Specifically, we're going to show an algorithm for _binary classification_, where there are two sorts of labels. The classifier we'll use will try and cut the feature space in two parts along a linear boundary so that the objects on one side of the boundary are likely to have one label value and that those on the other are likely to have the other value. We'll start by using a Spark user-defined function to generate synthetic labels for our random data:
###Code
from pyspark.sql.types import DoubleType
from random import uniform
synthetic_label = udf(lambda v: (abs(v[0]) * v[0]) + (v[1] * 2) + uniform(-0.5, 0.5) > 0 and 1.0 or 0.0, DoubleType())
labeledDF = randomDF.withColumn("label", synthetic_label(randomDF["features"]))
_ = sns.lmplot("x", "y", labeledDF.select("x", "y", "label").toPandas(), hue="label", fit_reg=False)
###Output
_____no_output_____
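As a small, self-contained illustration of the distance and similarity measures mentioned in the sidebar above (Euclidean, Manhattan, cosine, Jaccard) -- the vectors and sets below are toy values chosen for the example, not data from this notebook:
import numpy as np
from scipy.spatial.distance import euclidean, cityblock, cosine
a, b = np.array([1.0, 2.0, 3.0]), np.array([2.0, 0.0, 3.0])
print(euclidean(a, b))          # Euclidean (L2) distance
print(cityblock(a, b))          # Manhattan (L1) distance
print(1.0 - cosine(a, b))       # cosine similarity (SciPy's cosine() is a distance)
s, t = {1, 2, 3}, {2, 3, 4}
print(len(s & t) / len(s | t))  # Jaccard coefficient for finite sets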
###Markdown
Sidebar: using synthetic dataIf you look at the function we use to generate synthetic labels, you'll notice that it takes a simple function (`(abs(x) * x) + 2y`) and adds some uniformly-distributed noise. You may have the (totally reasonable) philosophical objection that it is trivial to identify a separation of the feature space to differentiate between labels, since we constructed the labels to be separable. These techniques will still work for real data, though, and using simple techniques to construct synthetic data makes the tutorial easier to follow! Logistic regressionLogistic regression is a technique for binary or multi-class classification. We will use it for the former here. We will also divide our training data into training and test sets so we can evaluate how well our classifier works on data it has never seen before.
###Code
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression()
training, test = labeledDF.randomSplit([.7,.3])
lr_model = lr.fit(training)
lr_predictions = lr_model.transform(labeledDF)
###Output
_____no_output_____
###Markdown
Unlike the k-means predictions, the logistic regression predictions include a probability.
###Code
lr_predictions.printSchema()
###Output
_____no_output_____
###Markdown
We can now plot the predictions:
###Code
_ = sns.lmplot("x", "y", lr_predictions.select("x", "y", "prediction").toPandas(), hue="prediction", fit_reg=False)
###Output
_____no_output_____
###Markdown
We can also plot just the mispredicted samples:
###Code
_ = sns.lmplot("x", "y", lr_predictions.filter(lr_predictions["prediction"] != lr_predictions["label"]).select("x", "y", "label").toPandas(), hue="label", fit_reg=False).set(xlim=(-1.5, 1.5), ylim=(-1.5, 1.5))
###Output
_____no_output_____
###Markdown
Characterizing classifier performanceIt should be clear that merely being right most of the time isn't necessarily a great measure of how our classifier is doing. (If we were trying to predict a condition that occured in 5% of the population, a trivial classifier that always returned "no" would be right 95% of the time!)One way to visualize the performance of a binary classifier is with a [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix), which shows the actual values in rows and the predicted values in columns, so you can see true positives and true negatives along one diagonal and false positives and false negatives along the other. We can make such a matrix with data frame queries:
###Code
from pyspark.sql.functions import column, sum, when
lrp = lr_predictions
lr_predictions.select("label", when(lrp["prediction"] == 0.0, 1).otherwise(0).alias("p0"), when(lrp["prediction"] == 1.0, 1).otherwise(0).alias("p1")).groupBy("label").agg(sum(column("p0")).alias("predicted 0"), sum(column("p1")).alias("predicted 1")).show()
###Output
_____no_output_____
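The same counts can be summarized as precision and recall, which are often more informative than raw accuracy for imbalanced labels. The sketch below recomputes them directly from the predictions DataFrame (an illustrative addition, not part of the original notebook):
tp = lrp.filter((lrp["label"] == 1.0) & (lrp["prediction"] == 1.0)).count()
fp = lrp.filter((lrp["label"] == 0.0) & (lrp["prediction"] == 1.0)).count()
fn = lrp.filter((lrp["label"] == 1.0) & (lrp["prediction"] == 0.0)).count()
print("precision = %.3f, recall = %.3f" % (tp / (tp + fp), tp / (tp + fn)))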
###Markdown
Another way to visualize our performance is with a [receiver operating characteristic curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) ("ROC curve" for short). This is a useful way to compare several classifiers (or several thresholds for deciding that objects should be labeled `1.0`), as it plots the true positive rate against the false positive rate. We can construct an ROC curve with Spark and Seaborn as follows:
###Code
summary = lr_model.summary
roc = summary.roc
roc = roc.select(roc["FPR"].alias("False Positive Rate"), roc["TPR"].alias("True Positive Rate")).toPandas()
_ = sns.lmplot("False Positive Rate", "True Positive Rate", roc, fit_reg=False, scatter=True, scatter_kws={'marker':'1', 's':6}).set(xlim=(0,1), ylim=(0,1))
###Output
_____no_output_____
###Markdown
Linear regression[Linear regression](https://en.wikipedia.org/wiki/Linear_regression) attempts to optimize a linear model for a dependent variable from the values of several regressors. To show how to do linear regression in Spark, we'll use a classic data set: the 1993 automobile city fuel efficiency data. (This data set was originally from StatLib at Carnegie Mellon; we will use a version available from [UCI](https://archive.ics.uci.edu/ml/datasets/Auto+MPG).)While this data set, like the random data frames we generated above, is not large at all, it will allow you to try out using these techniques in Spark. We will try and predict each car's city fuel efficiency as a function of its engine characteristics.
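For intuition about what `LinearRegression` fits, the ordinary-least-squares coefficients can also be recovered in closed form; the minimal NumPy sketch below uses synthetic data (not the MPG set) purely for illustration, while Spark's implementation handles the same problem at scale with its own solvers:
import numpy as np
rng = np.random.RandomState(0)
X = rng.rand(100, 2)                           # synthetic regressors
y = 3.0 * X[:, 0] - 2.0 * X[:, 1] + 0.5 + 0.01 * rng.randn(100)
Xb = np.hstack([X, np.ones((100, 1))])         # add an intercept column
beta = np.linalg.lstsq(Xb, y, rcond=None)[0]   # closed-form least-squares fit
print(beta)                                    # approximately [3.0, -2.0, 0.5]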
###Code
mpg = spark.read.json("data/auto-mpg.json")
mpg.printSchema()
###Output
_____no_output_____
###Markdown
We'll start by taking the input data, using the `dropna` function to exclude rows with missing values (since linear regression won't handle those properly), and creating a feature vector from some of the features from the original data.
###Code
mpg_with_features = mpg.dropna().select("mpg", "acceleration", "cylinders", "displacement", "horsepower", "weight", as_vector(array(mpg["acceleration"], mpg["cylinders"], mpg["displacement"], mpg["horsepower"], mpg["weight"])).alias("features"))
from pyspark.ml.regression import LinearRegression
lr = LinearRegression()
lr.setLabelCol("mpg")
lr.setFeaturesCol("features")
lr.setStandardization(True)
model = lr.fit(mpg_with_features)
###Output
_____no_output_____
###Markdown
Given a model, we can now look at the predicted MPG versus the actual features of each car.
###Code
predictions = model.transform(mpg_with_features)
to_plot = predictions.select("mpg", "acceleration", "cylinders", "displacement", "horsepower", "weight", "prediction").show()
###Output
_____no_output_____ |
soukAnalyticsPython/Pandas.ipynb | ###Markdown
PandasWe will analyze student performance using Pandas.
###Code
import pandas as pd
import numpy as np
df = pd.read_csv('StudentsPerformance.csv')
df
type(df)
# first 5 rows
df.head()
# last 5 rows
df.tail()
# number of rows and columns
df.shape
df.columns
df.duplicated()
# check for duplicated rows
df.duplicated().sum()
df.info()
# check for NaN values
df.isna().sum()
# statistical summary
df.describe()
# statistical summary - including the categorical variables
df.describe(include = 'all')
# number of unique values in each column
df.nunique()
# unique values
df['parental level of education'].unique()
# frequency of each gender
df.gender.value_counts()
provas = ['math score', 'reading score', 'writing score']
df
df.sort_values(['math score']).reset_index(drop = True)
# sort the dataset
df = df.sort_values(by = provas, ascending = False)\
.reset_index(drop = True)
df
# column with the mean of the exam scores
df['mean'] = df[provas].mean(axis = 1)
df.head()
# query
df.query('(gender == "male") & (`test preparation course` == "none") & (`math score` >= 70)')
df[(df.gender == 'male') & (df['test preparation course'] == 'none') & (df['math score'] >= 70)]
df.loc[(df.gender == 'male') & (df['test preparation course'] == 'none') & (df['math score'] >= 70)]
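# the three selections above (query, boolean mask, .loc) are equivalent ways of filtering the same rows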
# grouping - group the data by gender and compute descriptive statistics
df.groupby(by = 'gender')[provas].agg([np.mean, np.median]).T
###Output
_____no_output_____ |
IS2021/LSTM1.ipynb | ###Markdown
Code for IS2021 LSTM1 model (RNN model with 1 unidirectional LSTM layer)The Python code (run under TensorFlow 2.3) that was used to train and evaluate the LSTM1 model submitted to Interspeech 2021 is given below. The code is unaltered, except that (1) comments have been added, and (2) code used solely to evaluate the trained model on non-TIMIT data has been removed.Note that the code makes some assumptions based on the circumstances of our computational setup at the time (e.g. file names and locations, etc.) and so cannot be run as-is without the same setup. You may also notice differences in code between the four models. Some of these differences are due to the necessary differences between the 4 experiments, of course, while other differences are irrelevant to the training and evaluation, and are simply due to the evolution of the code over time, e.g. to make the code more readable and generalizable. An updated, generalized, and user-friendly version of the code for general public use has been provided in the **../User/** directory. Execution:This script was run with two command-line parameters that indicate the number of formants and antiformants, and the output (consisting of data statistics, model specifications, and script progress reports, including training and validation loss) is saved to an output file, e.g.:LSTM1.py 6 1 > LSTM1.f6z1.outIt needs to be run on a server with access to Tensorflow 2.3. On a GPU, the script make take a few hours; on a CPU, the script may take several hours to a few days to run. (In our case, running the script on CPU machines using 24 parallel cores per job, these experiments ran between half a day and 2 days.) Input:If the name of the directory in which the script is run is e.g. expdir/, then the script looks for the input data in a sister directory ../data/, where the two input file lists timit_ordered1.txt and VTRlist0.txt (described below) should be found. Also in there should be a directory ../data/timit/srcflt_r480_ENV_db68/vtspecs/, which holds the individual spectral envelope files derived from each TIMIT wavefile (also described below). Output:The output models and evaluation files are saved to a directory named expdir/mvt13_f6z1/ (where "mvt13" was the unique designation for this experiment, and "f6z1" indicates 6 formants and 1 zero). The model files are stored directly in this directory. A subdirectory, expdir/mvt13_f6z1/timit/, will hold the output formant track files, one for each input file. These are stored in a format (described below) that was designed for the specific interests of our laboratory, so scripts are provided that were used to extract the frequencies for evaluation against the VTR-TIMIT database.
###Code
import numpy as np
import tensorflow as tf
import os
import glob
import subprocess
import sys
# The testing parameter is used for quick testing of the code in e.g. a Jupyter Lab window.
# If testing is set to True, then only a small number of input files are loaded, a few
# iterations of training are run, and only a few test files are evaluated.
#testing = True
testing = False
# Variables that differ among subexperiments:
expname = "mvt13" # A unique designation for the experiment -- used to name the output directories and files below
# Subexperiment parameters, given either in the script or as command-line parameters:
# We tested various numbers of formants and zeros, but ultimately settled on 6 formants and 1 zero for IS2021.
if testing is True:
NFORMANTS = 6
NZEROS = 1
else:
NFORMANTS = int(sys.argv[1])
NZEROS = int(sys.argv[2])
###Output
_____no_output_____
###Markdown
**Input filelist:** The input filelist "timit_ordered1.txt" has a format where the directory and suffix have been removed, e.g.:*train_dr1_fecd0_sa1 train_dr1_fecd0_sa2 train_dr1_fecd0_si1418 train_dr1_fecd0_si2048 train_dr1_fecd0_si788* The code further down below assumes 6300 files, in the order train (4140), validation (480), test (1680).The evaluation filelist VTRlist0.txt has the same format, except it only lists the 516 files included in the VTR-TIMIT corpus, in any order.**Context frames and sequences:** For CNN models, the model input for each time-step was set up so that it included not only the target frame, but also the N preceding frames and N following frames, for context. This is controlled by the variable **n_context_frames** below, and the total length of the input (2\*N+1) is stored in **window_length**. For CNN3, n_context_frames was 10 (window_length 21). Each of these windows overlaps with the next, e.g. the window for frame 50 includes frames 40-60, the window for frame 51 includes frames 41-61, and so on. For RNNs, n_context_frames was 0 (window_length 1), but a training **SEQUENCE_LENGTH** of 64 is specified; the training set is split into non-overlapping sequences of frames of this length (though the final model can accept sequences of any length for evaluation).
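(Illustrative aside, not part of the original script: the overlapping CNN-style context windows described above can be produced with the same Keras utility used further below for sequences, simply by using a stride of 1. The toy array here is hypothetical and only demonstrates the window shapes.)
import numpy as np, tensorflow as tf
toy = np.arange(20, dtype=np.float32).reshape(10, 2)    # 10 hypothetical frames, 2 points each
windows = tf.keras.preprocessing.timeseries_dataset_from_array(
    data=toy, targets=None,
    sequence_length=5,       # 2 * n_context_frames + 1 with n_context_frames = 2
    sequence_stride=1,       # stride 1 -> overlapping windows, one per target frame
    batch_size=4)
for w in windows.take(1):
    print(w.shape)           # (4, 5, 2): four overlapping 5-frame windows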
###Code
NSUM = NFORMANTS + NZEROS
#Creation of output directory name (checkpoint_dir)
subexp = "f" + str(NFORMANTS) + "z" + str(NZEROS)
if testing is True:
checkpoint_dir = expname + "_tmp_" + subexp
else:
checkpoint_dir = expname + "_" + subexp
#Specifying the input files and directories
if testing is True:
subdir = "/srcflt_r480_ENV/vtspecs/"
else:
subdir = "/srcflt_r480_ENV_db68/vtspecs/" #Where the training files are
superdir = "../data/" #Where all input files are, including filelists
suffix = "_spec.fea" #The suffix of the input files
filelist = 'timit_ordered1.txt' #The input file list
# Other variables:
n_context_frames = 0 #For our LSTM, our "windows" will just be single frames....
window_length = n_context_frames * 2 + 1
SEQUENCE_LENGTH = 64
BATCH_SIZE = 32
top_activation='sigmoid' #The activation of the model output layer
floor = 0.001 #Floor value added to linear spectra before conversion to log domain
print("")
print(expname + " OUTPUT:")
print("Formants:", NFORMANTS)
print("Zeros:", NZEROS)
print("Number of context spectra on either side:", n_context_frames)
print("Total window length:", window_length)
print("Sequence length:", SEQUENCE_LENGTH)
print("Data list:", filelist)
print("Data:", superdir + "SPKR" + subdir + "FILENAME" + suffix)
print("Saved in:", checkpoint_dir)
print("")
#Load the filelists
with open(superdir + filelist) as f:
allfileset = [i[:-1] for i in list(f)]
datadir = superdir + "timit" + subdir
with open(superdir + 'VTRlist0.txt') as f:
vtrfileset = [i[:-1] for i in list(f)]
if testing is True:
vtrfileset = vtrfileset[:10]
###Output
_____no_output_____
###Markdown
Datasets -- log scale spectra:The following code reads in and normalizes the training and validation data, which consist of a log-scale (dB) spectral envelope calculated from each frame of input, as described in our IS2021 paper. There is one input file for each wavefile. The file is in binary format, and starts with a 24-byte header. The first 8 bytes consist of two 4-byte integers specifying first the number of frames, and then the number of data points per frame (the spectral resolution), which for IS2021 was kept at a constant 257 points and stored in the variable **npoints**. Following the header are the spectra themselves, which are stored as float values.
###Code
# The following function is used to load data and, if needed, add N context frames (ncframes) to each end.
# Input include datadir (the common superdirectory for all input files), the filelist, and a suffix.
# datadir and suffix are prepended and appended (respectively) to each file in the filelist, and may each
# be left as empty strings. The filelist may itself include its own subdirectories and suffixes. The filelist
# may be a single file (necessary for evaluation, as seen below). If filelist is empty, the function will
# load all files in datadir (in which case suffix should be left empty). Note that getdata() concatenates
# all input files into one single NumPy array, with nothing to indicate boundaries between input files.
# Initial and final context frames (copies of the first and last frames) are added to this entire structure.
# This is necessary because of how tf.keras.preprocessing.timeseries_dataset_from_array() works, which will
# be used to divide the array into input windows (see below).
def getdata(datadir, filelist=[], suffix=".wav", ncframes=0, verbose=1):
import struct
import time
start_time = time.perf_counter()
datalist = []
if filelist == []:
filelist = [name for name in os.listdir(datadir)]
for f in filelist:
with open(datadir + '/' + f + suffix, 'rb') as file:
nspecs, speclen = struct.unpack('ii', file.read(8))
file.seek(24)
x = file.read(nspecs*speclen*4)
datalist += list(struct.unpack(str(nspecs*speclen)+'f', x))
dataset = np.array(datalist, dtype=np.float32).reshape(-1, speclen)
if (ncframes > 0):
x = firstframe = [dataset[0,:]]
y = lastframe = [dataset[-1,:]]
for j in range(ncframes-1):
x = np.concatenate((x, firstframe))
for j in range(ncframes*2-1):
y = np.concatenate((y, lastframe))
dataset = np.concatenate((x, dataset, y))
if (verbose > 0):
print("Loaded", len(filelist), "files (time:", time.perf_counter() - start_time, ")")
return speclen, dataset
# A function to get some stats on the dataset. Note that the mean and standard deviation of
# the training set calculated with this function must be used to normalize all training
# and evaluation data prior to being fed to the model.
def getstats(d):
print("Shape:", d.shape)
print("Range:", np.min(d), "-", np.max(d))
mean=np.mean(d)
stdev=np.std(d)
print("Mean:", mean)
print("Stdev:", stdev)
return mean, stdev
print("")
print("Loading validation data ....")
sys.stdout.flush()
if testing is True:
npoints, val1 = getdata(datadir, allfileset[4140:4200], suffix, n_context_frames)
else:
npoints, val1 = getdata(datadir, allfileset[4140:4620], suffix, n_context_frames)
print("Log validation data stats:")
getstats(val1)
print("")
print("Loading training data ....")
sys.stdout.flush()
if testing is True:
len2, train1 = getdata(datadir, allfileset[:50], suffix, n_context_frames)
else:
len2, train1 = getdata(datadir, allfileset[:4140], suffix, n_context_frames)
print("Log training data stats:")
trmean, trstd = getstats(train1)
print("")
print("trmean =", trmean)
print("trstd =", trstd)
print("")
sys.stdout.flush()
###Output
_____no_output_____
###Markdown
We normalize the datasets by the mean and stdev of the training set.
###Code
def normdata(data, mean=trmean, sd=trstd):
normed_data = (data - mean) / sd
return normed_data, data
###Output
_____no_output_____
###Markdown
The Keras function tf.keras.preprocessing.timeseries_dataset_from_array() is used to convert the training and validation sets from single NumPy arrays into tf.data.Datasets of non-overlapping **SEQUENCE_LENGTH** sequences. Note that the data fed as input to the model are the normalized frames, but the data used as the targets for evaluation are the original **un**-normalized frames. Since RNNs output sequences, both input and output data are 3-dimensional (batch_length * sequence_length * resolution).
###Code
batched_train_dset = tf.keras.preprocessing.timeseries_dataset_from_array(
data=train1, targets=None, sequence_length=SEQUENCE_LENGTH, sequence_stride=SEQUENCE_LENGTH, batch_size=BATCH_SIZE)
batched_val_dset = tf.keras.preprocessing.timeseries_dataset_from_array(
data=val1, targets=None, sequence_length=SEQUENCE_LENGTH, sequence_stride=SEQUENCE_LENGTH, batch_size=BATCH_SIZE)
batched_train_dset = batched_train_dset.map(normdata)
batched_val_dset = batched_val_dset.map(normdata)
print("")
for batch_input, batch_target in batched_train_dset.take(1):
print("Input shape:", batch_input.shape)
print("Target shape:", batch_target.shape)
print("")
sys.stdout.flush()
del train1, val1
###Output
_____no_output_____
###Markdown
Definition of Loss function, etc.The functions used to compute the loss are defined here. We tried to write the code so that it could handle variations in sampling rate (srate), frequency range (from 0 to maxfreq), number of formants (NFORMANTS), number of anti-formants (NZEROS), spectral resolution (npoints), and the activation type of the final model output layer (top_activation). For IS2021, these were all set constant across all experiments: 16K sampling rate, 0-8K frequency range, 6 formants, 1 zero, 257-point spectra, sigmoid activation.The code here is a bit different here than in the CNN models, due to the necessity of having to add another dimension to the data for sequences. The formant() function takes the frequency F and bandwidth B of each formant predicted by the model, and generates a corresponding formant: an array of spectrum levels h at each frequency bin f in the spectrum range at the given resolution (see Eqn. (1) of the paper). The vtfn() function weights these by their corresponding amplitude factors, and combines them (multiplying or dividing, corresponding to whether it's a pole or zero) to produce a linear-scale spectral envelope.
###Code
maxfreq=8000
spec1 = tf.cast(np.linspace(0, maxfreq, npoints), dtype=tf.float32)
@tf.function
def formant(freq, bw, nres, npoints=257, maxfreq=8000):
fex = tf.expand_dims(freq, axis=-1)
bex = tf.expand_dims(bw, axis=-1)
bsq = bex**2 * 0.25
anum = fex**2 + bsq
#spec1 = tf.cast(np.linspace(0, maxfreq, npoints), dtype=tf.float32)
spec2 = tf.tile(spec1, [tf.size(freq)])
spec = tf.reshape(spec2, [-1, nres, npoints])
negspec = (spec - fex)**2 + bsq
posspec = (spec + fex)**2 + bsq
formants = anum / tf.math.sqrt(negspec * posspec)
return(formants)
#Note that vtfn returns a LINEAR-scale spectrum
if NZEROS == 0:
@tf.function
def vtfn(freqs, bws, amps, npoints=257, srate=16000):
ax = tf.expand_dims(amps, axis=-1)
ax = 10.0 ** (ax / 20.0) #convert log amplitudes to linear
maxf = srate // 2
specs = formant(freqs, bws, NFORMANTS, npoints, maxf)
sumspec = tf.reduce_sum(ax * specs, axis = -2)
return sumspec
else:
@tf.function
def vtfn(freqs, bws, amps, zfreqs, zbws, npoints=257, srate=16000):
ax = tf.expand_dims(amps, axis=-1)
ax = 10.0 ** (ax / 20.0) #convert log amplitudes to linear
maxf = srate // 2
fspecs = ax * formant(freqs, bws, NFORMANTS, npoints, maxf)
sumspecs = tf.reduce_sum(fspecs, axis = -2, keepdims=True)
zspecs = 1.0 / formant(zfreqs, zbws, NZEROS, npoints, maxf)
allspecs = tf.concat([sumspecs, zspecs], axis = -2)
prodspecs = tf.reduce_prod(allspecs, axis = -2)
return prodspecs
###Output
_____no_output_____
###Markdown
The rescale_params() function takes the output of the model, and rescales it to the expected scale for formant parameters (e.g. 0-8000 Hz for frequencies). The input scale depends on the output activation of the model; we experimented with linear, tanh, softsign, and ReLU, but found that sigmoid usually works best. Note that this function forces the output of the model to be in the order F1 F2 F3 ... B1 B2 B3 ... A1 A2 A3 .... Note also that this function is needed for evaluation (further below) and any future use of the model.
###Code
@tf.function(input_signature=(tf.TensorSpec(shape=[None, NFORMANTS*3 + NZEROS*2], dtype=tf.float32),))
def rescale_params(params):
freqs, bws, amps = tf.split(params, [NSUM, NSUM, NFORMANTS], axis=-1)
if top_activation == 'sigmoid': #network produces values between 0 and 1
freqs = freqs * 8000.0
bws = (bws * 5000.0) + 20.0
amps = (amps - 0.5) * 200.0
elif top_activation == 'softsign' or top_activation == 'tanh': #network produces values between -1 and 1
freqs = (freqs + 1.0) * 4000.0
bws = (bws * 2500.0) + 2520.0
amps = amps * 100.0
elif top_activation == 'relu': #network produces values of 0 or greater. Add 20.0 to avoid division by 0
bws = bws + 20.0
amps = amps - 100.0
return freqs, bws, amps
###Output
_____no_output_____
###Markdown
Finally, the model loss is calculated with custom_loss(). First, the batch and sequence dimensions are collapsed. Then the input model parameters are rescaled with rescale_params(). The formants are split into poles and zeros, and sent to vtfn() to compute a linear-scale spectral envelope. The envelope is then converted to decibel scale, and the loss is calculated as the mean square difference between the generated envelope and the original envelope.
###Code
#Note that the floor is added to the log conversion here.
def get_custom_loss():
if NZEROS == 0:
@tf.function(input_signature=(tf.TensorSpec(shape=[None, None, npoints], dtype=tf.float32),
tf.TensorSpec(shape=[None, None, NFORMANTS*3 + NZEROS*2], dtype=tf.float32)))
def custom_loss(specs_input, params_pred):
npoints=specs_input.shape[-1]
specs_input = tf.reshape(specs_input, [-1, npoints]) #collapse batch and sequence dimensions
params_pred = tf.reshape(params_pred, [-1, NFORMANTS*3 + NZEROS*2]) #collapse batch and sequence dimensions
freqs, bws, amps = rescale_params(params_pred)
specs_pred = vtfn(freqs, bws, amps, npoints=specs_input.shape[-1], srate=16000)
specs_pred = 20.0 * tf.math.log(floor + specs_pred) / tf.math.log(10.0)
return(tf.math.reduce_mean(tf.square(specs_input - specs_pred))) #loss over whole batch
else:
@tf.function(input_signature=(tf.TensorSpec(shape=[None, None, npoints], dtype=tf.float32),
tf.TensorSpec(shape=[None, None, NFORMANTS*3 + NZEROS*2], dtype=tf.float32)))
def custom_loss(specs_input, params_pred):
npoints=specs_input.shape[-1]
specs_input = tf.reshape(specs_input, [-1, npoints]) #collapse batch and sequence dimensions
params_pred = tf.reshape(params_pred, [-1, NFORMANTS*3 + NZEROS*2]) #collapse batch and sequence dimensions
freqs, bws, amps = rescale_params(params_pred)
pfreqs, zfreqs = tf.split(freqs, [NFORMANTS, NZEROS], axis=-1)
pbws, zbws = tf.split(bws, [NFORMANTS, NZEROS], axis=-1)
specs_pred = vtfn(pfreqs, pbws, amps, zfreqs, zbws, npoints=specs_input.shape[-1], srate=16000)
specs_pred = 20.0 * tf.math.log(floor + specs_pred) / tf.math.log(10.0)
return(tf.math.reduce_mean(tf.square(specs_input - specs_pred))) #loss over whole batch
return custom_loss
###Output
_____no_output_____
###Markdown
Build and train model
###Code
model = tf.keras.Sequential([
tf.keras.layers.LSTM(512, return_sequences=True),
tf.keras.layers.Dense(NFORMANTS*3 + NZEROS*2, activation=top_activation),
])
print("Input shape: ", batch_input.shape)
print("Output shape: ", model(batch_input).shape)
print("")
sys.stdout.flush()
model.summary()
#myloss = get_custom_loss(top_activation)
myloss = get_custom_loss()
model.compile(
optimizer=tf.keras.optimizers.Adam(0.0001),
loss=myloss, metrics=[myloss]
)
###Output
_____no_output_____
###Markdown
The trained model is saved after every epoch that produces a validation loss lower than that of any previous epoch. Models were trained until the best validation loss had not improved for 20 epochs (patience=20), or for a maximum of 200 epochs.
###Code
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_dir + "/weights." + top_activation + "{epoch:03d}-{val_custom_loss:.3f}",
save_best_only=True, save_weights_only=True,
monitor='val_custom_loss', mode='min')
early_stopping_callback = tf.keras.callbacks.EarlyStopping(
patience=20, monitor='val_custom_loss', mode='min')
if testing is True:
EPOCHS = 3
VERBOSITY = 1
else:
EPOCHS = 200
VERBOSITY = 2
model.fit(batched_train_dset, epochs=EPOCHS, verbose=VERBOSITY,
callbacks=[model_checkpoint_callback, early_stopping_callback],
validation_data=batched_val_dset)
###Output
_____no_output_____
###Markdown
Restore best model and evaluate
###Code
print("")
sys.stdout.flush()
checkpoints = glob.glob(checkpoint_dir + "/*.index")
latest_checkpoint = max(checkpoints, key=os.path.getctime)
model.load_weights(latest_checkpoint[:-6]) #remove .index suffix
print("Restoring from", latest_checkpoint)
sys.stdout.flush()
train_eval = model.evaluate(batched_train_dset, verbose=0)
print("Training loss:", train_eval[0])
sys.stdout.flush()
val_eval = model.evaluate(batched_val_dset, verbose=0)
print("Validation loss:", val_eval[0])
sys.stdout.flush()
###Output
_____no_output_____
###Markdown
At this point, the TIMIT test data is loaded and tested. Note that all input data fed to the model must first be normalized using the mean and standard deviation of the original training data, so those values need to be recorded for evaluation (see also below) and any future model use. (Here they are stored in the function normdata().)
###Code
if testing is True:
len3, test1 = getdata(datadir, allfileset[4620:4680], suffix, n_context_frames, verbose=1)
else:
len3, test1 = getdata(datadir, allfileset[4620:6300], suffix, n_context_frames, verbose=0)
test1b = test1
batched_test_dset = tf.keras.preprocessing.timeseries_dataset_from_array(
test1b, targets=None, sequence_length=SEQUENCE_LENGTH, sequence_stride=SEQUENCE_LENGTH, batch_size=BATCH_SIZE)
batched_test_dset = batched_test_dset.map(normdata)
test_eval = model.evaluate(batched_test_dset, verbose=0)
print("Test loss (TIMIT):", test_eval[0])
print("")
sys.stdout.flush()
###Output
_____no_output_____
###Markdown
Generate TIMIT data
For evaluation, the models were run on the TIMIT recordings whose formants were measured for the VTR-TIMIT formant database. The output files are generated in a subdirectory /timit/ of the output directory (*checkpoint_dir*). The filename will be prefixed by *checkpoint_dir* and have the suffix ".abs". These files are text files, with one output line per input spectral envelope in the input file. The columns of the output file consist of the following in order: the filename, 3 placeholder columns (for the particular needs of our research), the time point in milliseconds, 7 more placeholder columns, and then the total number of resonances (poles plus zeros). This is followed by the parameters (frequency, bandwidth, and amplitude correction factor, in that order) of the poles, in order of increasing mean frequency, and then those of the zeros, in order of increasing mean absolute frequency.
Other notes:
* For output interpretation, it's important to remember that the generated "amplitudes" are not actually final formant amplitudes, but rather weighting factors that are used to adjust the initial formant amplitudes generated by formant().
* The following code changes the frequencies of the zeros to negative values, to distinguish them from the poles. Also, since the zeros don't have their own amplitude correction factors, a placeholder value of "0.0" is inserted (theoretically we should have used 1.0 instead, but this value is not used in any computations).
* The output code below assumes a frame rate of once every 5 milliseconds, which is the rate we used for our input data. (However, the VTR TIMIT measurements were taken once every 10 milliseconds, so every other output frame was used for evaluation.)
* Since there is nothing in the custom loss code above that distinguishes one formant from another (aside from poles versus zeros), and any of them can take frequency values between 0 and 8000, the model output neurons may generate the formants in any random order (although that order will be constant from one frame to the next; e.g. if neuron 3 generates F1 for one frame, it does so for all frames and files). The code below reorders the formants by their mean frequencies over all frames.
* For the CNN models, each input frame must be converted into a window, and each window fed to the model one at a time in a loop. For the RNN models, the frame sequence can be given to the model all at once because it is designed to read sequences.
###Code
print("Generating", len(vtrfileset), ".abs files on TIMIT data:")
cmd = ["mkdir", "-p", checkpoint_dir + "/timit"]
subprocess.run(cmd)
datadir = superdir + "timit" + subdir
for filename in vtrfileset:
outname=checkpoint_dir + "/timit/" + checkpoint_dir + "_" + filename
#Note that we feed the files one at a time to getdata()
lenf, f0 = getdata(datadir, [filename], suffix, n_context_frames, verbose=0)
f1 = f0
#Again, the input data must be normalized by the training set statistics
f2 = (f1 - trmean) / trstd
#Add a third dimension so that frame sequence can be read directly by the model
f2 = tf.expand_dims(f2, axis=0)
y = model.predict(f2)[0]
# Rescale and reorganize output
f, b, a = rescale_params(y)
zf=f.numpy()
za=a.numpy()
zb=b.numpy()
# Convert zero frequencies to negative numbers and insert placeholder "0.0" for zero amplitudes.
# Then sort formants and zeroes separately by increasing frequency.
if NZEROS > 0:
fp, f0 = tf.split(zf, [NFORMANTS, NZEROS], axis=-1)
f0 = f0 * -1.0
zf = tf.concat([fp, f0], axis=-1)
a0 = tf.zeros([za.shape[0], NZEROS], dtype=tf.float32)
za = tf.concat([za, a0], axis=-1)
ord = np.hstack((np.argsort(np.mean(fp, axis=0)), (np.flip(np.argsort(np.mean(f0, axis=0)) + NFORMANTS))))
else:
ord = np.argsort(np.mean(np.abs(zf),axis=0))
print("FILE:", filename)
sys.stdout.flush()
#Re-sort parameters in the order F1 B1 A1, F2 B2 A2, etc. and write to output file
p = [(i, i+NSUM, i+(NSUM*2)) for i in ord]
p2=sum(p, ())
zp=np.hstack((zf, zb, za))
out1=zp[:,p2]
ff = open(outname + ".abs", "w")
for i in range(out1.shape[0]):
ff.write("{} AA 1 1 {:.1f} 200.0 60 0 0 60 40 2 {} ".format(filename, i*5.0, NSUM))
out1[i,:].tofile(ff, sep=" ")
ff.write(" \n")
ff.close()
print("")
print("FINISHED script for", checkpoint_dir)
###Output
_____no_output_____ |
the_box_plot.ipynb | ###Markdown
Project - Fundamentals of Data Analysis. Course: HDIP Data Analytics. Module: 52446 Project. Student ID: G00364778. Submission: 14 December 2018. This Jupyter notebook discusses the history of box plots, the parameters used in box plots and their meanings, and methods to derive those parameters using the Python libraries matplotlib.pyplot and Seaborn. Examples are given and discussed for creating the plots, along with samples that use Pandas dataframes to render and filter the data.
Searching for Articles on Boxplots. Doing a search online for boxplots yields a lot of repetitive information going over the basics of the box plot, starting with Q1, Q2 and Q3, through the IQR and the calculation of the fences at 1.5 IQR, to outliers. There are some minor diversions going into percentiles. The most interesting bits of information came from a [discussion on stackexchange](https://stats.stackexchange.com/questions/369393/whats-the-history-of-box-plots-and-how-did-the-box-and-whiskers-design-evolv) with further references to interesting material that is not generally discussed elsewhere. That discussion has some hints about where Tukey might have drawn his inspiration to expand on ideas that were already in use by geographers for some time. It is interesting to note that once an idea propagates and becomes widely discussed, the same information is generally repeated, and extensive digging is needed to find detail that is not already covered at length; the dilemma then becomes quoting information that is not often referenced, whose reputation is therefore open to question.
Background on Boxplots. Boxplots are a measure of how well distributed the data in a data set is. They divide the data set into four quarters, separated by the three quartile values. The boxplot graph represents the minimum, maximum, median, first quartile and third quartile of the data set. It is also useful for comparing the distribution of data across data sets by drawing boxplots for each of them. The box plot is common in data analysis for investigating individual numerical variables and is a very useful tool for showing the statistical distribution of a dataset. It is also suitable for visually comparing the range and distribution of groups of numerical data. The boxplot function from the Python package matplotlib.pyplot or from the Seaborn library can be used to create box plots in Python. The boxplot is much discussed online, with a variety of articles taking many different approaches to explain the meaning of its various components. The approach that appealed to me was comparing it to something known, like the histogram, and deriving new meaning from the existing understanding of something else. This approach proved very useful since it made a lot of sense of, and even showed the logic in, Tukey arriving at $1.5\times IQR$ for the outer fences, since this lines up very well with the tails of the normal distribution. It also departed from the approach taken in the majority of articles, which simply describe the different components of the boxplot and explain how they were derived.
The History of the Boxplot. The box plot was first introduced by John Wilder Tukey in 1970. Tukey was an American mathematician best known for the development of the FFT algorithm and the box plot. The Tukey range test, the Tukey lambda distribution, the Tukey test of additivity, and the Teichmüller–Tukey lemma all bear his name. 
He is also credited with coining the term 'bit'. Box plots have been around for over forty years: they were first introduced in 1970 by John Tukey in his toolkit for exploratory data analysis and became widely known through his formal publications in 1977. Today the box plot is one of the most frequently used statistical graphs and one of the few plot types invented in the 20th century that has seen widespread adoption. Due to their elegance and practicality, a wealth of variations and enhancements have been developed over the years. The basic graphic form of the box plot was already established in the early 1950s, when it was drawn by hand and the calculations were done manually. The idea of showing the median and quartiles as basic summaries goes back at least to the dispersion diagrams introduced by the geographer Percy Robert Crowe (1933). Dispersion diagrams were staple fare for geographers and used in many textbooks as well as research papers from the late 1930s on. What is most original in Tukey's version of box plots is, first of all, the criteria for identifying the relevant points in the tails to be plotted separately, identified as deserving detailed consideration, and often flagging that a variable should be analysed on a transformed scale. His 1.5 IQR rule of thumb emerged only after much experimentation. It has mutated in some hands into a hard rule for deleting data points, which was never Tukey's intent. A punchy, memorable name, box plot, did no harm in ensuring the much wider impact of these ideas; dispersion diagram, in contrast, is rather a dull and dreary term.
A Boxplot Compared to a Histogram. The main purpose of the comparison exercise was simply to overlay a box plot on a histogram to generate my own graphic representation, for further discussion, showing the correlation between the two plots. However, taking the time to generate the code from various samples covered in lecture material, and figuring out how to overlay a box plot above a histogram on the same axis, ended up being a very useful exercise for gaining a much deeper understanding of the calculations and integrals used to derive the percentages of data points under any given area, and how they all relate to the quartiles and fences discussed in the rest of the document.
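As a quick, optional sanity check of the σ values used in the figure and discussion below (an added sketch; it assumes `scipy` is available, which is not otherwise required by this notebook), the quartiles and Tukey fences of a standard normal distribution can be computed directly:
```python
from scipy.stats import norm

q1, q3 = norm.ppf(0.25), norm.ppf(0.75)    # roughly -0.6745 and +0.6745 sigma
iqr = q3 - q1                              # about 1.349 sigma
upper_fence = q3 + 1.5 * iqr               # about 2.698 sigma
outside = 2 * (1 - norm.cdf(upper_fence))  # fraction beyond both fences, about 0.7%
print(q1, q3, upper_fence, outside)
```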
###Code
import warnings # Import the warning to disable future warning messages in the graph outputs.
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np # Import the numpy library for random number generation purposes
import seaborn as sns # import seaborn for graphing the plots
import matplotlib.pyplot as plt # import matplotlib for plot control commands
%matplotlib inline
plt.style.use('seaborn') # set the plot style for the graphs
plt.rcParams['figure.figsize'] = 16, 8 # Set the size of the plots in the document below
x = np.random.randn(50000) # generate 50,000 normally distributed data point for use in the plots.
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (.20, .80)}) # divide the plot area in two
sns.boxplot(x, ax=ax_box) # set the box plot
sns.distplot(x, ax=ax_hist) # set the distribution plot
plt.axvline(x=-2.698, color='r', linestyle=':') # draw the red vertical line on the distribution plot
plt.axvline(x=-0.67, color='r', linestyle=':')
plt.axvline(x= 0.67, color='r', linestyle=':')
plt.axvline(x= 2.698, color='r', linestyle=':')
plt.text(-2.698,0.43,r'$-2.698\sigma$',size=15, color='r', horizontalalignment='center') # set the sigma symbols on the plots
plt.text(-0.6745,0.43,r'$-0.6745\sigma$',size=15, color='r', horizontalalignment='center')
plt.text( 0.6745,0.43,r'$ 0.6745\sigma$',size=15, color='r', horizontalalignment='center')
plt.text( 2.698,0.43,r'$ 2.698\sigma$',size=15, color='r', horizontalalignment='center')
plt.text(0,0.02,r'$50%$',size=15, color='r', horizontalalignment='center') # draw the percentages on the graph
plt.text(-1.7,0.02,r'$24.65%$',size=15, color='r', horizontalalignment='center')
plt.text(1.7,0.02,r'$24.65%$',size=15, color='r', horizontalalignment='center')
plt.text(-3.5,0.02,r'$0.35%$',size=15, color='r', horizontalalignment='center')
plt.text(3.5,0.02,r'$0.35%$',size=15, color='r', horizontalalignment='center')
ax_box.set(yticks=[]) # remove the tickmarks on the left of the boxplot
plt.show() # display the graph
###Output
_____no_output_____
###Markdown
For a normally distributed dataset, 50% of the data points fall between the $\pm 0.6745\sigma$ values, 99.3% between the $\pm 2.698\sigma$ values, and 0.7% outside, in the _outlier range_. This coincides with the boxplot boundary calculations and provides meaningful insight into the positioning of the fences, boundaries, "minimum" and "maximum" values of the boxplot discussed in the following sections. Much more can be read from a boxplot than is obvious from the simplistic method of its construction, particularly when the boxplots of several samples are lined up alongside one another. The box length gives an indication of the sample variability, and the median line across the box shows where the sample is centred. The position of the box within its whiskers and the position of the line within the box also tell us whether the sample is symmetric or skewed, either to the right or left. For a symmetric distribution, _long whiskers_, relative to the box length, can betray a _heavy tailed_ population, and _short whiskers_ a _short tailed_ population. So, provided the number of points in the sample is not too small, the boxplot also gives us some idea of the "shape" of the sample and, by implication, the shape of the population from which it was drawn. This is all important when considering appropriate analyses of the data being investigated. This analogy goes a long way towards creating a much deeper understanding of terminology such as tails, giving some guidelines as to where tails start and what they represent. It also helps in understanding where _1.5 IQR_ lies in relation to the distribution of the dataset, and the meaning of the 0.7% split between tails and outliers. The insight imparted by this comparison could not have been conveyed so thoroughly through other methods of teaching.
Boxplot Terminology. A box plot consists of five carefully chosen components that give a robust summary of the distribution of a dataset:
- The _median_
- Two _hinges_, the upper and lower fourths (_quartiles_) Q1 and Q3
- The data values adjacent to the fences (1.5 x the inner fourth from the median)
- Two _whiskers_ to connect the _hinges_ to the _fences_
- _Outliers_ (outside the extremes)
The Median. In simple terms, the median of a dataset is the value in the middle of the set when the set has an odd number of values, or the average of the two middle values when it has an even number.
```python
dataset = [1,2,3,4,5,6,7,8,9]
```
In this case the median of the set is 5.
```python
dataset = [1,2,3,4,5,6,7,8,9,10]
```
In the second case the median of the set is 5.5, the average of 5 and 6.
The Fences. The data values at the fences, although described as the _minimum_ and _maximum_, are not the same as the descriptive-statistics min and max values. The _"minimum"_ value is calculated as $minimum=Q1-1.5\times IQR$ and the _"maximum"_ value as $maximum=Q3+1.5\times IQR$.
The Term Quartile. Essentially, the quartiles divide the dataset into groups around the _median_ value, for example:
```python
In [2]: [i for i in range(18,100,8)]
Out[2]: [18, 26, 34, 42, 50, 58, 66, 74, 82, 90, 98]
                             Q2
```
- The _median_ of the lower half is 34; this is the _lower quartile_ (Q1).
- The _median_ of the upper half is 82; this is the _upper quartile_ (Q3).
- From this we can determine the IQR (interquartile range), i.e.
IQR = Q3 - Q1 = 82 - 34 = 48. The IQR is a very useful measurement because it is less influenced by extreme values at the edges: it limits the range to the middle 50% of the values, and this interquartile range is indicated by the blue box in the sample above.
The Term Percentile. The box plot is defined by five data-summary values, as listed below. The box portion of the box plot is bounded by two lines at the 25th percentile and the 75th percentile, and the line inside the box marks the median, at the 50th percentile. The percentiles divide the dataset into quarters, where the borders of the quarters are determined as follows:
- median: 50% (Q2)
- first quartile: 25% (Q1)
- third quartile: 75% (Q3)
- interquartile range (IQR): Q3-Q1
- outliers: outside the fences
- "maximum": $Q3+1.5\times IQR$
- "minimum": $Q1-1.5\times IQR$
The Outliers. An outlier is an observation that is numerically distant from the rest of the data. When reviewing a boxplot, an outlier is defined as a data point located outside the fences ("whiskers") of the boxplot, i.e. more than 1.5 times the interquartile range above the upper quartile or below the lower quartile. The outliers are the values that fall outside the fences set at $1.5\times IQR$, that is 1.5 times the spread of the middle 50% of the distribution; values outside this range are marked as outliers. Outliers are an important aspect of boxplots and are not identified merely to be discarded. As previously shown, outliers typically make up about 0.7% of a dataset. They are often points worthy of investigation in order to understand why they differ; such points can lead to significant discoveries, and the objective of identifying outliers is deeper investigation, not necessarily discarding them. As an example of the importance of outliers, in the early 1980s scientists detected a dramatic seasonal drop in ozone levels over Antarctica during a fly-over. They subsequently spent two years rechecking their satellite data and discovered that the satellites had dutifully been recording the ozone collapse, but the computers had not raised an alert because they were programmed to reject such extreme data as anomalies [R. Benedick, Scientific American, April 1992].
Situations in which Box Plots are Used. Box plots are often used in exploratory data analysis and are commonly seen with stock exchange data and graphs. This type of graph is used to show the shape of the distribution, its central value, its variability and the data points flagged as outliers.
Demonstration of using Boxplots on 75 Years of Weather Data. The use of a boxplot is best illustrated on a dataset, for example the daily weather data for Dublin collected at Dublin Airport since 1942. For this exercise we will focus on the maximum daily temperature values, explore interesting patterns spotted in the sets, and focus on the outliers and their potential meanings.
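As a minimal illustrative sketch (added here; the variable names are hypothetical, and the quartiles follow the "median of each half" convention used above, which can differ slightly from NumPy's default `np.percentile` interpolation), the quartiles, IQR and fences for the example list can be computed as:
```python
import numpy as np

data = np.array(range(18, 100, 8))    # [18, 26, ..., 98], the example list above
q2 = np.median(data)                  # median (Q2) = 58
q1 = np.median(data[data < q2])       # lower quartile = 34
q3 = np.median(data[data > q2])       # upper quartile = 82
iqr = q3 - q1                         # interquartile range = 48
lower_fence = q1 - 1.5 * iqr          # the boxplot "minimum"
upper_fence = q3 + 1.5 * iqr          # the boxplot "maximum"
outliers = data[(data < lower_fence) | (data > upper_fence)]
print(q1, q2, q3, iqr, lower_fence, upper_fence, outliers)
```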
###Code
import pandas as pd # import the necessary libraries for data manipulation and plotting
import matplotlib.pyplot as pl
%matplotlib inline
data_url='https://raw.githubusercontent.com/G00364778/52446_project/master/data/dly532.csv' # github url for raw data
plt.style.use('default') # set the default maplotlib styles
df=pd.read_csv(data_url,skiprows=25) # load the dataset into a dataframe
df['date']=pd.to_datetime(df['date']) # convert to a pandas date
df=df[['date','maxtp']] # discard the unwanted data
#df=df[df.date.dt.year>1987] # filter on the year
df['year']=df.date.dt.year # create a month column
df.boxplot('maxtp',figsize=(3,3)) # generate a box plot
pl.show() # show the plot
###Output
_____no_output_____
###Markdown
Summarising the complete dataset of 28,000 data points over three quarters of a century, I can now say with conviction, from a single graph, that the maximum daily temperature for Dublin typically falls between 9 and 17 degrees, with lows around zero and some extreme outliers below zero, though rarely as low as -5. Typical highs are around 27, with rare outliers close to but below 30. What an incredible insight on such a big dataset.
###Code
import pandas as pd # import the necessary libraries
import matplotlib.pyplot as pl
%matplotlib inline
df=pd.read_csv(data_url,skiprows=25) # load the dataset into a dataframe again since filtering was applied before
df['date']=pd.to_datetime(df['date']) # convert to a pandas date format
df=df[['date','maxtp']] # discard the unwanted data
df=df[df.date.dt.year>1941] # filter on the year
df['year']=df.date.dt.year # create a month column
df.boxplot('maxtp','year',rot=90, figsize=(16,4.5)) # generate a box plot
pl.show() # show the plot
###Output
_____no_output_____
###Markdown
The plot by year gives insight into the annual movements and highlights exceptional years. An interesting aspect is the very stable median around the 12-13 mark. Lows rarely hit zero and highs are consistently around 25. This plot was primarily generated to find the most interesting years to zoom in on in order to visualise the annual weather movements. So, looking at the entire set to find the most interesting year, i.e. the year with the most varied patterns, the ones that stand out are 2010 for the extreme low and a wide distribution, and 2007 and 2012 for the lowest overall distributions, making for a very informative view of what transpired over the last 20 years. 1990 warrants some extra scrutiny for its single outlier in August, and 2010 for a consistently cold December. A final interesting observation on the set is the variance in the size of the interquartile ranges of the plots, and the fact that the 2018 plot already shows ranges similar to exceptional years like 2010, 1987, 1979 and so on. With only ten months of data and lower fence extremes already at zero, it invites speculation about snow over Christmas. I wonder if one can reach conclusions like that looking at trends in progress with 16% of the lower end of the data missing.
###Code
import pandas as pd # import the necessary libraries
import matplotlib.pyplot as pl
%matplotlib inline
pl.rcParams['figure.figsize'] = 12, 6 # set the size of the plot in the document below
df=pd.read_csv(data_url,skiprows=25) # load the dataset into a dataframe
df['date']=pd.to_datetime(df['date']) # convert to a pandas date
df=df[['date','maxtp']] # discard the unwanted data
df=df[df.date.dt.year==2010] # filter on the year
df['month']=df.date.dt.month # create a month column
df.boxplot('maxtp','month') # generate a box plot
pl.show() # show the plot
###Output
_____no_output_____
###Markdown
Looking at the distributions over one of the most varied years actually shows surprisingly tight months compared to other years, but with a distinct sinusoidal pattern emerging from the set. While we might see extremes below 5 in December and January, there are still milder days on average, and December is on average the coldest month, even though snowfalls in January and later months might suggest otherwise. It is probably the extreme outliers that cause the late snow days, not the norm or typical values for those periods. August definitely seems like a tight, stable month for predictable holiday weather, although July seems to have warmer exceptions. So we should pick July for the warmest possible days, but August for consistent, if slightly cooler, days.
Comparing the Box Plot to Alternatives. Many varied alternatives were derived from Tukey's original ideas for the boxplot, with some variations and additions that will be explored in more detail in the following sections.
Boxplot using Seaborn. An alternative, more colourful variant of the box plots already illustrated above. Adding colour can also serve to show variation in another dimension, such as regional variance in the samples used in the illustration.
###Code
import seaborn as sns
sns.boxplot(x=df.month,y=df.maxtp)
pl.show()
###Output
_____no_output_____
###Markdown
Notched Box Plot. The notched box plot adds a notch that indicates a confidence interval around the median; other than that it is the same as the regular box plot.
###Code
import seaborn as sns
sns.boxplot(x=df.month,y=df.maxtp, notch=True)
pl.show()
###Output
_____no_output_____
###Markdown
The Boxen Plot. The boxen plot shows a much larger number of quantiles than the regular box plot. Conventional boxplots are useful displays for conveying rough information about the central 50% and the extent of the data. For small data sets (n < 200), detailed estimates of tail behaviour beyond the quartiles may not be trustworthy, so the information provided by boxplots is appropriately somewhat vague beyond the quartiles, and the expected number of "outliers" in a sample of size n is often less than 10. Larger data sets (n ~ 10,000-100,000) afford more precise estimates of quantiles beyond the quartiles, but conventional boxplots do not show this information about the tails and, in addition, show large numbers of extreme, but not unexpected, observations.
###Code
import seaborn as sns
sns.boxenplot(x=df.month,y=df.maxtp)
pl.show()
###Output
_____no_output_____
###Markdown
The Stripplot. The strip plot shows all the observations of a given set and is typically used alongside some of the other categorical plot types.
###Code
sns.catplot(x='month',y='maxtp', kind='strip', data=df, aspect=2)
pl.show()
###Output
_____no_output_____
###Markdown
The Swarm Plot. The swarm plot is like the strip plot but the points are adjusted along the categorical axis so that they do not overlap. This improves the visual representation of the distribution of the values, but does not scale well with very large numbers of observations.
###Code
sns.swarmplot(x=df.month,y=df.maxtp)
pl.show()
###Output
_____no_output_____
###Markdown
The Violin Plot. A violin plot plays a similar role as a box plot. It shows the distribution of quantitative data across several levels of one (or more) categorical variables such that those distributions can be compared. Unlike a box plot, in which all of the plot components correspond to actual datapoints, the violin plot features a kernel density estimation of the underlying distribution. This can be an effective and attractive way to show multiple distributions of data at once, but keep in mind that the estimation procedure is influenced by the sample size, and violins for relatively small samples might look misleadingly smooth.
###Code
sns.violinplot(x=df.month,y=df.maxtp, dodge=False)
pl.show()
###Output
_____no_output_____
###Markdown
The Bean Plot. This is the same as a violin plot, except for the reference rule or scale factor used when computing the kernel bandwidth: the actual kernel size is determined by multiplying the scale factor by the standard deviation of the data within each bin.
###Code
sns.catplot(x='month',y='maxtp', data=df, kind='violin', bw=.15, aspect=2)
pl.show()
###Output
_____no_output_____ |
examples/tutorials/7_Creating_a_WaveformReducer.ipynb | ###Markdown
Creating A WaveformReducer
The `WaveformReducer` functionality in CHECLabPy (and extract_dl1.py) is facilitated by two additional utilities: `column` and `WaveformReducerChain`. This tutorial will describe these objects, and show how to create your own simple `WaveformReducer`.
WaveformReducer
An example of a `WaveformReducer` is shown below:
###Code
from CHECLabPy.core.reducer import WaveformReducer, column
class WaveformMaxReducer(WaveformReducer):
@column
def waveform_max(self):
return self.waveforms.max(axis=1)
###Output
_____no_output_____
###Markdown
As you can see, it can be very simple to create a WaveformReducer. There are 3 stages to a `WaveformReducer`:
1. When the `WaveformReducer` is initialised, the arguments passed to it dictate which of its columns are activated or disabled. For example `reducer = WaveformMaxReducer(waveform_max=False)` would disable the column for the above reducer.
###Code
reducer = WaveformMaxReducer(n_pixels=2048, n_samples=128, waveform_max=True)
print(reducer.active_columns)
reducer = WaveformMaxReducer(n_pixels=2048, n_samples=128, waveform_max=False)
print(reducer.active_columns)
###Output
_____no_output_____
###Markdown
2. To process an event, the `process` method is called. The first thing the reducer does is calculate the values that multiple of its columns require, and store them as members of the `WaveformReducer`. This is performed in the `_prepare` method, which is the first thing called by `process`. By default, the `_prepare` method simply attaches the waveform for the current event to the reducer, ready to be processed by the `columns`:
###Code
import numpy as np
reducer = WaveformMaxReducer(n_pixels=2048, n_samples=128, waveform_max=True)
waveforms = np.random.rand(2048, 128)
reducer._prepare(waveforms)
print((waveforms == reducer.waveforms).all())
###Output
_____no_output_____
###Markdown
(It is important to note that it is not necessary to call the `_prepare` method yourself; it is automatically called when calling `process`.)
3. The active columns are looped through, and a dict containing the extracted values per pixel for each column is produced and returned:
###Code
import numpy as np
reducer = WaveformMaxReducer(n_pixels=2048, n_samples=128, waveform_max=True)
waveforms = np.random.rand(2048, 128)
params = reducer.process(waveforms)
print(params)
print(params['waveform_max'].shape)
###Output
_____no_output_____
###Markdown
column
The purpose of the column decorator is to identify the items that are to be included as columns in the extracted dl1 file. It is expected that a `column` returns a numpy array of size n_pixels, and uses the `self.waveforms` attribute (or other pre-calculated attributes from the `_prepare` method) to perform the calculation. No two columns can have the same name, even if they are in different `WaveformReducers`, ensuring that columns are unique. If a column with a duplicate name is defined in a different `WaveformReducer`, an error is raised:
###Code
from CHECLabPy.core.reducer import WaveformReducer, column
class WaveformMaxReducer2(WaveformReducer):
@column
def waveform_max(self):
return self.waveforms.max(axis=1)
###Output
_____no_output_____
###Markdown
Chain
The purpose of `Chain` is to loop over all defined WaveformReducers, and accumulate the column results for all activated columns. If a WaveformReducer has no active columns, it is skipped. This means that now multiple `WaveformReducers` can contribute to the same dl1 file. The `Chain` class also defines which columns are active by default, and can also read a yaml configuration file, allowing the user to select the active columns from the command line by specifying a path to a config file. This config file path can be specified as an argument to `extract_dl1.py`.
###Code
from CHECLabPy.data import get_file
config_path = get_file("extractor_config.yml")
!echo "waveform_max: True\ncharge_averagewf: True" > $config_path
!cat $config_path
import numpy as np
from CHECLabPy.core.chain import WaveformReducerChain
chain = WaveformReducerChain(n_pixels=2048, n_samples=128, config_path=config_path)
waveforms = np.random.rand(2048, 128)
params = chain.process(waveforms)
print("\n", params)
###Output
_____no_output_____ |
.ipynb_checkpoints/2020.01.10_ActivationMax_rna-seq_pan_BRCA_tumor-v-normal-checkpoint.ipynb | ###Markdown
Sample Prep
###Code
samples = pd.read_csv('../data/TCGA/rna-seq_pan/meta/gdc_sample_sheet.2019-12-12.tsv', sep="\t")
# get file type
samples['data'] = [val[1] for i,val in samples['File Name'].str.split(".").items()]
samples.head()
###Output
_____no_output_____
###Markdown
Samples with RNAseq adjacent normal tissue
###Code
samples[samples['Sample Type']=='Solid Tissue Normal']['data'].value_counts()
samples['project'] = [val[1] for i,val in samples['Project ID'].str.split("-").items()]
samples['project'].value_counts()
# all cases with adjacent normal tissue
cases = samples[samples['Sample Type']=='Solid Tissue Normal']['Case ID']
# disparity in cases
samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Primary Tumor')
& (samples['data']=='FPKM') & (samples['project']=='BRCA')]['Case ID'].nunique()
samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Solid Tissue Normal')
& (samples['data']=='FPKM') & (samples['project']=='BRCA')]['Case ID'].nunique()
# divide, join, subset
case_tumor = samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Primary Tumor') &
(samples['data']=='FPKM') & (samples['project']=='BRCA')]
case_norm = samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Solid Tissue Normal') &
(samples['data']=='FPKM') & (samples['project']=='BRCA')]
cases = case_norm[case_norm['Case ID'].isin(case_tumor['Case ID'])]['Case ID']
cases.shape
case_tumor = case_tumor[case_tumor['Case ID'].isin(cases)]
case_norm = case_norm[case_norm['Case ID'].isin(cases)]
cases = pd.concat([case_tumor, case_norm])
case_tumor.shape
case_norm.shape
cases.shape
###Output
_____no_output_____
###Markdown
Dataset Prep
###Code
from sklearn.model_selection import train_test_split
target = 'Sample Type'
cases[target] = cases[target].astype('category')
train, test = train_test_split(cases)
train[target].value_counts()
test[target].value_counts()
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.autograd import Variable
from trainer import fit
import visualization as vis
import numpy as np
cuda = torch.cuda.is_available()
print("Cuda is available: {}".format(cuda))
classes = train[target].cat.categories.values
from tcga_datasets import TCGA, SiameseTCGA
root_dir = "../data/TCGA/rna-seq_pan/"
batch_size = 1
train_dataset = TCGA(root_dir, samples=train, train=True, target=target)
test_dataset = TCGA(root_dir, samples=test, train=False, target=target)
kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {'num_workers': 10}
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
###Output
_____no_output_____
###Markdown
Siamese Network
###Code
# Step 1 set up dataloader
root_dir = "../data/TCGA"
siamese_train_dataset = SiameseTCGA(train_dataset) # Returns pairs of images and target same/different
siamese_test_dataset = SiameseTCGA(test_dataset)
batch_size = 8
kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {}
siamese_train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
siamese_test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# Set up the network and training parameters
from tcga_networks import EmbeddingNet, SiameseNet
from losses import ContrastiveLoss
from metrics import AccumulatedAccuracyMetric
# Step 2
embedding_net = EmbeddingNet()
# Step 3
model = SiameseNet(embedding_net)
if cuda:
model.cuda()
# Step 4
margin = 1.
loss_fn = ContrastiveLoss(margin)
lr = 1e-3
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
n_epochs = 10
# print training metrics every log_interval * batch_size
log_interval = 30
train_loss, val_loss = fit(siamese_train_loader, siamese_test_loader, model, loss_fn, optimizer, scheduler,
n_epochs, cuda, log_interval)
plt.plot(range(0, n_epochs), train_loss, 'rx-')
plt.plot(range(0, n_epochs), val_loss, 'bx-')
train_embeddings_cl, train_labels_cl = vis.extract_embeddings(train_loader, model)
vis.plot_embeddings(train_embeddings_cl, train_labels_cl, classes)
val_embeddings_baseline, val_labels_baseline = vis.extract_embeddings(test_loader, model)
vis.plot_embeddings(val_embeddings_baseline, val_labels_baseline, classes)
###Output
_____no_output_____
###Markdown
Activation Maximization
###Code
flat = pd.Series(train_dataset.data.values.ravel())
flat.describe()
###Output
_____no_output_____
###Markdown
From https://github.com/MisaOgura/flashtorch/blob/master/flashtorch/activmax/gradient_ascent.py
###Code
model
for name_embed, embed in model._modules.items():
for name_fc, fc in embed._modules.items():
for name, layer in fc._modules.items():
print(name, layer)
import torch.nn as nn
import copy
tmp_model = copy.deepcopy(model)
activations = {}
def hook_fn(m, i, o):
activations[m] = o
def get_all_layers(net):
for name, layer in net._modules.items():
#If it is a sequential, don't register a hook on it
# but recursively register hook on all it's module children
if isinstance(layer, EmbeddingNet) or isinstance(layer, nn.Sequential):
get_all_layers(layer)
# if isinstance(layer, nn.Sequential):
# get_all_layers(layer)
else:
print(layer)
# it's a non sequential. Register a hook
layer.register_forward_hook(hook_fn)
get_all_layers(tmp_model)
siamese_test_dataset.test_labels
siamese_test_dataset.labels
siamese_test_dataset[-1]
(dat1, dat2), targ = siamese_test_dataset[-1]
tmp_model.cuda()
out1, out2 = tmp_model.forward(dat1.cuda(), dat2.cuda())
for key,val in activations.items():
print(key, val.shape, "\n\t\t", val)
out2
# need to record activations at final layer
# then optimize random
###Output
_____no_output_____ |
resnet18_udacity_pytorch_challenge.ipynb | ###Markdown
Udacity PyTorch Scholarship Final Lab Challenge **By [Droid(Kaggle - Droid021)](https://www.linkedin.com/in/v3nvince)**
###Code
!pip install --no-cache-dir -I pillow
#!wget 'https://s3.amazonaws.com/content.udacity-data.com/courses/nd188/flower_data.zip'
#!unzip flower_data.zip
#!pip3 install torch==0.4.0 torchvision
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import numpy as np
data_dir = 'flower_data'
# TODO: Define transforms for the training data and testing data
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/valid', transform=test_transforms)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=80, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=80)
model = models.resnet18(pretrained=True)
# Freeze parameters so we don't backprop through them
for _, param in model.named_parameters():
param.requires_grad = False
from collections import OrderedDict
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(512, 512)),
('relu', nn.ReLU()),
('dropout', nn.Dropout(0.4)),
('fc2', nn.Linear(512, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.fc = classifier
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.fc.parameters(), lr=0.005)
# check if CUDA is available
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
epochs = 30
valid_loss_min = np.Inf
# Some lists to keep track of loss and accuracy during each epoch
epoch_list = []
train_loss_list = []
val_loss_list = []
train_acc_list = []
val_acc_list = []
# Start epochs
for epoch in range(epochs):
#adjust_learning_rate(optimizer, epoch)
# monitor training loss
train_loss = 0.0
val_loss = 0.0
###################
# train the model #
###################
# Set the training mode ON -> Activate Dropout Layers
model.train() # prepare model for training
# Calculate Accuracy
correct = 0
total = 0
# Load Train Images with Labels(Targets)
for data, target in train_loader:
if train_on_gpu:
data, target = data.to(device), target.to(device)
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
if type(output) == tuple:
output, _ = output
# Calculate Training Accuracy
predicted = torch.max(output.data, 1)[1]
# Total number of labels
total += len(target)
# Total correct predictions
correct += (predicted == target).sum()
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
# from torch.autograd import Variable
# loss = Variable(loss, requires_grad = True)
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# calculate average training loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
# Avg Accuracy
accuracy = 100 * correct / float(total)
# Put them in their list
train_acc_list.append(accuracy)
train_loss_list.append(train_loss)
# Implement Validation like K-fold Cross-validation
# Set Evaluation Mode ON -> Turn Off Dropout
model.eval() # Required for Evaluation/Test
# Calculate Test/Validation Accuracy
correct = 0
total = 0
with torch.no_grad():
for data, target in test_loader:
if train_on_gpu:
data, target = data.cuda(), target.cuda()
# Predict Output
output = model(data)
if type(output) == tuple:
output, _ = output
# Calculate Loss
loss = criterion(output, target)
val_loss += loss.item()*data.size(0)
# Get predictions from the maximum value
predicted = torch.max(output.data, 1)[1]
# Total number of labels
total += len(target)
# Total correct predictions
correct += (predicted == target).sum()
# calculate average training loss and accuracy over an epoch
val_loss = val_loss/len(test_loader.dataset)
accuracy = 100 * correct/ float(total)
# Put them in their list
val_acc_list.append(accuracy)
val_loss_list.append(val_loss)
# Print the Epoch and Training Loss Details with Validation Accuracy
print('Epoch: {} \tTraining Loss: {:.4f}\t Val. acc: {:.2f}%'.format(
epoch+1,
train_loss,
accuracy
))
# save model if validation loss has decreased
if val_loss <= valid_loss_min:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
val_loss))
# Save Model State on Checkpoint
torch.save(model.state_dict(), 'gdrive/My Drive/remodel.pt')
torch.save(model.state_dict(), 'umodel.pt')
valid_loss_min = val_loss
# Move to next epoch
epoch_list.append(epoch + 1)
###Output
Epoch: 1 Training Loss: 3.2448 Val. acc: 67.00%
Validation loss decreased (inf --> 1.340319). Saving model ...
Epoch: 2 Training Loss: 1.8805 Val. acc: 79.00%
Validation loss decreased (1.340319 --> 0.806241). Saving model ...
Epoch: 3 Training Loss: 1.5993 Val. acc: 80.00%
Validation loss decreased (0.806241 --> 0.708884). Saving model ...
Epoch: 4 Training Loss: 1.4636 Val. acc: 84.00%
Validation loss decreased (0.708884 --> 0.609268). Saving model ...
Epoch: 5 Training Loss: 1.4414 Val. acc: 84.00%
Validation loss decreased (0.609268 --> 0.586282). Saving model ...
Epoch: 6 Training Loss: 1.4378 Val. acc: 84.00%
Validation loss decreased (0.586282 --> 0.548567). Saving model ...
Epoch: 7 Training Loss: 1.4294 Val. acc: 85.00%
Epoch: 8 Training Loss: 1.3244 Val. acc: 83.00%
Epoch: 9 Training Loss: 1.3344 Val. acc: 86.00%
Validation loss decreased (0.548567 --> 0.469412). Saving model ...
Epoch: 10 Training Loss: 1.2951 Val. acc: 84.00%
Epoch: 11 Training Loss: 1.3268 Val. acc: 85.00%
Validation loss decreased (0.469412 --> 0.469262). Saving model ...
Epoch: 12 Training Loss: 1.3079 Val. acc: 85.00%
Epoch: 13 Training Loss: 1.3089 Val. acc: 86.00%
Epoch: 14 Training Loss: 1.2927 Val. acc: 86.00%
Validation loss decreased (0.469262 --> 0.447514). Saving model ...
Epoch: 15 Training Loss: 1.2229 Val. acc: 86.00%
Epoch: 16 Training Loss: 1.2679 Val. acc: 85.00%
Epoch: 17 Training Loss: 1.3248 Val. acc: 88.00%
Validation loss decreased (0.447514 --> 0.440362). Saving model ...
Epoch: 18 Training Loss: 1.3192 Val. acc: 86.00%
Epoch: 19 Training Loss: 1.2600 Val. acc: 87.00%
Validation loss decreased (0.440362 --> 0.419240). Saving model ...
Epoch: 20 Training Loss: 1.2317 Val. acc: 87.00%
Epoch: 21 Training Loss: 1.2035 Val. acc: 87.00%
Epoch: 22 Training Loss: 1.2750 Val. acc: 87.00%
Epoch: 23 Training Loss: 1.2640 Val. acc: 88.00%
Epoch: 24 Training Loss: 1.2167 Val. acc: 88.00%
Validation loss decreased (0.419240 --> 0.417008). Saving model ...
Epoch: 25 Training Loss: 1.2593 Val. acc: 85.00%
Epoch: 26 Training Loss: 1.2268 Val. acc: 87.00%
Epoch: 27 Training Loss: 1.2107 Val. acc: 88.00%
Epoch: 28 Training Loss: 1.2544 Val. acc: 87.00%
Epoch: 29 Training Loss: 1.2506 Val. acc: 89.00%
Validation loss decreased (0.417008 --> 0.413562). Saving model ...
Epoch: 30 Training Loss: 1.2396 Val. acc: 87.00%
###Markdown
Links Here: **Model State Checkpoint File: [umodel.pt](./umodel.pt)** (Preferred)
###Code
model.load_state_dict(torch.load('umodel.pt'))
# Training / Validation Loss
plt.plot(epoch_list,train_loss_list)
plt.plot(val_loss_list)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Training/Validation Loss vs Number of Epochs")
plt.legend(['Train', 'Valid'], loc='upper right')
plt.show()
# Train/Valid Accuracy
plt.plot(epoch_list,train_acc_list)
plt.plot(val_acc_list)
plt.xlabel("Epochs")
plt.ylabel("Training/Validation Accuracy")
plt.title("Accuracy vs Number of Epochs")
plt.legend(['Train', 'Valid'], loc='best')
plt.show()
val_acc = sum(val_acc_list[:]).item()/len(val_acc_list)
print("Validation Accuracy of model = {} %".format(val_acc))
!git clone https://github.com/GabrielePicco/deep-learning-flower-identifier
!pip install airtable
import sys
sys.path.insert(0, 'deep-learning-flower-identifier')
from test_model_pytorch_facebook_challenge import calc_accuracy
calc_accuracy(model, input_image_size=224, use_google_testset=False)
###Output
Downloading the dataset from: https://www.dropbox.com/s/da6ye9genbsdzbq/flower_data_original_test.zip?dl=1
|
doc/python/vi/3/old/3_ex1.ipynb | ###Markdown
Exercises on Lists
Objectives
* Create, display and manipulate a list
Create a list
Create a list called `fruits` that contains the following three strings:
- pomme
- poire
- cerise
**Reminder:** `une_liste = ['premier élément','second élément']`
###Code
pass
###Output
_____no_output_____
###Markdown
Display an element of the list
Display the second element of the list `fruits` with the `print()` function.
**Reminder:** the first element of the list is `une_liste[0]`
###Code
pass
###Output
_____no_output_____
###Markdown
Replace an element of the list
Replace the second element (poire) with `fraise`
###Code
pass
###Output
_____no_output_____
###Markdown
Display the second element using the `print()` function
###Code
pass
###Output
_____no_output_____ |
DAY 401 ~ 500/DAY499_[Programmers] 최소직사각형 (Python).ipynb | ###Markdown
Friday, 1 October 2021. Programmers - Weekly Challenge Week 8 - Minimum Rectangle (최소직사각형) (Python). Problem: https://programmers.co.kr/learn/courses/30/lessons/86491 Blog: https://somjang.tistory.com/entry/Programmers-%EC%9C%84%ED%81%B4%EB%A6%AC-%EC%B1%8C%EB%A6%B0%EC%A7%80-8%EC%A3%BC%EC%B0%A8-%EC%B5%9C%EC%86%8C%EC%A7%81%EC%82%AC%EA%B0%81%ED%98%95-Python Solution
###Code
def solution(sizes):
answer = 0
sizes = [sorted(size, reverse=True) for size in sizes]
widths = [size[0] for size in sizes]
heights = [size[1] for size in sizes]
width, height = max(widths), max(heights)
answer = width * height
return answer
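
# Added quick check on an illustrative input (a hypothetical example, not taken from the problem statement):
# after rotating each card so its longer side is the width, the max width is 80 and the max height is 50,
# so the smallest wallet is 80 * 50 = 4000.
print(solution([[60, 50], [30, 70], [60, 30], [80, 40]]))  # 4000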
###Output
_____no_output_____ |
Experiments/Random Pictures.ipynb | ###Markdown
im = cv2.resize(cv2.imread('/home/mckc/cat.jpg'), (224, 224)).astype(np.float32)
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)
out = model.predict(im)
print np.argmax(out)
im = im[0,:,:,:].transpose(1,2,0)
im[:,:,0] += 103.939
im[:,:,1] += 116.779
im[:,:,2] += 123.68
plt.imshow(im.astype(np.uint8))
print labels[np.argmax(out)]

im = cv2.resize(cv2.imread('/home/mckc/dog.jpg'), (224, 224)).astype(np.float32)
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)
out = model.predict(im)
print np.argmax(out)
im = im[0,:,:,:].transpose(1,2,0)
im[:,:,0] += 103.939
im[:,:,1] += 116.779
im[:,:,2] += 123.68
plt.imshow(im.astype(np.uint8))
print labels[np.argmax(out)]
###Code
model.fit(X,Y_train,verbose=1,validation_data=(X_ts,Y_test),nb_epoch=100,batch_size=3)
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD,RMSprop,Adadelta,Adagrad
def VGG_16(weights_path=None):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
if weights_path:
model.load_weights(weights_path)
return model
#model = VGG_16('/home/mckc/Downloads/vgg16_weights.h5')
model = VGG_16('/home/mckc/Face_code/face.h5')
#model.layers.pop()
#model.add(Dense(2, activation='softmax'))
adagrad = Adagrad(lr=0.001, epsilon=1e-08)
model.compile(optimizer=adagrad, loss='categorical_crossentropy',metrics=['accuracy'])
im = cv2.resize(cv2.imread('/home/mckc/Downloads/vgg_face_caffe/ak.png'), (224, 224)).astype(np.float32)
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)
out = model.predict(im)
print(np.argmax(out))
labels = np.loadtxt('/home/mckc/Downloads/vgg_face_caffe/names.txt', str, delimiter='\t')
im = im[0,:,:,:].transpose(1,2,0)
im[:,:,0] += 103.939
im[:,:,1] += 116.779
im[:,:,2] += 123.68
plt.imshow(im.astype(np.uint8))
print(labels[np.argmax(out)])
model.fit(X,Y_train,verbose=1,validation_data=(X_ts,Y_test),nb_epoch=150,batch_size=5)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(X, Y_train, batch_size=2),validation_data=(X_ts.astype(np.float16),Y_test)
,nb_worker=7,samples_per_epoch=len(X), nb_epoch=1000, pickle_safe=True)
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import cv2, numpy as np
def VGG_19(weights_path=None):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
if weights_path:
model.load_weights(weights_path)
return model
model = VGG_19('/home/mckc/Downloads/vgg19_weights.h5')
model.layers.pop()
model.add(Dense(2, activation='softmax'))
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
model.fit(X_tr.astype(np.float16),Y_train,verbose=1,validation_data=(X_ts.astype(np.float16),Y_test),nb_epoch=5000,batch_size=5)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(X_tr, Y_train, batch_size=10),validation_data=(X_ts.astype(np.float16),Y_test)
,nb_worker=7,samples_per_epoch=len(X_tr), nb_epoch=5000, pickle_safe=True)
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Dense, Input, Activation
from convnetskeras.convnets import convnet
alexnet = convnet('alexnet', weights_path='/home/mckc/Downloads/alexnet_weights.h5')
input = alexnet.input
img_representation = alexnet.get_layer("dense_2").output
classifier = Dense(7,name='classifier')(img_representation)
classifier = Activation("softmax", name="softmax")(classifier)
model = Model(input=input,output=classifier)
sgd = SGD(lr=.001, decay=1.e-6, momentum=0.9, nesterov=False)
model.compile(optimizer=sgd, loss='categorical_crossentropy',metrics=["accuracy"])
model = convnet('alexnet', weights_path='/home/mckc/Downloads/alexnet_weights.h5')
###Output
_____no_output_____ |
fine-tuning/Fine_tuning.ipynb | ###Markdown
###Code
!pip install transformers
!pip install wget
from transformers import AutoTokenizer, AutoModel
from transformers import BertTokenizer, BertModel, BertTokenizerFast
import torch
import tensorflow as tf
import os
###Output
_____no_output_____
###Markdown
Preparing GPU
###Code
# Get the GPU device name.
device_name = tf.test.gpu_device_name()
# The device name should look like the following:
if device_name == '/device:GPU:0':
print('Found GPU at: {}'.format(device_name))
else:
raise SystemError('GPU device not found')
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
!nvidia-smi
###Output
Thu Apr 1 14:08:05 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.67 Driver Version: 460.32.03 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |
| N/A 69C P0 32W / 70W | 222MiB / 15109MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
+-----------------------------------------------------------------------------+
###Markdown
Corpus preparation GENIA corpus
###Code
import wget
url = "http://www.nactem.ac.uk/GENIA/current/GENIA-corpus/Term/GENIAcorpus3.02.tgz"
if not os.path.exists('./GENIAcorpus3.02.tgz'):
wget.download(url,'./GENIAcorpus3.02.tgz')
if not os.path.exists('./GENIA_term_3.02'):
!tar -xvzf GENIAcorpus3.02.tgz
###Output
GENIA_term_3.02/
GENIA_term_3.02/gpml.css
GENIA_term_3.02/LICENSE
GENIA_term_3.02/GENIAcorpus3.02.xml
GENIA_term_3.02/gpml.dtd
GENIA_term_3.02/gpml.css.legend.html
GENIA_term_3.02/gpml.readme.html
###Markdown
Model preparation
###Code
tokenizer = AutoTokenizer.from_pretrained("microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext")
model = AutoModel.from_pretrained("microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext")
print('tokenizer type:', type(tokenizer))
print('model type:', type(model))
text1 = "Hydrogels are hydrophilic polymer networks which may absorb from " \
"10–20% (an arbitrary lower limit) up to thousands of times their " \
"dry weight in water."
text2 = "SARS-CoV-2 variants of concern (VOC) have arisen independently at multiple locations [1, 2]" \
"and may reduce the efficacy of current vaccines targeting the spike glycoprotein [3]. Here, using a live virus neutralization assay (LVNA)," \
" we compared neutralization of a non-VOC variant versus the 501Y.V2 variant using plasma collected from adults hospitalized with COVID-19" \
" from two South African infection waves, with the second wave dominated by 501Y.V2 infections."
tokens = tokenizer.tokenize(text2)
for token in tokens:
print(token + '\n')
###Output
sars
-
cov
-
2
variants
of
concern
(
voc
)
have
arisen
independently
at
multiple
locations
[
1
,
2
]
and
may
reduce
the
efficacy
of
current
vaccines
targeting
the
spike
glycoprotein
[
3
]
.
here
,
using
a
live
virus
neutralization
assay
(
lv
##na
)
,
we
compared
neutralization
of
a
non
-
voc
variant
versus
the
501
##y
.
v2
variant
using
plasma
collected
from
adults
hospitalized
with
cov
##id
-
19
from
two
south
african
infection
waves
,
with
the
second
wave
dominated
by
501
##y
.
v2
infections
.
|
notebooks/plot_ecco_argo.ipynb | ###Markdown
Plot Argo floats in Tropical Pacific used in ECCOv4r4
###Code
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import cartopy.crs as ccrs
###Output
_____no_output_____
###Markdown
Read in the datasetDo some renaming of the variables
###Code
def read_and_rename(fname, **kwargs):
""" Read in a dataset, rename variables, and restrict to our location
"""
xds = xr.open_dataset(fname, **kwargs)
# remove the 'prof_' prefix
for vname in xds.data_vars:
xds = xds.rename({vname:vname.replace('prof_','')})
# rename the "iPROF" dimension with profile
xds = xds.rename_dims({'iPROF':'profile'})
# swap the "iDEPTH" dimension with actual depth
xds = xds.swap_dims({'iDEPTH':'depth'})
# restrict to our lon/lat box
xds = xds.where( (xds.lon>140) | (xds.lon<-80), drop=True)
xds = xds.where( (xds.lat<15) & (xds.lat>-15), drop=True)
return xds
#ds1 = read_and_rename('/scratch2/shared/ecco/v4r4/profiles/ARGO_20190131_95to13_model.nc',
# chunks={'iPROF':10_000})
ds2 = read_and_rename('/scratch2/shared/ecco/v4r4/profiles/ARGO_20190131_14to17_model.nc',
chunks={'iPROF':50_000})
###Output
_____no_output_____
###Markdown
Convert the time units to something we can useAnd swap the `profile` dimension for this `time`, which is more intuitive and useful
###Code
def convert_time(xds):
""" Convert the strange time units provided into something useful
"""
# get year month day as array of 8 character string
ymd = xds['YYYYMMDD'].astype(int).astype(str).values
# Get hours, minutes, seconds as strings, padded with 0's in the front
# if there are less than 6 characters
hms = xds.HHMMSS.astype(int).astype(str).values
hms = [f'{x:>06s}' for x in hms]
# Now stick it together in a format that numpy.datetime64 recognizes
# then return it as an xarray dataarray
time = [f'{x[:4]}-{x[4:6]}-{x[6:]}T{y[:2]}:{y[2:4]}:{y[4:]}' for x,y in zip(ymd,hms)]
time = [np.datetime64(x) for x in time]
time = xr.DataArray(time,coords=xds['YYYYMMDD'].coords,dims=xds['HHMMSS'].dims)
return time
#ds1['time'] = convert_time(ds1)
ds2['time'] = convert_time(ds2)
###Output
_____no_output_____
###Markdown
Swap out the dims
###Code
#ds1 = ds1.swap_dims({'profile':'time'}).sortby('time')
ds2 = ds2.swap_dims({'profile':'time'}).sortby('time')
###Output
_____no_output_____
###Markdown
Restrict to the time period that we wantWhich is 1997-1998 and 2014-2016. I'll make two separate datasets, but at this stage we could merge the two datasets together now that they have an axis (time) along which it makes sense to concatenate them, e.g. with```pythonds = xr.concat([ds1,ds2],dim='time')```Note that positional slicing like `slice(1,3)` excludes the stop value, but xarray's label-based `.sel(time=slice(...))` used below includes both endpoints.
###Code
#ds1 = ds1.sel(time=slice('1997','1999-01-01'))
ds2 = ds2.sel(time=slice('2014','2017-01-01'))
#len(ds1.time)
###Output
_____no_output_____
###Markdown
There are no Argo floats during the first period, so we can skip this one. Make a plot of locations
###Code
%%time
ds2.lon.load();
%%time
ds2.lat.load();
###Output
CPU times: user 129 ms, sys: 27.4 ms, total: 157 ms
Wall time: 60.1 ms
###Markdown
Run through lon/lat pairs and get the unique ones. Additionally, round to nearest 10th of degree longitude/latitudeThis still leaves ~40k points, which is a plotting nightmare.
###Code
def get_unique_tuples(mylist):
from collections import Counter
# Create a list of sets indicating how many times this pair shows up
the_count = Counter(frozenset(x) for x in mylist)
# return the unique ones
return [x for x in mylist if the_count[frozenset(x)]==1]
locations = [(np.round(lo,decimals=1),np.round(la,decimals=1)) for lo,la in zip(ds2.lon.values,ds2.lat.values)]
unique_locs = get_unique_tuples(locations)
%%time
nrows = 1
fig,ax = plt.subplots(nrows,1,figsize=(12,5*nrows),
subplot_kw={'projection':ccrs.Robinson(-140)},
constrained_layout=True)
label = '2014-2016'
for lon,lat in unique_locs[:10_000]:
ax.scatter(lon,lat,marker='o',s=20,transform=ccrs.PlateCarree(),color='black')
ax.set_extent([100,-50,-60,60],crs=ccrs.PlateCarree())
ax.coastlines('50m')
ax.set_title(f'Argo Floats During {label}',fontsize=20)
fig.savefig('figures/eccov4r4_euc_argo.png',dpi=300,bbox_inches='tight')
###Output
CPU times: user 7min 22s, sys: 4.11 s, total: 7min 26s
Wall time: 7min 26s
|
Lectures/search_in_practice-approximate_nearest_neighbors/Exhaustive Search.ipynb | ###Markdown
Exhaustive Search
###Code
import faiss
class BruteForceIndex():
    def __init__(self, vectors, labels):
        self.vectors = vectors.astype('float32')
        self.labels = labels
        self.index = faiss.IndexFlatL2(self.vectors.shape[1])
    def build(self):
        # exhaustive (flat) L2 index: adding the vectors is all the "building" needed
        self.index.add(self.vectors)
    def query(self, vectors, k=10):
        distances, indices = self.index.search(vectors, k)
        return [self.labels[i] for i in indices[0]]
index = BruteForceIndex(data["vector"], data["name"])
index.build()
movie_vector, movie_name = data['vector'][90:91], data['name'][90]
simlar_movies_names = '\n* '.join(index.query(movie_vector))
print(f"The most similar movies to {movie_name} are:\n* {simlar_movies_names}")
###Output
The most similar movies to Nightmare Before Christmas, The (1993) are:
* Nightmare Before Christmas, The (1993)
* Heavy Metal (1981)
* Sirens (1994)
* Beauty and the Beast (1991)
* Akira (1988)
* Fantasia (1940)
* Benny & Joon (1993)
* Barbarella (1968)
* Pete's Dragon (1977)
* James and the Giant Peach (1996)
|
_downloads/5824d222ba0d77c706c6359245254779/plot_UOT_1D.ipynb | ###Markdown
1D Unbalanced optimal transportThis example illustrates the computation of Unbalanced Optimal transportusing a Kullback-Leibler relaxation.
###Code
# Author: Hicham Janati <[email protected]>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
from ot.datasets import make_1D_gauss as gauss
###Output
_____no_output_____
###Markdown
Generate data-------------
###Code
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
b = gauss(n, m=60, s=10)
# make distributions unbalanced
b *= 5.
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
###Output
_____no_output_____
###Markdown
Plot distributions and loss matrix----------------------------------
###Code
pl.figure(1, figsize=(6.4, 3))
pl.plot(x, a, 'b', label='Source distribution')
pl.plot(x, b, 'r', label='Target distribution')
pl.legend()
# plot distributions and loss matrix
pl.figure(2, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, M, 'Cost matrix M')
###Output
_____no_output_____
###Markdown
Solve Unbalanced Sinkhorn--------------
###Code
# Sinkhorn
epsilon = 0.1 # entropy parameter
alpha = 1. # Unbalanced KL relaxation parameter
Gs = ot.unbalanced.sinkhorn_unbalanced(a, b, M, epsilon, alpha, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gs, 'UOT matrix Sinkhorn')
pl.show()
###Output
_____no_output_____ |
Python Projects/Risk and Returns_ The Sharpe Ratio/notebook.ipynb | ###Markdown
1. Meet Professor William SharpeAn investment may make sense if we expect it to return more money than it costs. But returns are only part of the story because they are risky - there may be a range of possible outcomes. How does one compare different investments that may deliver similar results on average, but exhibit different levels of risks?Enter William Sharpe. He introduced the reward-to-variability ratio in 1966 that soon came to be called the Sharpe Ratio. It compares the expected returns for two investment opportunities and calculates the additional return per unit of risk an investor could obtain by choosing one over the other. In particular, it looks at the difference in returns for two investments and compares the average difference to the standard deviation (as a measure of risk) of this difference. A higher Sharpe ratio means that the reward will be higher for a given amount of risk. It is common to compare a specific opportunity against a benchmark that represents an entire category of investments.The Sharpe ratio has been one of the most popular risk/return measures in finance, not least because it's so simple to use. It also helped that Professor Sharpe won a Nobel Memorial Prize in Economics in 1990 for his work on the capital asset pricing model (CAPM).The Sharpe ratio is usually calculated for a portfolio and uses the risk-free interest rate as benchmark. We will simplify our example and use stocks instead of a portfolio. We will also use a stock index as benchmark rather than the risk-free interest rate because both are readily available at daily frequencies and we do not have to get into converting interest rates from annual to daily frequency. Just keep in mind that you would run the same calculation with portfolio returns and your risk-free rate of choice, e.g, the 3-month Treasury Bill Rate. So let's learn about the Sharpe ratio by calculating it for the stocks of the two tech giants Facebook and Amazon. As benchmark we'll use the S&P 500 that measures the performance of the 500 largest stocks in the US. When we use a stock index instead of the risk-free rate, the result is called the Information Ratio and is used to benchmark the return on active portfolio management because it tells you how much more return for a given unit of risk your portfolio manager earned relative to just putting your money into a low-cost index fund.
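As a compact preview of the computation carried out step by step below (the function and variable names here are illustrative only, not the ones defined later in the notebook):
```python
import numpy as np

def annualized_sharpe(stock_prices, benchmark_prices, periods_per_year=252):
    # both arguments are pandas Series of daily prices indexed by date
    excess = stock_prices.pct_change() - benchmark_prices.pct_change()
    # mean excess daily return per unit of risk, annualized with sqrt(252)
    return np.sqrt(periods_per_year) * excess.mean() / excess.std()
```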
###Code
# Importing required modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Settings to produce nice plots in a Jupyter notebook
plt.style.use('fivethirtyeight')
%matplotlib inline
# Reading in the data
stock_data = pd.read_csv("datasets/stock_data.csv",parse_dates=["Date"],index_col=["Date"]).dropna()
benchmark_data = pd.read_csv("datasets/benchmark_data.csv",parse_dates=["Date"],index_col=["Date"]).dropna()
###Output
DEBUG:matplotlib.pyplot:Loaded backend module://ipykernel.pylab.backend_inline version unknown.
###Markdown
2. A first glance at the dataLet's take a look the data to find out how many observations and variables we have at our disposal.
###Code
# Display summary for stock_data
print('Stocks\n')
print(stock_data.info())
print(stock_data.head())
# Display summary for benchmark_data
print('\nBenchmarks\n')
print(benchmark_data.info())
print(benchmark_data.head())
###Output
Stocks
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 252 entries, 2016-01-04 to 2016-12-30
Data columns (total 2 columns):
Amazon 252 non-null float64
Facebook 252 non-null float64
dtypes: float64(2)
memory usage: 5.9 KB
None
Amazon Facebook
Date
2016-01-04 636.989990 102.220001
2016-01-05 633.789978 102.730003
2016-01-06 632.650024 102.970001
2016-01-07 607.940002 97.919998
2016-01-08 607.049988 97.330002
Benchmarks
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 252 entries, 2016-01-04 to 2016-12-30
Data columns (total 1 columns):
S&P 500 252 non-null float64
dtypes: float64(1)
memory usage: 3.9 KB
None
S&P 500
Date
2016-01-04 2012.66
2016-01-05 2016.71
2016-01-06 1990.26
2016-01-07 1943.09
2016-01-08 1922.03
###Markdown
3. Plot & summarize daily prices for Amazon and FacebookBefore we compare an investment in either Facebook or Amazon with the index of the 500 largest companies in the US, let's visualize the data, so we better understand what we're dealing with.
###Code
# visualize the stock_data
stock_data.plot(subplots=True,title="Stock Data")
# summarize the stock_data
stock_data.describe()
###Output
_____no_output_____
###Markdown
4. Visualize & summarize daily values for the S&P 500Let's also take a closer look at the value of the S&P 500, our benchmark.
###Code
# plot the benchmark_data
benchmark_data.plot(title="S&P 500")
# summarize the benchmark_data
benchmark_data.describe()
###Output
_____no_output_____
###Markdown
5. The inputs for the Sharpe Ratio: Starting with Daily Stock ReturnsThe Sharpe Ratio uses the difference in returns between the two investment opportunities under consideration.However, our data show the historical value of each investment, not the return. To calculate the return, we need to calculate the percentage change in value from one day to the next. We'll also take a look at the summary statistics because these will become our inputs as we calculate the Sharpe Ratio. Can you already guess the result?
###Code
# calculate daily stock_data returns
stock_returns = stock_data.pct_change()
# plot the daily returns
stock_returns.plot()
# summarize the daily returns
stock_returns.describe()
###Output
_____no_output_____
###Markdown
6. Daily S&P 500 returnsFor the S&P 500, calculating daily returns works just the same way, we just need to make sure we select it as a Series using single brackets [] and not as a DataFrame to facilitate the calculations in the next step.
###Code
# calculate daily benchmark_data returns
series=benchmark_data["S&P 500"]
sp_returns = series.pct_change()
# plot the daily returns
sp_returns.plot(title="S&P 500 prices daily change")
# summarize the daily returns
sp_returns.describe()
###Output
_____no_output_____
###Markdown
7. Calculating Excess Returns for Amazon and Facebook vs. S&P 500Next, we need to calculate the relative performance of stocks vs. the S&P 500 benchmark. This is calculated as the difference in returns between stock_returns and sp_returns for each day.
###Code
# calculate the difference in daily returns
excess_returns =stock_returns.sub(sp_returns,axis=0)
# plot the excess_returns
excess_returns.plot(title="Excess Return")
# summarize the excess_returns
excess_returns.describe()
###Output
_____no_output_____
###Markdown
8. The Sharpe Ratio, Step 1: The Average Difference in Daily Returns Stocks vs S&P 500Now we can finally start computing the Sharpe Ratio. First we need to calculate the average of the excess_returns. This tells us how much more or less the investment yields per day compared to the benchmark.
###Code
# calculate the mean of excess_returns
avg_excess_return = excess_returns.mean()
# plot avg_excess_returns
avg_excess_return.plot.bar(title='Mean of the Return Difference');
###Output
_____no_output_____
###Markdown
9. The Sharpe Ratio, Step 2: Standard Deviation of the Return DifferenceIt looks like there was quite a bit of a difference between average daily returns for Amazon and Facebook.Next, we calculate the standard deviation of the excess_returns. This shows us the amount of risk an investment in the stocks implies as compared to an investment in the S&P 500.
###Code
# calculate the standard deviations
sd_excess_return = excess_returns.std()
# plot the standard deviations
sd_excess_return.plot.bar(title="Standard Deviation of the Return Difference");
###Output
_____no_output_____
###Markdown
10. Putting it all togetherNow we just need to compute the ratio of avg_excess_returns and sd_excess_returns. The result is now finally the Sharpe ratio and indicates how much more (or less) return the investment opportunity under consideration yields per unit of risk.The Sharpe Ratio is often annualized by multiplying it by the square root of the number of periods. We have used daily data as input, so we'll use the square root of the number of trading days (5 days, 52 weeks, minus a few holidays): √252
###Code
# calculate the daily sharpe ratio
daily_sharpe_ratio = avg_excess_return.div(sd_excess_return)
# annualize the sharpe ratio
annual_factor = np.sqrt(252)
annual_sharpe_ratio = daily_sharpe_ratio.mul(annual_factor)
# plot the annualized sharpe ratio
annual_sharpe_ratio.plot.bar(title="Annualized Sharpe Ratio: Stocks vs S&P 500")
###Output
_____no_output_____
###Markdown
11. ConclusionGiven the two Sharpe ratios, which investment should we go for? In 2016, Amazon had a Sharpe ratio twice as high as Facebook. This means that an investment in Amazon returned twice as much compared to the S&P 500 for each unit of risk an investor would have assumed. In other words, in risk-adjusted terms, the investment in Amazon would have been more attractive.This difference was mostly driven by differences in return rather than risk between Amazon and Facebook. The risk of choosing Amazon over FB (as measured by the standard deviation) was only slightly higher so that the higher Sharpe ratio for Amazon ends up higher mainly due to the higher average daily returns for Amazon. When faced with investment alternatives that offer both different returns and risks, the Sharpe Ratio helps to make a decision by adjusting the returns by the differences in risk and allows an investor to compare investment opportunities on equal terms, that is, on an 'apples-to-apples' basis.
###Code
# Uncomment your choice.
buy_amazon = True
# buy_facebook = True
###Output
_____no_output_____ |
work3.ipynb | ###Markdown
Selection Boolean types, numbers and expressions- Note: the equality comparison operator is two equals signs; a single equals sign means assignment- In Python the integer 0 can stand for False, and any other number for True- The use of `is` in conditional statements will be covered later
###Code
10 > 10
###Output
_____no_output_____
###Markdown
String comparison uses ASCII values
###Code
'a'>'b'
###Output
_____no_output_____
###Markdown
Markdown - https://github.com/younghz/Markdown EP:- Input a number and determine whether it is odd or even Generating random numbers- The function random.randint(a,b) produces a random integer between a and b, including both a and b Other random methods- random.random returns a random float in the half-open interval [0.0, 1.0)- random.randrange(a,b) is also half-open: it includes a but excludes b EP:- Generate two random integers number1 and number2, display them to the user, have the user enter their sum, and check whether it is correct- Advanced: write a random roll-call program (a sketch is given below)
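A minimal sketch of the roll-call idea mentioned above; the class list is made up for illustration.
```python
import random

students = ['student_1', 'student_2', 'student_3', 'student_4']
index = random.randrange(len(students))  # half-open: valid indices 0 .. len-1
print('Called on:', students[index])
```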
###Code
import random
num1 = random.randint(1,50)
num2 = random.randint(1,50)
he = num1 + num2
print(num1,num2)
while True:
sum_ = eval(input('>>'))
if sum_ == he:
print('正确')
break
else:
print('不正确,请重新计算')
###Output
18 22
>>30
不正确,请重新计算
>>40
正确
###Markdown
if statements- A one-way if statement executes its body only when the condition is true- Python has several selection statements:> - one-way if - two-way if-else - nested if - multi-way if-elif-else - Note: when a statement contains sub-statements, there must be at least one level of indentation, i.e. a child block must always be indented- Never mix tabs and spaces for indentation; use only tabs or only spaces- When a result must be printed regardless of whether the if condition is true, that statement should be aligned with the if EP:- The user enters a number; determine whether it is odd or even- Advanced: see Section 4.5, the birthday-guessing case study Two-way if-else statements- If the condition is true, the if branch is executed, otherwise the else branch is executed EP:- Generate two random integers number1 and number2, display them to the user, have the user enter a number, and check whether it is correct; print "you're correct" if it is, otherwise print the right answer Nested if and multi-way if-elif-else EP:- Prompt the user for a year and display the zodiac animal of that year- Write a program that computes the body mass index (a sketch is given below)- BMI = weight in kilograms divided by the square of the height in meters
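A minimal sketch of the BMI program described above; the input style follows this notebook, and the cut-off values are the common WHO categories.
```python
weight, height = eval(input('Enter weight (kg) and height (m), e.g. 70, 1.75: '))
bmi = weight / height ** 2
if bmi < 18.5:
    print('Underweight, BMI =', round(bmi, 1))
elif bmi < 25:
    print('Normal, BMI =', round(bmi, 1))
elif bmi < 30:
    print('Overweight, BMI =', round(bmi, 1))
else:
    print('Obese, BMI =', round(bmi, 1))
```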
###Code
birth = eval(input('>>'))
animals = birth % 12
if animals == 0:
print('猴')
elif animals == 1:
print('鸡')
elif animals == 2:
print('狗')
elif animals == 3:
print('猪')
elif animals == 4:
print('鼠')
elif animals == 5:
print('牛')
elif animals == 6:
print('虎')
elif animals == 7:
print('兔')
elif animals == 8:
print('龙')
elif animals == 9:
print('蛇')
elif animals == 10:
print('马')
else:
print('羊')
###Output
>>1997
牛
###Markdown
Logical operators EP:- Leap year: a year is a leap year if it is divisible by 4 but not by 100, or if it is divisible by 400- Prompt the user for a year and report whether it is a leap year- Prompt the user for a number and determine whether it is a narcissistic (Armstrong) number (a sketch is given below) Case study: the lottery
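A minimal sketch of the narcissistic-number check mentioned above, restricted to three-digit numbers (a three-digit narcissistic number equals the sum of the cubes of its digits, e.g. 153):
```python
num = eval(input('Enter a three-digit number: '))
hundreds = num // 100
tens = (num // 10) % 10
ones = num % 10
if hundreds ** 3 + tens ** 3 + ones ** 3 == num:
    print(num, 'is a narcissistic number')
else:
    print(num, 'is not a narcissistic number')
```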
###Code
import random
sui = random.randint(10,99)
print(sui)
num_ = eval(input('>>'))
shi_ = num_ // 10
ge_ = num_ % 10
shi = sui // 10
ge = sui % 10
if (shi_ == shi) and (ge_ == ge):
print('恭喜获得一等奖')
elif(shi_ == ge) and (ge_ == shi):
print('恭喜获得二等奖')
elif((shi_ == shi)and(shi_ != ge)and(ge_ != shi)and(ge_ != ge))or((shi_ != shi)and(shi_ == ge)and(ge_ != shi)and(ge_ != ge))or((shi_ != shi)and(shi_ != ge)and(ge_ == shi)and(ge_ != ge))or((shi_ != shi)and(shi_ != ge)and(ge_ != shi)and(ge_ == ge))or((shi_ != shi)and(shi_ == ge)and(ge_ != shi)and(ge_ == ge))or((shi_ == shi)and(shi_ != ge)and(ge_ == shi)and(ge_ != ge))or((shi_ == shi)and(shi_ == ge)and(ge_ != shi)and(ge_ != ge))or((shi_ != shi)and(shi_ != ge)and(ge_ == shi)and(ge_ == ge)):
print('恭喜获得三等奖')
else:
print('抱歉')
###Output
75
>>11
抱歉
###Markdown
Homework- 1
###Code
a,b,c = eval(input('请输入a,b,c的值'))
x1 = (-b + (b**2 - 4*a*c)**0.5)/(2*a)
x2 = (-b - (b**2 - 4*a*c)**0.5)/(2*a)
if (b**2 - 4*a*c) > 0:
print('方程有两个实根,分别为',round(x1,6),round(x2,5))
elif (b**2 - 4*a*c) == 0:
print('方程有一个实根',x1)
else:
print('方程没有实根')
###Output
请输入a,b,c的值1.0,3,1
方程有两个实根,分别为 -0.381966 -2.61803
###Markdown
- 2
###Code
import random
num1 = random.randint(1,99)
num2 = random.randint(1,99)
all = num1 + num2
print(num1,num2)
while True:
sum_ = eval(input('请输入两个随机数的和:'))
if sum_ == all:
print('真')
break
else:
print('假')
###Output
36 56
请输入两个随机数的和:50
假
请输入两个随机数的和:92
真
###Markdown
- 3
###Code
num = eval(input('Enter today is day:'))
num_ = eval(input('Enter the number of days elapsed since today:'))
if num == 0:
a = 'Sunday'
elif num == 1:
a = 'Monday'
elif num == 2:
    a = 'Tuesday'
elif num == 3:
a = 'Wednesday'
elif num == 4:
    a = 'Thursday'
elif num == 5:
a = 'Friday'
else:
a = 'Saturday'
if ((num + num_) % 7) == 0:
    b = 'Sunday'
elif ((num + num_) % 7) == 1:
    b = 'Monday'
elif ((num + num_) % 7) == 2:
    b = 'Tuesday'
elif ((num + num_) % 7) == 3:
    b = 'Wednesday'
elif ((num + num_) % 7) == 4:
    b = 'Thursday'
elif ((num + num_) % 7) == 5:
    b = 'Friday'
else:
b = 'Saturday'
print('Today is',a,'and the future is',b)
###Output
Enter today is day:0
Enter the number of days elapsed since today:31
Today is Sunday and the future is Wednesday
###Markdown
- 4
###Code
a,b,c = eval(input('请输入三个整数:'))
min_ = min(a,b,c)
max_ = max(a,b,c)
middle = a+b+c-(min_+max_)
print(min_,middle,max_)
###Output
请输入三个整数:5,8,2
2 5 8
###Markdown
- 5
###Code
w1,p1 = eval(input('Enter weight and price for package 1:'))
w2,p2 = eval(input('Enter weight and price for package 2:'))
jg1 = w1 / p1
jg2 = w2 / p2
if jg1 > jg2:
print('Package 1 has the better price')
elif jg1 < jg2:
print('Package 2 has the better price')
else:
print('same')
###Output
Enter weight and price for package 1:50,24.59
Enter weight and price for package 2:25,11.99
Package 2 has the better price
###Markdown
- 6
###Code
month,year = eval(input('请输入月份和年数:'))
if (month == 2) and (((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0)):
    print(year,'年的',month,'月有29天')
elif(month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12):
    print(year,'年的',month,'月有31天')
elif(month == 2):
print(year,'年的',month,'月有28天')
else:
print(year,'年的',month,'月有30天')
###Output
请输入月份和年数:2,2000
2000 年的 2 月有29天
###Markdown
- 7
###Code
import random
sum = random.randint(1,2)
test = eval(input('请输入一个猜测值:'))
if sum == test:
print('猜对了')
else:
print('猜错了')
###Output
请输入一个猜测值:2
猜对了
###Markdown
- 8
###Code
import random
num = eval(input('scissor(0),scissor(1),scissor(2):'))
num_ = random.randint(0,2)
if (num == 0 and num_ == 2)or(num == 1 and num_ == 0)or(num == 2 and num_ == 1):
print('you won')
elif num == num_:
print('same')
else:
    print('you lose')
###Output
scissor(0),scissor(1),scissor(2):1
same
###Markdown
- 9
###Code
year = eval(input('请输入年份:'))
month = eval(input('请输入月份:'))
day = eval(input('请输入日期:'))
year_ = year - 1
if month == 1:
x = (day+(26*(13+1)/10)//1+year_%100+(year_%100/4)//1+(((year_/100)//1)/4)//1+5*(year_/100)//1)%7
elif month == 2:
x = (day+(26*(14+1)/10)//1+year_%100+(year_%100/4)//1+(((year_/100)//1)/4)//1+5*(year_/100)//1)%7
else:
x = (day+(26*(month+1)/10)//1+year%100+(year%100/4)//1+(((year/100)//1)/4)//1+5*(year/100)//1)%7
if x == 0:
print('Saturday')
elif x == 1:
print('Sunday')
elif x == 2:
print('Monday')
elif x == 3:
print('Tuesday')
elif x == 4:
print('Wendesday')
elif x == 5:
print('Thursday')
elif x == 6:
print('Friday')
else:
pass
###Output
请输入年份:2012
请输入月份:5
请输入日期:12
Saturday
###Markdown
- 10
###Code
import random
x = random.randint(0,12)
y = random.randint(0,3)
xrr = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']
yrr = ['梅花','红桃','方块','黑桃']
print('您选的牌是',yrr[y],xrr[x])
###Output
您选的牌是 方块 4
###Markdown
- 11
###Code
num = eval(input('Enter a three-digit integer:'))
baiwei = num // 100
shiwei = (num //10) % 10
gewei = num % 10
if baiwei == gewei:
print(num,'is a palindrome')
else:
print(num,'not a palindrome')
###Output
Enter a three-digit integer:121
121 is a palindrome
###Markdown
- 12
###Code
a,b,c = eval(input('Enter three edges:'))
if (a+b>c) and (a+c>b) and (b+c>a):
print('The perimeter:',a+b+c)
else:
print('Erro!')
###Output
Enter three edges:1,1,1
The perimeter: 3
###Markdown
Config
###Code
class Config:
n_folds=10
random_state=42
tbs = 1024
vbs = 512
data_path="data"
result_path="results"
models_path="models"
###Output
_____no_output_____
###Markdown
plot and util
###Code
def write_to_txt(file_name,column):
with open(file_name, 'w') as f:
for item in column:
f.write("%s\n" % item)
###Output
_____no_output_____
###Markdown
Load data
###Code
train=pd.read_csv(os.path.join(Config.data_path,"train.csv"))
test=pd.read_csv(os.path.join(Config.data_path,"test.csv"))
aae=pd.read_csv(os.path.join(Config.data_path,"amino_acid_embeddings.csv"))
submission=pd.read_csv(os.path.join(Config.data_path,"SampleSubmission.csv"))
###Output
_____no_output_____
###Markdown
Prepare and split data
###Code
train["Sequence_len"]=train["Sequence"].apply(lambda x : len(x))
test["Sequence_len"]=test["Sequence"].apply(lambda x : len(x))
max_seq_length = 550 # max seq length in this data set is 550
#stratified k fold
train["folds"]=-1
kf = StratifiedKFold(n_splits=Config.n_folds, random_state=Config.random_state, shuffle=True)
for fold, (_, val_index) in enumerate(kf.split(train,train["target"])):
train.loc[val_index, "folds"] = fold
train.head()
# truncate sequences when a maximum length shorter than the dataset maximum (550) is requested
if max_seq_length < 550:
train["Sequence"] = train["Sequence"].apply(lambda x: "".join(list(x)[0:max_seq_length]))
test["Sequence"] = test["Sequence"].apply(lambda x: "".join(list(x)[0:max_seq_length]))
voc_set = set(['P', 'V', 'I', 'K', 'N', 'B', 'F', 'Y', 'E', 'W', 'R', 'D', 'X', 'S', 'C', 'U', 'Q', 'A', 'M', 'H', 'L', 'G', 'T'])
# sort the vocabulary so the token-to-id mapping is deterministic across runs
voc_set_map = { k:v for k , v in zip(sorted(voc_set),range(1,len(voc_set)+1))}
number_of_class = train["target"].nunique()
def encode(text_tensor, label):
encoded_text = [ voc_set_map[e] for e in list(text_tensor.numpy().decode())]
return encoded_text, label
def encode_map_fn(text, label):
# py_func doesn't set the shape of the returned tensors.
encoded_text, label = tf.py_function(encode,
inp=[text, label],
Tout=(tf.int64, tf.int64))
encoded_text.set_shape([None])
label=tf.one_hot(label,number_of_class)
label.set_shape([number_of_class])
return encoded_text, label
def get_data_loader(file,batch_size,labels):
label_data=tf.data.Dataset.from_tensor_slices(labels)
data_set=tf.data.TextLineDataset(file)
data_set=tf.data.Dataset.zip((data_set,label_data))
data_set=data_set.repeat()
data_set = data_set.shuffle(len(labels))
data_set=data_set.map(encode_map_fn,tf.data.experimental.AUTOTUNE)
data_set=data_set.padded_batch(batch_size)
data_set = data_set.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data_set
def get_data_loader_test(file,batch_size,labels):
label_data=tf.data.Dataset.from_tensor_slices(labels.target)
data_set=tf.data.TextLineDataset(file)
data_set=tf.data.Dataset.zip((data_set,label_data))
data_set=data_set.map(encode_map_fn,tf.data.experimental.AUTOTUNE)
data_set=data_set.padded_batch(batch_size)
data_set = data_set.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data_set
###Output
_____no_output_____
###Markdown
Model
###Code
def model():
name = "seq"
dropout_rate = 0.1
learning_rate = 0.001
sequnce = Input([None],name="sequnce")
EMB_layer = Embedding(input_dim = len(voc_set)+1, output_dim = 128, name = "emb_layer")
GRU_layer_2 = GRU(units=256, name = "gru_2", return_sequences = False)
BIDIR_layer_2 = Bidirectional(GRU_layer_2, name="bidirectional_2")
Dens_layer_1 = Dense(units=512, activation=relu, kernel_regularizer=None, bias_regularizer=None, name=name+"_dense_layer_1")
Dens_layer_2 = Dense(units=256, activation=relu, kernel_regularizer=None, bias_regularizer=None, name=name+"_dense_layer_2")
output = Dense(units=number_of_class, activation=softmax, kernel_regularizer=None, bias_regularizer=None, name=name+"_dense_layer_output")
dropout_1 = Dropout(dropout_rate)
emb_layer = EMB_layer(sequnce)
logits = output(Dens_layer_2(dropout_1(Dens_layer_1(BIDIR_layer_2(emb_layer)))))
model = tf.keras.Model(inputs={"sequnce":sequnce, },outputs=logits)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
loss= tfa.losses.SigmoidFocalCrossEntropy(reduction=tf.keras.losses.Reduction.AUTO)
#loss=CategoricalCrossentropy()
model.compile(optimizer=optimizer, loss=loss, metrics=[tf.keras.metrics.CategoricalAccuracy(name="Acc")])
model.summary()
return model
###Output
_____no_output_____
###Markdown
training
###Code
def trainn(fold):
model_path=f"model_{fold}.h5"
df_train = train[train["folds"] != fold].reset_index(drop=True)
df_valid = train[train["folds"] == fold].reset_index(drop=True)
write_to_txt(f"data/train_{fold}.txt",df_train.Sequence)
write_to_txt(f"data/valid_{fold}.txt",df_valid.Sequence)
train_label=df_train["target"]
valid_label=df_valid["target"]
train_dl = get_data_loader(f"data/train_{fold}.txt",Config.tbs,train_label)
valid_dl = get_data_loader(f"data/valid_{fold}.txt",Config.vbs,valid_label)
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(Config.models_path,model_path),
save_weights_only=True,monitor = 'val_loss',
save_best_only=True,mode="min", verbose=1)
callbacks=[checkpoint]
my_model = model()
history = my_model.fit(train_dl,
validation_data=valid_dl,
epochs=20,
verbose=1,
batch_size=Config.tbs,
validation_batch_size=Config.vbs,
validation_steps=len(df_valid)//Config.vbs,
steps_per_epoch=len(df_train)/Config.tbs,
callbacks=callbacks
)
def predict(fold):
model_path=f"model_{fold}.h5"
write_to_txt(f"data/test_{fold}.txt",test.Sequence)
test["target"]=0
test_label=test["target"]
test_dl = get_data_loader_test(f"data/test_{fold}.txt",Config.vbs,test)
my_model = model()
my_model.load_weights(os.path.join(Config.models_path,model_path))
prediction=my_model.predict(test_dl)
return prediction
trainn(3)
p=predict(3)
sub=test[["ID"]].copy()
for i in range(number_of_class):
sub["target_{}".format(i)]=p[:,i]
sub.head()
sub.to_csv(os.path.join(Config.result_path,"sub_p3_epoch20.csv"),index=False)
###Output
_____no_output_____ |
Homeworks/HW_3/190072E_HomeWork3.ipynb | ###Markdown
INDEX No: 190072E Name : E.M.D.A Bandara
###Code
%matplotlib inline
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
img = cv.imread('images/butterfly.jpg' , cv.IMREAD_REDUCED_GRAYSCALE_4)
assert img is not None
k_size = 9
sigma = 4
box_kernel = 1/81*np.ones((k_size,k_size))
im_avg = cv.filter2D(img, -1, box_kernel)
im_gaussian = cv.GaussianBlur(img, (k_size , k_size) , sigma)
fig, ax = plt.subplots( 1, 3, figsize = (18, 6))
ax[0].imshow(img, cmap = 'gray', vmin = 0, vmax = 255)
ax[0].set_title('Original')
ax[1].imshow(im_avg, cmap = 'gray', vmin = 0, vmax = 255)
ax[1].set_title('BoxFiltered')
ax[2].imshow(im_gaussian, cmap = 'gray', vmin = 0, vmax = 255)
ax[2].set_title('Gaussian Filtered')
plt.show()
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
step = 0.1
X = np.arange(-5, 5 + step, step)
Y = np.arange(-5, 5 + step, step)
XX, YY = np.meshgrid(X, Y)
sigma = 1
g = np.exp(-(XX**2 + YY**2)/(2*sigma**2))
surf = ax.plot_surface(XX, YY, g, cmap = cm.jet)
cset = ax.contourf(XX, YY, g, zdir = 'z', offset = np.min(g) - 1.5, cmap = cm.jet)
ax.set_zlim(np.min(g) -2, np.max(g))
plt.axis('off')
img = cv.imread('images/contact_lens.tif' , cv.IMREAD_GRAYSCALE).astype(np.float32)
assert img is not None
sobel_vertical = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float32)
sobel_horizontal = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32)
f_x = cv.filter2D(img, -1, sobel_vertical)
f_y = cv.filter2D(img, -1, sobel_horizontal)
grad_mag = np.sqrt(f_x**2 + f_y**2)
fig, ax = plt.subplots( 1, 4, figsize = (18, 6))
ax[0].imshow(img, cmap = 'gray', vmin = 0, vmax = 255)
ax[0].set_title('Original')
ax[1].imshow(f_x, cmap = 'gray', vmin = -1020, vmax = 1020)
ax[1].set_title('Sobel Vertical $f_x$')
ax[2].imshow(f_y, cmap = 'gray', vmin = -1020, vmax = 1020)
ax[2].set_title('Sobel Horizontal $f_y$')
ax[3].imshow(grad_mag, cmap = 'gray')
ax[3].set_title('Gradient Magnitude ')
for i in range(4):
ax[i].axis('off')
plt.show()
f= cv.imread('images/tom.jpg' , cv.IMREAD_GRAYSCALE).astype(np.float32)
assert f is not None
sigma = 2
gaussian_1d = cv.getGaussianKernel(5, sigma)
f_lp = cv.sepFilter2D(f, -1, gaussian_1d, gaussian_1d)
f_hp = f - f_lp
f_sharpened = cv.addWeighted(f, 1.0, f_hp, 2.0, 0)
fig, ax = plt.subplots( 1, 4, figsize = (18, 6))
ax[0].imshow(f, cmap = 'gray')
ax[0].set_title('Original')
ax[1].imshow(f_lp, cmap = 'gray')
ax[1].set_title('f_{lp}')
ax[2].imshow(f_hp, cmap = 'gray')
ax[2].set_title('f_{hp}')
ax[3].imshow(f_sharpened, cmap = 'gray')
ax[3].set_title('Sharpened')
for i in range(4):
ax[i].axis('off')
plt.show()
###Output
_____no_output_____ |
exercizes/notebooks/TSID_ex2.ipynb | ###Markdown
Task Space Inverse Dynamics In this notebook is presented a simple use of the TSID framework on a humanoid robot, Pyrene. Notations and DefinitionsThe robot system has a state $x$ and we denote the control inputs as $u$. The model depends on both the __robot__ and **task**. In our case the model of the robot actuators is torque based because the robot's motors provide torque sources. Therefore the control input $u$ are motor torques $\tau$. With the robot configuration $q$ and its velocity $\dot{q}$ we have the robot state $x$:$$ x \triangleq (q, \dot{q}) $$ Generic FeatureA feature $s$, can be an end-effector pose (such as a gripper), the robot center-of-mass, a visual feature position inside an image.We assume that a feature can be computed from the robot motor configuration vector $q$ to a set called $FS$ for Feature Space:$$s = f_{task}(q)$$It is also assumed that $f_{task}$ is $\mathcal{C}^1$ then:$$J = \displaystyle \frac{\partial f_{task}}{\partial q}$$When a desired feature $s^*$ is given, a task is defined as:$$ e: \mathcal{C} \times \mathbf{R} \rightarrow \mathbf{R}^m e = s \ominus s^*$$where $\ominus$ is the difference operator in the feature space.It is also required to define the dynamics of the task.Usually, when the robot is controlled in velocity, the dynamics is constrained to :$$ \dot{e} = - \lambda e $$When the robot is controlled in acceleration, the dynamics is the following:$$ \ddot{e} = K_P ( s \ominus s^*) + K_D ( \dot{s} - \dot{s}^*) + K_I \displaystyle \int_{j=0}^{j=k} ( s(j) \ominus s^*(j))dj $$with $K_P$ the error gain, $K_D$ the derivative of the error gain, and $K_I$ the integral gain.We can also see that:$$ \dot{e} = \dot{s} - \dot{s}^* = J \dot{q} - \dot{s}^*$$which also gives:$$\ddot{e} = J \ddot{q} + \dot{J} \dot{q} - \ddot{s}^* $$ Torque Control of Under-Actuated SystemsA humanoid robot is __under actuated__, its number of actuators is less than its number of degrees of freedom (DoFs). We denotes $n_{va}$ the number of actuators and $n_v$ the number of DoFs:$$ n_{va} < n_v $$Assume elements of $q$ are ordered, $q \triangleq (q_u, q_a)$, where:- $q_u \in \mathbb{R}^{n_{qu}} $ are the passive (unactuated) joints- $q_a \in \mathbb{R}^{n_{qa}} $ are the actuated jointsSimilarly, $\dot{q} \triangleq (\dot{q}_u, \dot{q}_a)$, where $\dot{q}_u \in \mathbb{R}^{n_{vu}}$ and $\dot{q}_a \in \mathbb{R}^{n_{va}}$.$ S \triangleq [\ 0_{n_{va} \ \times \ n_{vu}} \ \ I_{n_{va}}] $ is a selection matrix associated to the actuated joints:$$ \dot{q}_a = S \ \dot{q} $$The dynamic of an under-actuated mechanical system is:$$ M(q) \ \ddot{q} \ + \ h(q, \dot{q}) \ = \ S^T \tau \ + \ J(q)^T f $$where $M(q) \in \mathbb{R}^{n_v × n_v}$ is the mass matrix, $h(q,v_q) \in \mathbb{R}^{n_v}$ are the bias forces, $\tau \in \mathbb{R}^{n_{va}}$ are the joint torques, $f \in \mathbb{R}^{n_f}$ are the contact forces, and $J(q) \in \mathbb{R}^{n_f×n_v}$ is the contact Jacobian.This dynamic is often decomposed into unactuated and actuated parts:\begin{array}{r c r c l} M_u(q) \ \ddot{q} & + & h_u(q, \dot{q}) & = & J_u(q)^T f \\ M_a(q) \ \ddot{q} & + & h_a(q, \dot{q}) & = & \tau \ + \ J_a(q)^T f \end{array}Where\begin{array}{r c l} M & = & \begin{bmatrix} M_u \\ M_a\end{bmatrix} \\ h & = & \begin{bmatrix} h_u \\ h_a\end{bmatrix} \\ J & = & [J_u \ \ J_a]\end{array} QP Optimisation problem with acceleration and torque The transcription of the motion reference from the task space to the whole-body control is naturally written as a quadratic program (QP). 
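As a small numerical illustration of the PD task dynamics written above (plain NumPy, independent of the TSID API; all gains and feature values below are made up, and the sign convention is chosen so that the error is driven towards zero):
```python
import numpy as np

K_P = 20.0 * np.ones(3)                  # proportional gains
K_D = 2.0 * np.sqrt(20.0) * np.ones(3)   # derivative gains
s      = np.array([0.00, 0.00, 0.85])    # current feature, e.g. CoM position
s_ref  = np.array([0.00, 0.05, 0.85])    # desired feature s*
ds     = np.zeros(3)                     # current feature velocity
ds_ref = np.zeros(3)                     # desired feature velocity

# desired task acceleration imposing second-order error dynamics;
# the QP then tries to realize it through  J*ddq + dJ*dq = ddot_e_des
ddot_e_des = K_P * (s_ref - s) + K_D * (ds_ref - ds)
print(ddot_e_des)
```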
A QP is composed of two layers, namely the constraint and the cost. It can be seen as a hierarchy of two levels, the constraint having priority over the cost. Inequalities can also be taken into account directly, as constraints, or in the cost function.Let us simplify the motion equation based on the rigid body dynamics when there is no contacts: $$M \ddot{q} + h = S\tau $$If we have a task $e$ regulating a feature $s$, the dynamic of the task can be imposed, for instance as an exponential decay:$$ e = s - s^*$$$$ \dot{e} = - \lambda \, e $$We can then introduce a slack variable $w$ (an implicit optimization variable) to add some freedom to the solver if no solution can be found: $$\dot{e} - \lambda \, (s - s^*) = w $$A simple formulation of the QP problem can then be expressed as:$$ \underset{\ddot{q},\tau}{\min} \quad \| w \|^2 \\ \textrm{s.t.} \quad M \ddot{q} + h = S\tau $$If the system is in contact with environment, its dynamic must account for contact forces $f_{ext}$. If contacts are soft, measured/estimated contact forces $\hat{f}_{ext}$ can be easily included:$$ \underset{\ddot{q},\tau}{\min} \quad \| w \|^2 \\ \textrm{s.t.} \quad M \ddot{q} + h = S\tau + J_c^T \hat{f}_{ext} $$But if contacts are rigid, they constrain the motion. They are usually modelled as nonlinear functions, which are differentiated twice:$$ c(q) = 0 \, \Leftrightarrow \text{Contact point do not move}$$$$ J_c^T \, \dot{q} = 0 \, \Leftrightarrow \text{Contact point velocities are null}$$$$ J_c^T \, \ddot{q} + \dot{J_c} \, \dot{q} = 0 \, \Leftrightarrow \text{Contact point accelerations are null}$$This leads to the following optimization problem:$$ \underset{\ddot{q},\tau}{\min} \quad \| w \|^2 \\ \textrm{s.t.} \quad \Bigg[ \begin{array}{lll} J_c & 0 & 0 \\ M & -J_c^T & -S^T \end{array} \Bigg] \, \Bigg[\begin{array}{l} \ddot{q} \\ f \\ \tau \end{array} \Bigg] \, = \, \Bigg[\begin{array}{l} - \dot{J_c} \, \dot{q} \\ -h \end{array} \Bigg]$$The main benefit of QP solver is that they can handle inequality-constraints. It is mainly used to defined boundaries of the system such as torque, velocity or joint limits; and also friction cones for the contacts. Weighted SumComplex robots are typically redundant with respect to the main task they must perform, this redundancy can be used to execute secondary tasks. This multi-objective optimization can be achieved by setting respective weights between the tasks (strategy used in TSID), or by imposing a strict hierarchy between them (cascade of QP or HQP).Assume robot must perform N tasks, each defined by a task function and its optimization variable $w_i$:$$ g_i = \| w_i \|^2 $$The easiest strategy is to sum all functions using user-defined weights $\lambda_i$:$$ \underset{\ddot{q},\tau}{\min} \quad \displaystyle \sum_{i=0}^N{\lambda_i \, g_i} \\ \textrm{s.t.} \quad \Bigg[ \begin{array}{lll} J_c & 0 & 0 \\ M & -J_c^T & -S^T \end{array} \Bigg] \, \Bigg[\begin{array}{l} \ddot{q} \\ f \\ \tau \end{array} \Bigg] \, = \, \Bigg[\begin{array}{l} - \dot{J_c} \, \dot{q} \\ -h \end{array} \Bigg]$$This problem remains standard computationally-efficient (compared to Least Square Programming). But, finding proper weights is hard and too extreme weights can lead to numerical issues. 
An example: CoM Sinusoidal trajectory on the robot TALOSThe goal of this exercise is to create a set of tasks allowing the control of the robot CoM position.As a second approach, the tracking of a sinusoidal trajectory by the CoM is presented.In the following is described the process to create, initialise and solve the HQP problem defined by the tasks to control the CoM of the robot Pyrene.
###Code
import sys
def enable_global_distpackages():
sys.path.append('/usr/lib/python2.7/dist-packages')
sys.path.append('/usr/local/lib/python2.7/dist-packages')
# Python import needed by the exercise
import matplotlib.pyplot as plt
import numpy as np
import numpy.matlib as matlib
from numpy import nan
from numpy.linalg import norm as norm
import os
import time as tmp
# import the library TSID for the Whole-Body Controller
import tsid
# import the pinocchio library for the mathematical methods (Lie algebra) and multi-body dynamics computations.
import pinocchio as pin
enable_global_distpackages()
sys.path.append('..')
# import graphical tools
import plot_utils as plut
import gepetto.corbaserver
# Definition of the tasks gains and weights and the foot geometry (for contact task)
lxp = 0.1 # foot length in positive x direction
lxn = 0.11 # foot length in negative x direction
lyp = 0.069 # foot length in positive y direction
lyn = 0.069 # foot length in negative y direction
lz = 0.107 # foot sole height with respect to ankle joint
mu = 0.3 # friction coefficient
fMin = 1.0 # minimum normal force
fMax = 1000.0 # maximum normal force
rf_frame_name = "leg_right_6_joint" # right foot joint name
lf_frame_name = "leg_left_6_joint" # left foot joint name
contactNormal = np.matrix([0., 0., 1.]).T # direction of the normal to the contact surface
w_com = 1.0 # weight of center of mass task
w_posture = 0.1 # weight of joint posture task
w_forceRef = 1e-3 # weight of force regularization task
w_waist = 1.0 # weight of waist task
kp_contact = 30.0 # proportional gain of contact constraint
kp_com = 20.0 # proportional gain of center of mass task
kp_waist = 500.0 # proportional gain of waist task
kp_posture = np.matrix( # proportional gain of joint posture task
[ 10. , 5. , 5. , 1. , 1. , 10., #lleg #low gain on axis along y and knee
10. , 5. , 5. , 1. , 1. , 10., #rleg
500. , 500. , #chest
50., 10. , 10., 10., 10. , 10. , 10. , 10. , #larm
50., 10. , 10., 10., 10. , 10. , 10. , 10. , #rarm
100., 100.] #head
).T
dt = 0.001 # controller time step
PRINT_N = 500 # print every PRINT_N time steps
DISPLAY_N = 20 # update robot configuration in viwewer every DISPLAY_N time steps
N_SIMULATION = 10000 # number of time steps simulated
# Set the path where the urdf file of the robot is registered
path = "/opt/openrobots/share"
urdf = path + '/talos_data/urdf/talos_reduced.urdf'
vector = pin.StdVec_StdString()
vector.extend(item for item in path)
# Create the robot wrapper from the urdf, it will give the model of the robot and its data
robot = tsid.RobotWrapper(urdf, vector, pin.JointModelFreeFlyer(), False)
srdf = path + '/talos_data/srdf/talos.srdf'
# Creation of the robot wrapper for gepetto viewer (graphical interface)
robot_display = pin.RobotWrapper.BuildFromURDF(urdf, [path, ], pin.JointModelFreeFlyer())
tmp.sleep(1)
cl = gepetto.corbaserver.Client()
gui = cl.gui
robot_display.initViewer(loadModel=True)
# Take the model of the robot and load its reference configurations
model = robot.model()
pin.loadReferenceConfigurations(model, srdf, False)
# Set the current configuration q to the robot configuration half_sitting
q = model.referenceConfigurations['half_sitting']
# Set the current velocity to zero
v = np.matrix(np.zeros(robot.nv)).T
# Display the robot in Gepetto Viewer in the configuration q = halfSitting
robot_display.displayCollisions(False)
robot_display.displayVisuals(True)
robot_display.display(q)
# Check that the frames of the feet exist.
assert model.existFrame(rf_frame_name)
assert model.existFrame(lf_frame_name)
t = 0.0 # time
# Creation of the inverse dynamics HQP problem using
# the robot accelerations (base + joints) and the contact forces as decision variables
# As presented in the cell on QP optimisation
invdyn = tsid.InverseDynamicsFormulationAccForce("tsid", robot, False)
# Compute the problem data with a solver based on EiQuadProg: a modified version of uQuadProg++ working with Eigen
invdyn.computeProblemData(t, q, v)
# Get the data -> initial data
data = invdyn.data()
###Output
_____no_output_____
###Markdown
Tasks Definitions A **Task** is a control objective for the robot, which is used at each control cycle to generate a **Constraint**. Note that constraints are purely mathematical objects that are independent of the concept of robot, while Tasks are instead robot-related objects.A **Constraint** is a linear equality or inequality.A QP is composed of two layers, namely the constraint and the cost. It can be seen as a hierarchy of two levels, the constraints having priority over the cost. In TSID the HQP is defined as a collection of constraints with different priority levels and weights.There are three kind of constraints defined in TSID:- Equalities, represented by matrix $A$ and vector $a$: $$ Ax = a $$ - Inequalities, represented by matrix $A$ and vectors $lb$ and $ub$: $$ lb ≤ Ax ≤ ub $$- Bounds, represented by vectors $lb$ and $ub$: $$ lb ≤ x ≤ ub $$ There are three kind of Task in TSID:- **TaskMotion**: computes a constraint that is a linear function of the robot accelerations- **TaskContactForce**: computes a constraint that is a linear function of the contact forces- **TaskActuation**: computes a constraint that is a linear function of the joint torquesTasks can compute either equality constraints, or bounds, or inequality constraints. Examples of **TaskMotion**:- **TaskComEquality**: computes an equality constraint to specify a desired acceleration of the center of mass (CoM) of the robot. - **TaskJointPosture**: computes an equality constraint to specify the desired joint accelerations.- **TaskSE3Equality**: computes an equality constraint to specify the desired acceleration for a frame attached to one of the links of the robot. - **TaskJointBounds**: computes a bounds constraint to specify the joint acceleration bounds in order to satisfy the joint position/velocity/acceleration limits. In this exercise, for the sinusoidal movement of the CoM, we need 3 task motions: - **TaskComEquality** as **constraint of the control** (priority level = 0) to make the CoM follow a sinusoidal trajectory. It is the most important task so has a weight of 1 (in constraint scope). - **TaskSE3Equality** in the **cost function** (priority level = 1) for the waist of the robot, to maintain its orientation (with a reference trajectory). It is an important task so has a weight of 1 (in cost function scope). - **TaskJointPosture** in the **cost function** (priority level = 1) for the posture of the robot, to maintain it to half-sitting as much as possible (with a reference trajectory). It is the less important task so has a weight of 0.1 (in cost function scope).
###Code
# COM Task
comTask = tsid.TaskComEquality("task-com", robot)
comTask.setKp(kp_com * matlib.ones(3).T) # Proportional gain defined before = 20
comTask.setKd(2.0 * np.sqrt(kp_com) * matlib.ones(3).T) # Derivative gain = 2 * sqrt(20)
# Add the task to the HQP with weight = 1.0, priority level = 0 (as real constraint) and a transition duration = 0.0
invdyn.addMotionTask(comTask, w_com, 0, 0.0)
# WAIST Task
waistTask = tsid.TaskSE3Equality("keepWaist", robot, 'root_joint') # waist -> root_joint
waistTask.setKp(kp_waist * matlib.ones(6).T) # Proportional gain defined before = 500
waistTask.setKd(2.0 * np.sqrt(kp_waist) * matlib.ones(6).T) # Derivative gain = 2 * sqrt(500)
# Add a Mask to the task which will select the vector dimensions on which the task will act.
# In this case the waist configuration is a vector 6d (position and orientation -> SE3)
# Here we set a mask = [0 0 0 1 1 1] so the task on the waist will act on the orientation of the robot
mask = matlib.ones(6).T
mask[:3] = 0.
waistTask.setMask(mask)
# Add the task to the HQP with weight = 1.0, priority level = 1 (in the cost function) and a transition duration = 0.0
invdyn.addMotionTask(waistTask, w_waist, 1, 0.0)
# POSTURE Task
postureTask = tsid.TaskJointPosture("task-posture", robot)
postureTask.setKp(kp_posture) # Proportional gain defined before (different for each joints)
postureTask.setKd(2.0 * kp_posture) # Derivative gain = 2 * kp
# Add the task to the HQP with weight = 0.1, priority level = 1 (in the cost function) and a transition duration = 0.0
invdyn.addMotionTask(postureTask, w_posture, 1, 0.0)
###Output
_____no_output_____
###Markdown
Rigid Contacts Definitions

A **Rigid Contact** is a description of a rigid contact between a body of the robot and another object. The main difference between a task and a rigid contact is that a rigid contact is associated with reaction forces, while a task is not. This class makes it possible to use different representations for the motion space and the force space:
- Motion Task
  - Represents the motion constraint (equality) caused by the rigid contact
  - $ J_c \, \ddot{q} \, = \, - \dot{J_c} \, \dot{q} \, - \, K_p \ e - \, K_d \, \dot{e}$
- Force Task
  - Represents inequality constraints acting on the contact forces
  - e.g., friction cone constraints: $A \, f \, ≤ \, a$
- Force Regularization Task
  - Regularizes the contact forces
  - e.g., keeps them close to the friction cone center
- Force-Generator matrix $T$
  - Maps the force variables to the motion constraint representation
  - Dynamics: $ M \ \ddot{q} \ + \ h \ = \ S^T \tau \ + \ J^T T f $
  - Motion constraint: $ J_c \, \ddot{q} \, = \, - \dot{J_c} \, \dot{q}$
  - Friction cones: $A \, f \, ≤ \, a$

Contact 6d

In the case of a unilateral plane contact (with polygonal shape), the motion constraint is 6d, because the body in contact cannot move in any direction.

PROBLEM

The minimal force representation would be 6d (3d force + 3d moment):
- It is hard to write friction constraints with a 6d force representation (especially for non-rectangular contact shapes)
- A solution would be to represent the reaction force as a collection of 3d forces applied at the vertices of the contact surface (writing friction constraints is then easy)
  - But it leads to a redundant representation, e.g., a 4-vertex surface → 12 variables
  - The redundancy is an issue for the motion constraint if the HQP solver does not handle redundant constraints (such as eiQuadProg).

SOLUTION
- Use the 6d representation for the motion constraint $ J_c \, \ddot{q} \, = \, - \dot{J_c} \, \dot{q} \, \in \mathbf{R}^6$
- But use the 12d representation for the force variable $f \in \mathbf{R}^{12}$
- A force-generator matrix $T \in \mathbf{R}^{6 \times 12}$ defines the mapping between the two representations: $\tau_{c} \, = J_c^T \, T \, f$

In this exercise, we need two Rigid Contacts, one for each foot, as **Contact6d**. The Rigid Contacts are always defined as **constraints of the control** (priority level = 0) to keep the robot in contact. However, they are less important than the CoM task and thus have a weight of 1e-3 (in constraint scope). A small numerical sketch of how such a matrix $T$ can be assembled is given below.
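To make the mapping concrete, here is a small standalone sketch of how a force-generator matrix can be built for a 4-vertex contact (the vertex coordinates are illustrative values, and the vertex forces are assumed to be expressed in the same frame as the resulting wrench):

```python
import numpy as np

def skew(p):
    # Cross-product matrix, so that skew(p) @ f == np.cross(p, f)
    return np.array([[0.0, -p[2], p[1]],
                     [p[2], 0.0, -p[0]],
                     [-p[1], p[0], 0.0]])

# Four contact vertices of a rectangular foot sole (illustrative values, in the contact frame)
vertices = [np.array([ 0.10,  0.05, 0.0]),
            np.array([ 0.10, -0.05, 0.0]),
            np.array([-0.10,  0.05, 0.0]),
            np.array([-0.10, -0.05, 0.0])]

# T maps the stacked 12d vertex forces f to a 6d wrench [force; moment]
T = np.hstack([np.vstack([np.eye(3), skew(p)]) for p in vertices])
print(T.shape)  # (6, 12)
```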
###Code
# CONTACTS 6D
# Definition of the foot geometry with respect to the ankle joints (which are the ones controlled)
contact_Point = np.matrix(np.ones((3,4)) * lz)
contact_Point[0, :] = [-lxn, -lxn, lxp, lxp]
contact_Point[1, :] = [-lyn, lyp, -lyn, lyp]
# The feet are the only bodies in contact in this experiment and their geometry define the plane of contact
# between the robot and the environement -> it is a Contact6D
# To define a contact6D :
# We need the surface of contact (contact_Point), the normal vector of contact (contactNormal along the z-axis)
# the friction parameter with the ground (mu = 0.3), the normal force bounds (fMin =1.0, fMax=1000.0)
# Right Foot
contactRF = tsid.Contact6d("contact_rfoot", robot, rf_frame_name, contact_Point, contactNormal, mu, fMin, fMax)
contactRF.setKp(kp_contact * matlib.ones(6).T) # Proportional gain defined before = 30
contactRF.setKd(2.0 * np.sqrt(kp_contact) * matlib.ones(6).T) # Derivative gain = 2 * sqrt(30)
# Reference position of the right ankle -> initial position
H_rf_ref = robot.position(data, model.getJointId(rf_frame_name))
contactRF.setReference(H_rf_ref)
# Add the contact to the HQP with weight = 1e-3 (w_forceRef) and priority level = 0 (as real constraint)
invdyn.addRigidContact(contactRF, w_forceRef)
# Left Foot
contactLF = tsid.Contact6d("contact_lfoot", robot, lf_frame_name, contact_Point, contactNormal, mu, fMin, fMax)
contactLF.setKp(kp_contact * matlib.ones(6).T) # Proportional gain defined before = 30
contactLF.setKd(2.0 * np.sqrt(kp_contact) * matlib.ones(6).T) # Derivative gain = 2 * sqrt(30)
# Reference position of the left ankle -> initial position
H_lf_ref = robot.position(data, model.getJointId(lf_frame_name))
contactLF.setReference(H_lf_ref)
# Add the contact to the HQP with weight = 1e-3 (w_forceRef) and priority level = 0 (as real constraint)
invdyn.addRigidContact(contactLF, w_forceRef)
###Output
_____no_output_____
###Markdown
TSID Trajectory A **Trajectory** is a multi-dimensional function of time describing the motion of an object and its time derivatives. For standard use in control, the method *computeNext* is provided, which computes the value of the trajectory at the next time step. In the example, we need to set 3 trajectories, one for each task. These trajectories will give at each time step the desired position, velocity and acceleration of the different tasks (CoM, posture and waist). In our case, the posture and the waist references will be constant, equal to their initial values. For the CoM, however, we will update the trajectory with a sinusoidal signal at each time step.
###Code
# Set the reference trajectory of the tasks
com_ref = data.com[0] # Initial value of the CoM
trajCom = tsid.TrajectoryEuclidianConstant("traj_com", com_ref)
sampleCom = trajCom.computeNext() # Compute the first step of the trajectory from the initial value
q_ref = q[7:] # Initial value of the joints of the robot (in halfSitting position without the freeFlyer (6 first values))
trajPosture = tsid.TrajectoryEuclidianConstant("traj_joint", q_ref)
waist_ref = robot.position(data, model.getJointId('root_joint')) # Initial value of the waist (root_joint)
# Here the waist is defined as a 6d vector (position + orientation) so it is in the SE3 group (Lie group)
# Thus, the trajectory is not Euclidian but remains in the SE3 domain -> TrajectorySE3Constant
trajWaist = tsid.TrajectorySE3Constant("traj_waist", waist_ref)
# Initialisation of the Solver
# Use EiquadprogFast: dynamic matrix sizes (memory allocation performed only when resizing)
solver = tsid.SolverHQuadProgFast("qp solver")
# Resize the solver to fit the number of variables, equality and inequality constraints
solver.resize(invdyn.nVar, invdyn.nEq, invdyn.nIn)
# Initialisation of the plot variables which will be updated during the simulation loop
# These variables describe the behavior of the CoM of the robot (reference and real position, velocity and acceleration)
com_pos = np.zeros((3, N_SIMULATION))*nan
com_vel = np.zeros((3, N_SIMULATION))*nan
com_acc = np.zeros((3, N_SIMULATION))*nan
com_pos_ref = np.zeros((3, N_SIMULATION))*nan
com_vel_ref = np.zeros((3, N_SIMULATION))*nan
com_acc_ref = np.zeros((3, N_SIMULATION))*nan
com_acc_des = np.zeros((3, N_SIMULATION))*nan
# Parameters of the sinusoid
offset = robot.com(data) # offset: measured initial CoM position
amp = np.array([0.0, 0.05, 0.0]).T # amplitude of 0.05 m along the y axis
two_pi_f = 2*np.pi*np.array([0.0, 0.5, 0.0]).T # 2*pi*f, with a frequency of 0.5 Hz along the y axis
two_pi_f_amp = np.multiply(two_pi_f,amp) # (2*pi*f) * amplitude
two_pi_f_squared_amp = np.multiply(two_pi_f, two_pi_f_amp) # (2*pi*f)^2 * amplitude
# Simulation loop
# At each time step compute the next desired positions of the tasks
# Set them as new references for each tasks
# The CoM trajectory is set with the sinusoid parameters:
# a sine for the position, a cosine (derivative of sine) for the velocity
# and a -sine (derivative of cosine) for the acceleration
# Compute the new problem data (HQP problem update)
# Solve the problem with the solver
# Get the forces and the accelerations computed by the solver
# Update the plot variables of the CoM
# Print the forces applied at each feet
# Print the tracking error of the CoM task and the norm of the velocity and acceleration needed to follow the
# reference trajectory
# Integrate the control (which is in acceleration and is given to the robot in position):
# One simple euler integration from acceleration to velocity
# One integration (velocity to position) with pinocchio to have the freeFlyer updated
# Display the result on the gepetto viewer
for i in range(0, N_SIMULATION):
time_start = tmp.time()
sampleCom.pos(offset + np.multiply(amp, matlib.sin(two_pi_f*t)))
sampleCom.vel(np.multiply(two_pi_f_amp, matlib.cos(two_pi_f*t)))
sampleCom.acc(np.multiply(two_pi_f_squared_amp, -matlib.sin(two_pi_f*t)))
comTask.setReference(sampleCom)
sampleWaist = trajWaist.computeNext()
waistTask.setReference(sampleWaist)
samplePosture = trajPosture.computeNext()
postureTask.setReference(samplePosture)
HQPData = invdyn.computeProblemData(t, q, v)
# if i == 0: HQPData.print_all()
sol = solver.solve(HQPData)
if(sol.status!=0):
print ("QP problem could not be solved! Error code:", sol.status)
break
tau = invdyn.getActuatorForces(sol)
dv = invdyn.getAccelerations(sol)
com_pos[:,i] = robot.com(invdyn.data())
com_vel[:,i] = robot.com_vel(invdyn.data())
com_acc[:,i] = comTask.getAcceleration(dv)
com_pos_ref[:,i] = sampleCom.pos()
com_vel_ref[:,i] = sampleCom.vel()
com_acc_ref[:,i] = sampleCom.acc()
com_acc_des[:,i] = comTask.getDesiredAcceleration
if i%PRINT_N == 0:
print ("Time %.3f"%(t))
if invdyn.checkContact(contactRF.name, sol):
f = invdyn.getContactForce(contactRF.name, sol)
print ("\tnormal force %s: %.1f"%(contactRF.name.ljust(20,'.'),contactRF.getNormalForce(f)))
if invdyn.checkContact(contactLF.name, sol):
f = invdyn.getContactForce(contactLF.name, sol)
print ("\tnormal force %s: %.1f"%(contactLF.name.ljust(20,'.'),contactLF.getNormalForce(f)))
print ("\ttracking err %s: %.3f"%(comTask.name.ljust(20,'.'), norm(comTask.position_error, 2)))
print ("\t||v||: %.3f\t ||dv||: %.3f"%(norm(v, 2), norm(dv)))
v_mean = v + 0.5*np.resize(dt*dv, np.shape(v))
v += np.resize(dt*dv, np.shape(v))
q = pin.integrate(model, q, dt*v_mean)
t += dt
if i%DISPLAY_N == 0: robot_display.display(q)
time_spent = tmp.time() - time_start
if(time_spent < dt): tmp.sleep(dt-time_spent)
# PLOT the result
time = np.arange(0.0, N_SIMULATION*dt, dt)
# Position tracking of the CoM along the x,y,z axis
(f, ax) = plut.create_empty_figure(3, 1, figsize=(10,10))
for i in range(3):
ax[i].plot(time, com_pos[i,:], label='CoM '+str(i))
ax[i].plot(time, com_pos_ref[i,:], 'r:', label='CoM Ref '+str(i))
ax[i].set_xlabel('Time [s]')
ax[i].set_ylabel('CoM [m]')
leg = ax[i].legend()
leg.get_frame().set_alpha(0.5)
plt.show()
# Velocity tracking of the CoM along the x,y,z axis
(f, ax) = plut.create_empty_figure(3, 1, figsize=(10,10))
for i in range(3):
ax[i].plot(time, com_vel[i,:], label='CoM Vel '+str(i))
ax[i].plot(time, com_vel_ref[i,:], 'r:', label='CoM Vel Ref '+str(i))
ax[i].set_xlabel('Time [s]')
ax[i].set_ylabel('CoM Vel [m/s]')
leg = ax[i].legend()
leg.get_frame().set_alpha(0.5)
plt.show()
# Acceleration tracking of the CoM along the x,y,z axis
(f, ax) = plut.create_empty_figure(3, 1, figsize=(10,10))
for i in range(3):
ax[i].plot(time, com_acc[i,:], label='CoM Acc '+str(i))
ax[i].plot(time, com_acc_ref[i,:], 'r:', label='CoM Acc Ref '+str(i))
ax[i].plot(time, com_acc_des[i,:], 'g--', label='CoM Acc Des '+str(i))
ax[i].set_xlabel('Time [s]')
ax[i].set_ylabel('CoM Acc [m/s^2]')
leg = ax[i].legend()
leg.get_frame().set_alpha(0.5)
plt.show()
###Output
_____no_output_____ |
lectures/.ipynb_checkpoints/8-checkpoint.ipynb | ###Markdown
Stack
###Code
class Stack:
def __init__(self, initial_collection=None, max_capacity=100):
# Avoid a mutable default argument: a shared default list would be reused across instances
self.__stack = initial_collection if initial_collection is not None else []
self.__max_capacity = max_capacity
def pop(self):
return self.__stack.pop()
def push(self, value):
if len(self.__stack) < self.__max_capacity:
self.__stack.append(value)
else:
return False
def top(self):
return self.__stack[-1]
stack = Stack(initial_collection=[1,2,3,4], max_capacity=10)
stack.pop()
print(stack._Stack__stack)
stack.push('asd')
print(stack._Stack__stack)
stack.top()
###Output
_____no_output_____
###Markdown
Queue
###Code
class Queue:
def __init__(self, size=100):
self.__data = []
self.__size = size
def push(self, value):
if len(self.__data) < self.__size:
self.__data.insert(0, value)
def pop(self):
if self.__data:
return self.__data.pop()
def top(self):
if self.__data:
return self.__data[-1]
q = Queue()
q.push('asd')
q.top()
q.push('asd')
q.push(42)
q.top()
q.pop()
q.top()
# Singly linked list [node] -> [node] -> [node]
# Doubly linked list [node] <-> [node] <-> [node]
# Tree - a 'list' that stores two or more pointers
# Binary tree -> stores two links
# Search complexity in a binary tree is log(n) in its height
# wiki -> red-black tree, B-tree, AVL tree
# Robert Sedgewick - data structures / algorithms
# implement a tree (homework)
class Tree:
def __init__(self):
pass
def add(self, value):
pass
def search(self, value):
pass
class Node:
def __init__(self, payload, left, right):
self.payload = payload
self.left = left
self.right = right
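# One possible sketch for the homework above (not the required solution): an unbalanced
# binary search tree built on top of the Node class, with O(height) add/search.
class BinarySearchTree:
    def __init__(self):
        self.root = None

    def add(self, value):
        if self.root is None:
            self.root = Node(value, None, None)
            return
        current = self.root
        while True:
            if value < current.payload:
                if current.left is None:
                    current.left = Node(value, None, None)
                    return
                current = current.left
            else:
                if current.right is None:
                    current.right = Node(value, None, None)
                    return
                current = current.right

    def search(self, value):
        current = self.root
        while current is not None:
            if value == current.payload:
                return True
            current = current.left if value < current.payload else current.right
        return False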
###Output
_____no_output_____
###Markdown
Reverse Polish notation: an arithmetic calculator. Implement a tree: search, insertion, deletion. Operator overload
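As a starting point for the calculator exercise, a minimal stack-based evaluator could look like this (the token format, whitespace-separated, and the operator set are assumptions made for this sketch):

```python
# Minimal sketch: evaluate a reverse Polish notation expression with a stack.
def eval_rpn(expression: str) -> float:
    ops = {'+': lambda a, b: a + b,
           '-': lambda a, b: a - b,
           '*': lambda a, b: a * b,
           '/': lambda a, b: a / b}
    stack = []
    for token in expression.split():
        if token in ops:
            b = stack.pop()   # right operand is on top of the stack
            a = stack.pop()
            stack.append(ops[token](a, b))
        else:
            stack.append(float(token))
    return stack.pop()

eval_rpn('3 4 + 2 *')  # (3 + 4) * 2 -> 14.0
```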
###Code
class Number:
def __init__(self, value):
self.value = value
def __add__(self, second):
return Number(self.value + second.value)
def __repr__(self):
return 'Custom number with value: {}'.format(self.value)
Number(12) + Number(67) + Number(100)
###Output
_____no_output_____ |
Ejercicio 5.ipynb | ###Markdown
UNIVERSIDAD NACIONAL DE CÓRDOBA Fa.M.A.F – Observatorio Astronómico Licenciatura en Astronomía Tapia Martina Astrometría 2020 Práctico N° 4: Bayesian Inference with Monte Carlo Methods: Markov Chains

Introduction

This notebook carries out the last part of the study of Bayesian inference with Monte Carlo methods. The *goal* is to minimize functions by means of the gradient descent technique.

Basic Concepts

Non-linear least squares

Consider now a fit of a model that depends non-linearly on a set of M parameters $a_{k}$; k = 1, . . . , M. The procedure is basically the same as with the other methods studied, since the merit function is $\chi^2$ and we search for the parameters that minimize that function. However, in this case the search process must be iterative. The idea is to provide initial values and develop a procedure to improve them. The procedure is repeated until $\chi^2$ stops decreasing. So, we have a function or model of the form: $$ y(x) = \sum_{i=1}^{M}f_{i}(\vec{a_i}x) = y(x| \vec{a}) $$ and therefore $$ \chi^2(\vec{a}) = \sum_{i=1}^N \left[\frac{y_{i} - y(x_{i}|\vec{a})}{\sigma_i}\right]^2 $$ A procedure that can be applied to improve an initial set of parameters is to follow the direction of steepest ascent (or descent) indicated by the gradient of a function. In this case, the gradient we need is that of the $\chi^2$ function.

Gradient descent method

The method that uses the gradient as a guide to optimize the parameters is the so-called "Gradient Descent", in which: $$ \vec{a}_{new} = \vec{a}_{old} - \eta \, \nabla \chi^2(\vec{a}_{old})$$ where the parameter $\eta$ is the step size, which can be taken as fixed. Now, how is the parameter $\eta$ chosen? When we are close to the critical point (minimum or maximum) the gradient $\nabla \chi^2$ varies very little, because the surface is rather flat; therefore multiplying by a very small $\eta$ makes each step very short, so many steps are needed. If the value of $\eta$ is very large, the new parameters oscillate a lot (like a pendulum) around the minimum, making convergence difficult. There is a method for selecting $\eta$ called "line-search", in which one starts with a small value, takes a few steps and looks at the behaviour; if the parameters do not change much, $\eta$ is replaced by a larger value, a couple of steps are run and, if the parameters change too much, $\eta$ is reduced again. In summary:

- The method lets us point in the right direction when we are far from the minimum.
- It is very sensitive to the adopted value of $\eta$. If it is too small, it does not converge or takes a very long time. If it is too large, it overshoots the minimum several times.

Implementation of the "Gradient Descent" method

At this stage, the gradient descent method will be implemented to find the set of parameters that minimize the $\chi^2$ function.
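Before applying the update rule to $\chi^2$, it can be illustrated on a toy one-dimensional cost (the quadratic cost, the starting point, the step size and the number of iterations below are assumptions chosen only for this sketch):

```python
# Toy illustration of the update a_new = a_old - eta * grad(f)(a_old)
f = lambda a: (a - 3.0) ** 2        # cost with its minimum at a = 3
grad_f = lambda a: 2.0 * (a - 3.0)  # analytic gradient

a, eta = 10.0, 0.1                  # arbitrary starting point and step size
for _ in range(50):
    a = a - eta * grad_f(a)
print(a)  # close to 3
```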
###Code
#Primero se importan las librerías necesarias para trabajar
import numpy as np
import matplotlib.pyplot as plt
import random
import math
import seaborn as sns
from scipy import stats as st
import functools
sns.set()
# Se importan los datos y sus errores, para ser usados en el notebook
datos = np.genfromtxt('datos2.dat',delimiter=',')
einf = np.genfromtxt('errores_inferiores.dat', delimiter = ',')
esup = np.genfromtxt('errores_superiores.dat', delimiter = ',')
x = datos[:,0]
y = datos[:,1]
ei = einf[:,1]
es = esup[:,1]
e1 = y- ei
e2 = es - y
#Se grafica la función luminosidad de galaxias con las barras de error correspondientes
plt.title('Función de luminosidad de galaxias obtenida por Blanton et al.(2001)')
plt.xlabel('$M_{r}$')
plt.ylabel('$\hat \phi(M_{r})$')
plt.yscale('log')
plt.errorbar(x,y, yerr =(e1,e2),marker ='o', markersize=2, color='green',ecolor ='green',elinewidth=1,linewidth=0.5)
plt.plot(x,ei, '_g', markersize=4, )
plt.plot(x, es, '_g', markersize=4)
plt.show()
#Se define la función que representará el modelo de Schechter
def model_Sche(phi0, M0, a, M):
phi1 = []
for i in range(len(M)):
b = -0.4*(M[i]-M0)*(a+1)
c = -0.4*(M[i]-M0)
phi = 0.4 * math.log(10) * phi0 * 10**b * math.exp(-10**c)
phi1.append(phi)
return(phi1)
#Se verifica si el modelo funciona
M1= np.linspace(-16,-23,100)
y1a = model_Sche(phi0=1.46E-2,M0=-20.83,a=-1.20,M=M1)
#Se grafica el modelo sobre los puntos para verificar la función modelo
plt.title('Función de luminosidad de galaxias obtenida por Blanton et al.(2001)')
plt.xlabel('$M_{r}$')
plt.ylabel('$\hat \phi(M_{r})$')
plt.yscale('log')
plt.yscale('log')
plt.plot(x,y, 'oy', label='Datos importados')
plt.plot(M1,y1a, label='Modelo de Blanton')
plt.legend(loc='lower right')
plt.show()
#Se define el valor de sigma para trabajar
s = es - ei
###Output
_____no_output_____
###Markdown
As explained above, the aim is to search for the parameters that minimize the $\chi^2$ function. For this reason, the first step is to define the $\chi^2$ function using the formula described in the introduction.
###Code
def Chi2(datax, datay, phi0, M0, a, s):
chi = 0
model = model_Sche(phi0, M0, a, datax)
N = len(datax)
for i in range(N):
chi = chi + (datay[i] - model[i])**2 / s[i]**2
return(chi)
#Se prueba que la función no tenga errores
Chi2(x, y, phi0=1.46E-2,M0=-20.83,a=-1.20,s=s)
#Se analiza el comportamiento de Chi2 con ayuda de plots
N = 100
PHI = np.linspace(0.0005, 0.025,N)
M_0 = np.linspace(-21.2, -20.2, N)
A_0 = np.linspace(-1.5,-0.9, N)
#Se crean las matrices con ceros
p_0 = np.zeros((N,N))
m_0 = np.zeros((N,N))
a_0 = np.zeros((N,N))
for ip, p in enumerate(PHI):
for im, m in enumerate(M_0):
p_0[im,ip] = Chi2(x, y, p, m, a=-1.20, s = s)
for ip, p in enumerate(PHI):
for ia, a in enumerate(A_0):
m_0[ip,ia] = Chi2(x, y, p, M0=-20.83, a=a, s =s )
for im, m in enumerate(M_0):
for ia, a in enumerate(A_0):
a_0[im,ia] = Chi2(x,y, phi0=1.46E-2, M0 = m, a =a, s = s)
plt.figure(figsize=(20,5))
plt.subplot(1,3,1)
plt.contourf(PHI, M_0, p_0, 100, cmap=plt.cm.viridis)
plt.colorbar()
plt.xlabel('$\phi_{*}$')
plt.ylabel('$M_{*}$')
plt.subplot(1,3,2)
plt.contourf(PHI, A_0, m_0, 100,cmap=plt.cm.viridis)
plt.colorbar()
plt.xlabel('$\phi_{*}$')
plt.ylabel('$alfa$')
plt.subplot(1,3,3)
plt.contourf(M_0, A_0, a_0, 100, cmap=plt.cm.viridis)
plt.colorbar()
plt.xlabel('$M_{*}$')
plt.ylabel('$alfa$')
plt.show()
###Output
_____no_output_____
###Markdown
Note that in these plots the regions where the minimum lies can be seen in dark purple. The colours help visualize the third dimension, since there are 3 parameters to analyse. Then, using these results, the starting points of the chains that will find the parameters minimizing $\chi^2$ can be defined. Before building the chains, $\nabla \chi^2$ must be computed. To do this, several functions that compute the derivatives involved are written. Doing this process step by step will make it easier to find the error if problems appear later.
###Code
#Se define la derivada del modelo respecto del parámetro phi0
def dmodel_phi0(phi0, M0, a, M):
dmphi1 = []
for i in range(len(M)):
b = -0.4*(M[i]-M0)*(a+1)
c = -0.4*(M[i]-M0)
dmphi = 0.4 * math.log(10) * 10**b * math.exp(-10**c)
dmphi1.append(dmphi)
return(dmphi1)
#Se define la derivada del modelo respecto del parámetro m0
def dmodel_M0(phi0, M0, a, M):
dmM01 = []
for i in range(len(M)):
A = 0.4*math.log(10)*phi0
f = 10**(-0.4*(M[i]-M0)*(a+1))
g = math.exp(-10**(-0.4*(M[i]-M0)))
df = 10**(-0.4 * (M[i]-M0) * (a + 1))* 0.4 * math.log(10)*(a+1)
dg = -math.exp(-10**(-0.4*(M[i]-M0))) * 0.4 * math.log(10) * 10**(-0.4 * (M[i]-M0))
dmM0 = A * (df*g + f*dg)
dmM01.append(dmM0)
return(dmM01)
#Se define la derivada del modelo respecto del parámetro alpha
def dmodel_a(phi0, M0, a, M):
dma1 = []
for i in range(len(M)):
A = 0.4* math.log(10)*phi0
f = 10**(-0.4*(M[i]-M0)*(a+1))
df = 10**(-0.4*(M[i]-M0)*(a+1))*-0.4*math.log(10)*(M[i]-M0)
g = math.exp(-10**(-0.4*(M[i]-M0)))
dma = A * df * g
dma1.append(dma)
return(dma1)
#Se calcula el gradiente de Chi cuadrado
#Se usan las funciones definidas anteriormente
def Gradiente_Chi2(datax, datay, phi0, M0, a, sigma = s):
dchi2_phi0 = 0
dchi2_m0 = 0
dchi2_a = 0
model_pars = model_Sche(phi0, M0, a, datax)
dfmodel_phi0 = dmodel_phi0(phi0, M0, a, M = datax)
dfmodel_m0 = dmodel_M0(phi0, M0, a, M = datax)
dfmodel_a0 = dmodel_a(phi0, M0, a, M=datax)
for j in range(len(datax)):
dchi2_phi0 = dchi2_phi0 - 2 * (datay[j] - model_pars[j]) / (sigma[j] ** 2) * dfmodel_phi0[j]
dchi2_m0 = dchi2_m0 - 2 * (datay[j] - model_pars[j]) / (sigma[j] ** 2) * dfmodel_m0[j]
dchi2_a = dchi2_a - 2 * (datay[j] - model_pars[j]) / (sigma[j] ** 2) * dfmodel_a0[j]
return np.array([dchi2_phi0, dchi2_m0, dchi2_a])
#Se realiza una prueba de la funcion
Gradiente_Chi2(x, y, 0.0146, -20.83, -1.20, s)
###Output
_____no_output_____
###Markdown
Once the derivatives have been obtained and the gradient of $\chi^2$ computed, the gradient descent method is applied by writing a function that returns three lists, one for each parameter, the last element being the value that minimizes the $\chi^2$ function.
###Code
def Gradiente_descendente(x, y, n, N_pasos):
#Se inicializan los parámetros
grad = np.array([0,0,0])
while np.any(grad < (1.E-8)):
phi0_inicial = random.uniform(0.0004, 0.025)
m0_inicial = random.uniform(-21.0, -20.5)
a0_inicial = random.uniform(-1.5, -0.9)
grad = Gradiente_Chi2(x, y, phi0_inicial, m0_inicial, a0_inicial, s)
#Se crean las listas para guardar los resultados
camino_phi0 = [phi0_inicial]
camino_m0 = [m0_inicial]
camino_a = [a0_inicial]
for i in range(1, N_pasos):
phi0_inicial = phi0_inicial - n * (grad[0]) * (10**-6)
m0_inicial = m0_inicial - n * (grad[1]) * (10**-2)
a0_inicial = a0_inicial - n * (grad[2]) * (10**-3)
grad = Gradiente_Chi2(x, y, phi0_inicial, m0_inicial, a0_inicial, s)
camino_phi0.append(phi0_inicial)
camino_m0.append(m0_inicial)
camino_a.append(a0_inicial)
return(camino_phi0, camino_m0, camino_a)
#Se ejecuta la función
g1 = Gradiente_descendente(x, y, n = 0.05, N_pasos = 10)
#Se grafica el resultado para obtener una mejor visualización
#Se grafican dos parámetros a modo representativo
plt.plot(g1[0], g1[1], marker='.')
plt.plot(g1[0][0], g1[1][0], 'or', label = 'Punto incial del camino')
plt.xlabel('$\phi_{*}$')
plt.ylabel('$M_{*}$')
plt.legend(loc='lower right')
###Output
_____no_output_____
###Markdown
Note that, using $\eta$ = 0.05 and taking 10 steps, the parameters obtained are not close to the values reported by Blanton. We increase $\eta$ to 0.06 with the same number of steps and obtain
###Code
g2 = Gradiente_descendente(x, y, n = 0.06, N_pasos = 10)
plt.plot(g2[0], g2[1], marker='.')
plt.plot(g2[0][0], g2[1][0], 'og', label = 'Punto incial del camino')
plt.xlabel('$\phi_{*}$')
plt.ylabel('$M_{*}$')
plt.legend(loc='lower right')
###Output
_____no_output_____
###Markdown
Slightly varying the value of $\eta$ does not produce large changes in the parameters obtained. Next, the number of steps is varied while keeping the value of $\eta$ fixed.
###Code
g3 = Gradiente_descendente(x, y, n = 0.06, N_pasos = 100)
plt.plot(g3[0], g3[1], marker='.')
plt.plot(g3[0][0], g3[1][0], 'og', label = 'Punto incial del camino')
plt.xlabel('$\phi_{*}$')
plt.ylabel('$M_{*}$')
plt.legend(loc='lower right')
###Output
_____no_output_____
###Markdown
Here it can be seen that the values of phi0 and m0 get much closer to Blanton's, which confirms what was stated in the basic concepts section. When we are close to the critical point (minimum) the gradient $\nabla \chi^2$ varies very little, because the surface is quite flat, so multiplying by a very small $\eta$ makes each step very short, which means many steps are needed. On the other hand, if the value of $\eta$ is too large, the new parameters oscillate a lot (like a pendulum) around the minimum, making convergence difficult. So what we need is to strike a balance between the number of steps and the value of $\eta$ (a simple step-size adjustment sketch is shown below).
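A crude way to automate that balance (a sketch only; the shrink/grow factors are assumptions and not part of the exercise):

```python
# Shrink eta when chi2 increases (we overshot), grow it slowly while chi2 keeps decreasing.
def adjust_eta(chi2_old, chi2_new, eta, shrink=0.5, grow=1.1):
    return eta * shrink if chi2_new > chi2_old else eta * grow
```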
###Code
#En este paso, se mostrará lo que ocurre si eta es muy grande
#En el gráfico se puede visualizar la oscilación predicha
g4 = Gradiente_descendente(x, y, n = 0.09, N_pasos = 50)
plt.plot(g4[0], g4[1], marker='.')
plt.plot(g4[0][0], g4[1][0], 'og', label = 'Punto incial del camino')
plt.xlabel('$\phi_{*}$')
plt.ylabel('$M_{*}$')
plt.legend(loc='lower right')
#Se intentará lograr un equilibro entre eta y el numero de pasos
g5 = Gradiente_descendente(x, y, n = 0.07, N_pasos = 170)
plt.plot(g5[0], g5[1], marker='.')
plt.plot(g5[0][0], g5[1][0], 'og', label = 'Punto incial del camino')
plt.xlabel('$\phi_{*}$')
plt.ylabel('$M_{*}$')
plt.legend(loc='lower right')
###Output
_____no_output_____
###Markdown
With the last choice, $\eta$ = 0.07 and 170 steps, better parameter values were obtained. Next, the model evaluated at Blanton's parameters is plotted together with the model using the parameters found with the gradient descent function.
###Code
ypars = model_Sche(phi0=g5[0][169],M0=g5[1][169],a=g5[2][169],M=x)
yblanton = model_Sche(phi0=1.46E-2,M0=-20.83,a=-1.20,M=x)
plt.figure(figsize=(10,7))
plt.title('Ajustes usando el modelo de Schecter valuado en distintos conj. de parámetros')
plt.xlabel('$M_{r}$')
plt.ylabel('$\hat \phi(M_{r})$')
plt.yscale('log')
plt.yscale('log')
plt.plot(x,y, 'og', label='Datos importados', markersize=4)
plt.plot(x,yblanton, label='Modelo con parámetros de Blanton')
plt.plot(x,ypars, label='Modelo con los parametros obtenidos')
plt.legend(loc='lower right')
plt.show()
###Output
_____no_output_____
###Markdown
PRÁCTICO N° 3: Databases Tapia Martina Activity 4 In this activity the goal is to compute the absolute magnitude of each galaxy, using the approximation: $$ M = m - 25 - 5\log_{10}\left(\frac{c\,z}{H}\right) $$ where - c is the speed of light and - $H = 75\ \mathrm{km\ s^{-1}\ Mpc^{-1}}$. In addition, we want to plot the absolute magnitude vs. redshift for all galaxies with $m_{r} < 17.5$ and obtain a fit for the envelope of the points.
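As a quick sanity check of the formula, here is how it evaluates for one galaxy (the apparent magnitude and redshift are made-up illustrative numbers; `c` and `H` match the constants used later in the notebook):

```python
import math

c = 300000          # speed of light in km/s
H = 75              # Hubble constant in km/s/Mpc
m_r, z = 16.0, 0.05 # illustrative values only
M = m_r - 25 - 5 * math.log10(c * z / H)
print(M)            # roughly -20.5
```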
###Code
#Primero se importan las librerías necesarias para trabajar
import numpy as np
import matplotlib.pyplot as plt
import random
import math
import seaborn as sns
sns.set()
import scipy.stats
data = np.genfromtxt('muestra1_MartinaTapia.csv',delimiter=',', skip_header = 1)
# Constantes
c = 300000
H = 75
# Magnitudes aparentes r (todas las galaxias)
mr = data[:, 4]
z = data[:, 6]
# Calculo de Magnitudes Absolutas
def MagAbs():
M = []
z2 =[]
for i in range(len(mr)):
if (mr[i]<17.5):
if (z[i]>=0):
z2.append(z[i])
M1 = mr[i] - 25 - 5 * math.log10 ((c * z[i])/ H)
M.append(M1)
else:
None
return(M,z2)
Mag, z = MagAbs()
# Se grafican las magnitudes absolutas vs el redshift
plt.title('Magnitud absoluta vs Redshift para galaxias con mr < 17.5')
plt.xlabel('Redshift $z$')
plt.ylabel('Magnitud absoluta')
plt.ylim(-24.5,-16)
plt.scatter(z,Mag, color = 'greenyellow')
plt.show()
###Output
_____no_output_____
###Markdown
Now we want to obtain the fit of the envelope of the points and, for that, we need to know the maximum absolute magnitude values in a given interval of z. To find these maximum values, the binned_statistic function from scipy.stats is used.
###Code
# Los valores de magnitud máximos
mag = scipy.stats.binned_statistic(z,Mag,statistic='max',bins=100)[0]
#Los valores de redshift correspondientes a los máximos de magnitud
z2 = scipy.stats.binned_statistic(z,Mag,statistic='max',bins=100)[1]
# se define una función que ajuste el modelo de magnitudes con los valores máximos encontrados
def ajuste(b = 17.5):
a = []
a1 = 0
for i in range(len(z2)):
a1 = b -25-5 * math.log10 ((c * z2[i])/ H)
a.append(a1)
return(a)
#Se ejecuta la fucnión para diferentes valores de b
y = ajuste(b = 17.5)
y1 = ajuste(b=17.3)
y2 =ajuste(b=17.7)
# Se grafican las magnitudes absolutas vs el redshift con el ajuste realizado
plt.title('Magnitud absoluta vs Redshift para galaxias con mr < 17.5')
plt.xlabel('Redshift $z$')
plt.ylabel('Magnitud absoluta')
plt.ylim(-24.5,-16)
plt.scatter(z,Mag, color = 'greenyellow')
plt.scatter(z2[0:100],mag,color= 'red', label = 'máximos')
plt.plot(z2, y, label = 'Ajuste', color = 'yellow', linewidth=2.5)
plt.plot(z2, y1, label='Ajuste1',color = 'darkturquoise', linewidth=2.5)
plt.plot(z2, y2,label = 'Ajuste2',color = 'cadetblue', linewidth=2.5)
plt.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/wide_and_deep-checkpoint.ipynb | ###Markdown
Data description
###Code
import numpy as np
import pandas as pd

# wide inputs
X_train_wide = pd.read_csv(mypath/"train_features.csv", index_col=0)
X_train_wide.head()
print(X_train_wide.shape)
## deep inputs
X_train_deep = np.load(mypath/"embedding-train-x.npy")
Y_train = np.load(mypath/"embedding-train-y.npy")
X_train_deep[0:5]
print(X_train_deep.shape)
###Output
(17690, 500)
###Markdown
Make sure the input dims are consistent
###Code
Y_train
###Output
_____no_output_____
###Markdown
encoding
###Code
indices = np.arange(X_train_wide.shape[0]) # initial random state for shuffling
np.random.shuffle(indices)
training_samples = int(len(indices) * .8)
validation_samples = len(indices) - training_samples
print(training_samples, validation_samples)
###Output
14152 3538
###Markdown
Train/test split
###Code
# Apply the shuffled indices so that the 80/20 split is randomised
X_train_wide_tr, X_train_wide_va = X_train_wide.iloc[indices[:training_samples]], X_train_wide.iloc[indices[training_samples: training_samples + validation_samples]]
Y_train_tr, Y_train_va = Y_train[indices[:training_samples]], Y_train[indices[training_samples: training_samples + validation_samples]]
X_train_deep_tr, X_train_deep_va = X_train_deep[indices[:training_samples]], X_train_deep[indices[training_samples: training_samples + validation_samples]]
###Output
_____no_output_____
###Markdown
`training_samples: training_samples + validation_samples` means the slice runs from index `training_samples` up to (but not including) `training_samples + validation_samples`
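A tiny standalone illustration of these slice semantics (the list and split sizes are made up):

```python
lst = list(range(10))
training_samples, validation_samples = 6, 4
lst[:training_samples]                                        # [0, 1, 2, 3, 4, 5]
lst[training_samples: training_samples + validation_samples]  # [6, 7, 8, 9]
```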
###Code
X_train_deep_tr.shape
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense, LSTM, Dropout, Bidirectional
from keras import layers
import keras
###Output
Using TensorFlow backend.
###Markdown
wide model
###Code
wide_input_len = X_train_wide.shape[1]
y_len = Y_train.shape[1]
variety_inputs = layers.Input(shape=(wide_input_len,))
merged_layer = layers.Dense(256, activation='relu')(variety_inputs)
predictions = layers.Dense(y_len, activation='softmax')(merged_layer)
wide_model = keras.Model(inputs=variety_inputs, outputs=predictions)
wide_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(wide_model.summary())
###Output
Model: "model_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) (None, 16) 0
_________________________________________________________________
dense_3 (Dense) (None, 256) 4352
_________________________________________________________________
dense_4 (Dense) (None, 4) 1028
=================================================================
Total params: 5,380
Trainable params: 5,380
Non-trainable params: 0
_________________________________________________________________
None
###Markdown
deep model
###Code
input_length = X_train_deep.shape[1]
em_n_rows = 1000
em_n_cols = 100
embedding_layer = Embedding(input_dim=em_n_rows,
output_dim=em_n_cols,
input_length=input_length,
name='embedding_layer')
dropout_layer = Dropout(0.2)
i = layers.Input(shape=(input_length,), dtype='int32', name='main_input')
x = embedding_layer(i)
x = dropout_layer(x)
LSTM_units = 16
x = Bidirectional(LSTM(units=LSTM_units, dropout=0.2, recurrent_dropout=0.2, input_shape=(em_n_rows, em_n_cols)))(x)
x = dropout_layer(x)
# x = Flatten()(x)
o = Dense(y_len, activation='softmax')(x)
deep_model = keras.Model(inputs=i, outputs=o)
print(deep_model.summary())
deep_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Merging the models
###Code
# Combine wide and deep into one model
merged_out = layers.concatenate([wide_model.output, deep_model.output])
merged_out = layers.Dense(y_len, activation='softmax')(merged_out)
combined_model = keras.Model([wide_model.input] + [deep_model.input], merged_out)
print(combined_model.summary())
combined_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Run training
history = combined_model.fit([X_train_wide_tr] + [X_train_deep_tr], Y_train_tr, epochs=20, batch_size=32,
validation_data=([X_train_wide_va] + [X_train_deep_va], Y_train_va))
# supervised learning
combined_model.save(mypath/"keras-wide_and_deep-v.1.0.0.h5")
###Output
D:\install\miniconda\lib\site-packages\tensorflow_core\python\framework\indexed_slices.py:424: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
###Markdown
After merging, the combined model is not ideal.
###Code
accr = combined_model.evaluate([X_train_wide_tr] + [X_train_deep_tr], Y_train_tr)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1]))
accr = combined_model.evaluate([X_train_wide_va] + [X_train_deep_va], Y_train_va)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1]))
###Output
14152/14152 [==============================] - 29s 2ms/step
Test set
Loss: 0.612
Accuracy: 0.775
3538/3538 [==============================] - 7s 2ms/step
Test set
Loss: 0.634
Accuracy: 0.770
###Markdown
No overfitting.
###Code
import matplotlib.pyplot as plt
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show();
###Output
_____no_output_____ |
08_tarea_1.ipynb | ###Markdown
Tarea 1: Implement the `OisCashflow` class. Initial Setup
###Code
from finrisk import QC_Financial_3 as Qcf
from dataclasses import dataclass
from enum import Enum
class BusCal(Enum):
NY = 1
SCL = 2
def get_cal(code: BusCal) -> Qcf.BusinessCalendar:
"""
"""
if code == BusCal.NY:
cal = Qcf.BusinessCalendar(Qcf.QCDate(1, 1, 2020), 20)
for agno in range(2020, 2071):
f = Qcf.QCDate(12, 10, agno)
if f.week_day() == Qcf.WeekDay.SAT:
cal.add_holiday(Qcf.QCDate(14, 10, agno))
elif f.week_day() == Qcf.WeekDay.SUN:
cal.add_holiday(Qcf.QCDate(13, 10, agno))
elif f.week_day() == Qcf.WeekDay.MON:
cal.add_holiday(Qcf.QCDate(12, 10, agno))
elif f.week_day() == Qcf.WeekDay.TUE:
cal.add_holiday(Qcf.QCDate(11, 10, agno))
elif f.week_day() == Qcf.WeekDay.WED:
cal.add_holiday(Qcf.QCDate(10, 10, agno))
elif f.week_day() == Qcf.WeekDay.THU:
cal.add_holiday(Qcf.QCDate(9, 10, agno))
else:
cal.add_holiday(Qcf.QCDate(8, 10, agno))
cal.add_holiday(Qcf.QCDate(15, 2, 2021))
return cal
###Output
_____no_output_____
###Markdown
The `OisCashflow` class

The `OisCashflow` class is a [`dataclass`](https://realpython.com/python-data-classes/). The assignment consists of:
- Implementing the 3 methods of the class that are left as `pass`.
- Implementing the `present_value` function.
- Implementing the `set_expected_rate` function.

A small numerical sketch of the compounding behind `get_accrued_rate` is shown below.
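The compounding that `get_accrued_rate` has to perform can be sketched with plain Python (the overnight rates and day counts below are made-up illustrative values; the real implementation must read them from the `Qcf.time_series` fixings and the calendar):

```python
# Illustrative only: compound overnight rates (linear Act/360) into an equivalent rate.
daily_fixings = [0.0150, 0.0151, 0.0149]   # made-up overnight rates
days_per_fixing = [1, 1, 3]                # e.g. the last rate applies over a weekend

product = 1.0
total_days = 0
for rate, days in zip(daily_fixings, days_per_fixing):
    product *= 1.0 + rate * days / 360.0   # wealth factor of each overnight rate
    total_days += days

equivalent_rate = (product - 1.0) * 360.0 / total_days
print(equivalent_rate)
```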
###Code
@dataclass # syntactic sugar
class OisCashflow:
start_date: Qcf.QCDate
end_date: Qcf.QCDate
settlement_date: Qcf.QCDate
notional: float
currency: Qcf.QCCurrency
amortization: float
amort_is_cashflow: bool
interest_rate: Qcf.QCInterestRate
on_index: Qcf.InterestRateIndex
spread: float
gearing: float
def get_accrued_rate(self, accrual_date: Qcf.QCDate, fixings: Qcf.time_series) -> float:
"""
Computes the equivalent rate from `start_date` to `accrual_date`. The equivalent rate
is computed as:
(P - 1) * 360 / (accrual_date - start_date)
where P is the product of the capitalisation factors of all the overnight rates
between `start_date` and `accrual_date`. The values of those rates must be stored
in the `fixings` variable.
See the `QC_Financial_3` documentation for the usage and behaviour of objects
of type Qcf.time_series.
"""
pass
def get_accrued_interest(self, accrual_date: Qcf.QCDate, fixings: Qcf.time_series) -> float:
"""
Computes the accrued interest from `start_date` to `accrual_date` using the equivalent rate
computed with the previous method.
The values of the overnight rates must be stored in the `fixings` variable.
See the `QC_Financial_3` documentation for the usage and behaviour of objects
of type Qcf.time_series.
"""
pass
def amount(self, fixings: Qcf.time_series) -> float:
"""
Computes the total cash flow at maturity (amortization plus interest accrued up to end_date).
The values of the overnight rates must be stored in the `fixings` variable.
See the `QC_Financial_3` documentation for the usage and behaviour of objects
of type Qcf.time_series.
"""
pass
###Output
_____no_output_____
###Markdown
Construction of a `Qcf.InterestRateIndex` object.
###Code
codigo = 'SOFR'
tasa_on = Qcf.QCInterestRate(.0, Qcf.QCAct360(), Qcf.QCLinearWf())
fixing_lag = Qcf.Tenor('0d')
tenor = Qcf.Tenor('1d')
fixing_calendar = get_cal(BusCal.NY)
settlement_calendar = fixing_calendar
sofr = Qcf.InterestRateIndex(
codigo,
tasa_on,
fixing_lag,
tenor,
fixing_calendar,
settlement_calendar,
Qcf.QCUSD()
)
###Output
_____no_output_____
###Markdown
Construction of an `OisCashflow` instance.
###Code
ois = OisCashflow(
Qcf.QCDate(13, 1, 2020),
Qcf.QCDate(13, 1, 2021),
Qcf.QCDate(13, 1, 2021),
10000000,
Qcf.QCUSD(),
1000000,
True,
Qcf.QCInterestRate(0.0, Qcf.QCAct360(), Qcf.QCLinearWf()),
sofr,
0,
1
)
print(ois.start_date)
print(ois.notional)
###Output
10000000
###Markdown
Functions
###Code
def present_value(val_date: Qcf.QCDate, ois_cashflow: OisCashflow, zcc: Qcf.ZeroCouponCurve) -> float:
"""
This function works in the same way as the analogous function in `QC_Financial_3`.
"""
pass
def set_expected_rate(
val_date: Qcf.QCDate,
ois_cashflow: OisCashflow,
zcc: Qcf.ZeroCouponCurve,
fixings: Qcf.time_series) -> None:
"""
This function works in the same way as the analogous function in `QC_Financial_3`.
See, for example, the cases where we use the Qcf.ForwardRates() object in
notebook 9.
"""
pass
###Output
_____no_output_____ |
cmsc_210/examples/lecture_27/notebooks/Multivariate-Regression.ipynb | ###Markdown
Multivariate Regression

We will use a real-life dataset containing information about size and weight for several fish species.

1. species name of fish (Bream, Roach, Whitefish, Parkki, Perch, Pike, Smelt)
2. weight of fish in grams
3. vertical length in cm (`Length1`)
4. diagonal length in cm (`Length2`)
5. cross length in cm (`Length3`)
6. height in cm
7. diagonal width in cm

We would like to see if we can develop a way to predict the weight of a fish given its dimensions.

Task

**Can we predict the weight of a fish given its dimensions and species?**

Acquiring the data

First let's load the data into a [Pandas](https://mazelife.github.io/cmsc-291/lecture_10.htmlpandas) dataframe:
###Code
import pandas
fish_dataframe = pandas.read_csv("Fish.csv", dtype={"Species": "category"})
fish_dataframe
import seaborn
# Seaborn has various themes/styles. We'll pick a good default:
seaborn.set_theme(style="darkgrid")
seaborn.set_context("talk")
seaborn.relplot(x="Length1", y="Weight", hue="Species", data=fish_dataframe, height=8, aspect=1.5)
###Output
_____no_output_____
###Markdown
Multivariate Linear Regression We can do linear regression with multiple X variables. For example, we could try to build a model that predicts for all given species of fish rather than just one by using species as one of our variables. In univariate linear regression (2-d), we fit lines. In higher dimensions, we will fit hyperplanes (a hyperplane is a subspace whose dimension is one less than that of its ambient space). Converting non-numeric features One feature we will definitely want to include in our model is the species of the fish, but that's a text value. How do we use it in our model?
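To make the "hyperplane" picture concrete: with $p$ numeric predictors the fitted model has the form

$$\hat{y} = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \dots + \beta_p x_p,$$

and ordinary least squares picks the coefficients $\beta_j$ that minimize the sum of squared residuals. This is also why every feature we use, including species, has to be represented numerically.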
###Code
from sklearn.preprocessing import LabelEncoder
# Convert labels to a numeric value that can be used in a model:
species_label_encoder = LabelEncoder()
species_label_encoder.fit(fish_dataframe["Species"])
fish_dataframe["Encoded Species"] = species_label_encoder.transform(fish_dataframe["Species"])
fish_dataframe
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# We will train on height, width, vertical length, and species.
feature_columns = [ "Height", "Width", "Length1", "Encoded Species"]
x_values = fish_dataframe[feature_columns].values.reshape(-1, len(feature_columns))
y_values = fish_dataframe.Weight.values.reshape(-1, 1)
model = LinearRegression()
model.fit(x_values, y_values)
predicted_y = model.predict(x_values)
print("MSE is {:,.2f}".format(mean_squared_error(y_values, predicted_y)))
###Output
MSE is 14,160.88
###Markdown
Evaluating with train/test split
###Code
from sklearn.model_selection import train_test_split
# Split 20% of our data off into testing data:
x_training_data, x_test_data, y_training_data, y_test_data = train_test_split(x_values, y_values, test_size=0.2, random_state=0)
# Train our model as usual with the 80% of the data we reserved for training:
model = LinearRegression()
model.fit(x_training_data, y_training_data)
# Then have it predict y values for the held-out test X values
predicted_y = model.predict(x_test_data)
print("Mean-squared error: {:,.2f}".format(mean_squared_error(y_test_data, predicted_y)))
###Output
Mean-squared error: 23,858.72
###Markdown
Polynomial Multivariate Regression
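`PolynomialFeatures(degree=2)` used below augments the original columns with a bias term, their squares and all pairwise products. A tiny standalone illustration (the two-column input is made up):

```python
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[2.0, 3.0]])
print(PolynomialFeatures(degree=2).fit_transform(X))
# [[1. 2. 3. 4. 6. 9.]]  -> bias, a, b, a^2, a*b, b^2
```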
###Code
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=2)
x_poly_values = poly_reg.fit_transform(x_values)
poly_reg.fit(x_poly_values, y_values)
poly_model = LinearRegression()
poly_model.fit(x_poly_values, y_values)
predicted_y = poly_model.predict(x_poly_values)
print("Mean-squared error: {:,.2f}".format(mean_squared_error(y_values, predicted_y)))
###Output
Mean-squared error: 2,151.58
###Markdown
Evaluating with train/test split
###Code
x_training_data, x_test_data, y_training_data, y_test_data = train_test_split(x_values, y_values, test_size=0.2, random_state=0)
x_training_data = poly_reg.fit_transform(x_training_data)
x_test_data = poly_reg.fit_transform(x_test_data)
poly_model = LinearRegression()
poly_model.fit(x_training_data, y_training_data)
predicted_y = poly_model.predict(x_test_data)
print("Mean-squared error: {:,.2f}".format(mean_squared_error(y_test_data, predicted_y)))
###Output
Mean-squared error: 5,986.30
|
05 - Logistic Regression/Logistic_Regression.ipynb | ###Markdown
Import Library and dataset
###Code
import pandas as pd
dataset = pd.read_csv('https://raw.githubusercontent.com/algonacci/Data-Warehouse/main/online_raw.csv')
dataset.fillna(dataset.mean(), inplace = True)
from sklearn.preprocessing import LabelEncoder
LE = LabelEncoder()
dataset['Month'] = LE.fit_transform(dataset['Month'])
LE = LabelEncoder()
dataset['VisitorType'] = LE.fit_transform(dataset['VisitorType'])
X = dataset.drop(['Revenue'], axis = 1)
y = dataset['Revenue']
###Output
_____no_output_____
###Markdown
Splitting the data
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 12)
###Output
_____no_output_____
###Markdown
Make the model
###Code
from sklearn.linear_model import LogisticRegression
# Call the classifier
logreg = LogisticRegression()
# Fit the classifier to the training data
logreg = logreg.fit(X_train,y_train)
#Training Model: Predict
y_pred = logreg.predict(X_test)
###Output
C:\Python39\lib\site-packages\sklearn\linear_model\_logistic.py:814: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
###Markdown
Evaluation
###Code
print('Training Accuracy :', logreg.score(X_train, y_train))
print('Testing Accuracy :', logreg.score(X_test, y_test))
from sklearn.metrics import confusion_matrix, classification_report
print('\nClassification report')
cr = classification_report(y_test, y_pred)
print(cr)
print('\nConfusion matrix')
cm = confusion_matrix(y_test, y_pred)
print(cm)
###Output
Confusion matrix
[[2027 57]
[ 227 155]]
###Markdown
Visualization
###Code
confusion_matrix_df = pd.DataFrame((confusion_matrix(y_test, y_pred)), ('Positive', 'Negative'), ('Positive', 'Negative'))
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure()
heatmap = sns.heatmap(confusion_matrix_df, annot=True, annot_kws={'size': 14}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plt.title('Confusion Matrix for Training Model\n(Logistic Regression)', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
ROC Curve and AUC
###Code
ns_probs = [0 for _ in range(len(y_test))]
lr_probs = logreg.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
ns_auc = roc_auc_score(y_test, ns_probs)
lr_auc = roc_auc_score(y_test, lr_probs)
print('No Skill: ROC AUC=%.3f' % (ns_auc))
print('Logistic: ROC AUC=%.3f' % (lr_auc))
ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs)
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
# plot the roc curve for the model
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
plt.plot(lr_fpr, lr_tpr, marker='.', label='Logistic')
# axis labels
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# show the plot
plt.show()
###Output
_____no_output_____ |
Applications/Vision/Colorizer/Notebooks/GrayRemover.ipynb | ###Markdown
Setup Imports
###Code
import os
from glob import glob
from skimage.color import rgb2lab
from tqdm import tqdm_notebook
###Output
_____no_output_____
###Markdown
vai Modules
###Code
from vaiutils import path_consts
###Output
_____no_output_____
###Markdown
Define Useful Variables and Functions
###Code
for k, v in path_consts('COCO'):
exec(k + ' = v')
###Output
_____no_output_____
###Markdown
Load Data
###Code
filenames = glob(os.path.join(DIR_DATA, 'val2017', '*.jpg'))
###Output
_____no_output_____
###Markdown
Checks
###Code
assert len(glob(os.path.join(DIR_DATA, 'val2017', 'grayscale', '*.jpg'))) == 0, "Think grayscale images already moved.\nNo need to run notebook again!"
###Output
_____no_output_____
###Markdown
Move Grayscale Images
###Code
grayscale_files = []
threshold = 5
for filename in tqdm_notebook(filenames):
img = imread(filename)
if len(img.shape) == 2:
grayscale_files.append(filename)
continue
ab = np.take(rgb2lab(img / 255), [1, 2], -1)
ab = np.linalg.norm(ab, axis=-1)
if np.all(ab < threshold):
grayscale_files.append(filename)
os.makedirs(os.path.join(os.path.split(DIR_DATA)[0], 'grayscale'), exist_ok=True)
for filename in grayscale_files:
os.rename(filename, os.path.join(os.path.split(DIR_DATA)[0], 'grayscale', os.path.split(filename)[1]))
###Output
_____no_output_____ |
nytwire-gcp.ipynb | ###Markdown
Get an API Key by creating a The New York Times Developer account: https://developer.nytimes.com/accounts/create

Get Google Cloud credentials (assuming you have Google Cloud access): https://cloud.google.com/docs/authentication/getting-started
###Code
#!pip3 install webdrivermanager
#!webdrivermanager firefox chrome --linkpath /usr/local/bin
# Library imports and config
import os
import pandas as pd
import urllib3, requests
from google.cloud import language_v1
from google.cloud.language_v1 import enums
from tqdm import tqdm
from bokeh.io import output_notebook, show
from bokeh.models import (ColumnDataSource, HoverTool, LabelSet)
from bokeh.plotting import figure, output_file
from bokeh.palettes import Set3
#from bokeh.io import export_png
output_notebook()
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_colwidth', 0)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Google cloud credentials goes here if you don't already have declared
os.environ['GOOGLE_APPLICATION_CREDENTIALS']='gcp-creds.json'
# The New York Times API Key goes here
API_KEY="GenerateOneFrom-NYT"
# Function to get latest articles from NewYork Times Wire API and store it as Data Frame
def get_nyt_articles(API_KEY,limit=500):
url = "https://api.nytimes.com/svc/news/v3/content/all/all.json?api-key="+API_KEY+"&limit="+str(limit)
try:
page = requests.get(url, verify=False)
df_page=pd.json_normalize(page.json()['results'])[['slug_name','byline','section','item_type','material_type_facet','des_facet','org_facet','per_facet','geo_facet','title','abstract','first_published_date']]
df_page['first_published_date_parsed']=pd.to_datetime(df_page['first_published_date'],format='%Y-%m-%d %H:%M:%S').dt.tz_convert('Europe/London')
except:
print("Error generating articles from The New York Times")
return
return df_page
# Function to initialize GCP Natural Langugage Service client
def get_client(text_content):
client = language_v1.LanguageServiceClient()
type_ = enums.Document.Type.PLAIN_TEXT
language = "en"
document = {"content": text_content, "type": type_, "language": language}
encoding_type = enums.EncodingType.UTF8
return client, document, encoding_type
# Function to invoke google cloud analyze sentiment API
def analyze_sentiment(text_content,articleid):
try:
client, document, encoding_type = get_client(text_content)
response = client.analyze_sentiment(document, encoding_type=encoding_type)
results=[]
document={}
document['articleid']=articleid
document['level']='document'
document['sentiment_score']=float(response.document_sentiment.score)
document['sentiment_magnitude']=float(response.document_sentiment.magnitude)
document['language']=str(response.language)
results.append(document)
for sentence in response.sentences:
allsen={}
allsen['articleid']=articleid
allsen['level']='sentence'
allsen['sentence_text']=str(sentence.text.content)
allsen['sentiment_magnitude']=float(sentence.sentiment.magnitude)
allsen['sentiment_score']=float(sentence.sentiment.score)
results.append(allsen)
except:
print("Error generating sentiment analysis using Google cloud")
return results
# Function to iterate through New York times articles and call API and append results to a dataframe
def gcp_analyze_sentiment(df):
dfBase=pd.json_normalize(analyze_sentiment('Test','Doc1'))
for index,row in tqdm(df.iterrows()):
try:
results1=analyze_sentiment(str(row['title'])+". "+str(row['abstract']),row['slug_name'])
df1=pd.json_normalize(results1)
dfBase=pd.concat([dfBase,df1])
except:
print("Error analysing: {}".format(row['slug_name']))
return dfBase
def get_top10(dfOut,colname,ascending=False):
if not ascending:
# Prepare chart data - top 10 highest ranking sentiment score by individual setnences against articleid
dfX1=dfOut[dfOut.sentence_text.isnull()].groupby([colname]).mean().nlargest(10,'sentiment_score')[['sentiment_score']].index.str[:40].tolist()
dfY1=dfOut[dfOut.sentence_text.isnull()].groupby([colname]).mean().nlargest(10,'sentiment_score')[['sentiment_score']].values.round(2).tolist()
elif ascending:
# Prepare chart data - top 10 highest ranking sentiment score by individual setnences against articleid
dfX1=dfOut[dfOut.sentence_text.isnull()].groupby([colname]).mean().nsmallest(10,'sentiment_score')[['sentiment_score']].index.str[:40].tolist()
dfY1=dfOut[dfOut.sentence_text.isnull()].groupby([colname]).mean().nsmallest(10,'sentiment_score')[['sentiment_score']].values.round(2).tolist()
dfY1=[y[0] for y in dfY1]
return dfX1,dfY1
def generate_vis(dfX1,dfY1,xaxislabel):
# Visualize
sorted_score = sorted(dfX1, key=lambda x: dfY1[dfX1.index(x)])
source = ColumnDataSource(data=dict(sentences=dfX1, sentiment_score=dfY1, color=Set3[10]))
from math import pi
p = figure(x_range=sorted_score, y_range=(-1,1),height=800, width=1000, title="The New York Times Wire - Sentiment Analysis", toolbar_location=None)
p.vbar(x='sentences', top='sentiment_score', color='color', width=0.5, source=source)
hover_tool = HoverTool(tooltips=[("sentiment_score", "@sentiment_score")])
labels = LabelSet(x='sentences', y='sentiment_score', text='sentiment_score', source=source, render_mode='canvas')
p.add_tools(hover_tool)
p.xaxis.axis_label=xaxislabel
p.yaxis.axis_label="Sentiment Score"
p.xaxis.axis_label_text_font_size = "13pt"
p.yaxis.axis_label_text_font_size = "13pt"
p.xaxis.major_label_orientation = pi/3
p.xaxis.major_label_text_font_size = "13pt"
p.y_range.start = -1
p.add_layout(labels)
#export_png(p, filename=str(xaxislabel.strip())+".png")
show(p)
# Execute article extraction from NYT
df_page=get_nyt_articles(API_KEY)
if df_page is not None:
# Execute the sentiment analysis across google cloud platform - sample 10 articles
dfBase0=gcp_analyze_sentiment(df_page.sample(10))
# Join the score with original article extract
dfOut=dfBase0.join(df_page.set_index('slug_name'), on='articleid')
else:
# Read sample static file
dfOut = pd.read_csv('dfOut.csv')
# Get highest positive ranking Organisation
(dfX1,dfY1) = get_top10(dfOut,'org_facet',False)
generate_vis(dfX1,dfY1,"Ranked Positive by Organisation")
# Get highest negative ranking Organisation
(dfX1,dfY1) = get_top10(dfOut,'org_facet',True)
generate_vis(dfX1,dfY1,"Ranked Negative by Organisation")
# Get highest positive ranking Geo location
(dfX1,dfY1) = get_top10(dfOut,'geo_facet',False)
generate_vis(dfX1,dfY1,"Ranked Positive by Geo Location")
# Get highest negative ranking Geo location
(dfX1,dfY1) = get_top10(dfOut,'geo_facet',True)
generate_vis(dfX1,dfY1,"Ranked Negative by Geo Location")
# Get highest positive ranking Geo location
(dfX1,dfY1) = get_top10(dfOut,'des_facet',False)
generate_vis(dfX1,dfY1,"Ranked Positive by Entities")
# Get highest negative ranking Geo location
(dfX1,dfY1) = get_top10(dfOut,'des_facet',True)
generate_vis(dfX1,dfY1,"Ranked Negative by Entities")
# Get highest positive ranking Geo location
(dfX1,dfY1) = get_top10(dfOut,'title',False)
generate_vis(dfX1,dfY1,"Ranked Positive by Text")
# Get highest negative ranking Geo location
(dfX1,dfY1) = get_top10(dfOut,'title',True)
generate_vis(dfX1,dfY1,"Ranked Negative by Text")
###Output
_____no_output_____ |
examples/real-world-data/census/census.ipynb | ###Markdown
Read the dataset Before reading the dataset, download it using:```./dowload_data.sh```
###Code
import pickle

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.special import comb
from tqdm import tqdm
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

col_names = ["age", "workclass", "fnlwgt", "education", "education-num", "marital-status", "occupation", "relationship",
"race", "sex", "capital-gain", "capital-loss", "hours-per-week", "native-country", "class"]
df = pd.read_csv("adult.csv", sep=",", header=None, names=col_names, index_col=False)
train = df.head(int(len(df) * 0.7))
test = df.tail(len(df) - len(train))
resp_var = "class"
X_train = train.drop(resp_var, axis=1)
y_train = train[resp_var]
X_test = test.drop(resp_var, axis=1)
y_test = test[resp_var]
final_cols = []
categorical_feats = ["workclass", "marital-status", "occupation", "relationship", "race", "sex", "native-country"]
to_drop = ["education"]
transformers = []
for column in X_train.columns:
name = column
trans = "passthrough"
if column in categorical_feats:
trans = OneHotEncoder()
name = f"{column}_class"
elif column in to_drop:
trans = "drop"
transformers.append((name, trans, [f"{column}"]))
if trans != "drop":
final_cols.append(column)
ct = ColumnTransformer(transformers, remainder="passthrough")
ct.fit(X_train)
# Encoder for the labels
le = LabelEncoder()
le.fit(y_train)
X_train_trans = ct.transform(X_train)
X_test_trans = ct.transform(X_test)
y_train_trans = le.transform(y_train)
y_test_trans = le.transform(y_test)
###Output
_____no_output_____
###Markdown
Feature selection To compute the results run: `python forest_train.py` followed by `python feat_selection.py`
###Code
range_n_splines = range(1, 11)
range_n_inter = range(0, 9)
acc = np.load("precomputed/feat_selection.npy")
dimension = (len(range_n_splines), len(range_n_inter))
mask = np.zeros(dimension)
for i, n_splines in enumerate(tqdm(range_n_splines)):
for j, n_inter in enumerate(range_n_inter):
if n_inter > comb(n_splines, 2):
mask[i, j] = True
continue
accuracy_df = pd.DataFrame(acc, columns=range_n_inter, index=range_n_splines)
ax = sns.heatmap(accuracy_df, annot=True, mask=mask, cmap=sns.color_palette("Blues", as_cmap=True),
                 cbar_kws={'label': 'accuracy'})
ax.set_xlabel("Number of interaction terms used")
ax.set_ylabel("Number of splines used")
###Output
_____no_output_____
###Markdown
Sampling strategy To replicate the experiments run: `python forest_train.py` followed by `python sampling_analysis.py` Setup
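Before loading the precomputed accuracies, the first four strategies compared in this section can be illustrated roughly as below. This is a sketch with illustrative names only, not the script used to produce the precomputed results, and the equi-size variant is omitted.

```python
# Hedged sketch: candidate split thresholds for one numeric feature under
# different sampling strategies. `k` is the number of candidates kept.
import numpy as np
from sklearn.cluster import KMeans

def candidate_thresholds(x, k, method):
    x = np.asarray(x, dtype=float)
    if method == "all":        # every distinct observed value is a candidate
        return np.unique(x)
    if method == "quantile":   # k thresholds with roughly equal mass between them
        return np.quantile(x, np.linspace(0, 1, k + 2)[1:-1])
    if method == "equal":      # k equi-width thresholds over the feature range
        return np.linspace(x.min(), x.max(), k + 2)[1:-1]
    if method == "kmeans":     # sorted k-means centres as thresholds
        km = KMeans(n_clusters=k, n_init=10).fit(x.reshape(-1, 1))
        return np.sort(km.cluster_centers_.ravel())
    raise ValueError(f"unknown method: {method}")

# e.g. candidate_thresholds(X_train["age"], 10, "quantile")
```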
###Code
sampling_methods = ["all", "quantile", "equal", "kmeans", "equi_size"]
range_m = range(50, 5001, 250)
with open('precomputed/sampling_comparison.pickle', 'rb') as f:
acc_methods = pickle.load(f)
###Output
_____no_output_____
###Markdown
Plot
###Code
labels = [r"\emph{All-Thresholds}", r"\emph{Quantile}", r"\emph{Equi-Width}", r"\emph{$k$-Means}", "\emph{Equi-Size}"]
colors = sns.color_palette(n_colors=len(sampling_methods))
for i, sampling_method in enumerate(sampling_methods):
plt.plot(range_m, acc_methods[sampling_method], 'o--', color=colors[i], label=labels[i])
plt.xlabel("$K$")
plt.ylabel("Accuracy")
plt.legend()
###Output
_____no_output_____
###Markdown
Global explanation GEF To replicate the results run: `python forest_train.py` followed by `python final_explainer.py`
###Code
with open("precomputed/explainer.pickle", "rb") as f:
explainer = pickle.load(f)
final_cols = ct.get_feature_names_out().copy()
final_cols[14] = "MS-Married"
final_cols[47] = "CapitalGain"
final_cols[11] = "EducationNum"
final_cols[0] = "Age"
sample_index = 0
sample = X_train_trans[sample_index].reshape(1, -1)
n_row, n_col = 2, 2
fig = plt.figure(figsize=(13, 8), tight_layout=False)
lines = []
terms = [(i, x) for i, x in enumerate(explainer.gam.terms) if not x.isintercept and not x.istensor]
terms.sort(key=lambda x: x[1].feature)
c1, c2, c3 = sns.color_palette(n_colors=3)
plot_index = 0
axes = []
for i, term in enumerate(explainer.gam.terms):
if plot_index == 4:
break
if term.isintercept or term.istensor:
continue
ax = fig.add_subplot(n_row, n_col, plot_index + 1, sharey = axes[-1] if plot_index % n_col != 0 else None)
plt.setp(ax.get_yticklabels(), visible=plot_index % n_col == 0)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
print(term.feature)
# Spline print
grid = explainer.gam.generate_X_grid(term=i, meshgrid=term.istensor)
pdep, confi = explainer.gam.partial_dependence(term=i, X=grid, width=0.95, meshgrid=term.istensor)
conf_u = ax.plot(grid[:, term.feature], confi[:, 0], ls="--", c=c2, zorder=1)
conf_l = ax.plot(grid[:, term.feature], confi[:, 1], label="95% width confidence interval", ls="--", c=c2, zorder=1)
l1 = ax.plot(grid[:, term.feature], pdep, label="Spline learned", lw=2, c=c1, zorder=2)
ax.set_title(final_cols[term.feature])
# Print the sample
"""
x_point = sample[0, term.feature] # col vector
y_point = explainer.gam.partial_dependence(term=i, X=sample)
plt.vlines(x_point, ax.get_ylim()[0], y_point, linestyle="dashed", color=c3)
plt.hlines(y_point, ax.get_xlim()[0], x_point, linestyle="dashed", color=c3)
ax.scatter(x_point, y_point, label="Sample under investigation", color=c3, zorder=3)
"""
plot_index += 1
axes.append(ax)
plt.subplots_adjust(hspace=0.3)
file_out = "plots/generators.pdf"
params = {'legend.fontsize': 18,
'figure.figsize': (20, 5),
'axes.titlesize': 18,
'xtick.labelsize': 20,
'ytick.labelsize': 20}
plt.rcParams.update(params)
plt.legend(loc='upper center', bbox_to_anchor=(-0.35, 2.7), ncol=3)
plt.savefig(file_out)
###Output
_____no_output_____
###Markdown
SHAP To replicate the results run: `python forest_train.py` followed by `python compute_shap.py` Setup
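The pickled values loaded below could have been produced along these lines (a sketch only: `forest` stands for whatever tree model `forest_train.py` fits, and the exact settings behind the precomputed files may differ).

```python
# Sketch of the SHAP computation, assuming a tree-based classifier `forest`
# trained on X_train_trans (depending on the encoder settings, the matrix may
# need to be densified first).
import shap

shap_explainer = shap.TreeExplainer(forest)
shap_values = shap_explainer(X_train_trans)   # Explanation: (n_samples, n_features, n_classes)
```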
###Code
with open('precomputed/shap_values_training.pickle', 'rb') as f:
shap_values = pickle.load(f)
with open('precomputed/shap_explainer_training.pickle', 'rb') as f:
shap_explainer = pickle.load(f)
###Output
_____no_output_____
###Markdown
Plot
###Code
# visualize the first prediction's explanation
shap.plots.force(shap_explainer.expected_value[1], shap_values.values[0, :, 1], matplotlib=True)
shap_values = shap_values[:, :, 1]
n_row, n_col = 2, 2
fig = plt.figure(figsize=(13, 8))
lines = []
terms = [(i, x) for i, x in enumerate(explainer.gam.terms) if not x.isintercept and not x.istensor]
terms.sort(key=lambda x: x[1].feature)
c1, c2, c3 = sns.color_palette(n_colors=3)
plot_index = 0
axes = []
for i, term in enumerate(explainer.gam.terms):
if plot_index == 4:
break
if term.isintercept or term.istensor:
continue
ax = fig.add_subplot(n_row, n_col, plot_index + 1, sharey = axes[-1] if plot_index % n_col != 0 else None)
# Shap scatter print
shap.plots.scatter(shap_values[:, term.feature], ax=ax, show=False, hist=False, color=c1)
shap_plot = ax
plt.setp(ax.get_yticklabels(), visible=plot_index % n_col == 0)
ax.tick_params(labelsize=20)
ax.set_ylabel("")
ax.set_xlabel("")
ax.set_title(final_cols[term.feature])
# Print the sample
"""
x_point = shap_values[sample_index, term.feature].data
y_point = shap_values[sample_index, term.feature].values
plt.vlines(x_point, ax.get_ylim()[0], y_point, linestyle="dashed", color=c2)
plt.hlines(y_point, ax.get_xlim()[0], x_point, linestyle="dashed", color=c2)
sample_plot = ax.scatter(x_point, y_point, label="Sample under investigation", color=c2, zorder=3)
"""
plot_index += 1
axes.append(ax)
params = {'legend.fontsize': 18,
'figure.figsize': (20, 5),
'axes.titlesize': 18}
plt.rcParams.update(params)
plt.subplots_adjust(hspace=0.3)
file_out = "plots/shap.pdf"
dummy_shap_plot = Line2D([0], [0], marker='o', color=c1, label='SHAP values', lw=0)
plt.legend(handles=[dummy_shap_plot], loc='upper center', bbox_to_anchor=(-1.0, 2.7), ncol=3, fontsize=14)
plt.savefig(file_out)
###Output
_____no_output_____ |
notebooks/datafest_2016_03_21/Baseball Regression Modeling Managing Multicollinearity with PCA.ipynb | ###Markdown
Baseball Regression Modeling Managing Multicollinearity with PCA Learning Objectives Use Principal Components Decomposition/Analysis to model out multicollinearity * identify multicollinearity in the covariates * fit and interpret PCA * run regression on principal components Imports
###Code
import pandas as pd
import statsmodels.api as sms
import sklearn.decomposition as dcmp
%matplotlib inline
###Output
_____no_output_____
###Markdown
Get Data and Subset Data
###Code
# retrieve csv file and store to dataframe
df = pd.read_csv('baseball_data.csv')
# subset the dataframe removing rows with NULL values
bix = df.notnull().all(axis=1)
df = df[bix]
###Output
_____no_output_____
###Markdown
A First Regression Model Using All Variables The hope here is that we can get a batch look at how the variables relate to the target. This unfortunately fails due to tight correlations within the covariates.
###Code
model = sms.OLS(df.salary_in_thousands_of_dollars, sms.add_constant(df.iloc[:, 1:]))
###Output
_____no_output_____
###Markdown
Notes: * we achieve a fairly high R2 right off the bat with this approach * several variables are significant or nearly so - on_base_percentage - number_of_runs - number_of_runs_batted_in - number_of_strike_outs - number_of_stolen_bases - indicator_of_free_agency_eligibility - indicator_of_free_agent_in_1991_1992 - indicator_of_arbitration_eligibility - indicator_of_arbitration_in_1991_1992 * there are a large number of variables, so how do we know what should be in the model and what should be out? * warning 2 in the printed output below states there may be high multicollinearity (high correlation between covariates); a quick VIF check is sketched below
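A common way to quantify the multicollinearity flagged by warning 2 is the variance inflation factor (VIF). The check below is a sketch using the `df` defined above; it is not part of the original analysis.

```python
# Variance inflation factors for the covariates; values well above ~10 are a
# rule-of-thumb sign of problematic collinearity.
import pandas as pd
import statsmodels.api as sms
from statsmodels.stats.outliers_influence import variance_inflation_factor

covariates = sms.add_constant(df.iloc[:, 1:])
vifs = pd.Series([variance_inflation_factor(covariates.values, i)
                  for i in range(covariates.shape[1])],
                 index=covariates.columns)
print(vifs.drop('const', errors='ignore').sort_values(ascending=False).head(10))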
###Code
result = model.fit()
print(result.summary())
###Output
OLS Regression Results
==========================================================================================
Dep. Variable: salary_in_thousands_of_dollars R-squared: 0.711
Model: OLS Adj. R-squared: 0.696
Method: Least Squares F-statistic: 47.90
Date: Mon, 21 Mar 2016 Prob (F-statistic): 4.71e-74
Time: 17:01:02 Log-Likelihood: -2602.7
No. Observations: 329 AIC: 5239.
Df Residuals: 312 BIC: 5304.
Df Model: 16
Covariance Type: nonrobust
=========================================================================================================
coef std err t P>|t| [95.0% Conf. Int.]
---------------------------------------------------------------------------------------------------------
const 358.1683 328.898 1.089 0.277 -288.971 1005.307
batting_average 2888.8905 2791.277 1.035 0.301 -2603.216 8380.997
on_base_percentage -3837.4642 2399.913 -1.599 0.111 -8559.525 884.596
number_of_runs 8.9410 5.541 1.614 0.108 -1.962 19.844
number_of_hits -2.6934 3.291 -0.818 0.414 -9.168 3.782
number_of_doubles -2.1494 8.467 -0.254 0.800 -18.810 14.511
number_of_triples -22.7478 21.212 -1.072 0.284 -64.485 18.989
number_of_home_runs 12.7920 12.377 1.034 0.302 -11.561 37.145
number_of_runs_batted_in 19.2680 4.975 3.873 0.000 9.480 29.056
number_of_walks 5.6493 4.474 1.263 0.208 -3.154 14.453
number_of_strike_outs -9.9699 2.106 -4.733 0.000 -14.114 -5.825
number_of_stolen_bases 12.4465 4.654 2.674 0.008 3.289 21.604
number_of_errors -10.3114 7.380 -1.397 0.163 -24.832 4.209
indicator_of_free_agency_eligibility 1401.9627 106.539 13.159 0.000 1192.337 1611.588
indicator_of_free_agent_in_1991_1992 -389.6385 136.471 -2.855 0.005 -658.158 -121.119
indicator_of_arbitration_eligibility 801.9127 116.551 6.880 0.000 572.588 1031.238
indicator_of_arbitration_in_1991_1992 398.7514 247.671 1.610 0.108 -88.565 886.067
==============================================================================
Omnibus: 29.533 Durbin-Watson: 1.591
Prob(Omnibus): 0.000 Jarque-Bera (JB): 61.562
Skew: 0.480 Prob(JB): 4.29e-14
Kurtosis: 4.889 Cond. No. 1.45e+04
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.45e+04. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
Look at Correlations in the Covariates We should look at correlations in the covariates. Chaining .style.background_gradient(cmap='Blues') on the end of the corr() call styles the correlation matrix to help us find highly correlated items. Notes: * there are high correlations between salary and a number of variables that are not significant in the first model * there is low correlation between a number of the variables which appear significant and salary * there is high correlation between some of the variables which are highly correlated with salary and which do not show up as significant * these correlations between the covariates may be causing us issues
###Code
df.corr().style.background_gradient(cmap='Blues')
###Output
_____no_output_____
###Markdown
Managing Correlations in Covariates Create and Fit a PCA Object
###Code
pca = dcmp.PCA()
pca.fit(df.iloc[:, 1:])
###Output
_____no_output_____
###Markdown
Review the Components If the model is meant to be interpreted and not just provide predictions, then we need to provide an interpretation of the variables.Below we review the PCs and see: - the variables number_of_THIS where THIS is a thing that happens while batting are all heavily loaded in PC1 - number_of_hits and number_of_runs are loaded heavy positive and home_runs, rbis, walks, and strike_outs are loaded heavy negative for PC2Our interpretation for the first two PCs is as follows: - PC1 describes number of at bats * as noted above the variance explained here is all of the variety count of things while at bat - PC2 describes at bat efficacy * here the major loadings are positive number_of_hits and negative number_of_strike_outs (the loadings are "competing")
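To back up this reading with numbers, the covariates with the largest absolute loadings on each component can be listed directly (a small added check using the `pca` object fit above).

```python
# Rank covariates by loading magnitude on PC1 and PC2.
import pandas as pd

cov_names = df.iloc[:, 1:].columns
for pc in range(2):
    load = pd.Series(pca.components_[pc], index=cov_names)
    top = load.reindex(load.abs().sort_values(ascending=False).index).head(5)
    print('PC' + str(pc + 1))
    print(top, '\n')
```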
###Code
pd.DataFrame(pca.components_.T,
columns=['PC' + str(i+1) for i in range(pca.components_.shape[1])],
index=df.iloc[:, 1:].columns)
###Output
_____no_output_____
###Markdown
Review the Scree Plot
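Alongside the scree plot, the cumulative explained variance ratio gives a quick numeric view of how many components are worth keeping (a short added check).

```python
# Share of total variance captured by the leading principal components.
import numpy as np

cum_var = np.cumsum(pca.explained_variance_ratio_)
print(cum_var[:5].round(3))
```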
###Code
pd.Series(pca.explained_variance_ratio_).plot(title='Scree Plot');
###Output
_____no_output_____
###Markdown
Get the Transformed Data
###Code
transformed = pca.fit_transform(df.iloc[:, 1:])
reduced = transformed[:, :2]
data = pd.DataFrame(reduced, columns=['p1', 'p2'])
###Output
_____no_output_____
###Markdown
Fit Regression on the Transformed Data
###Code
model = sms.OLS(df.salary_in_thousands_of_dollars.reset_index().drop('index', axis=1), sms.add_constant(data))
results = model.fit()
print(results.summary())
###Output
OLS Regression Results
==========================================================================================
Dep. Variable: salary_in_thousands_of_dollars R-squared: 0.425
Model: OLS Adj. R-squared: 0.421
Method: Least Squares F-statistic: 120.4
Date: Mon, 21 Mar 2016 Prob (F-statistic): 6.90e-40
Time: 17:01:25 Log-Likelihood: -2715.8
No. Observations: 329 AIC: 5438.
Df Residuals: 326 BIC: 5449.
Df Model: 2
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [95.0% Conf. Int.]
------------------------------------------------------------------------------
const 1248.3647 51.532 24.225 0.000 1146.988 1349.741
p1 -10.8503 0.710 -15.279 0.000 -12.247 -9.453
p2 5.7921 2.127 2.723 0.007 1.608 9.976
==============================================================================
Omnibus: 15.761 Durbin-Watson: 1.168
Prob(Omnibus): 0.000 Jarque-Bera (JB): 18.630
Skew: 0.438 Prob(JB): 9.00e-05
Kurtosis: 3.769 Cond. No. 72.6
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
|
prediction/multitask/pre-training/source code summarization/python/small_model.ipynb | ###Markdown
**Summarize the Python source code using the CodeTrans multitask training model**You can make predictions online for free through this link (when using the online prediction, you need to parse and tokenize the code first). **1. Load necessary libraries including huggingface transformers**
###Code
!pip install -q transformers sentencepiece
from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline
###Output
_____no_output_____
###Markdown
**2. Load the summarization pipeline and load it onto the GPU if available**
###Code
pipeline = SummarizationPipeline(
model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_small_source_code_summarization_python_multitask"),
tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_small_source_code_summarization_python_multitask", skip_special_tokens=True),
device=0
)
###Output
/usr/local/lib/python3.6/dist-packages/transformers/models/auto/modeling_auto.py:852: FutureWarning: The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.
FutureWarning,
###Markdown
**3. Give the code for summarization, then parse and tokenize it**
###Code
code = '''with open("file.txt", "r") as in_file:\n buf = in_file.readlines()\n\nwith open("file.txt", "w") as out_file:\n for line in buf:\n if line == "; Include this text\n":\n line = line + "Include below\n"\n out_file.write(line)''' #@param {type:"raw"}
import tokenize
import io
def pythonTokenizer(line):
result= []
line = io.StringIO(line)
for toktype, tok, start, end, line in tokenize.generate_tokens(line.readline):
if (not toktype == tokenize.COMMENT):
if toktype == tokenize.STRING:
result.append("CODE_STRING")
elif toktype == tokenize.NUMBER:
result.append("CODE_INTEGER")
elif (not tok=="\n") and (not tok==" "):
result.append(str(tok))
return ' '.join(result)
tokenized_code = pythonTokenizer(code)
print("code after tokenization " + tokenized_code)
###Output
code after tokenization with open ( CODE_STRING , CODE_STRING ) as in_file : buf = in_file . readlines ( ) with open ( CODE_STRING , CODE_STRING ) as out_file : for line in buf : if line == " ; Include this text " : line = line + " Include below " out_file . write ( line )
###Markdown
**4. Make Prediction**
###Code
pipeline([tokenized_code])
###Output
Your max_length is set to 512, but you input_length is only 81. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)
|
Homework4/kharih2.ipynb | ###Markdown
Tweet Processing FunctionsBelow are the definitions of the functions that are used to process the tweets before being sent on to feature extraction
###Code
!pip install --upgrade nltk
!pip install matplotlib
%matplotlib inline
import re
import matplotlib.pyplot as plt
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from nltk import bigrams
with open('./data/stopwords.txt') as stopword_file:
stopwords = {stopword.strip(): 1 for stopword in stopword_file.readlines()}
porter_stemmer = PorterStemmer()
def basic_cleaning(tweet):
clean_tweet = [word.lower().strip().strip('\'').strip('"') for word in tweet]
clean_tweet = [word.replace('.', '').replace('?', '').replace(',', '') for word in clean_tweet]
clean_tweet = [word.replace('#', '').replace('!', '').replace('\'', '') for word in clean_tweet]
clean_tweet = [word.replace('"', '').replace('...', ' ').replace('..', ' ') for word in clean_tweet]
clean_tweet = [word.replace('-', '').replace('.', ' ') for word in clean_tweet]
clean_tweet = list(filter(lambda word: word != '', clean_tweet))
return clean_tweet
def remove_non_alpha_starting_words(tweet):
non_alpha_start_words_regex = '(^|\s)[^a-zA-Z]\w*($|\s)'
clean_tweet = [re.sub(non_alpha_start_words_regex, '', word) for word in tweet]
clean_tweet = list(filter(lambda word: word != '', clean_tweet))
return clean_tweet
def remove_stopwords(tweet):
tweet_without_stopwords = []
for word in tweet:
if word.find(' ') == -1:
new_word = word if word not in stopwords else ''
else:
new_word = ' '.join([w for w in word.split() if w not in stopwords])
tweet_without_stopwords.append(new_word)
tweet_without_stopwords = list(filter(lambda word: word != '', tweet_without_stopwords))
tweet_without_stopwords = list(set(tweet_without_stopwords))
return tweet_without_stopwords
def replace_urls(tweet):
    http_url_regex = 'https?:\/\/.*'
www_url_regex = 'www\.\w+.*'
clean_tweet = [re.sub(http_url_regex, 'URL', word) for word in tweet]
clean_tweet = [re.sub(www_url_regex, 'URL', word) for word in clean_tweet]
clean_tweet = list(filter(lambda word: word != '', clean_tweet))
return clean_tweet
def replace_user_handles(tweet):
user_handle_regex = '@.*'
clean_tweet = [re.sub(user_handle_regex, 'AT_USER', word) for word in tweet]
clean_tweet = list(filter(lambda word: word != '', clean_tweet))
return clean_tweet
def replace_repeated_characters(tweet):
repeated_character_regex = '(\w)\\1{2,}'
clean_tweet = [re.sub(repeated_character_regex, r'\1\1', word) for word in tweet]
clean_tweet = list(filter(lambda word: word != '', clean_tweet))
return clean_tweet
def generate_bigrams(tweet):
bigram_tweet = bigrams(tweet)
bigram_tweet = [bigram[0] + ' ' + bigram[1] for bigram in bigram_tweet]
bigram_tweet.extend(tweet)
return bigram_tweet
def stem_tweet(tweet, stemmer):
stemmed_tweet = []
for word in tweet:
if word.find(' ') == -1:
stemmed_word = stemmer.stem(word)
else:
stemmed_word = ' '.join([stemmer.stem(w) for w in word.split()])
stemmed_tweet.append(stemmed_word)
return stemmed_tweet
clean_tweet_sentiment_rdd = tweet_rdd.map(lambda record: (record[0], replace_urls(record[1]))) \
.map(lambda record: (record[0], replace_user_handles(record[1]))) \
.map(lambda record: (record[0], basic_cleaning(record[1]))) \
.map(lambda record: (record[0], remove_non_alpha_starting_words(record[1]))) \
.map(lambda record: (record[0], replace_repeated_characters(record[1]))) \
.map(lambda record: (record[0], generate_bigrams(record[1]))) \
.map(lambda record: (record[0], remove_stopwords(record[1]))) \
.map(lambda record: (record[0], stem_tweet(record[1], porter_stemmer)))
###Output
_____no_output_____
###Markdown
Feature Extraction using HashingTF and IDFThis section consists of extracting features to be fed into our algorithms to generate the classifier
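As a quick illustration of the hashing trick used below (a toy example, separate from the tweet pipeline): `HashingTF` maps each token to one of a fixed number of buckets and counts occurrences, so no explicit vocabulary has to be stored.

```python
# Toy HashingTF example on a single tokenized "tweet" (illustrative only).
from pyspark.mllib.feature import HashingTF

toy_tf = HashingTF(16)                          # 16 buckets instead of 100000
print(toy_tf.transform(["great", "movie", "great"]))
# SparseVector(16, {...}) with a count of 2.0 in whichever bucket "great" hashes to
```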
###Code
from pyspark.mllib.feature import HashingTF, IDF
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
clean_tweet_rdd = clean_tweet_sentiment_rdd.map(lambda record: record[1])
hashingtf = HashingTF(100000)
tf = hashingtf.transform(clean_tweet_rdd)
tf.cache()
idf = IDF(minDocFreq=2).fit(tf)
tfidf = idf.transform(tf)
tf_idf_sentiment = clean_tweet_sentiment_rdd.map(lambda record: record[0]).zip(tfidf) \
.map(lambda record: LabeledPoint(record[0], record[1]))
training = tf_idf_sentiment
###Output
_____no_output_____
###Markdown
Naive BayesThis section consists of the training of a Naive Bayes model and checking its accuracy value
###Code
nb_model = NaiveBayes.train(training, 1.0)
nb_labels_and_preds = training.map(lambda record: (nb_model.predict(record.features), record.label))
nb_accuracy = nb_labels_and_preds.filter(lambda x: x[0] == x[1]).count() / training.count()
nb_accuracy
###Output
_____no_output_____
###Markdown
Logistic RegressionThis following section describes the Logistic Regression steps
###Code
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel
lr_model = LogisticRegressionWithLBFGS.train(training, regType='l2')
lr_labels_and_preds = training.map(lambda record: (record.label, lr_model.predict(record.features)))
trainErr = lr_labels_and_preds.filter(lambda record: record[0] != record[1]).count() / float(training.count())
trainErr
def k_fold_cross_validation(k_value, model_type, data):
split_ratio = [0.1 for i in range(0, k_value)]
data_split = data.randomSplit(split_ratio)
test_index = 0
avg_accuracy = 0
best_model = None
for test_index in range(0, k_value):
print(str(test_index) + 'th iteration')
training_list = [i for index, i in enumerate(data_split) if index != test_index]
        training_rdd = sc.emptyRDD()
        # accumulate the k-1 training folds (union returns a new RDD, so reassign)
        for training_split in training_list:
            training_rdd = training_rdd.union(training_split)
        model = model_type.train(training_rdd, 1.0)
accuracy = test_with_model(model, data_split[test_index])
avg_accuracy += accuracy
avg_accuracy /= k_value
return avg_accuracy
def test_with_model(model, test):
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = predictionAndLabel.filter(lambda x: x[0] == x[1]).count() / test.count()
return accuracy
NB_avg = k_fold_cross_validation(10, NaiveBayes, tf_idf_sentiment)
NB_avg
LR_avg = k_fold_cross_validation(10, LogisticRegressionWithLBFGS, tf_idf_sentiment)
LR_avg
###Output
0th iteration
1th iteration
2th iteration
3th iteration
4th iteration
5th iteration
6th iteration
7th iteration
8th iteration
9th iteration
###Markdown
Running classifiers on Test DataThis section shows the results obtained by running the generated models on the test data
###Code
test_rdd = sc.textFile('./data/test.csv') \
.map(parse_line) \
.map(lambda record: (int(record[0]), record[1].split()))
test_sentiment_rdd = test_rdd.map(lambda record: (record[0], replace_urls(record[1]))) \
.map(lambda record: (record[0], replace_user_handles(record[1]))) \
.map(lambda record: (record[0], basic_cleaning(record[1]))) \
.map(lambda record: (record[0], remove_non_alpha_starting_words(record[1]))) \
.map(lambda record: (record[0], replace_repeated_characters(record[1]))) \
.map(lambda record: (record[0], generate_bigrams(record[1]))) \
.map(lambda record: (record[0], remove_stopwords(record[1]))) \
.map(lambda record: (record[0], stem_tweet(record[1], porter_stemmer)))
clean_test_rdd = test_sentiment_rdd.map(lambda record: record[1])
test_tf = hashingtf.transform(clean_test_rdd)
test_tf.cache()
test_tfidf = idf.transform(test_tf)
test_tfidf_sentiment = test_sentiment_rdd.map(lambda record: record[0]).zip(test_tfidf) \
.map(lambda record: LabeledPoint(record[0], record[1]))
nb_labels_and_preds = test_tfidf_sentiment.map(lambda p: (nb_model.predict(p.features), p.label))
accuracy = nb_labels_and_preds.filter(lambda x: x[0] == x[1]).count() / test_tfidf_sentiment.count()
accuracy
lr_model = LogisticRegressionWithLBFGS.train(training, regType='l2')
lr_labels_and_preds = test_tfidf_sentiment.map(lambda record: (record.label, lr_model.predict(record.features)))
trainErr = lr_labels_and_preds.filter(lambda record: record[0] != record[1]).count() / float(test_tfidf_sentiment.count())
trainErr
###Output
_____no_output_____
###Markdown
Evaluation of ClassifiersThis section covers capturing of the various performance metrics that the algorithms achieve on the test set.
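Precision and recall per class are computed below; if a single summary number is wanted they can be combined into an F1 score. The helper here is an addition and assumes the dictionaries returned by the functions defined in the next cell.

```python
# Harmonic mean of precision and recall.
def f1_score(p, r):
    return 2 * p * r / (p + r) if (p + r) > 0 else 0.0

# e.g. f1_score(nb_precisions['p1'], nb_recalls['r1']) after the cells below have run
```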
###Code
def precision(labels_and_preds):
classified_1s = list(filter(lambda line: line[1] == 1, labels_and_preds))
classified_0s = list(filter(lambda line: line[1] == 0, labels_and_preds))
correctly_predicted_1s = len(list(filter(lambda line: line[0] == line[1], classified_1s)))
correctly_predicted_0s = len(list(filter(lambda line: line[0] == line[1], classified_0s)))
precision_1s = correctly_predicted_1s / len(classified_1s)
precision_0s = correctly_predicted_0s / len(classified_0s)
return {'p1': precision_1s, 'p0': precision_0s}
def recall(labels_and_preds):
actual_1s = list(filter(lambda line: line[0] == 1, labels_and_preds))
actual_0s = list(filter(lambda line: line[0] == 0, labels_and_preds))
correctly_predicted_1s = len(list(filter(lambda line: line[0] == line[1], actual_1s)))
correctly_predicted_0s = len(list(filter(lambda line: line[0] == line[1], actual_0s)))
recall_1s = correctly_predicted_1s / len(actual_1s)
recall_0s = correctly_predicted_0s / len(actual_0s)
return {'r1': recall_1s, 'r0': recall_0s}
def generate_confusion_matrix(labels_and_preds):
actual_1s = list(filter(lambda line: line[0] == 1, labels_and_preds))
actual_0s = list(filter(lambda line: line[0] == 0, labels_and_preds))
correctly_predicted_1s = len(list(filter(lambda line: line[0] == line[1], actual_1s)))
correctly_predicted_0s = len(list(filter(lambda line: line[0] == line[1], actual_0s)))
incorrectly_predicted_1s = len(list(filter(lambda line: line[0] != line[1], actual_1s)))
incorrectly_predicted_0s = len(list(filter(lambda line: line[0] != line[1], actual_0s)))
return {'correct_1s': correctly_predicted_1s, 'correct_0s': correctly_predicted_0s, \
'incorrect_1s': incorrectly_predicted_1s, 'incorrect_0s': incorrectly_predicted_0s}
nb_precisions = precision(nb_labels_and_preds.collect())
lr_precisions = precision(lr_labels_and_preds.collect())
nb_recalls = recall(nb_labels_and_preds.collect())
lr_recalls = recall(lr_labels_and_preds.collect())
nb_precisions['p1'], nb_precisions['p0']
nb_recalls['r1'], nb_recalls['r0']
lr_precisions['p1'], lr_precisions['p0']
lr_recalls['r1'], lr_recalls['r0']
nb_conf_matrix = generate_confusion_matrix(nb_labels_and_preds.collect())
lr_conf_matrix = generate_confusion_matrix(lr_labels_and_preds.collect())
nb_conf_matrix
lr_conf_matrix
###Output
_____no_output_____
###Markdown
Calculating Tweets with the Highest Prediction ProbabilitiesThis section calculates the Tweets with the highest prediction probabilities for Logistic Regression
###Code
from operator import itemgetter
lr_model = LogisticRegressionWithLBFGS.train(training, regType='l2')
lr_labels_and_preds = test_tfidf_sentiment.map(lambda record: (record.label, lr_model.predict(record.features)))
trainErr = lr_labels_and_preds.filter(lambda record: record[0] != record[1]).count() / float(test_tfidf_sentiment.count())
lr_lnpred_list = lr_labels_and_preds.collect()
lr_model.clearThreshold()
lr_labels_and_probs = test_tfidf_sentiment.map(lambda record: (record.label, lr_model.predict(record.features)))
lr_lnprob_list = lr_labels_and_probs.collect()
def calculate_highest_probs(labels_and_preds, labels_and_probs, tweets_list):
lr_lnpredprob_list = [(r1[0], r1[1], r2[1])for r1, r2 in zip(labels_and_preds, labels_and_probs)]
lr_lnpredprob_list = [(index, val[0], val[1], val[2]) for index, val in enumerate(lr_lnpredprob_list)]
lr_pred_prob_correct = list(sorted(filter(lambda record: record[1] == record[2], lr_lnpredprob_list), key=itemgetter(3), \
reverse=True))
lr_pred_prob_incorrect = list(sorted(filter(lambda record: record[1] != record[2], lr_lnpredprob_list), key=itemgetter(3), \
reverse=True))
tweets_with_pred_probs = {'correct': [], 'incorrect': []}
for record in lr_pred_prob_correct[:5]:
tweets_with_pred_probs['correct'].append({'text': ' '.join(tweets_list[record[0]][1]), 'prob': record[3]})
for record in lr_pred_prob_incorrect[:5]:
tweets_with_pred_probs['incorrect'].append({'text': ' '.join(tweets_list[record[0]][1]), 'prob': record[3]})
return tweets_with_pred_probs
highest_prob_tweets = calculate_highest_probs(lr_lnpred_list, lr_lnprob_list, tweet_rdd.collect())
highest_prob_tweets
###Output
_____no_output_____ |
Linear_Regression/my_linear_regression.ipynb | ###Markdown
Visualization of the normalized data
###Code
plt.scatter(x, y)
plt.xlabel('Comprimento sepalas')
plt.ylabel('Comprimento petalas')
plt.show()
###Output
_____no_output_____
###Markdown
The compute_cost function
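For reference (an added note), the cost implemented below is the mean squared error; the division by $M$ corresponds to the `.mean()` call in the code:

$$ J(\mathbf{w}) = \frac{1}{M}\sum_{i=1}^{M}\left(\mathbf{x}^{(i)}\mathbf{w}^T - y^{(i)}\right)^2 = \frac{1}{M}\left\lVert \mathbf{X}\mathbf{w}^T - \mathbf{y}\right\rVert^2 $$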
###Code
def compute_cost(X_b, y, wT):
'''
Compute cost for linear regression
(X,y): amostras rotuladas X(n_samples,2) e y(n_samples,)
wT: vetor coluna de parâmetros (já transposto)
aceita tanto shape (2,1) Para um caso como (2,n_history) para n_history casos
'''
e = X_b.dot(wT) - y
J = (e * e).mean(axis=0)
return J
###Output
_____no_output_____
###Markdown
Batch Gradient Descent
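The update applied at every epoch in the function below can be written as (added note; $\alpha$ is the learning rate `lr` and $M$ the number of samples):

$$ \mathbf{w}^T \leftarrow \mathbf{w}^T - \alpha\,\frac{2}{M}\,\mathbf{X}^T\left(\mathbf{X}\mathbf{w}^T - \mathbf{y}\right) $$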
###Code
def batch_gradient_descent(X, y, w_init, lr, n_epochs):
'''
Perform batch gradient descent to learn w
by taking n_epochs gradient steps with learning
rate lr
    Labelled data: X.shape:(M,2) and y.shape:(M,1)
    Initial parameter: w_init.shape:(1,2)
output: w_history:(num_epochs+1,2)
'''
M = y.size
w_history = np.zeros((2,n_epochs+1))
wT = w_init.T
w_history[:,0] = wT[:,0]
for i in range(n_epochs):
wT = wT - lr * (2.0 / M) * (X.T).dot(X.dot(wT) - y)
w_history[:,i+1]=wT[:,0]
return w_history.T
###Output
_____no_output_____
###Markdown
Gradient descent minimization loop - Code
###Code
# Gradient descent parameters: number of iterations and learning rate
iterations = 100
lr = 0.1
n_samples = y.shape[0]
# Assemble the X matrix, now with 1s in the first column
X_bias = np.hstack([np.ones(shape=(n_samples,1)), x])
w_init = np.zeros(shape=(1,2))
w_history = batch_gradient_descent(X_bias, y, w_init, lr, iterations)
#print(w_history)
w_gd = w_history[::-1][0:1] # take the last value from gradient descent
print('w_gd:\n', w_gd)
print('final cost (gradiente descendente):', compute_cost(X_bias, y, w_gd.T))
J_history = compute_cost(X_bias, y, w_history.T)
print(J_history[:10])
###Output
w_gd:
[[ 0.34857359 0.51798724]]
final cost (gradiente descendente): [ 0.02286703]
[ 0.40907029 0.24348202 0.1505187 0.09829049 0.0689112 0.05234888
0.04297685 0.03763921 0.03456589 0.03276406]
###Markdown
Plot of the minimization curve of J against the number of iterations
###Code
plt.plot(J_history) # a log y-scale could be used to amplify the low values
#plt.ylim(0,1)
plt.show()
###Output
_____no_output_____
###Markdown
Comparison with the analytical solution The analytical solution of this problem is given by:$$ \mathbf{w} =(\mathbf{X}^T \mathbf{X})^{-1}\mathbf{X}^T \mathbf{y} $$
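A side note: the next cell forms $(\mathbf{X}^T\mathbf{X})^{-1}$ explicitly, which is fine with only two parameters but can be numerically fragile in general; `np.linalg.lstsq` solves the same least-squares problem more robustly (added sketch, for comparison only).

```python
# Equivalent, more stable route to the same optimum.
import numpy as np

w_lstsq, *_ = np.linalg.lstsq(X_bias, y, rcond=None)
print(w_lstsq)   # should agree with the normal-equation solution computed below
```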
###Code
w_opt = (np.linalg.inv((X_bias.T).dot(X_bias)).dot(X_bias.T)).dot(y)
print(w_opt)
print('final cost (analítica):', compute_cost(X_bias, y, w_opt))
###Output
[[ 0.26134159]
[ 0.68646976]]
final cost (analítica): [ 0.02116942]
###Markdown
Plot of the prediction line (analytical solution and gradient descent solution)
###Code
plt.scatter(x, y, marker='o', c='b')
plt.title('Iris dataset')
plt.xlabel('x')
plt.ylabel('y')
# Plot the line using the optimal parameters found by gradient descent
X_all = np.linspace(x.min(),x.max(),100).reshape(100,1)
X_all_bias = np.hstack([np.ones((100,1)),X_all])
result = X_all_bias.dot(w_gd.T) # Prediction via gradient descent minimization
plt.plot(X_all, result, c='k')
result_opt = X_all_bias.dot(w_opt) # Prediction from the analytical solution
plt.plot(X_all, result_opt,c='r')
plt.show()
###Output
_____no_output_____
###Markdown
`from Jview import Jview`; `Jview(X_bias, y, w_history, w_opt)`
###Code
import matplotlib.cm as cm
def contour_plot(X_bias, y, w_history, w_opt):
size = 256
w0 = np.linspace(-0.5,1,size)
w1 = np.linspace(-0.5,1,size)
J_history_2 = np.zeros((len(w0),len(w1)))
ws1, ws2 = np.meshgrid(w0,w1)
w_grid = np.c_[ws1.ravel(), ws2.ravel()]
J_history_2 = compute_cost(X_bias,y,w_grid.T)
J_history_2 = J_history_2.reshape(size,size)
fig = plt.figure(figsize=(10.0,5.0))
CS = plt.contourf(w0, w1, J_history_2,20, cmap=cm.coolwarm ,label='Cost Function')
plt.scatter(w_history[:,0],w_history[:,1], marker='*', color='r', label='Solution Found')
plt.scatter(w_opt[0],w_opt[1], marker='v', color='w', label='Optimal Point')
CB = plt.contour(w0, w1, J_history_2,20, colors='black', linewidth=.5)
plt.clabel(CS, inline=1, fontsize=10)
plt.colorbar(CS)
plt.title("Contour Plot of Cost Function")
plt.xlabel("w0")
plt.ylabel("w1")
#plt.figure(figsize=(10,6))
#plt.legend(loc='best')
legend = plt.legend(loc='center left', bbox_to_anchor=(1.2, 0.5))
legend.get_frame().set_facecolor('#929aae')
fig.savefig('fig_2D.png', bbox_extra_artists=(legend,), bbox_inches='tight')
plt.show(fig)
contour_plot(X_bias, y, w_history, w_opt)
%ls -ls
import IPython
img = IPython.display.Image(filename='fig_2D.png')
img
###Output
_____no_output_____
###Markdown
Visualization of the Loss versus the Parameters w0, w1 in a 3D plot. The following section visualizes the previous image as a three-dimensional surface, so that for each parameter pair $(w_0,w_1)$ the loss value $J(\mathbf{w})$ is the height of the surface in the plot. On this loss-function surface, the values of $\mathbf{w}$ visited during the gradient descent search are shown as red dots. This makes it possible to follow the path the solution takes until it reaches the optimal solution. `from Jview import Jview3D`; `Jview3D(X_bias, y, w_history, w_opt)`
###Code
from mpl_toolkits.mplot3d import Axes3D
def contour_plot_3D(X_bias, y, w_history, w_opt):
#creates the mesh for the multivariate plot
size = 256
w0 = np.linspace(-0.5,1,size)
w1 = np.linspace(-0.5,1,size)
J_history_2 = np.zeros((len(w0),len(w1)))
ws1, ws2 = np.meshgrid(w0,w1)
w_grid = np.c_[ws1.ravel(), ws2.ravel()]
J_history_2 = compute_cost(X_bias,y,w_grid.T)
J_history_2 = J_history_2.reshape(size,size)
#creates a 3D fig istance with a proper size
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(1,3,3, projection='3d')
ax3d = Axes3D(fig)
ax3d.sides_to_draw = list('r')
ax = fig.add_axes(ax3d)
C = plt.contour(ws1, ws2, J_history_2,20,offset=np.min(ws1), zdir='z',colors = 'k',linewidth=0.1,alpha=0.2)
plt.clabel(C,inline=True,fontsize=10)
##Creates a surface for the function and the mesh points
ax.plot_surface(ws1, ws2, J_history_2, rstride=5, cstride=5,edgecolor='none', alpha=.2, cmap=cm.coolwarm)
#Creates a contour, one with a color map and one black so it is better to see the contour lines.
ax.contour(ws1, ws2, J_history_2,20, color="black",alpha = 0.7, stride=30)
ax.contour(ws1, ws2, J_history_2,20, cmap=cm.coolwarm,alpha = 0.3, stride=30)
#Set the legend of the axes
ax.set_xlabel('w0')
ax.set_ylabel('w1')
ax.set_zlabel('Custo (J)')
#Create the optimal minimum (calculated analytically)
ax.plot(w_opt[0],w_opt[1],compute_cost(X_bias,y,(w_opt[0],w_opt[1])), markerfacecolor='b', markeredgecolor='w', marker='*', markersize=15, label = 'Optimal Point');
#Plot the trajectory of the gradient descent point projected in the 2D plot
ax.plot(w_history[:,0],w_history[:,1],0,markersize=10,label="Gradient Descent in 2D")
#Plot the trajectory of the gradient descent in 3D
ax.plot(w_history[:,0],w_history[:,1],J_history,markersize=10,label="Gradient Descent in 3D")
#set the axes limits
ax.set_xlim3d(-0.4, 1.5)
ax.set_ylim3d(-1.0, 1.0)
ax.set_zlim3d(-0.4, 2.0)
legend = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
fig.savefig('fig_3D.png', bbox_extra_artists=(legend,), bbox_inches='tight')
contour_plot_3D(X_bias, y, w_history, w_opt)
%ls -ls
import IPython
img = IPython.display.Image(filename='fig_3D.png')
img
import pandas as pd
print("Parameters in each iteration")
data_history = pd.DataFrame(data=w_history[:,:])
data_history.columns = ['w0','w1']
data_history
###Output
Parameters in each iteration
|
untested_notebooks/localization_elasticity_polycrystal_hex_3D.ipynb | ###Markdown
Linear Elasticity in 3D for Polycrystalline MicrostructuresAuthors: Noah Paulson, Andrew Medford, David Brough IntroductionThis example demonstrates the use of MKS to predict strain fields in a polycrystalline sample. The Generalized Spherical Harmonic (GSH) basis is introduced and used for a material with hexagonal crystal symmetry. The effect of different levels of truncation in the GSH basis functions are examined, as well as the effect of selecting an incorrect crystal symmetry. Modeling with MKS Obtaining Data for MKS Calibration and Validation
###Code
import pymks
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
To start off we need to obtain data from somewhere. In order to make things easy the pymks_share package is used to import data.
###Code
from pymks_share import DataManager
manager = DataManager('pymks.me.gatech.edu')
X, y = manager.fetch_data('random hexagonal orientations')
print(X.shape)
print(y.shape)
###Output
(50, 21, 21, 21, 3)
(50, 21, 21, 21)
###Markdown
The X variable contains a set of 3 Bunge Euler angles at each spatial point, corresponding to the crystal orientation. The y variable is the '11' component of the strain tensor as obtained by a finite element simulation ($\epsilon_{xx}$). We can visualize this by plotting a slice of a 3-D microstructure ($\phi_1$ angle only) and its corresponding strain response.
###Code
from pymks.tools import draw_microstructure_strain
n = X.shape[1]
center = (n-1) / 2
draw_microstructure_strain(X[0, center, :, :, 0], y[0, center])
###Output
_____no_output_____
###Markdown
This may not mean much, but trust us that the $\epsilon_{xx}$ field is rather expensive to calculate. In principle we could visualize this in 3 dimensions using a package like mayavi, but for this tutorial we will just look at a single slice down through the center.In order to ensure that our models are valid, we need to split the data into "calibration" and "validation" sets. The idea here is that we train the model on a subset of N_cal datasets, then test the model on the rest. This is a crude form of "cross validation", and will give us confidence that we have not over-fit the model.
###Code
N_cal = 40
X_cal = X[0:N_cal, ...]
X_val = X[N_cal:, ...]
y_cal = y[0:N_cal, ...]
y_val = y[N_cal:, ...]
print(X_cal.shape)
print(X_val.shape)
###Output
(40, 21, 21, 21, 3)
(10, 21, 21, 21, 3)
###Markdown
We can see that we have 40 calibration sets, and 10 validation sets. Ideally we would have a lot more data to validate the model, but at least the 10 should give us an idea of how transferable the model is.Next we need to set up the MKS "localization model" which will be used to compute all the parameters we need for the machine to "learn" how the input microstructure field is related to the output strain field. In order to capture the orientation dependence we are going to use a basis set of "generalized spherical harmonics". A quick Google search of "generalized spherical harmonics" will tell you that these are pretty trippy functions (nearly all the results are from technical journals!).In the GSH basis n_states refers to the set of basis functions we want to work with. In this example we want to use the first 5 basis functions, so we assign a list containing indices 0-5 to n_states (we could alternately pass the integer 5 to n_states and PyMKS would automatically know to use the first 5 basis functions!). If we only wanted the 5th basis function we would simply pass n_states a list with only one entry: n_states=[5].We also need to specify the symmetry we want (and the symmetric domain) of our basis function. PyMKS makes this very easy; we can simply give domain a string specifying the desired crystal symmetry. For example, passing 'hexagonal' specifies a hexagonal crystal symmetry, while passing 'cubic' specifies cubic symmetry. If we pass "triclinic", or don't define the domain at all the non-symmetrized version of the GSH basis is used. Calibrating First Order Influence Coefficients
###Code
from pymks import MKSLocalizationModel
from pymks.bases import GSHBasis
gsh_hex_basis = GSHBasis(n_states=np.arange(6), domain="hexagonal")
###Output
_____no_output_____
###Markdown
Now we have selected the basis functions, perhaps we want to know more about what we've selected. Let's ask for the l, m and n indices of the GSH basis functions we've selected (Note that this is an advanced feature and may only be useful for the most seasoned materials scientists!).
###Code
print(gsh_hex_basis.basis_indices)
###Output
[[ 0 0 1]
[ 2 -2 1]
[ 2 -1 1]
[ 2 0 1]
[ 2 1 1]
[ 2 2 1]]
###Markdown
Now all of the complexity of the GSH basis set will be taken care of by pyMKS from here on out. We just need to fit the model:
###Code
model = MKSLocalizationModel(basis=gsh_hex_basis)
model.fit(X_cal, y_cal)
###Output
_____no_output_____
###Markdown
and then we can look at the "influence coefficients" which capture the connection between the input and output fields. Notice that there are 6 (complex valued) influence coefficients, because we had 6 "states" in the basis set. In other words, there are 2 fields of influence coefficients for each basis set. Again, we are only looking at a slice through the center in order to avoid the complexities of visualizing 3D data.
###Code
from pymks.tools import draw_coeff
coef_ = model.coef_
draw_coeff(np.real(coef_[:,center, :, :]), figsize=(2, 3))
###Output
_____no_output_____
###Markdown
we also want to plot the imaginary components of the influence coefficients, you can't forget about these when using the GSH basis!
###Code
draw_coeff(np.imag(coef_[:,center, :, :]), figsize=(2, 3))
###Output
_____no_output_____
###Markdown
We can see that the coefficients for some basis sets have significant values, while others are mostly zero. This means that in principle we could probably describe the system with fewer basis states. We also notice that when there are non-zero components, they are typically centered near zero. This is intuitive, since it tells us that the elastic response of the material is local, as we would expect (and as can be seen in the other elasticity tutorials). Prediction of Strain Fields for Validation MicrostructuresNow we want to use these coefficients to predict the response of the validation set, and ensure that the results are in line with the outputs of the full simulation.
###Code
y_predict = model.predict(X_val)
###Output
_____no_output_____
###Markdown
First let's simply compare slices of the $\epsilon_{xx}$ strain fields for one of our validation microstructures
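Before the visual comparison, a quick numerical check is useful (added snippet; it assumes `y_val` and `y_predict` as computed above).

```python
# Mean absolute strain error over all validation voxels.
import numpy as np

print('mean absolute strain error:', np.mean(np.abs(y_predict - y_val)))
```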
###Code
from pymks.tools import draw_strains_compare
draw_strains_compare(y_val[0, center], y_predict[0, center])
###Output
_____no_output_____
###Markdown
So it looks like the MKS is working pretty well in this case, but it is worth seeing if we can do even better. Improving the MKS ResultsThe next thing to try is to include even more basis functions:
###Code
gsh_hex_basis = GSHBasis(n_states=np.arange(20), domain='hexagonal')
model = MKSLocalizationModel(basis=gsh_hex_basis)
model.fit(X_cal, y_cal)
y_predict = model.predict(X_val)
draw_strains_compare(y_val[0, center], y_predict[0, center])
###Output
_____no_output_____
###Markdown
Clearly now the results are very good. You might ask if we have too few or too many basis functions? First, let's look at the influence coefficients and what is going on.
###Code
from pymks.tools import draw_coeff
coeff = model.coef_
draw_coeff(np.real(coeff[:,center, :, :]), figsize=(4, 5))
draw_coeff(np.imag(coeff[:,center, :, :]), figsize=(4, 5))
###Output
_____no_output_____
###Markdown
If we look carefully at the influence coefficients we notice that they appear to be identically zero for the 15th basis function and beyond. If we wanted to be thorough we would want to check the influence coefficients for even more basis functions, but for the purposes of this example we can be satisfied that we only need the first 15.Let's redo the study once more with only the first 15 basis functions and hexagonal symmetry.
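Rather than judging this purely by eye, the magnitude of each set of coefficients can be checked numerically. The added snippet below assumes the first axis of `coeff` indexes the basis states, as suggested by the `draw_coeff` calls above.

```python
# Largest coefficient magnitude per basis state; values near zero suggest the
# corresponding basis function contributes little.
import numpy as np

print(np.round([np.abs(c).max() for c in coeff], 6))
```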
###Code
gsh_hex_basis = GSHBasis(n_states=np.arange(15), domain='hexagonal')
model = MKSLocalizationModel(basis=gsh_hex_basis)
model.fit(X_cal, y_cal)
y_predict = model.predict(X_val)
draw_strains_compare(y_val[0, center], y_predict[0, center])
###Output
_____no_output_____
###Markdown
As we expected the results look great, probably even better than the previous iteration. Selection of the Wrong Crystal Symmetry!Finally let's take a look at what happens when we choose the wrong crystal symmetry.
###Code
gsh_cube_basis = GSHBasis(n_states=np.arange(15), domain='cubic')
model = MKSLocalizationModel(basis=gsh_cube_basis)
model.fit(X_cal, y_cal)
y_predict = model.predict(X_val)
draw_strains_compare(y_val[0, center], y_predict[0, center])
###Output
_____no_output_____ |
notebooks/T8 - 1 - SVM - Linear SVC_Py38.ipynb | ###Markdown
Linear Support Vector Classifier
###Code
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
from sklearn import svm
X = [1,5,1.5,8,1,9]
Y = [2,8,1.8,8,0.6,11]
plt.scatter(X,Y)
plt.show()
data = np.array(list(zip(X,Y)))
data
target = [0, 1, 0, 1, 0, 1]
classifier = svm.SVC(kernel="linear", C = 1.0)
classifier.fit(data, target)
p = np.array([10.32, 12.67]).reshape(1,2)
print(p)
classifier.predict(p)
###Output
[[10.32 12.67]]
###Markdown
* Model: w0 . x + w1 . y + e = 0* Equation of the hyperplane in 2D: y = a . x + b (the rearrangement is shown below)
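Rearranging the first form into the second (added note; $e$ denotes the intercept term, `classifier.intercept_[0]` in the code below):

$$ w_0 x + w_1 y + e = 0 \;\Rightarrow\; y = -\frac{w_0}{w_1}\,x - \frac{e}{w_1}, \qquad a = -\frac{w_0}{w_1}, \quad b = -\frac{e}{w_1} $$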
###Code
w = classifier.coef_[0]
w
a = -w[0]/w[1]
a
b = - classifier.intercept_[0]/w[1]
b
xx = np.linspace(0,10)
yy = a * xx + b
plt.plot(xx, yy, 'k-', label = "Hiperplano de separación")
plt.scatter(X, Y, c = target)
plt.legend()
plt.plot()
###Output
_____no_output_____ |
Class Notebooks/Lab 10 - Sentiment Analysis [Improved Version].ipynb | ###Markdown
Improved Sentiment AnalysisIn the previous lab notebook, we got the fundamentals down for sentiment analysis. In this lab notebook, we'll actually get decent results.**We will use:**- packed padded sequences- pre-trained word embeddings- different RNN architecture - bidirectional RNN - multi-layer RNN- regularization- a different optimizerThis will allow us to achieve ~88% test accuracy. Preparing DataAs before, we'll set the seed, define the `Fields` and get the train/valid/test splits.**We'll be using *packed padded sequences*, which will make our RNN only process the non-padded elements of our sequence, and for any padded element the `output` will be a zero tensor.** To use packed padded sequences, we have to tell the RNN how long the actual sequences are. We do this by setting `include_lengths = True` for our `TEXT` field. This will cause `batch.text` to now be a tuple with the first element being our sentence (a numericalized tensor that has been padded) and the second element being the actual lengths of our sentences.
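As a standalone illustration of what packing does (a toy example added here, independent of the IMDb fields defined below):

```python
# Toy packed padded sequence: two sequences of lengths 3 and 1, padded to length 3.
import torch
import torch.nn as nn

padded = torch.tensor([[1, 4],
                       [2, 0],
                       [3, 0]])              # [seq len, batch size], 0 = padding
lengths = torch.tensor([3, 1])
packed = nn.utils.rnn.pack_padded_sequence(padded, lengths)
print(packed.data)                           # tensor([1, 4, 2, 3]) - pad elements are dropped
print(packed.batch_sizes)                    # tensor([2, 1, 1]) - sequences still active per step
```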
###Code
import torch
from torchtext.legacy import data
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
TEXT = data.Field(tokenize = 'spacy',
tokenizer_language = 'en_core_web_sm',
include_lengths = True)
LABEL = data.LabelField(dtype = torch.float)
###Output
_____no_output_____
###Markdown
We then load the IMDb dataset.
###Code
from torchtext.legacy import datasets
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
###Output
aclImdb_v1.tar.gz: 0%| | 98.3k/84.1M [00:00<01:25, 977kB/s]
###Markdown
Then create the validation set from our training set.
###Code
import random
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
###Output
_____no_output_____
###Markdown
**Next is the use of pre-trained word embeddings. Now, instead of having our word embeddings initialized randomly, they are initialized with these pre-trained vectors.**We get these vectors simply by specifying which vectors we want and passing it as an argument to `build_vocab`. `TorchText` handles downloading the vectors and associating them with the correct words in our vocabulary.Here, we'll be using the `"glove.6B.100d" vectors"`. `glove` is the algorithm used to calculate the vectors, go [here](https://nlp.stanford.edu/projects/glove/) for more. `6B` indicates these vectors were trained on 6 billion tokens and `100d` indicates these vectors are 100-dimensional.The theory is that these pre-trained vectors already have words with similar semantic meaning close together in vector space, e.g. **"terrible", "awful", "dreadful"** are nearby. **This gives our embedding layer a good initialization as it does not have to learn these relations from scratch.****Note**: these vectors are about 862MB, so watch out if you have a limited internet connection.- By default, TorchText will initialize words in your vocabulary but not in your pre-trained embeddings to zero. We don't want this, and instead initialize them randomly by setting `unk_init` to `torch.Tensor.normal_`. This will now initialize those words via a Gaussian distribution.
###Code
MAX_VOCAB_SIZE = 25000
TEXT.build_vocab(train_data,
max_size = MAX_VOCAB_SIZE,
vectors = "glove.6B.100d",
unk_init = torch.Tensor.normal_)
LABEL.build_vocab(train_data)
###Output
.vector_cache/glove.6B.zip: 862MB [02:40, 5.38MB/s]
100%|█████████▉| 398024/400000 [00:15<00:00, 24884.26it/s]
###Markdown
As before, we create the iterators, placing the tensors on the GPU if one is available.**Another thing for packed padded sequences all of the tensors within a batch need to be sorted by their lengths. This is handled in the iterator by setting `sort_within_batch = True`.**
###Code
BATCH_SIZE = 64
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
sort_within_batch = True,
device = device)
###Output
_____no_output_____
###Markdown
Build the ModelThe model features the most drastic changes. Different RNN ArchitectureWe'll be using a different RNN architecture called a **Long Short-Term Memory (LSTM)**. **Why is an LSTM better than a standard RNN?**Standard RNNs suffer from the [vanishing gradient problem](https://en.wikipedia.org/wiki/Vanishing_gradient_problem). LSTMs overcome this by having an extra recurrent state called a _cell_, $c$ - which can be thought of as the "memory" of the LSTM - and the use use multiple _gates_ which control the flow of information into and out of the memory. For more information, go [here](https://colah.github.io/posts/2015-08-Understanding-LSTMs/). We can simply think of the LSTM as a function of $x_t$, $h_t$ and $c_t$, instead of just $x_t$ and $h_t$.$$(h_t, c_t) = \text{LSTM}(x_t, h_t, c_t)$$Thus, the model using an LSTM looks something like (with the embedding layers omitted):**The initial cell state, $c_0$, like the initial hidden state is initialized to a tensor of all zeros.** The sentiment prediction is still, however, only made using the final hidden state, not the final cell state, i.e. $\hat{y}=f(h_T)$. Bidirectional RNNThe concept behind a **bidirectional RNN** is simple. As well as having an RNN processing the words in the sentence from the **first to the last** **(a forward RNN)**, we have a second RNN processing the words in the sentence from the **last to the first** **(a backward RNN)**. At time step $t$, the forward RNN is processing word $x_t$, and the backward RNN is processing word $x_{T-t+1}$. In PyTorch, the hidden state (and cell state) tensors returned by the forward and backward RNNs are stacked on top of each other in a single tensor. **We make our sentiment prediction using a concatenation of the last hidden state from the forward RNN (obtained from final word of the sentence), $h_T^\rightarrow$, and the last hidden state from the backward RNN (obtained from the first word of the sentence), $h_T^\leftarrow$, i.e. $\hat{y}=f(h_T^\rightarrow, h_T^\leftarrow)$** The image below shows a bi-directional RNN, with the forward RNN in orange, the backward RNN in green and the linear layer in silver. Multi-layer RNN**Multi-layer RNNs (also called *deep RNNs*)** are another simple concept. The idea is that we add additional RNNs on top of the initial standard RNN, where each RNN added is another *layer*. The hidden state output by the first (bottom) RNN at time-step $t$ will be the input to the RNN above it at time step $t$. The prediction is then made from the final hidden state of the final (highest) layer.**The image below shows a multi-layer unidirectional RNN, where the layer number is given as a superscript.** Also note that each layer needs their own initial hidden state, $h_0^L$. RegularizationAlthough we've added improvements to our model, each one adds additional parameters. Without going into overfitting into too much detail, **the more parameters you have in in your model, the higher the probability that your model will overfit (memorize the training data, causing a low training error but high validation/testing error, i.e. poor generalization to new, unseen examples).** To combat this, we use regularization. More specifically, we use a method of regularization called **dropout**. **Dropout works by randomly *dropping out* (setting to 0) neurons in a layer during a forward pass.** The probability that each neuron is dropped out is set by a hyperparameter and each neuron with dropout applied is considered indepenently. 
**One theory about why dropout works is that a model with parameters dropped out can be seen as a "weaker" (less parameters) model.** The predictions from all these "weaker" models (one for each forward pass) get averaged together withinin the parameters of the model. **Thus, your one model can be thought of as an ensemble of weaker models, none of which are over-parameterized and thus should not overfit.** Implementation DetailsAnother addition to this model is that we are not going to learn the embedding for the `` token. This is because we want to explitictly tell our model that **padding tokens are irrelevant to determining the sentiment of a sentence.** This means the embedding for the pad token will remain at what it is initialized to (**we initialize it to all zeros later**). We do this by passing the index of our pad token as the `padding_idx` argument to the `nn.Embedding` layer.To use an LSTM instead of the standard RNN, we use `nn.LSTM` instead of `nn.RNN`. Also, note that the LSTM returns the `output` and a tuple of the final `hidden` state and the final `cell` state, whereas the standard RNN only returned the `output` and final `hidden` state. **As the final hidden state of our LSTM has both a forward and a backward component, which will be concatenated together, the size of the input to the `nn.Linear` layer is twice that of the hidden dimension size.**Implementing bidirectionality and adding additional layers are done by passing values for the `num_layers` and `bidirectional` arguments for the RNN/LSTM. Dropout is implemented by initializing an `nn.Dropout` layer (the argument is the probability of dropping out each neuron) and using it within the `forward` method after each layer we want to apply dropout to. **Note: never use dropout on the input or output layers (`text` or `fc` in this case), you only ever want to use dropout on intermediate layers.** The LSTM has a `dropout` argument which adds dropout on the connections between hidden states in one layer to hidden states in the next layer. As we are passing the lengths of our sentences to be able to use packed padded sequences, we have to add a second argument, `text_lengths`, to `forward`. Before we pass our embeddings to the RNN, we need to pack them, which we do with `nn.utils.rnn.packed_padded_sequence`. This will cause our RNN to only process the non-padded elements of our sequence. The RNN will then return `packed_output` (a packed sequence) as well as the `hidden` and `cell` states (both of which are tensors). Without packed padded sequences, `hidden` and `cell` are tensors from the last element in the sequence, which will most probably be a pad token, however when using packed padded sequences they are both from the last non-padded element in the sequence. Note that the `lengths` argument of `packed_padded_sequence` must be a CPU tensor so we explicitly make it one by using `.to('cpu')`.We then unpack the output sequence, with `nn.utils.rnn.pad_packed_sequence`, to transform it from a packed sequence to a tensor. **The elements of `output` from padding tokens will be zero tensors (tensors where every element is zero). Usually, we only have to unpack output if we are going to use it later on in the model.** Although we aren't in this case, we still unpack the sequence just to show how it is done.The final hidden state, `hidden`, has a shape of _**[num layers * num directions, batch size, hid dim]**_. 
These are ordered: **[forward_layer_0, backward_layer_0, forward_layer_1, backward_layer_1, ..., forward_layer_n, backward_layer_n]**. As we want the final (top) layer forward and backward hidden states, we get the top two hidden states from the first dimension, `hidden[-2,:,:]` and `hidden[-1,:,:]`, and concatenate them together before passing them to the linear layer (after applying dropout).
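As a quick illustrative check (toy sizes only, not part of the sentiment model), the shape and ordering of the hidden state can be inspected with a stand-alone `nn.LSTM`:
###Code
# illustrative only: hidden state layout of a 2-layer bidirectional LSTM with toy sizes
import torch
import torch.nn as nn
toy_lstm = nn.LSTM(input_size=8, hidden_size=16, num_layers=2, bidirectional=True)
toy_input = torch.randn(5, 3, 8)  # [sent len, batch size, emb dim]
_, (toy_hidden, toy_cell) = toy_lstm(toy_input)
print(toy_hidden.shape)  # [num layers * num directions, batch size, hid dim] -> torch.Size([4, 3, 16])
print(torch.cat((toy_hidden[-2], toy_hidden[-1]), dim=1).shape)  # torch.Size([3, 32])
###Output
_____no_output_____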
###Code
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
bidirectional, dropout, pad_idx):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)
self.rnn = nn.LSTM(embedding_dim,
hidden_dim,
num_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout)
self.fc = nn.Linear(hidden_dim * 2, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text, text_lengths):
#text = [sent len, batch size]
embedded = self.dropout(self.embedding(text))
#embedded = [sent len, batch size, emb dim]
#pack sequence
# lengths need to be on CPU!
packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths.to('cpu'))
packed_output, (hidden, cell) = self.rnn(packed_embedded)
#unpack sequence
output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output)
#output = [sent len, batch size, hid dim * num directions]
#output over padding tokens are zero tensors
#hidden = [num layers * num directions, batch size, hid dim]
#cell = [num layers * num directions, batch size, hid dim]
#concat the final forward (hidden[-2,:,:]) and backward (hidden[-1,:,:]) hidden layers
#and apply dropout
hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))
#hidden = [batch size, hid dim * num directions]
return self.fc(hidden)
###Output
_____no_output_____
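###Markdown
Before moving on, here is a small stand-alone sketch (toy tensors, not part of the model above) of how packing and unpacking behave:
###Code
# illustrative only: pack a padded batch, then unpack it again
import torch
import torch.nn as nn
padded = torch.randn(4, 2, 3)   # [sent len, batch size, emb dim]; assume the second sequence is padded
lengths = torch.tensor([4, 2])  # true lengths, longest first
packed = nn.utils.rnn.pack_padded_sequence(padded, lengths.to('cpu'))
unpacked, unpacked_lengths = nn.utils.rnn.pad_packed_sequence(packed)
print(unpacked.shape, unpacked_lengths)  # torch.Size([4, 2, 3]) tensor([4, 2])
###Output
_____no_output_____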
###Markdown
Like before, we'll create an instance of our RNN class, with the new parameters and arguments for the number of layers, bidirectionality and dropout probability. To ensure the pre-trained vectors can be loaded into the model, the `EMBEDDING_DIM` must be equal to that of the pre-trained GloVe vectors loaded earlier. We get our pad token index from the vocabulary, getting the actual string representing the pad token from the field's `pad_token` attribute, which is `<pad>` by default.
###Code
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = RNN(INPUT_DIM,
EMBEDDING_DIM,
HIDDEN_DIM,
OUTPUT_DIM,
N_LAYERS,
BIDIRECTIONAL,
DROPOUT,
PAD_IDX)
###Output
_____no_output_____
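###Markdown
As an optional sanity check (not required for training), we can count the trainable parameters to see how much larger this model has become:
###Code
# optional helper: count trainable parameters
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'The model has {count_parameters(model):,} trainable parameters')
###Output
_____no_output_____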
###Markdown
The final addition is copying the pre-trained word embeddings we loaded earlier into the `embedding` layer of our model. We retrieve the embeddings from the field's vocab, and check they're the correct size, _**[vocab size, embedding dim]**_
###Code
pretrained_embeddings = TEXT.vocab.vectors
print(pretrained_embeddings.shape)
###Output
torch.Size([25002, 100])
###Markdown
We then replace the initial weights of the `embedding` layer with the pre-trained embeddings. **Note**: this should always be done on the `weight.data` and not the `weight`!
###Code
model.embedding.weight.data.copy_(pretrained_embeddings)
###Output
_____no_output_____
###Markdown
As our `<unk>` and `<pad>` tokens aren't in the pre-trained vocabulary they have been initialized using `unk_init` (an $\mathcal{N}(0,1)$ distribution) when building our vocab. **It is preferable to initialize them both to all zeros to explicitly tell our model that, initially, they are irrelevant for determining sentiment.** **We do this by manually setting their rows in the embedding weights matrix to zeros.** We get their rows by finding the index of the tokens, which we have already done for the padding index. **Note**: like initializing the embeddings, this should be done on the `weight.data` and not the `weight`!
###Code
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
print(model.embedding.weight.data)
###Output
tensor([[ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],
[-0.0382, -0.2449, 0.7281, ..., -0.1459, 0.8278, 0.2706],
...,
[ 0.4321, -0.6715, 0.5848, ..., 0.2963, 0.1717, 0.8273],
[ 0.2671, -0.0532, 0.0419, ..., -0.4741, -0.4803, -0.1848],
[-0.3165, 0.2370, 0.5244, ..., 0.0429, -0.4964, 0.2210]])
###Markdown
We can now see the first two rows of the embedding weights matrix have been set to zeros. As we passed the index of the pad token to the `padding_idx` of the embedding layer it will remain zeros throughout training, however the `<unk>` token embedding will be learned. Train the Model Now on to training the model. The only change we'll make here is changing the optimizer from `SGD` to `Adam`. SGD updates all parameters with the same learning rate and choosing this learning rate can be tricky. `Adam` adapts the learning rate for each parameter, giving parameters that are updated more frequently lower learning rates and parameters that are updated infrequently higher learning rates. More information about `Adam` (and other optimizers) can be found [here](http://ruder.io/optimizing-gradient-descent/index.html). To change `SGD` to `Adam`, we simply change `optim.SGD` to `optim.Adam`, also note how we do not have to provide an initial learning rate for Adam as PyTorch specifies a sensible default initial learning rate.
###Code
import torch.optim as optim
optimizer = optim.Adam(model.parameters())
###Output
_____no_output_____
###Markdown
The rest of the steps for training the model are unchanged. We define the criterion and place the model and criterion on the GPU (if available)...
###Code
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
###Output
_____no_output_____
###Markdown
We implement the function to calculate accuracy...
###Code
def binary_accuracy(preds, y):
"""
Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
"""
#round predictions to the closest integer
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float() #convert into float for division
acc = correct.sum() / len(correct)
return acc
###Output
_____no_output_____
###Markdown
We define a function for training our model. As we have set `include_lengths = True`, our `batch.text` is now a tuple with the first element being the numericalized tensor and the second element being the actual lengths of each sequence. We separate these into their own variables, `text` and `text_lengths`, before passing them to the model. **Note**: as we are now using dropout, we must remember to use `model.train()` to ensure the dropout is "turned on" while training.
###Code
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
###Output
_____no_output_____
###Markdown
Then we define a function for testing our model, again remembering to separate `batch.text`. **Note**: as we are now using dropout, we must remember to use `model.eval()` to ensure the dropout is "turned off" while evaluating.
###Code
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
###Output
_____no_output_____
###Markdown
And also create a nice function to tell us how long our epochs are taking.
###Code
import time
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
###Output
_____no_output_____
###Markdown
Finally, we train our model...
###Code
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), 'lab10-model.pt')
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
###Output
Epoch: 01 | Epoch Time: 0m 37s
Train Loss: 0.632 | Train Acc: 63.27%
Val. Loss: 0.532 | Val. Acc: 75.47%
Epoch: 02 | Epoch Time: 0m 38s
Train Loss: 0.525 | Train Acc: 73.89%
Val. Loss: 0.479 | Val. Acc: 77.86%
Epoch: 03 | Epoch Time: 0m 39s
Train Loss: 0.442 | Train Acc: 79.61%
Val. Loss: 0.394 | Val. Acc: 85.59%
Epoch: 04 | Epoch Time: 0m 40s
Train Loss: 0.375 | Train Acc: 83.82%
Val. Loss: 0.324 | Val. Acc: 86.17%
Epoch: 05 | Epoch Time: 0m 40s
Train Loss: 0.280 | Train Acc: 88.67%
Val. Loss: 0.304 | Val. Acc: 87.70%
###Markdown
...and get our new and vastly improved test accuracy!
###Code
model.load_state_dict(torch.load('lab10-model.pt'))
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
###Output
Test Loss: 0.314 | Test Acc: 87.10%
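###Markdown
As an optional final sketch (assuming the `TEXT` field, `device` and a spaCy English tokenizer are still available in this session), the trained model can also score raw sentences; values below 0.5 suggest a negative review and values above 0.5 a positive one:
###Code
# illustrative inference helper; assumes spaCy's 'en_core_web_sm' model is installed
import spacy
nlp = spacy.load('en_core_web_sm')
def predict_sentiment(model, sentence):
    model.eval()
    tokenized = [tok.text for tok in nlp.tokenizer(sentence)]
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]
    length = [len(indexed)]
    tensor = torch.LongTensor(indexed).to(device)
    tensor = tensor.unsqueeze(1)  # add a batch dimension
    length_tensor = torch.LongTensor(length)
    prediction = torch.sigmoid(model(tensor, length_tensor))
    return prediction.item()
predict_sentiment(model, "This film is terrible")
###Output
_____no_output_____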
|
Day3_Model.ipynb | ###Markdown
FEATURE ENGINEERING
###Code
df.head()
df['param_color'].factorize()
df['param_color'].factorize()
df['param_color'].factorize()[0]
SUFFIX_CAT = '__cat'
for feat in df.columns:
if isinstance(df[feat][0], list): continue
factorize_values = df[feat].factorize()[0]
if SUFFIX_CAT in feat:
df[feat] = factorize_values
else:
df[feat + SUFFIX_CAT] = factorize_values
# Walkthrough:
# 1st loop iteration
#'a'
#'a__cat'
# 2nd iteration
#'a'
#'a__cat'
#'a_cat__cat'
# Here, in the second iteration, __cat would be appended once again - we don't want it to work that way.
cat_feats = [ x for x in df.columns if SUFFIX_CAT in x ]
cat_feats = [x for x in cat_feats if 'price' not in x]
len(cat_feats)
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_score

X = df[cat_feats].values
y = df['price_value'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)
###Output
_____no_output_____
###Markdown
Checking features - which ones are interesting
###Code
import eli5
from eli5.sklearn import PermutationImportance

m = DecisionTreeRegressor(max_depth=5)
m.fit(X, y)
imp = PermutationImportance(m, random_state=0).fit(X, y)
eli5.show_weights(imp, feature_names=cat_feats)
cd /content/drive/My Drive/Cloab Notebooks/matrix/matrix_two/dw_matrix_car/
ls
###Output
[0m[01;34mdata[0m/ day2_visualization.ipynb LICENSE
Day1_meta.ipynb Day3_Model.ipynb README.md
###Markdown
Afterwards, the files have to be moved from Google Drive to the path above
###Code
!git config --global user.email "[email protected]"
!git config --global user.name "Sylwek"
###Output
_____no_output_____
###Markdown
ADDING CHANGES TO GITHUB
###Code
!git add Day3_Model.ipynb
!git add Day1_meta.ipynb
!git commit -m "add model"
GITHUB_TOKEN='44d04f6f6c31db28fe53daf3bf0519f13ff6e0b6'
GITHUB_URL = 'https://{}@github.com/smachnio/dw_matrix_car.git'.format(GITHUB_TOKEN)
!git help credentials
!git push https://github.com/smachnio/dw_matrix_car.git
!git push -u origin master
###Output
_____no_output_____
###Markdown
Starred (bonus) exercise
###Code
def group_and_barplot (feat_groupby, feat_agg='price_value', agg_funcs=[np.mean,np.median, np.size], feat_sort='mean',top=50, subplots=True):
return(
df
.groupby(feat_groupby)[feat_agg]
.agg(agg_funcs)
.sort_values(by=feat_sort,ascending=False)
.head(top)
) .plot(kind='bar', figsize=(15,5), subplots= subplots)
group_and_barplot('param_napęd');
###Output
_____no_output_____ |
Data Visualization Project.ipynb | ###Markdown
Data Visualization Project using Python In this Assignment, you will demonstrate the data visualization skills you learned by completing this course. You will be required to generate two visualization plots. 1) The first one will be a plot to summarize the results of a survey that was conducted to gauge an audience's interest in different data science topics. 2) The second plot is a Choropleth map of the crime rate in San Francisco. A survey was conducted to gauge an audience's interest in different data science topics, namely:1. Big Data (Spark / Hadoop) 2. Data Analysis / Statistics 3. Data Journalism 4. Data Visualization 5. Deep Learning 6. Machine Learning The participants had __three__ options for each topic: **Very Interested**, **Somewhat interested**, and **Not interested**. 2,233 respondents completed the survey. The survey results have been saved in a csv file and can be accessed through this link: https://cocl.us/datascience_survey_data. If you examine the csv file, you will find that the first column represents the data science topics and the first row represents the choices for each topic.
###Code
# Libraries
import pandas as pd
import numpy as np
df_survey = pd.read_csv('https://cocl.us/datascience_survey_data/Topic_Survey_Assignment.csv', index_col=0)
df_survey.shape
df_survey.head(6)
###Output
_____no_output_____
###Markdown
Use the artist layer of Matplotlib to visualize the percentage of the respondents' interest in the different data science topics surveyed. To create this bar chart, you can follow the following steps:- Sort the dataframe in descending order of Very interested.- Convert the numbers into percentages of the total number of respondents. Recall that 2,233 respondents completed the survey. Round percentages to 2 decimal places.- As for the chart:- use a figure size of (20, 8),- bar width of 0.8,- use color #5cb85c for the Very interested bars, color #5bc0de for the Somewhat interested bars, and color #d9534f for the Not interested bars,- use font size 14 for the bar labels, percentages, and legend,- use font size 16 for the title, and,- display the percentages above the bars as shown above, and remove the left, top, and right borders. *Import Matplotlib*
###Code
# use the inline backend to generate the plots within the browser
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('ggplot') # optional: for ggplot-like style
# check for latest version of Matplotlib
print ('Matplotlib version: ', mpl.__version__) # >= 2.0.0
df_survey.sort_values(['Very interested'], ascending=False, axis=0, inplace=True)
# Change this line to plot percentages instead of absolute values
ax = (df_survey.div(df_survey.sum(1), axis=0)).plot(kind='bar',figsize=(20,8),width = 0.8,color = ['#5cb85c','#5bc0de','#d9534f'],edgecolor=None)
plt.legend(labels=df_survey.columns,fontsize= 14)
plt.title("Percentage of Respondents' Interest in Data Science Areas",fontsize= 16)
plt.xticks(fontsize=14)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.yticks([])
# Add this loop to add the annotations
for i in ax.patches:
width, height = i.get_width(), i.get_height()
x, y = i.get_xy()
ax.annotate('{:.2%}'.format(height), (x, y + height + 0.01), (i.get_x()+.5*width, i.get_y() + height + 0.01), ha = 'center')
ax.set_facecolor('white')
###Output
_____no_output_____
###Markdown
*Choropleth Map* In the final lab, we created a map with markers to explore crime rate in San Francisco, California. In this question, you are required to create a Choropleth map to visualize crime in San Francisco. Before you are ready to start building the map, let's restructure the data so that it is in the right format for the Choropleth map. Essentially, you will need to create a dataframe that lists each neighborhood in San Francisco along with the corresponding total number of crimes. Based on the San Francisco crime dataset, you will find that San Francisco consists of 10 main neighborhoods, namely:1. Central,2. Southern,3. Bayview,4. Mission,5. Park,6. Richmond,7. Ingleside,8. Taraval,9. Northern, and,10. Tenderloin. Convert the San Francisco dataset, which you can also find here, https://cocl.us/sanfran_crime_dataset, into a pandas dataframe, like the one shown below, that represents the total number of crimes in each neighborhood.
###Code
df_sf_crime = pd.read_csv('https://cocl.us/sanfran_crime_dataset/Police_Department_Incidents_-_Previous_Year__2016_.csv')
df_sf_crime.head()
sf_neighbourhoods = ['Central','Southern','Bayview','Mission','Park','Richmond','Ingleside','Taraval','Northern','Tenderloin']
sf_neighbourhood_counts = df_sf_crime['PdDistrict'].value_counts().to_frame()
sf_neighbourhood_counts.groupby(['PdDistrict'],as_index=False).sum()
sf_neighbourhood_counts.rename(columns={'PdDistrict': 'Count'}, inplace=True)
sf_neighbourhood_counts.index.name = 'Neighbourhood'
sf_neighbourhood_counts2 = sf_neighbourhood_counts.reset_index()
sf_neighbourhood_counts2
###Output
_____no_output_____
###Markdown
Now you should be ready to proceed with creating the Choropleth map. As you learned in the Choropleth maps lab, you will need a GeoJSON file that marks the boundaries of the different neighborhoods in San Francisco. In order to save you the hassle of looking for the right file, I already downloaded it for you and I am making it available via this link: https://cocl.us/sanfran_geojson. For the map, make sure that:- it is centred around San Francisco,- you use a zoom level of 12,- you use fill_color = 'YlOrRd',- you define fill_opacity = 0.7,- you define line_opacity=0.2, and,- you define a legend and use the default threshold scale.
###Code
!conda install -c anaconda xlrd --yes
!conda install -c conda-forge folium=0.5.0 --yes
import folium
print('Folium installed and imported!')
# download countries geojson file
!wget --quiet https://cocl.us/sanfran_geojson/san-francisco.geojson -O san-francisco.json
print('GeoJSON file downloaded!')
sf_geo = r'san-francisco.json' # geojson file
# create a plain world map
sf_map = folium.Map(location=[37.773972, -122.431297], zoom_start=12, tiles='Mapbox Bright')
# generate choropleth map using the total immigration of each country to Canada from 1980 to 2013
sf_map.choropleth(
geo_data=sf_geo,
data=sf_neighbourhood_counts2,
columns=['Neighbourhood', 'Count'],
key_on='feature.properties.DISTRICT',
fill_color='YlOrRd',
fill_opacity=0.7,
line_opacity=0.2,
legend_name='Crime in San Francisco')
# display map
sf_map
###Output
_____no_output_____ |
.ipynb_checkpoints/17_Sorting-checkpoint.ipynb | ###Markdown
Sorting Bubble Sort
###Code
def bubbleSort(customList):
for i in range(len(customList)):
        for j in range(len(customList)-i-1): # the last i positions already hold the largest values, so they can be skipped
if customList[j] > customList[j+1]:
customList[j], customList[j+1] = customList[j+1], customList[j]
print(customList)
cList = [2,1,7,6,5,3,4,9,8]
bubbleSort(cList)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
Selection Sort
###Code
def selectionSort(customList):
sortedList = []
for i in range(len(customList)):
min_index = i
for j in range(i+1, len(customList)):
if customList[min_index] > customList[j]:
min_index = j
customList[i], customList[min_index] = customList[min_index], customList[i]
print(customList)
cList = [2,1,7,6,5,3,4,9,8]
selectionSort(cList)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
Insertion Sort
###Code
def insertionSort(customList):
# for: i is right side of the criteria / j is left side of the criteria
for i in range(1, len(customList)):
key = customList[i]
j = i-1
while j >= 0 and customList[j] > key:
customList[j+1]=customList[j]
j -= 1
customList[j+1]=key
return customList
cList = [2,1,7,6,5,3,4,9,8]
print(insertionSort(cList))
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
Bucket Sort
###Code
import math
def bucketSort(customList):
numberofBuckets = round(math.sqrt(len(customList)))
maxValue = max(customList)
arr = []
# make buckets
for i in range(numberofBuckets):
arr.append([])
    # bucket selection
for j in customList:
index_b = math.ceil(j*numberofBuckets/maxValue)
arr[index_b-1].append(j)
# sort inside of the buckets
for i in range(numberofBuckets):
arr[i] = insertionSort(arr[i])
k = 0
for i in range(numberofBuckets):
for j in range(len(arr[i])):
customList[k] = arr[i][j]
k+=1
return customList
cList = [2,1,7,6,5,3,4,9,8]
print(bucketSort(cList))
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
Merge Sort
###Code
def merge(customList, l, m, r):
# number of elements in first sub array
n1 = m-l+1
# number of elements in second sub array
n2 = r-m
L = [0]*(n1)
R = [0]*(n2)
# divide array as two
for i in range(0, n1):
L[i] = customList[l+i]
for j in range(0, n2):
R[j] = customList[m+1+j]
i = 0 # initial index of first sub array
j = 0 # initial index of second sub array
k = l # initial index of merged array
# combine arrays as one
while i < n1 and j < n2:
if L[i] <= R[j]:
customList[k] = L[i]
i+=1
else:
customList[k] = R[j]
j+=1
k+=1
# since one of the arrays amog L or R is reached the last index, we can use the following loop
while i < n1:
customList[k] = L[i]
i+=1
k+=1
while j < n2:
customList[k] = R[j]
j+=1
k+=1
def mergeSort(customList, l, r):
if l < r:
m = (l+(r-1))//2
mergeSort(customList, l, m)
mergeSort(customList, m+1, r)
merge(customList, l, m, r)
return customList
cList = [2,1,7,6,5,3,4,9,8]
mergeSort(cList, 0, len(cList)-1)
###Output
_____no_output_____
###Markdown
Quick Sort
###Code
def partition(customList, low, high):
i = low - 1
pivot =customList[high] # We chose the index at the last node as a pivot
for j in range(low, high):
if customList[j] <= pivot:
i+=1
customList[i],customList[j] = customList[j], customList[i]
customList[i+1], customList[high] = customList[high], customList[i+1]
return (i+1)
def quickSort(customList, low, high):
if low < high:
pi = partition(customList, low, high)
quickSort(customList, low, pi-1)
        quickSort(customList, pi+1, high)  # the pivot is already in its final position
cList = [2,1,7,6,5,3,4,9,8]
quickSort(cList, 0, len(cList)-1)
print(cList)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
Heap Sort
###Code
def heapify(customList, n, i):
smallest = i
l = 2*i + 1
r = 2*i + 2
if l<n and customList[l]<customList[smallest]:
smallest = l
if r<n and customList[r]<customList[smallest]:
smallest = r
if smallest != i:
customList[i], customList[smallest] = customList[smallest], customList[i]
heapify(customList, n, smallest)
def heapSort(customList):
    n = len(customList)
    # build a min-heap from the list
    for i in range(int(n/2)-1,-1,-1):
        heapify(customList,n,i)
    # repeatedly move the current minimum to the end, producing a descending order
    for i in range(n-1,0,-1):
        customList[i],customList[0] = customList[0], customList[i]
        heapify(customList, i, 0)
    # reverse the descending list to obtain ascending order
    customList.reverse()
cList = [2,1,7,6,5,3,4,9,8]
heapSort(cList)
print(cList)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
|
lrp-calendar.ipynb | ###Markdown
Parse program info and visit timeline webpages
###Code
import os
import numpy as np
import matplotlib.pyplot as plt
import requests
# https://stackoverflow.com/questions/24124643/parse-xml-from-url-into-python-object
import traceback
import urllib3
import xmltodict
def getxml(url="https://yoursite/your.xml"):
http = urllib3.PoolManager()
response = http.request('GET', url)
try:
data = xmltodict.parse(response.data)
except:
print("Failed to parse xml from response (%s)" % traceback.format_exc())
data = {}
return data
def visit_xml(proposal_id=1324):
url = f"https://www.stsci.edu/cgi-bin/get-visit-status?id={proposal_id}&markupFormat=xml&observatory=JWST"
data = getxml(url=url)
if 'visitStatusReport' in data:
data = data['visitStatusReport']
return data
def prop_html(proposal_id=1324):
from bs4 import BeautifulSoup
vgm_url = f"https://www.stsci.edu/cgi-bin/get-proposal-info?id={proposal_id}&observatory=JWST"
html_text = requests.get(vgm_url).text
soup = BeautifulSoup(html_text, 'html.parser')
return soup
def program_info(proposal_id=1324):
soup = prop_html(proposal_id=proposal_id)
meta = {'proposal_id':proposal_id}
meta['raw'] = soup
if 1:
ps = soup.findAll('p')
meta['pi'] = ps[0].contents[1].strip()
meta['title'] = ps[1].contents[1].strip()
meta['cycle'] = int(ps[1].contents[5].strip())
meta['allocation'] = float(ps[1].contents[9].strip().split()[0])
meta['proptime'] = float(ps[1].contents[-1].strip().split()[0])
meta['type'] = soup.findAll('h1')[0].contents[1].contents[0]
else:
meta['pi'] = 'x'
meta['title'] = 'x'
meta['cycle'] = 0
meta['allocation'] = 0
meta['proptime'] = 0.
meta['type'] = 'x'
visits = visit_xml(proposal_id)
#for k in ['visit']: #visits:
# meta[k] = visits[k]
if isinstance(visits['visit'], list):
meta['visit'] = visits['visit']
else:
meta['visit'] = [visits['visit']]
return meta
#return soup
#visit = visit_xml(proposal_id=1324)
meta = program_info(proposal_id=1567)
from grizli import utils
import astropy.units as u
import astropy.time
def show_window(v, meta):
row = [meta[k] for k in ['type','proposal_id','title', 'pi', 'cycle', 'allocation', 'proptime']]
row += [v[k] for k in ['@observation', '@visit', 'target', 'configuration', 'hours']]
if 'planWindow' not in v:
row.extend(['2029-01-01','-','-'])
return row, None
w = v['planWindow']
if '(2' in w:
dates = ('2'+w.split('(2')[1].strip()).strip(')').split(' - ')
else:
row.extend(['2029-01-01','-','-'])
return row, None
inst = v['configuration'].strip().split()[0].lower()
colors = {'niriss':utils.MPL_COLORS['b'],
'nirspec':utils.MPL_COLORS['orange'],
'miri':utils.MPL_COLORS['r'],
'nircam':utils.MPL_COLORS['g']}
if inst not in colors:
colors[inst] = '0.5'
fig, ax = plt.subplots(1,1,figsize=(8,0.4))
wlim = astropy.time.Time(['2022:150','2024:300'], format='yday')
# ax.plot_date(wlim.plot_date, [0,0], color='w', lw=5, linestyle='-')
wdate = astropy.time.Time([d.replace('.',':') for d in dates], format='yday')
ax.plot_date(wdate.plot_date, [0,0], color=colors[inst], lw=2, fmt='s-')
prop = meta['proptime']
#prop = 6
if prop > 0:
pub = wdate + prop/12.*u.year
ax.plot_date(wdate.plot_date, [0,0], fmt='o', zorder=100, color='w', markersize=4)
ax.plot_date(pub.plot_date, [0,0], color=colors[inst], lw=2, linestyle=':', fmt='s-',
alpha=0.5)
wgrid = astropy.time.Time(['2022-07-01', '2022-10-01',
'2023-01-01', '2023-04-01', '2023-07-01', '2023-10-01',
'2024-01-01', '2024-04-01', '2024-07-01', '2024-10-01'])
ax.vlines(wgrid.plot_date[[2,6]], -1, 1, color='k', alpha=0.2)
ax.set_ylim(-0.2, 0.2)
#ax.axis('off')
#ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
#ax.spines['left'].set_visible(False)
ax.set_xticks(wgrid.plot_date)
ax.set_xlim(*wlim.plot_date)
ax.tick_params(length=0, which='both')
ax.get_yaxis().set_visible(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.grid()
iso = [w.iso.split()[0] for w in wdate]
# dates, meta['visit'][0], meta
row += iso
timeline = f"jwst_{meta['proposal_id']}_{v['@observation']}_{v['@visit']}"
row.append(f'<img src="{timeline}.png" />')
fig.savefig(timeline+'.png')
return row, fig
progs = [1837, # primer
1345, # ceers
1433, # coe
1727, # cosmos
2079, # ngdeep
2426, # bagley
2659, # beasts bubbles
2561, # uncover
1914, # aurora
1895, # fresco
1567, # seiji
1324, # glass
1208, # canucs
1180, # eisenstein, gs1
1286, # gs2
1287, # gs3
1181, # gn1
1210, # ferruit
1211, # ferruit NRS GTO
1213,
1214,
1215,
1216,
1217,
1262,
1263, # colina
1284, # colina 2
1264, # colina 3
1283, # miri udf
1243, # lilly
1176, # windhorst
1207, # rieke
2282, # earendel
1869, # LyC22
1626, # stefanon
1657, # harikane
1671, # maseda
1740, # harikane 2
1747, # roberts-borsani
1758, # finkelstein
1791, # spilker
1810, # belli
1933, # mathee
1963, # williams udf
2110, # kriek
2136, # tucker
2279, # naidu
2285, # carnall
2345, # newman
2362, # marsan
2478, # stark
2484, # mirka
2516, # hodge
2555, # sunburst
2565, # glazebrook,
1908, # vanzella
1764, # fan agn
1964, # overzier
1328, # armus
1355, # rigby
1199, # stiavelli
2123, # kassin
2130, # lee
2198, # barrufet
2234, # banados
2321, # best
2566,
2674,
1871,
2078,
1678, # ashby
2107,
1717,
1554,
2593, # strom
]
from bs4 import BeautifulSoup
import requests
progs = []
for url in ["https://www.stsci.edu/jwst/science-execution/approved-ers-programs",
"https://www.stsci.edu/jwst/science-execution/approved-programs/cycle-1-gto",
"https://www.stsci.edu/jwst/science-execution/approved-programs/cycle-1-go"]:
html_text = requests.get(url).text
soup = BeautifulSoup(html_text, 'html.parser')
tabs = soup.findAll('tbody')
for tab in tabs:
for link in tab.findAll('a'):
try:
prop = int(link.getText())
progs.append(prop)
except:
continue
len(progs)
rows = []
done = []
failed = []
len(progs)
res = {}
os.chdir('/Users/gbrammer/Research/JWST/LRP')
names = ['type','proposal_id','title', 'pi', 'cycle', 'allocation', 'proptime',
'observation', 'visit', 'target', 'configuration', 'hours',
'win_start', 'win_end', 'timeline']
# progs = [1963]
for i, prog in enumerate(progs):
print(i, prog)
if prog in done:
continue
try:
m = program_info(proposal_id=prog)
except IndexError:
failed.append(prog)
print('Fail')
continue
except TypeError:
failed.append(prog)
continue
done.append(prog)
res[prog] = m
for v in m['visit']:
try:
row, fig = show_window(v, m)
except KeyError:
continue
rows.append(row)
plt.close('all')
failed
tab = utils.GTable(rows=rows, names=names)
# tab['timeline'] = [f'<img src="{t}.png" />' for t in tab['timeline']]
tab['proposal_id'] = [f'<a href="https://www.stsci.edu/cgi-bin/get-proposal-info?id={p}&observatory=JWST">{p}</a>'
for p, t in zip(tab['proposal_id'], tab['type'])]
# tab['pi'] = [p.strip().split()[-1] for p in tab['pi']]
prop = []
for p in tab['proptime']:
if p > 0:
prop.append(f'<p style="color:red;"> {p:.0f} </p>')
else:
prop.append(f'<p style="color:green;"> {p:.0f} </p>')
tab['proptime'] = prop
targ = []
for t in tab['target']:
if isinstance(t, list):
targ.append('; '.join(t))
else:
targ.append(t)
tab['target'] = targ
tab.rename_column('proposal_id','ProgID')
tab.rename_column('proptime','Prop')
tab.rename_column('pi','PI')
tab.rename_column('title','Full_Proposal_Title')
tab.rename_column('allocation', 'Total')
tab.rename_column('hours', 'Hours')
tab['Prop'].description = 'Proprietary time, months'
tab['Hours'].description = 'Visit duration'
tab['Total'].description = 'Total program allocation, hours'
tab['win_start'].description = 'Beginning of LRP scheduling window'
tab['win_end'].description = 'End of LRP scheduling window'
tab.rename_column('observation','Obs')
tab.write_sortable_html('full_timeline.html', use_json=True,
localhost=False, max_lines=10000, filter_columns=['Total', 'Prop','Hours','win_start','win_end'])
with open('full_timeline.html') as fp:
lines = fp.readlines()
#
lines.insert(-3, f'<p style="font-size:x-small;"> Generated {astropy.time.Time.now().iso} by {os.getlogin()} with <a href="./lrp-calendar.ipynb">lrp-calendar.ipynb</a> </p>\n')
with open('full_timeline.html','w') as fp:
fp.writelines(lines)
len(tab)
tab['target']
###Output
_____no_output_____ |
PYTHON_Aula02.ipynb | ###Markdown
**LESSON 02 – INTRODUCTION TO PYTHON – PART 2** OBJECTIVES OF THIS LESSON: 1- Get to know the main basic concepts of the Python language; 2- Practice running some examples in Python; 3- Practice variable declaration, the sequential and conditional programming structures, and repetition structures (while and for) 2.1 - CONDITIONAL STRUCTURE Let us now see how the conditional structure is used in Python. The basic syntax is: if <condition>: command01 . . . commandN else: command01 . . . commandN **Example 01:**
###Code
Nota1 = float(input("Digite o nota 1: "))
Nota2 = float(input("Digite o nota 2: "))
Media = (Nota1 + Nota2)/2.0
print ("Média = " + str(Media))
if Media<=6:
print("Recuperação")
else:
print("Aprovada(o)")
###Output
Digite o nota 1: 3
Digite o nota 2: 6
Média = 4.5
Recuperação
###Markdown
**Example 02:**
###Code
salario = float(input("Digite o valor do salario (em R$): "))
if salario>=1000:
taxa = 50
desconto = 100
else:
taxa = 25
desconto = 50
print("Taxa = " + str(taxa))
print("Desconto = " + str(desconto))
###Output
Digite o valor do salario (em R$): 100
Taxa = 25
Desconto = 50
###Markdown
**2.1.1 - NESTED conditional structures** In Python, you can nest new if conditional structures inside an else, or use the elif structure. The elif structure is an option for coding multiple selection (corresponding to the switch case used in the C++ and Java languages):
###Code
print ("1- Casada(o)")
print ("2- Solteira(o)")
print ("3- Viúva(o)")
print ("4- Divorciada(o)")
eCivil = int(input("Informe o estado civil: "))
if eCivil == 1:
print('Casada(o)')
elif eCivil == 2:
print('Solteira(o)')
elif eCivil == 3:
print('Viúva(o)')
elif eCivil == 4:
print ('Divorciada(o)')
else:
print ('Outros')
###Output
1- Casada(o)
2- Solteira(o)
3- Viúva(o)
4- Divorciada(o)
Informe o estado civil: 3
Viúva(o)
###Markdown
**2.2- THE while REPETITION STRUCTURE** The while loop is a loop control statement in Python and is frequently used in programming for the repeated execution of statement(s) in a loop. It executes a sequence of statements repeatedly, as long as the condition remains true. The syntax for the while loop is given as follows: while condition: command(s) The statements inside the while loop will be executed while the condition is true, that is, while it is met.
###Code
Nota1 = float(input("Digite o nota 1: "))
while Nota1<0 or Nota1>10:
Nota1 = float(input("Digite o nota 1: "))
Nota2 = float(input("Digite o nota 2: "))
while Nota2<0 or Nota2>10:
Nota2 = float(input("Digite o nota 2: "))
Media = (Nota1 + Nota2)/2.0
print ("Média = " + str(Media))
if Media<6:
print("Recuperação")
else:
print("Aprovada(o)")
###Output
Digite o nota 1: -3
Digite o nota 1: 12
Digite o nota 1: 120
Digite o nota 1: 7
Digite o nota 2: 30
Digite o nota 2: 12
Digite o nota 2: -10
Digite o nota 2: 8
Média = 7.5
Aprovada(o)
###Markdown
**Example 03:** Write a program to print the numbers from one to five using the while loop.
###Code
count=0 # initialize the counter
while count<=5: # Test condition
print(str(count)) # print the value of count
count+=0.25 # Increment the value of count by 1
print("FIM DO LOOPING")
###Output
0
0.25
0.5
0.75
1.0
1.25
1.5
1.75
2.0
2.25
2.5
2.75
3.0
3.25
3.5
3.75
4.0
4.25
4.5
4.75
5.0
FIM DO LOOPING
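###Markdown
The cell above varies the counter in steps of 0.25 for demonstration; a minimal version that matches the stated exercise (printing the whole numbers from one to five) could look like this:
###Code
# prints the integers from 1 to 5, as stated in Example 03
count = 1
while count <= 5:
    print(count)
    count += 1
###Output
_____no_output_____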
###Markdown
**2.3- THE range() FUNCTION** Python's range() function is used to generate a list of integers. The range function has one, two or three parameters. The last two parameters of the range() function are optional. The general form of the range function is: ``` range(start, stop, step)``` The 'start' is the first number in the sequence, where the list begins. The 'stop' is the limit, that is, the last number in the sequence. The 'step' is the increment between each number in the sequence.
###Code
# Display the numbers from 0 to 17 with step 1
list(range(18))
# Display the numbers from 1 to 10 with step 2
list(range(1, 10, 2))
###Output
_____no_output_____
###Markdown
NOTE: The range() function only accepts an INTEGER step. It is not possible to use values with decimals. **2.4- To create a USER-DEFINED FUNCTION**``` def NAME(START, STOP, STEP): COMMAND(s) and CALCULATION(s) return RESULT```
###Code
# To display the numbers from 1 to 10 with step 0.5, it is necessary to create a function that supports this non-integer step
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
elif n == 1:
return([start])
else:
return([])
seq(1, 5, 0.25)
def MediaMassaki(v1, v2, v3):
return ((v1 + v2 + v3)/3)
MediaMassaki(5, 5, 5)
###Output
_____no_output_____
###Markdown
THE for LOOP
###Code
n = int(input("Quantas vezes deseja repetir o programa?"))
for i in range(0, n):
Nota1 = int(input("Digite o nota 1: "))
while Nota1<0 or Nota1>10:
Nota1 = int(input("Digite o nota 1: "))
Nota2 = int(input("Digite o nota 2: "))
while Nota2<0 or Nota2>10:
Nota2 = int(input("Digite o nota 2: "))
Media = (Nota1 + Nota2)/2.0
print ("Média = " + str(Media))
if Media<6:
print("Recuperação")
else:
print("Aprovada(o)")
###Output
_____no_output_____
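###Markdown
A minimal additional sketch of the for loop combined with range(), printing the numbers from 1 to 5:
###Code
# for loop over a range of integers
for i in range(1, 6):
    print(i)
###Output
_____no_output_____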
###Markdown
**2.5 - MATH FUNCTIONS LIBRARY**
###Code
import math
# Constant π = 3.141592…
math.pi
# Constant e = 2.718281…
math.e
# Mathematical constant τ = 2π = 6.283185307179586...
math.tau
###Output
_____no_output_____
###Markdown
Power of $ x^y $```math.pow(x, y)```
###Code
math.pow(2, 3)
###Output
_____no_output_____
###Markdown
SQUARE ROOT of x: $\sqrt{x}$
###Code
math.sqrt(9)
###Output
_____no_output_____
###Markdown
LOGARITHMS
###Code
# Natural (Napierian) logarithm of x = ln(x)
math.log(2.718281828459045)
# Base-10 logarithm
math.log10(100)
# Base-2 logarithm of x
math.log2(8)
# math.log1p(x) = ln(1+x)
math.log1p(1.718281828459045)
# Logarithm of a in base b
# math.log(a,b)
math.log(27,3)
###Output
_____no_output_____
###Markdown
**Converting x from DEGREES to RADIANS**
###Code
Vg = float(input("Digite ângulo em graus: "))
x = math.radians(Vg)
Vrad = x
print("x em RADIANOS = " + str(x))
###Output
Digite ângulo em graus: 30
x em RADIANOS = 0.5235987755982988
###Markdown
**2.5.1 - TRIGONOMETRIC FUNCTIONS** NOTE: The angle x must be converted to RADIANS SINE(X)
###Code
round(math.sin(x), 2)
###Output
_____no_output_____
###Markdown
COSINE(X)
###Code
math.cos(x)
###Output
_____no_output_____
###Markdown
TANGENT(x)
###Code
math.tan(x)
###Output
_____no_output_____
###Markdown
ARCSINE
###Code
math.asin(x)
###Output
_____no_output_____
###Markdown
ARCCOSINE
###Code
math.acos(x)
###Output
_____no_output_____
###Markdown
ARCTANGENT
###Code
math.atan(x)
###Output
_____no_output_____
###Markdown
ARCTANGENT2(Y/X) math.atan2(y, x)
###Code
math.atan2(2, 3)
###Output
_____no_output_____
###Markdown
LENGTH OF A VECTOR math.hypot(x, y coordinates), equivalent to $\sqrt{x^2 + y^2}$
###Code
math.hypot(3, 4)
###Output
_____no_output_____
###Markdown
Converting x from RADIANS to DEGREES
###Code
round(math.degrees(Vrad),2)
###Output
_____no_output_____
###Markdown
Converting x from DEGREES to RADIANS
###Code
math.radians(x)
###Output
_____no_output_____
###Markdown
**2.5.2 - HYPERBOLIC FUNCTIONS** SINH
###Code
math.sinh(x)
###Output
_____no_output_____
###Markdown
COSH
###Code
math.cosh(1.1547005383792515)
###Output
_____no_output_____
###Markdown
TANH
###Code
math.tanh(x)
###Output
_____no_output_____
###Markdown
ACOSH Returns the inverse hyperbolic cosine of x.
###Code
math.acosh(1.1547005383792515)
###Output
_____no_output_____
###Markdown
ASINH Returns the inverse hyperbolic sine of x.
###Code
math.asinh(x)
###Output
_____no_output_____
###Markdown
ATANH Returns the inverse hyperbolic tangent of x.
###Code
math.atanh(x)
###Output
_____no_output_____ |
assets/analysis/notebooks/Sagemaker Example - Order Analysis.ipynb | ###Markdown
Orders data analysis with SagemakerThis notebook will demonstrate how to train and test LinearLearner model on SageMaker.
###Code
import os
import boto3
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDRegressor
import sagemaker
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from config import *
import warnings
warnings.filterwarnings('ignore')
INPUT_FILENAME = 'train_df.csv'
DATA_LOCAL_PATH = 'orders'
FULL_S3_INPUT_PATH = os.path.join('s3://', SAGEMAKER_S3_BUCKET, INPUT_S3_PATH, INPUT_FILENAME)
FULL_S3_OUTPUT_PATH = os.path.join('s3://', SAGEMAKER_S3_BUCKET, OUTPUT_S3_PATH)
ROLE = SAGEMAKER_ROLE_ARN
SAGEMAKER_SESSION = sagemaker.Session()
CONTAINERS = {
'us-west-2': '174872318107.dkr.ecr.us-west-2.amazonaws.com/linear-learner:latest',
'us-east-1': '382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:latest',
'us-east-2': '404615174143.dkr.ecr.us-east-2.amazonaws.com/linear-learner:latest',
'eu-west-1': '438346466558.dkr.ecr.eu-west-1.amazonaws.com/linear-learner:latest'
}
!echo "Syncing from $DATA_S3_PATH to $DATA_LOCAL_PATH"
!aws s3 sync "$DATA_S3_PATH" "$DATA_LOCAL_PATH"
def load_orders_df(data_path):
paths = os.listdir(data_path)
partial_order_dfs = [pd.read_json(os.path.join(data_path, path), lines=True) for path in paths]
orders_df = pd.concat(partial_order_dfs, copy=False)
orders_df['order_date'] = pd.to_datetime(orders_df['order_date'])
return orders_df
def split_train_test_df(daily_profit_df):
datetime_index = pd.to_datetime(daily_profit_df.index)
datetime_index_in_days = np.array(datetime_index).astype('datetime64[D]')
x = np.array(pd.to_numeric(datetime_index_in_days))
y = daily_profit_df['profit']
return train_test_split(x, y, test_size=0.2, random_state=0)
def profit_by_period(orders_df, date_period):
if date_period == 'day':
        time_grouper = pd.Grouper(freq='D')  # pd.TimeGrouper was removed in newer pandas; pd.Grouper is equivalent
    else:
        time_grouper = pd.Grouper(freq='M')
return orders_df.set_index('order_date').groupby(time_grouper).sum().rename({'price': 'profit'}, axis=1)
def summarize_prices(orders_df):
plt.figure(figsize=(20,10))
orders_df['price'].plot.hist(bins=100)
def plot_df_by_date(orders_df, date_period):
profit_by_date = profit_by_period(orders_df, date_period)
profit_by_date.plot(figsize=(20,10))
plt.xticks(rotation='vertical')
plt.show()
def show_regression_report(x, y, y_pred, scaler=None):
x_time_int = pd.Series(np.rint(x.reshape(-1)))
x_time = pd.to_datetime(x_time_int, unit='d')
print('Test scores:')
print('R2:', r2_score(y, y_pred))
print('RMSE:', np.sqrt(mean_squared_error(y, y_pred)))
pred_df = pd.DataFrame({'profit': y, 'predicted_profit': y_pred}, index=x_time)
pred_df.plot(figsize=(20, 10))
plt.xticks(rotation='vertical')
plt.show()
def training_data_to_dataframe(x, y):
train_df = pd.DataFrame({'x': x.reshape(-1), 'y': y})
return train_df.reindex(['y', 'x'], axis=1)
def upload_sagemaker_input(df, sagemaker_bucket, input_s3_path, input_filename):
df.to_csv(input_filename, index=False, header=False)
input_s3_key = os.path.join(input_s3_path, input_filename)
print('Putting input data to {}'.format(os.path.join(sagemaker_bucket, input_s3_key)))
s3_client = boto3.client('s3')
s3_client.upload_file(
Bucket=sagemaker_bucket,
Key=input_s3_key,
Filename=input_filename
)
def create_sagemaker_linear_regression(train_instance_type, model_name, output_path, mini_batch_size=10):
linear_regression = sagemaker.estimator.Estimator(
CONTAINERS[REGION_NAME],
ROLE,
train_instance_count=1,
train_instance_type=train_instance_type,
output_path=output_path,
sagemaker_session=SAGEMAKER_SESSION
)
linear_regression.set_hyperparameters(
feature_dim=1,
predictor_type='regressor',
loss='squared_loss',
wd=1e-4,
optimizer='sgd',
learning_rate=0.1,
mini_batch_size=mini_batch_size,
epochs=5
)
return linear_regression
def predict_with_sagemaker(sagemaker_predictor, x):
result = sagemaker_predictor.predict(x)
predictions = result['predictions']
return np.array([prediction['score'] for prediction in predictions])
orders_df = load_orders_df(DATA_LOCAL_PATH)
###Output
_____no_output_____
###Markdown
Data exploration Let's explore orders using visual analysis and select an algorithm to predict future sales profit. Orders price distribution
###Code
summarize_prices(orders_df)
###Output
_____no_output_____
###Markdown
Profit by month
###Code
plot_df_by_date(orders_df, 'month')
###Output
_____no_output_____
###Markdown
Profit by day
###Code
plot_df_by_date(orders_df, 'day')
###Output
_____no_output_____
###Markdown
Data preprocessing We need to convert the data into the proper format and split it into train and test sets.
###Code
daily_profit_df = profit_by_period(orders_df, 'day')
x_train, x_test, y_train, y_test = split_train_test_df(daily_profit_df)
###Output
_____no_output_____
###Markdown
Upload data to S3 The code below will upload the training data to S3.
###Code
train_df = training_data_to_dataframe(x_train, y_train)
upload_sagemaker_input(train_df, SAGEMAKER_S3_BUCKET, INPUT_S3_PATH, INPUT_FILENAME)
###Output
_____no_output_____
###Markdown
Fit Sagemaker's Linear Learner Model training requires specifying where the data is located and what type of instance we want to use.
###Code
sagemaker_linear_regression = create_sagemaker_linear_regression(
train_instance_type=SAGEMAKER_TRAINING_INSTANCE_TYPE,
model_name=MODEL_NAME,
output_path=FULL_S3_OUTPUT_PATH
)
sagemaker_linear_regression.fit(
{
'train': sagemaker.s3_input(
FULL_S3_INPUT_PATH,
content_type='text/csv'
)
},
logs=False
)
###Output
_____no_output_____
###Markdown
Deploy model to an endpoint After training, we use the fitted object to build and deploy the model. This creates a SageMaker endpoint that can be used to perform inference.
###Code
linear_predictor = sagemaker_linear_regression.create_model(name=MODEL_NAME).deploy(
initial_instance_count=1,
instance_type=SAGEMAKER_HOSTING_INSTANCE_TYPE,
endpoint_name=ENDPOINT_NAME
)
linear_predictor.content_type = 'text/csv'
linear_predictor.serializer = sagemaker.predictor.csv_serializer
linear_predictor.deserializer = sagemaker.predictor.json_deserializer
###Output
_____no_output_____
###Markdown
Validate results Using the deployed endpoint, we can check the accuracy of our model on test data.
###Code
predictions = predict_with_sagemaker(linear_predictor, x_test.reshape(-1, 1))
show_regression_report(x_test, y_test, predictions)
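# Optional cleanup sketch (not executed here): when you are finished experimenting,
# deleting the SageMaker endpoint avoids ongoing hosting charges, e.g.:
# linear_predictor.delete_endpoint()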
###Output
_____no_output_____ |
Triple-RI Model Drafting.ipynb | ###Markdown
EDA
###Code
df.info()
df['school_name.x']
df['school_name.y']
df = df.drop(['school_name', 'school_name.y'], axis =1)
df.head()
nulls = sum(pd.isna(df.sharedbuilding))
nulls
#since all of the data is from the most recently completed school year, we can leave that out
df = df.drop(['sharedbuilding'], axis = 1)
df.head()
df.info()
dfZ = df[df.postcode.isna() == False]
dfZ.head()
dfZ.info()
dfZ.postcode = dfZ.postcode.apply(lambda x: str(int(x)))
dfZ.head()
import numpy as np
np.unique(dfZ['school_name.x'])
dfZ = dfZ.drop(['dbn', 'borough', 'school_name.x'], axis =1)
dfZ.head()
!pip install dython
from dython.nominal import associations
import matplotlib.pyplot as plt
associations(dfZ, theil_u=True, figsize=(12, 12))
plt.show()
import seaborn as sns
fig, ax = plt.subplots(figsize=(10,10))
sns.heatmap(dfZ.corr(), cmap = "BuGn_r")
plt.show()
df = pd.read_csv('/project_data/data_asset/master_final.csv')
df.head()
df.info()
aggs_demo = []
for i in range(len(df.total_enrollment)):
aggs_demo.append(df.female_1[i]+ df.male_1[i] + df.asian_1[i] + df.black_1[i] + df.hispanic_1[i] + df.multiple_race_categories_1[i] + df.white_1[i] + df.students_with_disabilities_1[i] + df.poverty_1[i] + df.economic_need_index[i])
aggs_demo
df['Aggregate'] = aggs_demo
df.head(5)
df2 = pd.read_csv('/project_data/data_asset/master_final.csv')
df2.head()
df2.info()
school_risk_index = []
for i in range(len(df2.postcode)):
school_risk_index.append(df2.female_1[i]+ df2.male_1[i] + df2.asian_1[i] + df2.black_1[i] + df2.hispanic_1[i] +
df2.multiple_race_categories_1[i] + df2.white_1[i] + df2.students_with_disabilities_1[i] + df2.poverty_1[i] + df2.economic_need_index[i] +
df2.overcap[i] - df2.undercap[i]) # schools at undercapacity have lower risk
school_risk_index
from sklearn.preprocessing import normalize
school_risk_index = normalize(np.array(school_risk_index).reshape(-1,1), axis = 0, norm = 'max')
df2['school_risk_index'] = school_risk_index
df2['school_risk_index'].head()
df2.to_csv('/project_data/data_asset/master3_agg.csv')
dfC = pd.read_csv('/project_data/data_asset/covid_zip_ts.csv')
dfC.head()
import numpy as np
np.unique(dfC.postcode)
#zipcodes where people live
np.unique(df2.postcode)
#zipcodes where schools are located
from datetime import datetime
dfC.date2 = dfC.date2.apply(lambda x: datetime.strptime(x, "%Y-%m-%d"))
dfC.date2
dfM = pd.read_csv('/project_data/data_asset/master2_agg.csv')
dfM.head()
plt.scatter(dfC.date2, dfC.COVID_CASE_COUNT)
a = dfC.groupby(['postcode'])['date2','COVID_CASE_COUNT'].sum()
a= pd.DataFrame(a)
a
#divies up the COVID numbers by zip code and displays it as a time-series
###Output
_____no_output_____
###Markdown
Creating an environment index
###Code
dfC.head()
dfC.POP_DENOMINATOR.unique()
pop_norm = normalize(dfC.POP_DENOMINATOR.unique().reshape(-1,1), axis = 0, norm = 'max').ravel()
dfC['pop_norm'] = 0
j = 0
for i in range(len(dfC.postcode)):
dfC['pop_norm'].iloc[i] = pop_norm[j]
j += 1
if j == 177:
j = 0
dfC['pop_norm'].iloc[6000]
aggs_env = []
for i in range(len(dfC.postcode)):
aggs_env.append((dfC.COVID_CASE_COUNT[i] / dfC.POP_DENOMINATOR[i]) + (dfC.COVID_DEATH_COUNT[i] / dfC.POP_DENOMINATOR[i]))
aggs_env
dfC['env_index'] = aggs_env
dfC['env_index'] = normalize(np.array(aggs_env).reshape(-1,1), axis = 0, norm = 'max').ravel()
dfC = dfC.drop("pop_norm", axis = 1)
dfC.to_csv('/project_data/data_asset/master3_agg3.csv')
dfM2 = pd.read_csv('/project_data/data_asset/master2_agg2.csv')
dfM2 = dfM2.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis =1)
dfM2.head()
dfM.head()
###Output
_____no_output_____
###Markdown
Creating response variables
###Code
a = dfM2.groupby(['postcode'])['env_index'].mean()
#average environment index for a given zipcode
avg_envs = []
for b in a:
avg_envs.append(b)
avg_envs
import numpy as np
zips = []
for z in np.unique(dfM2.postcode):
zips.append(str(z))
zips
print(len(zips) == len(avg_envs))
zipdict = {}
for i in range(len(zips)):
zipdict[zips[i]] = [avg_envs[i], zips[i]]
zipdict
#saves a dictionary with zipcode-average pairs and the zips themselves
dfM.head()
avg_env = []
for p in dfM.postcode:
if str(p) in zipdict.keys():
avg_env.append(zipdict[str(p)][0])
else:
avg_env.append(np.mean(avg_envs))
avg_env
dfM['zip_env_index'] = avg_env
dfM.head()
dfM = dfM.drop(['env_index'], axis =1)
print(len(dfM['demo_index']), len(dfM['zip_env_index']))
for i in range(len(dfM['demo_index'])):
print(dfM['demo_index'][i], dfM['zip_env_index'][i])
hybrids = []
for i in range(len(dfM['demo_index'])):
a = dfM['demo_index'][i] + dfM['zip_env_index'][i]/10000
hybrids.append(a)
len(hybrids)
dfM['hybrid_index'] = hybrids
dfM.head()
print(max(dfM.hybrid_index))
print(min(dfM.hybrid_index))
print("")
import statistics
print(statistics.median(dfM.hybrid_index))
risks = []
for i in range(len(dfM.hybrid_index)):
if dfM['hybrid_index'][i] >= statistics.mean(dfM.hybrid_index):
risks.append(1)
else:
risks.append(0)
risks
len(risks)
dfM['Reopening_Risk'] = risks
dfM.head()
dfM.to_csv('/project_data/data_asset/tripleRI.csv')
import ibm_boto3
# Constants for IBM COS values
COS_ENDPOINT = "https://s3.us.cloud-object-storage.appdomain.cloud"
AWS_API_KEY_ID = "3d5e016674e746938b4ab071263cfda5"
AWS_SECRET_ACCESS_KEY = "f6e0459b99bada9efe0784a56a7ca5c93cda27db2ddd56b0"
COS_AUTH_ENDPOINT = "https://iam.cloud.ibm.com/identity/token"
# Create resource
cos = ibm_boto3.resource("s3",
aws_access_key_id=AWS_API_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
ibm_auth_endpoint=COS_AUTH_ENDPOINT,
endpoint_url=COS_ENDPOINT
)
bucket_name = 'intern-hackathon-2020'
item_names = ['master3_agg3.csv', 'school_index.csv']
for item_name in item_names:
print(item_name)
with open('/project_data/data_asset/'+item_name, "rb") as file_data:
cos.Object(bucket_name, item_name).upload_fileobj(
Fileobj=file_data
)
###Output
master3_agg3.csv
school_index.csv
|
notebooks/D6_L2_Filtering/06_signal_fourier.ipynb | ###Markdown
Analyzing the frequency components of a signal with a Fast Fourier Transform Fast Fourier Transforms (FFT) are a fast way to find dominant frequencies in any signal. Let's start with the standard imports.
###Code
import datetime
import numpy as np
import scipy as sp
import scipy.fftpack
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Now let's look at some weather data to see if we can find a dominant seasonal pattern.
###Code
df0 = pd.read_csv('data/weather.csv',
na_values=(-9999),
parse_dates=['DATE'])
df = df0[df0['DATE'] >= '19940101']
df.head()
df_avg = df.dropna().groupby('DATE').mean()
df_avg.head()
date = pd.to_datetime(df_avg.index)
temp = (df_avg['TMAX'] + df_avg['TMIN']) / 20.
N = len(temp)
###Output
_____no_output_____
###Markdown
If we plot the temperature results for the last several years, we see a pattern.
###Code
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
temp.plot(ax=ax, lw=.5)
ax.set_ylim(-10, 40)
ax.set_xlabel('Date')
ax.set_ylabel('Mean temperature')
###Output
_____no_output_____
###Markdown
Now let's apply the FFT to calculate the frequency of that pattern. Does it match our expected result of peaks in temperature roughly each year?
###Code
temp_fft = sp.fftpack.fft(temp)
temp_psd = np.abs(temp_fft) ** 2
fftfreq = sp.fftpack.fftfreq(len(temp_psd), 1. / 365)
i = fftfreq > 0
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(fftfreq[i], 10 * np.log10(temp_psd[i]))
ax.set_xlim(0, 5)
ax.set_xlabel('Frequency (1/year)')
ax.set_ylabel('PSD (dB)')
###Output
_____no_output_____
###Markdown
Sure enough, there is a dominant frequency at 1. Therefore we can conclude that there is a periodicity in the data when we look at the annual temperature, and the period is almost exactly 1 year. Of course we are not surprised. We know this to be true. Can you think of other things that might reveal an underlying structure with a periodicity? Using these results, we can apply the period of 1 year to plot a "smooth" line of mean temperature, overlaid on the actual temperature. From this we could do further analysis, but we will stop here.
###Code
temp_fft_bis = temp_fft.copy()
temp_fft_bis[np.abs(fftfreq) > 1.1] = 0
temp_slow = np.real(sp.fftpack.ifft(temp_fft_bis))
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
temp.plot(ax=ax, lw=.5)
ax.plot_date(date, temp_slow, '-')
ax.set_xlim(datetime.date(1994, 1, 1),
datetime.date(2000, 1, 1))
ax.set_ylim(-10, 40)
ax.set_xlabel('Date')
ax.set_ylabel('Mean temperature')
###Output
_____no_output_____ |
fundamentals-of-deeplearning/notebooks/Eager_Execution_in_TensorFlow_2.x.ipynb | ###Markdown
Eager Execution in TensorFlow 2.XEstimated time needed: **15** minutes ObjectivesAfter completing this lab you will be able to:- Understand the impact of eager execution and the need to enable it Section 1: What is Eager Execution TensorFlow's **eager execution** is an imperative programming environment that evaluates operations immediately without building graphs. Operations return concrete values instead of constructing a computational graph to run later. This makes it easy to get started with TensorFlow and debug models. With **TensorFlow 2.x**, **Eager Execution is enabled by default**. This allows TensorFlow code to be executed and evaluated line by line. Before version 2.x was released, Eager execution was disabled by default. This meant that every graph had to be run within a TensorFlow **session**. This only allowed for the entire graph to be run all at once and made it hard to debug the computation graph. Eager execution is a flexible machine learning platform for research and experimentation, which provides:- **An intuitive interface** - Structure your code naturally and use Python data structures. Quickly iterate on small models and small data.- **Easier debugging** - Execute operations directly to inspect code line by line and test changes. Use standard Python debugging tools for immediate error reporting.- **Natural control flow** — Use Python control flow instead of graph control flow, simplifying the specification of dynamic models. Section 2: Installing TensorFlow Let us begin by installing TensorFlow version 2.2.0 and its required prerequisites.
###Code
!pip install grpcio==1.24.3
!pip install tensorflow==2.2.0
###Output
_____no_output_____
###Markdown
Notice: This notebook has been created with TensorFlow version 2.2, and might not work with other versions. Therefore, let us verify the TensorFlow version:
###Code
import tensorflow as tf
if not tf.__version__ == '2.2.0':
print(tf.__version__)
raise ValueError('Please upgrade to TensorFlow 2.2.0, or restart your Kernel (Kernel->Restart & Clear Output)')
###Output
_____no_output_____
###Markdown
As mentioned above, in **TensorFlow 2.x**, eager execution is enabled by default. Run the code in the next cell to verify that eager execution is enabled.
###Code
tf.executing_eagerly()
###Output
_____no_output_____
###Markdown
A **True** response here means that eager execution is enabled and that the results of TensorFlow operations will return immediately. Let us first see how things get done without the eager execution in TensorFlow. Section 3: TensorFlow Operations Without Eager Execution Mode Let us use the **disable_eager_execution()** function in TensorFlow 2.x to disable eager execution:
###Code
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
###Output
_____no_output_____
###Markdown
Note: This function can only be called at the beginning - before any Graphs, Ops, or Tensors have been created. Now, let us verify that the eager execution is disabled.
###Code
tf.executing_eagerly()
###Output
_____no_output_____
###Markdown
The **False** in the output means that eager execution is now disabled. Run the code in the next cell. Notice that we are creating an object **a** of type **tensorflow.python.framework.ops.Tensor**
###Code
import numpy as np
a = tf.constant(np.array([1., 2., 3.]))
type(a)
###Output
_____no_output_____
###Markdown
Let us create another Tensor **b** and apply the dot product between them. This gives us **c**.
###Code
b = tf.constant(np.array([4.,5.,6.]))
c = tf.tensordot(a, b, 1)
type(c)
print(c)
###Output
_____no_output_____
###Markdown
Note that **c** is a **tensorflow.python.framework.ops.Tensor** as well. So any node of the execution graph resembles a Tensor type. **But so far not a single computation has happened**. You need to execute the graph first. You can pass any graph or subgraph to the TensorFlow runtime for execution. Each TensorFlow graph runs within a TensorFlow Session. Let us create a TensorFlow Session: **Note:** Session can be accessed via **tf.compat.v1.Session()** in TensorFlow 2.x.
###Code
session = tf.compat.v1.Session()
output = session.run(c)
session.close()
print(output)
###Output
_____no_output_____
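###Markdown
As an aside (not part of the original lab), the same graph can also be executed with the session used as a context manager, so it is closed automatically; the sketch below uses only the `tf.compat.v1` API and the tensor **c** defined above.
###Code
# Illustrative sketch only: run the same graph with an auto-closing session.
with tf.compat.v1.Session() as sess:
    # The session is closed automatically when the block exits.
    print(sess.run(c))
###Output
_____no_output_____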
###Markdown
Since the graph has now been executed, the correct result of 32 is computed and returned. However, note that there is no way to debug the code before the complete graph was executed. So let us re-enable the **eager execution** and see how code execution works by default in TensorFlow 2.x. Section 4: TensorFlow Operations With Eager Execution Mode IMPORTANT! => Restart the kernel by clicking on "Kernel"->"Restart" so that the changes take effect.**Enabling or disabling of eager execution has to happen on program startup. This is the reason why the kernel needs to be restarted.** Import the required libraries again.
###Code
import tensorflow as tf
import numpy as np
###Output
_____no_output_____
###Markdown
Re-enable eager execution.
###Code
from tensorflow.python.framework.ops import enable_eager_execution
enable_eager_execution()
###Output
_____no_output_____
###Markdown
Now you can run TensorFlow operations and the results will return immediately:
###Code
x = [[4]]
m = tf.matmul(x, x)
print("Result, {}".format(m))
###Output
_____no_output_____
###Markdown
Enabling eager execution changes how TensorFlow operations behave — now they immediately evaluate and return their values to Python. Since there isn't a computational graph to build and run later in a session, it's easy to inspect results using print() or a debugger. Let us recreate the object **a** using the same code that was used before.
###Code
a = tf.constant(np.array([1., 2., 3.]))
type(a)
###Output
_____no_output_____
###Markdown
Notice how the same code created a different type of object. So now **a** is of type **tensorflow.python.framework.ops.EagerTensor**. This means that when eager execution is enabled, without changing any code, we obtain a tensor object which allows us to debug the code without executing a graph in a session:
###Code
print(a.numpy())
###Output
_____no_output_____
###Markdown
When eager execution is enabled, Tensors can be treated like ordinary python objects. You can work with them as usual, insert debug statements at any point or even use a debugger. Let us continue with the example.
###Code
b = tf.constant(np.array([4.,5.,6.]))
c = tf.tensordot(a, b,1)
type(c)
###Output
_____no_output_____
###Markdown
Notice again how **c** is a **tensorflow.python.framework.ops.EagerTensor** object which can be directly read:
###Code
print(c.numpy())
###Output
_____no_output_____
###Markdown
Without creating a session or a graph, we obtained the result of the defined computation. Section 5: Dynamic Control Flow A major benefit of eager execution is that all the functionality of the host language is available while your model is executing. So, for example, it is easy to write [fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz):
###Code
def fizzbuzz(max_num):
counter = tf.constant(0)
max_num = tf.convert_to_tensor(max_num)
for num in range(1, max_num.numpy()+1):
num = tf.constant(num)
if int(num % 3) == 0 and int(num % 5) == 0:
print('FizzBuzz')
elif int(num % 3) == 0:
print('Fizz')
elif int(num % 5) == 0:
print('Buzz')
else:
print(num.numpy())
counter += 1
fizzbuzz(15)
###Output
_____no_output_____ |
_notebooks/Savitzky_Golay_Filter.ipynb | ###Markdown
"How to smoothen noisy data and find peaks and dips in a line plot using Python"> "In this small tutorial we will use the U.S. COVID-19 inoculation data to demonstrate the effect of the Savitzky-Golay filter and find the most prominent peaks and dips in daily vaccinations."- toc: false- branch: master- badges: true- comments: true- categories: [python, jupyter]- image: images/Savitzky-Golay-Filter.png- hide: false- search_exclude: false Presenting peaks and dips in a noisy line plot can be a bit of a challenge, as there is a lot of unnecessary visual information. Savitzky-Golay filter is a function that can be applied to such data in order to clarify the points with minimal distortion and precision loss. It was formulated for the exact purpose of finding maxima and minima in curve data by Savitzky themselves {% fn 1 %}. In this small tutorial we will use the U.S. COVID-19 inoculation data to demonstrate the effect of the filter and find the most prominent peaks and dips in daily vaccinations. We will use an interactive widget to tweak the optimal parameters for the filter (click one of the above links for an interactive version). Step 1: Install the following Python packages
###Code
!pip install widgetsnbextension ipywidgets jupyter-js-widgets-nbextension ipympl
###Output
_____no_output_____
###Markdown
Step 2: Enable widget support in your Jupyter environment
###Code
!jupyter nbextension enable --py widgetsnbextension --sys-prefix
###Output
Enabling notebook extension jupyter-js-widgets/extension...
- Validating: [32mOK[0m
###Markdown
Step 3: Importing the dependenciesWe will use Pandas to read and manipulate the `.csv` file, Matplotlib for plotting the data, `signal` method from the Scipy package to apply the filter, Numpy and `argrelextrema` function to find the "extreme" values in the data, and finally `interactive` to build the necessary sliders.
###Code
from ipywidgets import interactive
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
###Output
_____no_output_____
###Markdown
Step 4: Import and filter the data by location; we will use a CSV file from Our World in Data.
###Code
df_raw = pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv", usecols=["location", "date", "daily_vaccinations"], parse_dates=["date"])
df = df_raw[df_raw["location"] == "United States"]
df.set_index("location", inplace=True, drop=True)
df
###Output
_____no_output_____
###Markdown
Step 5: Build the function and plot the data First, we assign our X and Y values. We feed the Y values, or the first 444 consecutive days of `daily_vaccinations`, into the `signal.savgol_filter()` method. This function requires 2 parameters: `window_size` and `polyorder`. According to the documentation, `window_size` is always a positive *odd* integer and `polyorder` is *any* positive integer that is less than `window_length` {% fn 2 %}. Unfortunately my understanding of the concept is very limited, but the general goal is to "[keep] the important features and getting rid of the meaningless fluctuations" {% fn 3 %}. These variables control the smoothness of the curve: too low and the curve will lose the detail, too high -- it will become distorted; the rule of thumb is to start low and build up from that. Because the exact values vary with data, they cannot be known beforehand. Controlling them visually will help us find the optimal curve for our purpose, hence the need for a slider to set the inputs more intuitively.
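Before wiring the filter into the interactive widget below, here is a minimal standalone sketch, on synthetic data rather than the vaccination data, of how these two parameters change the result (illustrative only; every name in it is made up for the demo):
###Code
# Illustrative sketch: effect of the Savitzky-Golay parameters on a noisy sine wave.
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter

t = np.linspace(0, 4 * np.pi, 400)
noisy = np.sin(t) + np.random.normal(scale=0.3, size=t.size)

smooth_gentle = savgol_filter(noisy, window_length=11, polyorder=2)   # keeps more detail
smooth_strong = savgol_filter(noisy, window_length=101, polyorder=2)  # much smoother curve

plt.plot(t, noisy, color="lightgrey", label="noisy data")
plt.plot(t, smooth_gentle, label="window_length=11")
plt.plot(t, smooth_strong, label="window_length=101")
plt.legend()
plt.show()
###Output
_____no_output_____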
###Code
def make_iplot(window_size, polyorder):
data_x = df["date"].values
data_y = df["daily_vaccinations"].values # original
data_y_filtered = signal.savgol_filter(data_y, window_size, polyorder) # smoothed
# Find peaks (np.greater)
peak_indexes = signal.argrelextrema(data_y_filtered, np.greater)
peak_indexes = peak_indexes[0]
# Find valleys (np.less)
valley_indexes = signal.argrelextrema(data_y_filtered, np.less)
valley_indexes = valley_indexes[0]
# Matplotlib plot
plt.figure(figsize=(20, 5))
plt.plot(data_x, data_y, color="grey") # line plot for the original data
plt.plot(data_x, data_y_filtered, color="black") # line plot for the filtered data
plt.plot(data_x[valley_indexes], data_y_filtered[valley_indexes], "o", label="dip", color="r")
plt.plot(data_x[peak_indexes], data_y_filtered[peak_indexes], "o", label="peak", color="g")
plt.show()
###Output
_____no_output_____
###Markdown
With that said, we arbitrarily set the range of 1 to 100 for the `window_size` slider, and 1 to 10 for the `polyorder`.
###Code
# this line of code makes the figure appear in the output below
%matplotlib inline
iplot = interactive(
make_iplot,
window_size=(1,100,2),
polyorder=(1,10,1)
)
iplot
###Output
_____no_output_____
###Markdown
 Setting the initial values of 3 and 1 (`polyorder` < `windows_size`) as starting points gives us a result that isn't dissimilar to the original data (red dots represent dips and green ones -- peaks).
###Code
# this line of code makes the figure appear in the output below
%matplotlib inline
iplot = interactive(
make_iplot,
window_size=(1,100,2),
polyorder=(1,10,1)
)
iplot
###Output
_____no_output_____ |
ml_101/ml_101_ex01.ipynb | ###Markdown
Agenda We are going to review one of the examples of machine learning. Importing Library The following cell imports the necessary libraries. Run each cell to load the input/output. For the basic operation of Jupyter Lab, please refer to the link (https://youtu.be/jZ952vChhuI?t=113).
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Here, we imported NumPy and matplotlib libraries. The following are a brief explanation1. NumPy is a library for the Python programming language for numerical analysis. We will import mathematical expressions and operations from this library but make a minimum for this tutorial.2. Matplotlib is a library for visualization. Introduction to Data ScienceData Science(DS) analyzes the data qualitatively to help decision making, business strategy, prediction, forecasting, or optimizing tasks such as predicting the upcoming market based on customers' data, recognizing customer behavior, etc. DS uses various approaches in mathematics and computer sciences to understand data in-depth. Each methodology has advantages and disadvantages. DS compares different approaches and finds the most suitable one to describe the problem. There are numerous theories, methods, and techniques to analyze the issue, and the DS skill set includes but is not limited to only mathematics. Task of Data SciencesData Science(DS) is a data-driven consultant in a broad sense. For DS, the following points are essential to delivering an appropriate solution.1. Business understanding.2. Choosing model/s for analysis.3. Proper data sets for analysis.Each of the previous points has multiple layers to process. In this notebook, we will focus on number 2 with Machine Learning (ML) methodology. Machine LearningThe following table is the current main approaches of Machine Learning(ML) and each of them has its files of strength fields and the brief.| Method | Analysis Type | Example of Use Cases | Requirements for the Algorithm || :--- | :--- | :--- | :--- || Supervised learning | 1. Classification 2. Regression | 1. Classification of images such as "is this a dog or cat image?" 2. Forecasting how many people are going to rent a car? | Labeled training data || Unsupervised learning | 1. Association analysis 2. Dimensionality reduction | 1. Recommendation system which finds similar products for customers 2. Reduce dimensions of data to get more insights. For example, lower the patient's data for explainability form and make a diagnose| Sufficient number of data which represents the phenomenon to analyze || Reinforcement lerning | Rewarding system | Learning chess | Function that describes the score of the system to get the maximum reward |We are going to discuss Supervised Learning in this notebook. The Fundamentals of Machine Learning Input/s and output/s of machine learning modelLet's visit some of the fundamental concepts of data analysis. We are going to explain some of the words with an example with a hands-on program.* Input: The input to the algorithm usually is values or data. For example, customer information, images, sounds, etc.* Output: The output from the algorithm which is values or data. For example, barometers of customer's health, classification of images, text converted from sounds, etc.* Model: Algorithm that inhales input data and output data. For example, it is perceptron which we are going to do hands-on exercise soon.Let's see what is the input/output with the actual example.The following is one of the basic models called "perceptron" for Deep-Neural-Network(DNN). DNN is one of the most famous algorithms for Artificial Intelligence. We will not step into the detail of DNN.First, we define the perceptron function that we are going to use. Let's focus on the functionality of the perceptron and not go too much into detail. The previous image is one example of the perceptron function. 
You can see that the violet circle, which is the perceptron, takes in three inputs. The input data are $x1$, $x2$, and $x3$, and the output data is $y$. The inputs $x1$, $x2$, and $x3$ represent different data such as income, dates, and time. The output $y$ is, for example, the probability that an application for a credit card will be rejected. The following is the perceptron that we are going to use.
###Code
# Definition of a simple perceptron for this exercise
def perceptron(x, w):
z = x[0] * w[0] + x[1] * w[1] + x[2] * w[2] + w[3]
return 1.0 / (1.0 + np.exp(-z)) # Sigmoid activation function
###Output
_____no_output_____
###Markdown
The next part is loading the input data. Please execute the following cell to load it.
###Code
# Target value
target = 1.0
# Load input values
x1 = 0.1
x2 = 0.2
x3 = -0.1
input_x_values = [x1, x2, x3]
###Output
_____no_output_____
###Markdown
The next cell is for loading the weights and bias. The weights and the bias are the parameters that determine how strongly each feature (i.e., each input) contributes to the output value. Getting the correct weights and biases is the most important part, and that process is called training. We will revisit the concept of training later. Here, we assume the training is already done and that we have obtained proper weights and a bias, which is equivalent to having built our model. The values can be loaded with the following cell.
###Code
# Load weights
w1 = -1.64
w2 = -0.98
w3 = 1.31
# Load Bias/es
b1 = -0.05
weight_values = [w1, w2, w3, b1]
result = perceptron(input_x_values, weight_values)
###Output
_____no_output_____
###Markdown
The following is the output value, i.e., the result. Based on the weights and the given inputs, we can calculate the probability from the perceptron.
###Code
# Print the output
print("Output value from the given perceptron: ", '{:.5g}'.format(result))
# Plot figure
x = [result]
y = [0]
x_t = [target]
y_t = [0]
plt.plot(x, y, 'o', c="blue")
plt.plot(x_t, y_t, 'X', c="red")
plt.xlim([0, 1.2])
plt.ylim([-0.02, 0.02])
plt.show()
###Output
_____no_output_____
###Markdown
Now, we can see the blue dot and the red dot. The red dot is the target value. The blue dot is the probability with the given $x1$, $x2$, and $x3$, which is $0.36795$ ($36.795\%$). Let's change the input data and see how the probability changes. We fix $x1$ and $x2$ and see what happens when we change only $x3$.
###Code
# save previous result
pre_result = result
# Load input values
x1 = 0.1
x2 = 0.2
x3 = 0.9
input_x_values = [x1, x2, x3]
result = perceptron(input_x_values, weight_values)
# Print the output
print("Output value from the given perceptron: ", '{:.5g}'.format(result))
print()
# Plot figure
x_p = [pre_result]
y_p = [0]
x = [result]
y = [0]
x_t = [target]
y_t = [0]
plt.plot(x, y, '*', c='m')
plt.plot(x_p, y_p, 'o', c="blue")
plt.plot(x_t, y_t, 'X', c="red")
plt.xlim([0, 1.2])
plt.ylim([-0.02, 0.02])
plt.show()
###Output
_____no_output_____
###Markdown
Now, we can see the magenta point that the probability goes up to $0.6833$. This means that the difference of $x3$ from $-0.1$ to $0.9$ contributes to the probability of about $30$%. In other words, the $x3$ in this model may play an important role to determine the result. Are you convinced that the $x3$ is the most important feature for the output? Let's do some exercise and analysis. On the next cell, change the $x1$, $x2$, and $x3$ several times and derive the probability. Observe how the output will change. Which feature contributes the most? Exercise 1.1Change the inputs of $x1$, $x2$, and $x3$ values and run the cell sevearal times. Observe the output changes.
###Code
#####
### YOUR CODE STARTS HERE ###
x1 = None # Replace "None" with an arbitrary number
x2 = None # Replace "None" with an arbitrary number
x3 = None # Replace "None" with an arbitrary number
### YOUR CODE ENDS HERE ###
#####
input_x_values = [x1, x2, x3]
# Simulation for numerical results
result = perceptron(input_x_values, weight_values)
# Print the output
print("Output value from the given perceptron: ", '{:.5g}'.format(result))
print()
# Plot figure
x = [result]
y = [0]
x_t = [target]
y_t = [0]
plt.plot(x, y, '*', c='b')
plt.plot(x_t, y_t, 'X', c="red")
plt.xlim([0, 1.2])
plt.ylim([-0.02, 0.02])
plt.show()
###Output
_____no_output_____
###Markdown
Weights and biases Next, we are going to look at the weights and biases. The weights and biases are the parameters that need to be tuned so that the model reflects the actual phenomenon. For example, if you run a prediction for a specific customer, the inputs are fixed, so we somehow have to build a model that outputs a reasonable value. This means we are going to tune the weights and biases. First, we load the input, which is fixed.
###Code
# Load input values
x1 = 0.1
x2 = 0.2
x3 = -0.1
input_x_values = [x1, x2, x3]
###Output
_____no_output_____
###Markdown
Next, loading the weights and biases.
###Code
# Load weights
w1 = -1.64
w2 = -0.98
w3 = 1.31
# Loading and Bias/es
b1 = -0.05
weight_values = [w1, w2, w3, b1]
# Simulation for numerical results
result = perceptron(input_x_values, weight_values)
# Print the output
print("Output value from the given perceptron: ", '{:.5g}'.format(result))
# Plot figure
x = [result]
y = [0]
x_t = [target]
y_t = [0]
plt.plot(x, y, '*', c='gold')
plt.plot(x_p, y_p, 'o', c="blue")
plt.plot(x_t, y_t, 'X', c="red")
plt.xlim([0, 1.2])
plt.ylim([-0.02, 0.02])
plt.show()
###Output
_____no_output_____
###Markdown
Let's load different weights and biases and see the difference.
###Code
# Save previous result
pre_result = result
# Load weights
w1 = 4.64
w2 = 3.98
w3 = 2.31
# Load and Bias/es
b1 = 0.05
weight_values = [w1, w2, w3, b1]
# Simulation for numerical results
result = perceptron(input_x_values, weight_values)
# Print the output
print("Output value from the given perceptron: ", '{:.5g}'.format(result))
# Plot figure
x_p = [pre_result]
y_p = [0]
x = [result]
y = [0]
x_t = [target]
y_t = [0]
plt.plot(x, y, '*', c='m')
plt.plot(x_p, y_p, 'o', c="blue")
plt.plot(x_t, y_t, 'X', c="red")
plt.xlim([0, 1.2])
plt.ylim([-0.02, 0.02])
plt.show()
###Output
_____no_output_____
###Markdown
Do you see that even the same input gives different outputs depending on the model? The inputs are fixed, so we have to do proper training to get a good model. In other words, we have to find the correct weights and biases, i.e., the parameters, through training. Otherwise, the model will not predict correctly. To get the right model, we need the correct parameters, and to get those parameters, we need a reasonable amount of good data for training. We will go into more detail on this in the next example on supervised learning. Exercise 1.2. Change the weights and bias, that is $w1$, $w2$, $w3$, and $b1$, and find the parameters that return the highest value. This is the same process as manual model training.
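As an aside before trying Exercise 1.2 by hand in the next cell: in practice this parameter search is automated, most commonly with gradient descent. The following is only an illustrative sketch of that idea (it assumes the `perceptron`, `input_x_values` and `target` defined earlier in this notebook) and is not part of the exercise.
###Code
# Illustrative sketch only: tune w1, w2, w3, b1 automatically with gradient descent.
# Numerical (finite-difference) gradients are used to keep the sketch short.
import numpy as np

def loss(params):
    # Squared error between the perceptron output and the target value
    return (perceptron(input_x_values, params) - target) ** 2

params = np.zeros(4)            # w1, w2, w3, b1 all start at 0
lr, eps = 0.5, 1e-6             # learning rate and finite-difference step
for _ in range(200):
    grad = np.zeros_like(params)
    for i in range(len(params)):
        bumped = params.copy()
        bumped[i] += eps
        grad[i] = (loss(bumped) - loss(params)) / eps   # approximate dLoss/dparam_i
    params = params - lr * grad                         # gradient descent update

print("Learned parameters:", params)
print("Perceptron output:", perceptron(input_x_values, params))
###Output
_____no_output_____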
###Code
#####
### YOUR CODE STARTS HERE ###
w1 = None # Replace "None" with an arbitrary number
w2 = None # Replace "None" with an arbitrary number
w3 = None # Replace "None" with an arbitrary number
b1 = None # Replace "None" with an arbitrary number
### YOUR CODE ENDS HERE ###
#####
# Load lists
weight_values = [w1, w2, w3, b1]
# Simulation for numerical results
result = perceptron(input_x_values, weight_values)
# Print the output
print("Output value from the given perceptron: ", '{:.5g}'.format(result))
# Plot figure
x = [result]
y = [0]
x_t = [target]
y_t = [0]
plt.plot(x, y, '*', c='blue')
plt.plot(x_t, y_t, 'X', c="red")
plt.xlim([0, 1.2])
plt.ylim([-0.02, 0.02])
plt.show()
###Output
_____no_output_____ |
graphomics/notebooks/pipeline/preprocess_zebrafish.ipynb | ###Markdown
Preprocess zebrafish data Format zebrafish gene data from https://www.pnas.org/content/114/5/E717.short to be in the format we want
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
import pandas as pd
base_dir = 'C:\\Users\\joewa\\Dropbox\\Analysis\\omics_integration\\Transcriptomic, proteomic, and metabolomic landscape\\data'
count_df = pd.read_csv(os.path.join(base_dir, 'gene_data.csv'), index_col='Identifier')
count_df
distal_vs_proximal_df = pd.read_csv(os.path.join(base_dir, 'deseq_distal_vs_proximal.csv'), index_col='id')
distal_vs_proximal_df = distal_vs_proximal_df[['log2FoldChange', 'pval']]
distal_vs_proximal_df = distal_vs_proximal_df.rename(columns={
'log2FoldChange': 'FC_distal_vs_proximal',
'pval': 'padj_distal_vs_proximal'
})
distal_vs_proximal_df
distal_vs_middle_df = pd.read_csv(os.path.join(base_dir, 'deseq_distal_vs_middle.csv'), index_col='id')
distal_vs_middle_df = distal_vs_middle_df[['log2FoldChange', 'pval']]
distal_vs_middle_df = distal_vs_middle_df.rename(columns={
'log2FoldChange': 'FC_distal_vs_middle',
'pval': 'padj_distal_vs_middle'
})
distal_vs_middle_df
middle_vs_proximal_df = pd.read_csv(os.path.join(base_dir, 'deseq_middle_vs_proximal.csv'), index_col='id')
middle_vs_proximal_df = middle_vs_proximal_df[['log2FoldChange', 'pval']]
middle_vs_proximal_df = middle_vs_proximal_df.rename(columns={
'log2FoldChange': 'FC_middle_vs_proximal',
'pval': 'padj_middle_vs_proximal'
})
middle_vs_proximal_df
res_df = pd.merge(count_df, distal_vs_proximal_df, how='left', left_index=True, right_index=True)
res_df = pd.merge(res_df, distal_vs_middle_df, how='left', left_index=True, right_index=True)
res_df = pd.merge(res_df, middle_vs_proximal_df, how='left', left_index=True, right_index=True)
res_df
res_df.to_csv(os.path.join(base_dir, 'gene_data_combined.csv'))
###Output
_____no_output_____ |
pan/COURSE 2 Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week5/Gradient Checking/Gradient Checking.ipynb | ###Markdown
Gradient CheckingWelcome to the final assignment for this week! In this assignment you will learn to implement and use gradient checking. You are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect fraud--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker. But backpropagation is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, "Give me a proof that your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking".Let's do it!
###Code
# Packages
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
###Output
_____no_output_____
###Markdown
1) How does gradient checking work?Backpropagation computes the gradients $\frac{\partial J}{\partial \theta}$, where $\theta$ denotes the parameters of the model. $J$ is computed using forward propagation and your loss function.Because forward propagation is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\frac{\partial J}{\partial \theta}$. Let's look back at the definition of a derivative (or gradient):$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$If you're not familiar with the "$\displaystyle \lim_{\varepsilon \to 0}$" notation, it's just a way of saying "when $\varepsilon$ is really really small."We know the following:- $\frac{\partial J}{\partial \theta}$ is what you want to make sure you're computing correctly. - You can compute $J(\theta + \varepsilon)$ and $J(\theta - \varepsilon)$ (in the case that $\theta$ is a real number), since you're confident your implementation for $J$ is correct. Lets use equation (1) and a small value for $\varepsilon$ to convince your CEO that your code for computing $\frac{\partial J}{\partial \theta}$ is correct! 2) 1-dimensional gradient checkingConsider a 1D linear function $J(\theta) = \theta x$. The model contains only a single real-valued parameter $\theta$, and takes $x$ as input.You will implement code to compute $J(.)$ and its derivative $\frac{\partial J}{\partial \theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct. **Figure 1** : **1D linear model** The diagram above shows the key computation steps: First start with $x$, then evaluate the function $J(x)$ ("forward propagation"). Then compute the derivative $\frac{\partial J}{\partial \theta}$ ("backward propagation"). **Exercise**: implement "forward propagation" and "backward propagation" for this simple function. I.e., compute both $J(.)$ ("forward propagation") and its derivative with respect to $\theta$ ("backward propagation"), in two separate functions.
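As a quick warm-up (an illustrative aside, separate from the graded exercises below), formula (1) can be checked on a function whose derivative we already know, for example $J(\theta) = \theta^3$ with $\frac{\partial J}{\partial \theta} = 3\theta^2$:
###Code
# Illustrative aside (not graded): numerical vs. analytic derivative of J(theta) = theta**3
import numpy as np

theta, epsilon = 2.0, 1e-7
J = lambda t: t ** 3

gradapprox = (J(theta + epsilon) - J(theta - epsilon)) / (2 * epsilon)  # centered difference, formula (1)
grad_exact = 3 * theta ** 2                                             # analytic derivative

print(gradapprox, grad_exact)  # both are approximately 12
###Output
_____no_output_____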
###Code
# GRADED FUNCTION: forward_propagation
def forward_propagation(x, theta):
"""
Implement the linear forward propagation (compute J) presented in Figure 1 (J(theta) = theta * x)
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
J -- the value of function J, computed using the formula J(theta) = theta * x
"""
### START CODE HERE ### (approx. 1 line)
J = theta*x
### END CODE HERE ###
return J
x, theta = 2, 4
J = forward_propagation(x, theta)
print ("J = " + str(J))
###Output
J = 8
###Markdown
**Expected Output**: ** J ** 8 **Exercise**: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\theta) = \theta x$ with respect to $\theta$. To save you from doing the calculus, you should get $dtheta = \frac { \partial J }{ \partial \theta} = x$.
###Code
# GRADED FUNCTION: backward_propagation
def backward_propagation(x, theta):
"""
Computes the derivative of J with respect to theta (see Figure 1).
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
dtheta -- the gradient of the cost with respect to theta
"""
### START CODE HERE ### (approx. 1 line)
dtheta = x
### END CODE HERE ###
return dtheta
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print ("dtheta = " + str(dtheta))
###Output
dtheta = 2
###Markdown
**Expected Output**: ** dtheta ** 2 **Exercise**: To show that the `backward_propagation()` function is correctly computing the gradient $\frac{\partial J}{\partial \theta}$, let's implement gradient checking.**Instructions**:- First compute "gradapprox" using the formula above (1) and a small value of $\varepsilon$. Here are the Steps to follow: 1. $\theta^{+} = \theta + \varepsilon$ 2. $\theta^{-} = \theta - \varepsilon$ 3. $J^{+} = J(\theta^{+})$ 4. $J^{-} = J(\theta^{-})$ 5. $gradapprox = \frac{J^{+} - J^{-}}{2 \varepsilon}$- Then compute the gradient using backward propagation, and store the result in a variable "grad"- Finally, compute the relative difference between "gradapprox" and the "grad" using the following formula:$$ difference = \frac {\mid\mid grad - gradapprox \mid\mid_2}{\mid\mid grad \mid\mid_2 + \mid\mid gradapprox \mid\mid_2} \tag{2}$$You will need 3 Steps to compute this formula: - 1'. compute the numerator using np.linalg.norm(...) - 2'. compute the denominator. You will need to call np.linalg.norm(...) twice. - 3'. divide them.- If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation.
###Code
# GRADED FUNCTION: gradient_check
def gradient_check(x, theta, epsilon = 1e-7):
"""
Implement the backward propagation presented in Figure 1.
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and the backward propagation gradient
"""
# Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit.
### START CODE HERE ### (approx. 5 lines)
thetaplus = theta+epsilon # Step 1
thetaminus = theta-epsilon # Step 2
J_plus = thetaplus*x # Step 3
J_minus = thetaminus*x # Step 4
gradapprox = (J_plus-J_minus)/(2*epsilon) # Step 5
### END CODE HERE ###
# Check if gradapprox is close enough to the output of backward_propagation()
### START CODE HERE ### (approx. 1 line)
grad = backward_propagation(x,theta)
### END CODE HERE ###
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(grad-gradapprox) # Step 1'
denominator = np.linalg.norm(grad+gradapprox) # Step 2'
difference = numerator/denominator # Step 3'
### END CODE HERE ###
if difference < 1e-7:
print ("The gradient is correct!")
else:
print ("The gradient is wrong!")
return difference
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
###Output
The gradient is correct!
difference = 2.919335883291695e-10
###Markdown
**Expected Output**:The gradient is correct! ** difference ** 2.9193358103083e-10 Congrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in `backward_propagation()`. Now, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it! 3) N-dimensional gradient checking The following figure describes the forward and backward propagation of your fraud detection model. **Figure 2** : **deep neural network***LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID*Let's look at your implementations for forward propagation and backward propagation.
###Code
def forward_propagation_n(X, Y, parameters):
"""
Implements the forward propagation (and computes the cost) presented in Figure 3.
Arguments:
X -- training set for m examples
Y -- labels for m examples
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (5, 4)
b1 -- bias vector of shape (5, 1)
W2 -- weight matrix of shape (3, 5)
b2 -- bias vector of shape (3, 1)
W3 -- weight matrix of shape (1, 3)
b3 -- bias vector of shape (1, 1)
Returns:
cost -- the cost function (logistic cost for one example)
"""
# retrieve parameters
m = X.shape[1]
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
# Cost
logprobs = np.multiply(-np.log(A3),Y) + np.multiply(-np.log(1 - A3), 1 - Y)
cost = 1./m * np.sum(logprobs)
cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
return cost, cache
###Output
_____no_output_____
###Markdown
Now, run backward propagation.
###Code
def backward_propagation_n(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input datapoint, of shape (input size, 1)
Y -- true "label"
cache -- cache output from forward_propagation_n()
Returns:
gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1./m * np.dot(dZ3, A2.T)
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1./m * np.dot(dZ2, A1.T)
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1./m * np.dot(dZ1, X.T)
db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
"dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
###Output
_____no_output_____
###Markdown
You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct. **How does gradient checking work?**.As in 1) and 2), you want to compare "gradapprox" to the gradient computed by backpropagation. The formula is still:$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$However, $\theta$ is not a scalar anymore. It is a dictionary called "parameters". We implemented a function "`dictionary_to_vector()`" for you. It converts the "parameters" dictionary into a vector called "values", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.The inverse function is "`vector_to_dictionary`" which outputs back the "parameters" dictionary. **Figure 2** : **dictionary_to_vector() and vector_to_dictionary()** You will need these functions in gradient_check_n()We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector(). You don't need to worry about that.**Exercise**: Implement gradient_check_n().**Instructions**: Here is pseudo-code that will help you implement the gradient check.For each i in num_parameters:- To compute `J_plus[i]`: 1. Set $\theta^{+}$ to `np.copy(parameters_values)` 2. Set $\theta^{+}_i$ to $\theta^{+}_i + \varepsilon$ 3. Calculate $J^{+}_i$ using to `forward_propagation_n(x, y, vector_to_dictionary(`$\theta^{+}$ `))`. - To compute `J_minus[i]`: do the same thing with $\theta^{-}$- Compute $gradapprox[i] = \frac{J^{+}_i - J^{-}_i}{2 \varepsilon}$Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to `parameter_values[i]`. You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute: $$ difference = \frac {\| grad - gradapprox \|_2}{\| grad \|_2 + \| gradapprox \|_2 } \tag{3}$$
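The helpers `dictionary_to_vector()` and `vector_to_dictionary()` live in `gc_utils` and are not shown in this notebook. Roughly, the flattening step works like the sketch below -- this is an assumption for illustration only, not the actual course implementation (which also returns the list of keys, as the call `parameters_values, _ = dictionary_to_vector(parameters)` below suggests).
###Code
# Illustrative sketch only -- NOT the actual gc_utils implementation.
# Idea: reshape every parameter into a column vector and stack them in a fixed order.
import numpy as np

def dictionary_to_vector_sketch(parameters):
    keys = ["W1", "b1", "W2", "b2", "W3", "b3"]              # fixed order assumed here
    columns = [parameters[k].reshape(-1, 1) for k in keys]   # flatten each parameter
    return np.concatenate(columns, axis=0), keys             # long column vector + key order
###Output
_____no_output_____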
###Code
# GRADED FUNCTION: gradient_check_n
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
"""
Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n
Arguments:
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters.
x -- input datapoint, of shape (input size, 1)
y -- true "label"
epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and the backward propagation gradient
"""
# Set-up variables
parameters_values, _ = dictionary_to_vector(parameters)
grad = gradients_to_vector(gradients)
num_parameters = parameters_values.shape[0]
J_plus = np.zeros((num_parameters, 1))
J_minus = np.zeros((num_parameters, 1))
gradapprox = np.zeros((num_parameters, 1))
# Compute gradapprox
for i in range(num_parameters):
# Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
# "_" is used because the function you have to outputs two parameters but we only care about the first one
### START CODE HERE ### (approx. 3 lines)
thetaplus = np.copy(parameters_values) # Step 1
thetaplus[i][0] = thetaplus[i][0]+epsilon # Step 2
J_plus[i], _ = forward_propagation_n(X,Y,vector_to_dictionary(thetaplus)) # Step 3
### END CODE HERE ###
# Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
### START CODE HERE ### (approx. 3 lines)
thetaminus = np.copy(parameters_values) # Step 1
thetaminus[i][0] = thetaminus[i][0]-epsilon # Step 2
J_minus[i], _ = forward_propagation_n(X,Y,vector_to_dictionary(thetaminus)) # Step 3
### END CODE HERE ###
# Compute gradapprox[i]
### START CODE HERE ### (approx. 1 line)
gradapprox[i] = (J_plus[i]-J_minus[i])/(2*epsilon)
### END CODE HERE ###
# Compare gradapprox to backward propagation gradients by computing difference.
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(grad-gradapprox) # Step 1'
denominator = np.linalg.norm(grad+gradapprox) # Step 2'
difference = numerator/denominator # Step 3'
### END CODE HERE ###
if difference > 1e-7:
print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
else:
print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
return difference
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
###Output
[93mThere is a mistake in the backward propagation! difference = 1.1890913023330356e-07[0m
|
Tutorials/1_Utils/Data_Visualizations.ipynb | ###Markdown
Python Data Visualizations Introduction This notebook will introduce Python data visualizations on the Iris dataset. You should download the [Iris dataset](https://www.kaggle.com/benhamner/python-data-visualizations/data).
###Code
# import data process library
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import seaborn as sns
import sklearn
import matplotlib.pyplot as plt
sns.set(style="white", color_codes=True)
iris = pd.read_csv('/tmp/data_input/Iris.csv')
iris.head()
iris['Species'].value_counts()
iris.plot(kind="scatter", x="SepalLengthCm", y="SepalWidthCm")
#plt.scatter(iris["SepalLengthCm"][1], iris["SepalWidthCm"][1])
plt.show()
sns.jointplot(x="SepalLengthCm", y="SepalWidthCm", data=iris, size=5)
plt.show()
sns.FacetGrid(iris, hue="Species", size=5) \
.map(plt.scatter, "SepalLengthCm", "SepalWidthCm") \
.add_legend()
plt.show()
sns.boxplot(x="Species", y="PetalLengthCm", data=iris)
plt.show()
ax = sns.boxplot(x="Species", y="PetalLengthCm", data=iris)
ax = sns.stripplot(x="Species", y="PetalLengthCm", data=iris, jitter=True, edgecolor="gray")
plt.show()
sns.violinplot(x="Species", y="PetalLengthCm", data=iris, size=6)
plt.show()
sns.FacetGrid(iris, hue="Species", size=6) \
.map(sns.kdeplot, "PetalLengthCm") \
.add_legend()
plt.show()
sns.pairplot(iris.drop("Id", axis=1), hue="Species", size=3)
plt.show()
sns.pairplot(iris.drop("Id", axis=1), hue="Species", size=3, diag_kind="kde")
plt.show()
# Now that we've covered seaborn, let's go back to some of the ones we can make with Pandas
# We can quickly make a boxplot with Pandas on each feature split out by species
iris.drop("Id", axis=1).boxplot(by="Species", figsize=(12, 6))
plt.show()
# One cool more sophisticated technique pandas has available is called Andrews Curves
# Andrews Curves involve using attributes of samples as coefficients for Fourier series
# and then plotting these
from pandas.plotting import andrews_curves
andrews_curves(iris.drop("Id", axis=1), "Species")
plt.show()
# Another multivariate visualization technique pandas has is parallel_coordinates
# Parallel coordinates plots each feature on a separate column & then draws lines
# connecting the features for each data sample
from pandas.plotting import parallel_coordinates
parallel_coordinates(iris.drop("Id", axis=1), "Species")
plt.show()
# A final multivariate visualization technique pandas has is radviz
# Which puts each feature as a point on a 2D plane, and then simulates
# having each sample attached to those points through a spring weighted
# by the relative value for that feature
from pandas.plotting import radviz
radviz(iris.drop("Id", axis=1), "Species")
plt.show()
###Output
_____no_output_____ |
tutorials/Titanic_Kaggle.ipynb | ###Markdown
TPOT tutorial on the Titanic dataset The Titanic machine learning competition on [Kaggle](https://www.kaggle.com/c/titanic) is one of the most popular beginner's competitions on the platform. We will use that competition here to demonstrate the implementation of TPOT.
###Code
# Import required libraries
from tpot import TPOTClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
# Load the data
titanic = pd.read_csv('data/titanic_train.csv')
titanic.head(5)
###Output
_____no_output_____
###Markdown
Data Exploration
###Code
titanic.groupby('Sex').Survived.value_counts()
titanic.groupby(['Pclass','Sex']).Survived.value_counts()
id = pd.crosstab([titanic.Pclass, titanic.Sex], titanic.Survived.astype(float))
id.div(id.sum(1).astype(float), 0)
###Output
_____no_output_____
###Markdown
Data Munging The first and most important step in using TPOT on any data set is to rename the target class/response variable to `class`.
###Code
titanic.rename(columns={'Survived': 'class'}, inplace=True)
###Output
_____no_output_____
###Markdown
At present, TPOT requires all the data to be in numerical format. As we can see below, our data set has 5 categorical variables which contain non-numerical values: `Name`, `Sex`, `Ticket`, `Cabin` and `Embarked`.
###Code
titanic.dtypes
###Output
_____no_output_____
###Markdown
We then check the number of levels that each of the five categorical variables has.
###Code
for cat in ['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']:
print("Number of levels in category '{0}': \b {1:2.2f} ".format(cat, titanic[cat].unique().size))
###Output
Number of levels in category 'Name': 891.00
Number of levels in category 'Sex': 2.00
Number of levels in category 'Ticket': 681.00
Number of levels in category 'Cabin': 148.00
Number of levels in category 'Embarked': 4.00
###Markdown
As we can see, `Sex` and `Embarked` have few levels. Let's find out what they are.
###Code
for cat in ['Sex', 'Embarked']:
print("Levels for catgeory '{0}': {1}".format(cat, titanic[cat].unique()))
###Output
Levels for category 'Sex': ['male' 'female']
Levels for category 'Embarked': ['S' 'C' 'Q' nan]
###Markdown
We then code these levels manually into numerical values. For `nan` i.e. the missing values, we simply replace them with a placeholder value (-999). In fact, we perform this replacement for the entire data set.
###Code
titanic['Sex'] = titanic['Sex'].map({'male':0,'female':1})
titanic['Embarked'] = titanic['Embarked'].map({'S':0,'C':1,'Q':2})
titanic = titanic.fillna(-999)
pd.isnull(titanic).any()
###Output
_____no_output_____
###Markdown
Since `Name` and `Ticket` have so many levels, we drop them from our analysis for the sake of simplicity. For `Cabin`, we encode the levels as digits using Scikit-learn's [`MultiLabelBinarizer`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MultiLabelBinarizer.html) and treat them as new features.
###Code
from sklearn.preprocessing import MultiLabelBinarizer
mlb = MultiLabelBinarizer()
CabinTrans = mlb.fit_transform([{str(val)} for val in titanic['Cabin'].values])
CabinTrans
###Output
_____no_output_____
###Markdown
Drop the unused features from the dataset.
###Code
titanic_new = titanic.drop(['Name','Ticket','Cabin','class'], axis=1)
assert (len(titanic['Cabin'].unique()) == len(mlb.classes_)), "Not Equal" #check correct encoding done
###Output
_____no_output_____
###Markdown
We then add the encoded features to form the final dataset to be used with TPOT.
###Code
titanic_new = np.hstack((titanic_new.values,CabinTrans))
np.isnan(titanic_new).any()
###Output
_____no_output_____
###Markdown
Keeping in mind that the final dataset is in the form of a numpy array, we can check the number of features in the final dataset as follows.
###Code
titanic_new[0].size
###Output
_____no_output_____
###Markdown
Finally we store the class labels, which we need to predict, in a separate variable.
###Code
titanic_class = titanic['class'].values
###Output
_____no_output_____
###Markdown
Data Analysis using TPOT To begin our analysis, we need to divide our training data into training and validation sets. The validation set is just to give us an idea of the test set error. The model selection and tuning is entirely taken care of by TPOT, so if we want to, we can skip creating this validation set.
###Code
training_indices, validation_indices = training_indices, testing_indices = train_test_split(titanic.index, stratify = titanic_class, train_size=0.75, test_size=0.25)
training_indices.size, validation_indices.size
###Output
_____no_output_____
###Markdown
After that, we proceed to call the `fit`, `score` and `export` functions on our training dataset. To get a better idea of how these functions work, refer to the TPOT documentation [here](http://rhiever.github.io/tpot/using/). An important TPOT parameter to set is the number of generations; since our aim is just to illustrate the use of TPOT, a small value such as 5 is enough (the cell below caps the run with `max_time_mins` instead, which serves the same illustrative purpose). On a standard laptop with 4GB RAM, it roughly takes 5 minutes per generation to run, and each added generation takes about 5 minutes more. Thus, for the default value of 100, total run time could be roughly around 8 hours.
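For reference, a generations-based configuration would look roughly like the sketch below (illustrative only, with an assumed `random_state`); the next cell is the configuration actually used here.
###Code
# Illustrative sketch only: cap the search by number of generations instead of wall-clock time.
# Fitting this would take on the order of half an hour, as discussed above.
tpot_gen = TPOTClassifier(generations=5, population_size=40, verbosity=2, random_state=42)
# tpot_gen.fit(titanic_new[training_indices], titanic_class[training_indices])
###Output
_____no_output_____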
###Code
tpot = TPOTClassifier(verbosity=2, max_time_mins=2, max_eval_time_mins=0.04, population_size=40)
tpot.fit(titanic_new[training_indices], titanic_class[training_indices])
tpot.score(titanic_new[validation_indices], titanic.loc[validation_indices, 'class'].values)
tpot.export('tpot_titanic_pipeline.py')
###Output
_____no_output_____
###Markdown
Let's have a look at the generated code. As we can see, the random forest classifier performed best on the given dataset out of all the models that TPOT currently evaluates. If we ran TPOT for more generations, then the score should improve further.
###Code
# %load tpot_titanic_pipeline.py
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# NOTE: Make sure that the class is labeled 'class' in the data file
tpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)
features = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)
training_features, testing_features, training_classes, testing_classes = \
train_test_split(features, tpot_data['class'], random_state=42)
exported_pipeline = RandomForestClassifier(bootstrap=False, max_features=0.4, min_samples_leaf=1, min_samples_split=9)
exported_pipeline.fit(training_features, training_classes)
results = exported_pipeline.predict(testing_features)
###Output
_____no_output_____
###Markdown
Make predictions on the submission data
###Code
# Read in the submission dataset
titanic_sub = pd.read_csv('data/titanic_test.csv')
titanic_sub.describe()
###Output
_____no_output_____
###Markdown
The most important step here is to check for new levels in the categorical variables of the submission dataset that are absent in the training set. We identify them and set them to our placeholder value of '-999', i.e., we treat them as missing values. This ensures training consistency, as otherwise the model does not know what to do with the new levels in the submission dataset.
###Code
for var in ['Cabin']: #,'Name','Ticket']:
new = list(set(titanic_sub[var]) - set(titanic[var]))
    titanic_sub.loc[titanic_sub[var].isin(new), var] = -999
###Output
_____no_output_____
###Markdown
We then carry out the data munging steps as done earlier for the training dataset.
###Code
titanic_sub['Sex'] = titanic_sub['Sex'].map({'male':0,'female':1})
titanic_sub['Embarked'] = titanic_sub['Embarked'].map({'S':0,'C':1,'Q':2})
titanic_sub = titanic_sub.fillna(-999)
pd.isnull(titanic_sub).any()
###Output
_____no_output_____
###Markdown
While calling `MultiLabelBinarizer` for the submission data set, we first fit on the training set again to learn the levels and then transform the submission dataset values. This further ensures that only those levels that were present in the training dataset are transformed. If new levels are still found in the submission dataset then it will return an error and we need to go back and check our earlier step of replacing new levels with the placeholder value.
###Code
from sklearn.preprocessing import MultiLabelBinarizer
mlb = MultiLabelBinarizer()
SubCabinTrans = mlb.fit([{str(val)} for val in titanic['Cabin'].values]).transform([{str(val)} for val in titanic_sub['Cabin'].values])
titanic_sub = titanic_sub.drop(['Name','Ticket','Cabin'], axis=1)
# Form the new submission data set
titanic_sub_new = np.hstack((titanic_sub.values,SubCabinTrans))
np.any(np.isnan(titanic_sub_new))
# Ensure equal number of features in both the final training and submission dataset
assert (titanic_new.shape[1] == titanic_sub_new.shape[1]), "Not Equal"
# Generate the predictions
submission = tpot.predict(titanic_sub_new)
# Create the submission file
final = pd.DataFrame({'PassengerId': titanic_sub['PassengerId'], 'Survived': submission})
final.to_csv('data/submission.csv', index = False)
final.shape
###Output
_____no_output_____ |
webscraping_Analysis.ipynb | ###Markdown
**Stock Finviz articles data**
###Code
from urllib.request import urlopen,Request
from bs4 import BeautifulSoup
###Output
_____no_output_____
###Markdown
Define the base Finviz URL and also the 'Tickers' (markets) we want to scrape.
###Code
finviz_url='https://finviz.com/quote.ashx?t='
markets=['AMZN','TSLA','FB','NFLX','GOOGL','AAPL','MSFT']
###Output
_____no_output_____
###Markdown
Now we request the data from the URL for each ticker using the modules imported above, and parse out the news table.
###Code
news_tables={}
for market in markets:
url=finviz_url+market
req=Request(url=url,headers={'user-agent':'my-app'})
reponse=urlopen(req)
html=BeautifulSoup(reponse,'html')
news_table=html.find(id="news-table")
news_tables[market]=news_table
###Output
_____no_output_____
###Markdown
***Manipulating Finviz Data*** We obtain the title and the date of each article for a specific market. To obtain the data concerning AMZN, for example, we could do the following:
###Code
'''
AMZN_data=news_tables['AMZN']
AMZN_rows=AMZN_data.findAll('tr')
for index, row in enumerate(AMZN_rows):
    title = row.a.text
    date = row.td.text
'''
###Output
_____no_output_____
###Markdown
now we build a general function to automate this:
###Code
parsed_data=[]
for market,news_table in news_tables.items():
for row in news_table.findAll('tr'):
title=row.a.text
date_data=row.td.text.split(' ')
if len(date_data)==1:
time=date_data[0]
else:
time=date_data[1]
date=date_data[0]
parsed_data.append([market,date,time,title])
###Output
_____no_output_____
###Markdown
organization of the data extracted in a table
###Code
import pandas as pd
import matplotlib.pyplot as plt
df = pd.DataFrame(parsed_data, columns=['market', 'date', 'time', 'title'])
df
###Output
_____no_output_____
###Markdown
***3-Applying Sentiment Analysis*** Now is the time to perform sentiment analysis with nltk.sentiment.vader:
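Before applying it to every headline, here is a quick illustrative look at what `polarity_scores()` returns for a single sentence: a dict with `neg`, `neu`, `pos` and a `compound` score in [-1, 1]; we will keep only `compound`. The example headline below is made up.
###Code
# Quick illustration: inspect the full VADER output for one (made-up) headline
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
nltk.download('vader_lexicon')

example = "Amazon shares surge after strong quarterly earnings"
print(SentimentIntensityAnalyzer().polarity_scores(example))
# e.g. {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...}
###Output
_____no_output_____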
###Code
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
nltk.download('vader_lexicon')
vader = SentimentIntensityAnalyzer()
f = lambda title: vader.polarity_scores(title)['compound']
df['compound'] = df['title'].apply(f)
df
df['date'] = pd.to_datetime(df.date).dt.date
df
###Output
_____no_output_____
###Markdown
***4-Visualization of Sentiment Analysis*** The following code takes the average sentiment scores for all the news headlines collected on each date and plots them on a bar chart. We need the average score for each day to get the general sentiment for that day.
###Code
mean_df = df.groupby(['market', 'date']).mean()
mean_df
%matplotlib inline
mean_df = df.groupby(['market', 'date']).mean().unstack()
mean_df = mean_df.xs('compound', axis="columns").transpose()
mean_df.plot(kind='bar',figsize=(20,8),width=1.5)
plt.show()
###Output
_____no_output_____ |
notebooks/legacy/credit_initial_experiment.ipynb | ###Markdown
Load data We assume the target linear model will work on one-hot discretized features. This is a common practice, since a linear model can only capture a linear relationship between the response and a continuous feature. If the continuous feature, however, is quantized, the captured relationship can be non-linear. Therefore, we discretize and one-hot encode all features. We quantize the continuous features, like credit amount, age, etc., into 5 bins.
###Code
# Load the file
df = pd.read_csv('data/german_credit_data.csv')
df = df.drop(df.columns[0], axis=1) # remove the index column
# Quantize credit amount, duration and age into 5 bins
amount_series = df.loc[:, 'Credit amount']
df.loc[:, 'Credit amount'] = pd.qcut(amount_series, 5)
duration_series = df.loc[:, 'Duration']
df.loc[:, 'Duration'] = pd.qcut(duration_series, 5)
duration_series = df.loc[:, 'Age']
df.loc[:, 'Age'] = pd.qcut(duration_series, 5)
# Set Job type to object for one-hot encoding
df.loc[:, 'Job'] = df.loc[:, 'Job'].astype(object)
# Perform one-hot encoding
df = pd.get_dummies(df)
# Drop binary features
df = df.drop(columns=['Sex_male', 'Risk_bad'])
# Separate features from targets
df_X = df.iloc[:, :-1]
df_y = df.iloc[:, -1]
print('Examples are represented as {}-dimensional vectors.'.format(df_X.shape[1]))
# Convert to numpy
X = df_X.values.astype('int8')
y = df_y.values.astype('int8')
print('Shape of X: {}. Shape of y: {}.'.format(X.shape, y.shape))
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
###Output
Shape of X: (1000, 38). Shape of y: (1000,).
###Markdown
Fit the target model We start with an SVM-RBF just to see how much we will potentially lose by using plain logistic regression.
###Code
# Fit SVM with RBF kernel using CV
tuned_parameters = [
{
'kernel': ['rbf'],
'gamma': [0.01, 0.03, 0.05, 0.07],
'C': [1, 3, 5, 7, 10]
}
]
clf = GridSearchCV(SVC(), tuned_parameters, cv=5, n_jobs=-1)
clf.fit(X_train, y_train)
print('Best params are: {} with score: {:.2f}%.'.format(clf.best_params_, clf.best_score_ * 100))
print('Test score is: {:.2f}%.'.format(clf.score(X_test, y_test)*100))
# Best params are: {'C': 5, 'gamma': 0.03, 'kernel': 'rbf'} with score: 74.44%.
# Test score is: 71.00%.
###Output
Best params are: {'kernel': 'rbf', 'gamma': 0.03, 'C': 5} with score: 74.44%.
Test score is: 71.00%.
###Markdown
Now we fit the target logistic regression model.
###Code
# Fit logistic regression and perform CV
clf = LogisticRegressionCV(
Cs=21,
cv=5,
n_jobs=-1,
random_state=seed
)
clf.fit(X_train, y_train)
# Get best score and C value
mean_scores = np.mean(clf.scores_[1], axis=0)
best_idx = np.argmax(mean_scores)
best_score = mean_scores[best_idx]
best_C = clf.Cs_[best_idx]
print('Best score is: {:.2f}%. Best C is: {:.4f}.'.format(best_score*100, best_C))
print('Test score is: {:.2f}%.'.format(clf.score(X_test, y_test)*100))
# Best score is: 73.44%. Best C is: 0.1585.
# Test score is: 71.000%.
###Output
Best score is: 73.44%. Best C is: 0.1585.
Test score is: 71.00%.
###Markdown
Perform adversarial examples search Context-local counter we'll use for counting expanded graph nodes.
###Code
@with_default_context(use_empty_init=True)
class Counter:
def __init__(self):
self.cnt = 0
def increment(self):
self.cnt += 1
def count(self):
return self.cnt
###Output
_____no_output_____
###Markdown
The `Node` class hosts the transformation code. By setting `decrease_amount` and `decrease_duration` we can get in total three different transformation graphs.
###Code
class Node:
amount_start_idx = df_X.columns.get_loc("Credit amount_(249.999, 1262.0]")
duration_start_idx = df_X.columns.get_loc("Duration_(3.999, 12.0]")
purpose_start_idx = df_X.columns.get_loc("Purpose_business")
def __init__(self, x, decrease_amount=True, decrease_duration=True):
self.root = x
self.decrease_amount = decrease_amount
self.decrease_duration = decrease_duration
# Slices in the vector for each features
self.static = self.root[:Node.amount_start_idx]
self.amount = self.root[Node.amount_start_idx:Node.duration_start_idx]
self.duration = self.root[Node.duration_start_idx:Node.purpose_start_idx]
self.purpose = self.root[Node.purpose_start_idx:]
def _get_neighbour(self, x, direction='pos'):
"""Get the neighbouring value in a quantized one-hot feature vector."""
idx = np.argmax(x)
if direction == 'pos' and idx != len(x) - 1:
return np.roll(x, 1).tolist()
elif direction == 'neg' and idx != 0:
return np.roll(x, -1).tolist()
return []
def _expand_neighbours(self, field, directions=None):
"""Expand neighbouring values of a quantized feature."""
if directions is None:
directions = ['pos', 'neg']
child_fields = []
for direction in directions:
child_fields.append(self._get_neighbour(field, direction=direction))
child_fields = [x for x in child_fields if len(x) > 0]
return np.array(child_fields, dtype='uint8')
def _expand_all(self, field):
"""Expand all values of a categorical feature."""
child_fields = []
for i in range(1, len(field)):
child_fields.append(np.roll(field, i))
return child_fields
def expand(self):
"""Generate all children of the current node."""
# Increment the counter of expanded nodes.
counter = Counter.get_default()
counter.increment()
children = []
# Expand "credit amount".
for c in self._expand_neighbours(
self.amount,
directions=['pos', 'neg'] if self.decrease_amount else ['pos']):
child = np.concatenate((self.static, c, self.duration, self.purpose))
children.append(child)
# Expand "duration".
for c in self._expand_neighbours(
self.duration,
directions=['pos', 'neg'] if self.decrease_duration else ['pos']):
child = np.concatenate((self.static, self.amount, c, self.purpose))
children.append(child)
# Expand "purpose".
for c in self._expand_all(self.purpose):
child = np.concatenate((self.static, self.amount, self.duration, c))
children.append(child)
return children
def __repr__(self):
return 'Node({})'.format(self.root)
###Output
_____no_output_____
###Markdown
All the functions that need to be passed into the search, in the expected format.
###Code
def _expand_fn(x, p_norm=1, **kwargs):
"""Wrap the example in `Node`, expand the node, and compute the costs.
Returns a list of tuples (child, cost)
"""
node = Node(x, **kwargs)
children = node.expand()
costs = [np.linalg.norm(x - c, ord=p_norm) for c in children]
return list(zip(children, costs))
def _goal_fn(x, clf, target_confidence=0.5):
"""Tell whether the example has reached the goal."""
return clf.predict_proba([x])[0, 1] >= target_confidence
def _heuristic_fn(x, clf, q_norm=np.inf):
"""Distance to the decision boundary of a logistic regression classifier.
By default the distance is w.r.t. L1 norm. This means that the denominator
has to be in terms of the Holder dual norm (`q_norm`), so L-inf. I know,
this interface is horrible.
NOTE: The value has to be zero if the example is already on the target side
of the boundary.
"""
score = clf.decision_function([x])[0]
if score >= 0:
return 0.0
h = np.abs(score) / np.linalg.norm(
clf.coef_[0, Node.amount_start_idx:], ord=q_norm)
return h
def hash_fn(x):
"""Hash function for examples."""
return hash(x.tostring())
@profiled
def find_adversarial(x, clf, p_norm=1, q_norm=np.inf,
target_confidence=0.5, return_path=False,
heuristic_fn=None, **kwargs):
"""Transform an example until it is classified with target confidence."""
if heuristic_fn is None:
heuristic_fn = lambda x: _heuristic_fn(x, clf, q_norm=q_norm)
if clf.predict_proba([x])[0, 1] >= target_confidence:
raise Exception('Initial example is already classified as positive.')
return a_star_search(
start_node=x,
expand_fn=lambda x: _expand_fn(x, p_norm=p_norm, **kwargs),
goal_fn=lambda x: _goal_fn(x, clf, target_confidence),
heuristic_fn=heuristic_fn,
hash_fn=hash_fn,
return_path=return_path
)
###Output
_____no_output_____
###Markdown
Compare BFS to A*
###Code
def get_expanded_nodes_stats(p_norm=1, q_norm=np.inf):
stats = pd.DataFrame(columns=['A*', 'BFS'])
for i, x in enumerate(X):
if clf.decision_function([x])[0] < 0:
cnt_astar = Counter()
with cnt_astar.as_default():
    find_adversarial(x, clf, p_norm=p_norm, q_norm=q_norm)
cnt_bfs = Counter()
with cnt_bfs.as_default():
    find_adversarial(x, clf, p_norm=p_norm, q_norm=q_norm, heuristic_fn=lambda x: 0)
stats.loc[i] = [cnt_astar.count(), cnt_bfs.count()]  # store the node counts, not the Counter objects
return stats
stats_l1 = get_expanded_nodes_stats(1, np.inf)
stats_l1
###Output
_____no_output_____
###Markdown
Run the experiments
###Code
def find_adv_examples(X, target_confidence,
graph='all', p_norm=1, q_norm=np.inf,
heuristic=None):
"""Find adversarial examples for a whole dataset"""
if graph == 'all':
node_kwargs = dict(decrease_amount=True, decrease_duration=True)
if graph == 'increase_amount':
node_kwargs = dict(decrease_amount=False, decrease_duration=True)
if graph == 'increase_amount_and_duration':
node_kwargs = dict(decrease_amount=False, decrease_duration=False)
# Dataframe for storing the results.
results = pd.DataFrame(
columns=['index', 'found', 'x_adv', 'confidence',
'real_cost', 'path_cost', 'nodes_expanded', 'runtime'])
# Indices of examples classified as negative.
neg_indices, = np.where(clf.predict_proba(X)[:, 1] < target_confidence)
for i, original_index in enumerate(neg_indices):
x = X[original_index]
# Instantiate a counter for expanded nodes, and a profiler.
expanded_counter = Counter()
per_example_profiler = Profiler()
with expanded_counter.as_default(), per_example_profiler.as_default():
x_adv, path_cost = find_adversarial(
x, clf, target_confidence=target_confidence, **node_kwargs)
nodes_expanded = expanded_counter.count()
runtime = per_example_profiler.compute_stats()['find_adversarial']['tot']
# If an adversarial example was not found, only record index, runtime, and
# the number of expanded nodes.
if x_adv is None:
results.loc[i] = [original_index, False, [], None,
None, None, nodes_expanded, runtime]
else:
confidence = clf.predict_proba([x_adv])[0, 1]
real_cost = np.linalg.norm(x - x_adv, ord=p_norm)
results.loc[i] = [original_index, True, x_adv, confidence,
real_cost, path_cost, nodes_expanded, runtime]
return results
###Output
_____no_output_____
###Markdown
Fix several target confidence levels.
###Code
confidence_levels = np.linspace(0.50, 0.95, 10)
confidence_levels
###Output
_____no_output_____
###Markdown
Find adversarial examples for the whole dataset using different transformation graphs
###Code
results_graph_1 = []
for level in tqdm_notebook(confidence_levels):
results_graph_1.append(find_adv_examples(X, level))
results_graph_2 = []
for level in tqdm_notebook(confidence_levels):
results_graph_2.append(
find_adv_examples(X, level, graph='increase_amount'))
results_graph_3 = []
for level in tqdm_notebook(confidence_levels):
results_graph_3.append(
find_adv_examples(X, level, graph='increase_amount_and_duration'))
###Output
_____no_output_____
###Markdown
Sanity check for optimality
This part was used for debugging. It checks the costs of the found adversarial examples for all three graphs. Since the second graph is a subgraph of the first, and the third is a subgraph of the second ($G_3 \subset G_2 \subset G_1$), the path cost of the adversarial examples has to satisfy $C(x, x^*_{G_3}) \geq C(x, x^*_{G_2}) \geq C(x, x^*_{G_1})$.
###Code
def example_diff(a, b):
different_indices = np.where(a != b)
return pd.DataFrame(
[a[different_indices], b[different_indices]],
columns=df_X.columns[different_indices],
)
from IPython.display import display
for result_graph_1, result_graph_2, result_graph_3 in zip(
results_graph_1, results_graph_2, results_graph_3):
for i, x in enumerate(X):
a = result_graph_1['path_cost'][result_graph_1['index'] == i].values
b = result_graph_2['path_cost'][result_graph_2['index'] == i].values
c = result_graph_3['path_cost'][result_graph_3['index'] == i].values
try:
if len(a) > 0 and len(b) > 0 and not pd.isna(a) and not pd.isna(b):
assert a[0] <= b[0]
if len(b) > 0 and len(c) > 0 and not pd.isna(b) and not pd.isna(c):
assert b[0] <= c[0]
if len(a) > 0 and len(c) > 0 and not pd.isna(a) and not pd.isna(c):
assert a[0] <= c[0]
# Output information about violating examples.
except AssertionError:
print('Assumption violated at index', i)
print('Real costs:', a, b, c)
path_a = result_graph_1['path_cost'][result_graph_1['index'] == i].values
path_b = result_graph_2['path_cost'][result_graph_2['index'] == i].values
path_c = result_graph_3['path_cost'][result_graph_3['index'] == i].values
print('Path costs:', path_a, path_b, path_c)
conf_a = result_graph_1['confidence'][result_graph_1['index'] == i].values[0]
conf_b = result_graph_2['confidence'][result_graph_2['index'] == i].values[0]
conf_c = result_graph_3['confidence'][result_graph_3['index'] == i].values[0]
print('Confidences:', conf_a, conf_b, conf_c)
x_adv_a = result_graph_1['x_adv'][result_graph_1['index'] == i].values[0]
x_adv_b = result_graph_2['x_adv'][result_graph_2['index'] == i].values[0]
x_adv_c = result_graph_3['x_adv'][result_graph_3['index'] == i].values[0]
if len(x_adv_a) > 0:
display(example_diff(X[i], x_adv_a))
if len(x_adv_b) > 0:
display(example_diff(X[i], x_adv_b))
if len(x_adv_c) > 0:
display(example_diff(X[i], x_adv_c))
continue
###Output
_____no_output_____
###Markdown
Plots
Success rates for crafting adversarial examples, and average confidence of these.
###Code
def plot_confidence(results):
success_prop = [result['found'].mean() for result in results]
avg_confidence = [result['confidence'].mean() for result in results]
fig, ax = plt.subplots()
ax.set_xticks(confidence_levels)
ax.plot(confidence_levels, success_prop,
label='Success proportion')
ax.plot(confidence_levels, avg_confidence,
label='Avg confidence of adversarial examples')
ax.plot(confidence_levels, confidence_levels,
linestyle='dashed', color='gray', label='Baseline confidence')
ax.set_xlabel('Target confidence')
ax.set_ylabel('')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Graph 1 (changing "amount", "duration", and "purpose" in any direction)
###Code
plot_confidence(results_graph_1)
###Output
_____no_output_____
###Markdown
Graph 2 (changing "duration" in any direction, and "purpose", and only increasing "amount")
###Code
plot_confidence(results_graph_2)
###Output
_____no_output_____
###Markdown
Graph 3 (changing "purpose" in any direction, and only increasing "amount" and "duration")
###Code
plot_confidence(results_graph_3)
###Output
_____no_output_____
###Markdown
Number of expanded nodes for graph 1 (strongly correlated with runtime)
###Code
avg_expanded = pd.Series(
[np.mean(result['nodes_expanded'][result['found']])
for result in results_graph_1])
sigma_expanded = pd.Series(
[np.std(result['nodes_expanded'][result['found']])
for result in results_graph_1])
avg_runtime = pd.Series(
[np.mean(result['runtime'][result['found']])
for result in results_graph_1])
fig, ax = plt.subplots()
avg_expanded.plot(label='Number of expanded nodes', ax=ax)
ax.fill_between(range(len(confidence_levels)),
avg_expanded - 2 * sigma_expanded,
avg_expanded + 2 * sigma_expanded, alpha=.2)
ax.set_xticks(range(len(confidence_levels)))
ax.set_xticklabels(['%2.2f' % level for level in confidence_levels])
ax.set_xlim(0, len(confidence_levels) - 1)
ax.set_xlabel('Target confidence')
ax.set_ylabel('Number of expanded nodes')
# avg_runtime.plot(secondary_y=True, label='Avg runtime')
plt.show()
###Output
_____no_output_____ |
S17_A126 - Scatter plot.ipynb | ###Markdown
Building a scatter plot
###Code
a = []
b = []
plt.scatter(a,b)
# The scales of the pairs are different
a=[1,2,3,4,5,6,7,8,9,10,11,12,13]
b =[2,4,6,8,10,12,14,16,18,20,22,24,26]
plt.scatter(a,b)
a=[1,2,3,4,5,6,7,8,9,10,11,12,13]
b =[2,4,4,6,8,10,10,12,14,12,10,10,12]
plt.scatter(a,b)
###Output
_____no_output_____
###Markdown
Adding more elements to the plots
###Code
x = []
y = []
for i in range(-50,100):
x.append(i)
y.append(i + 2)
print(x,y)
plt.scatter(x,y)
###Output
_____no_output_____
###Markdown
Increasing the distance between the points (pairs)
###Code
x = []
y = []
for i in range (-50, 100, 5):
x.append(i)
y.append(i)
print(x,y)
plt.scatter(x,y)
x = []
y = []
for i in range (-50, 100, 15):
x.append(i)
y.append(i)
plt.scatter(x,y)
###Output
_____no_output_____
###Markdown
Changing the size of the points (pairs)
###Code
plt.scatter(x,y,s=100)
plt.scatter(x,y, s=5)
###Output
_____no_output_____
###Markdown
Changing the color of the points (pairs)
###Code
plt.scatter(x,y, c='green')
plt.scatter(x,y, c='yellow')
###Output
_____no_output_____
###Markdown
Changing the marker of the points (pairs)
###Code
plt.scatter(x,y, s=100, c="red", marker='+')
plt.scatter(x,y, s=100, c="purple", marker='^')
plt.scatter(x,y, s=100, c="gray", marker='o')
###Output
_____no_output_____
###Markdown
Multi-line charts
###Code
x = []
y = []
y2 = []
y3 = []
for i in range (-50, 100, 5):
x.append(i)
y.append(i)
y2.append(i * 2)
y3.append(i + 50)
plt.scatter(x,y, s=100, c='gray', marker='o')
plt.scatter(x,y2, s=30, c='purple', marker='^')
plt.scatter(x,y3, s=100, c='blue', marker='+')
###Output
_____no_output_____ |
Chapter_08_Data_Wrangling_Join_Combine_Reshape.ipynb | ###Markdown
Data Wrangling: Join, Combine, and Reshape
###Code
import numpy as np
import pandas as pd
pd.options.display.max_rows = 20
np.random.seed(12345)
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
np.set_printoptions(precision=4, suppress=True)
###Output
_____no_output_____
###Markdown
Hierarchical Indexing
Have multiple index levels on an axis.
###Code
data = pd.Series(np.random.randn(9),
index=[['a', 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'd'],
[1, 2, 3, 1, 3, 1, 2, 2, 3]])
data
data.index
data['b']
data['b':'c']
data.loc[['b', 'd']]
# select from an inner level: all from first level, 2 for the second level
data.loc[:, 2]
# rearrange data into a DataFrame
data.unstack()
# stack back
data.unstack().stack()
# hierarchical index for both axes
frame = pd.DataFrame(np.arange(12).reshape((4, 3)),
index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],
columns=[['Ohio', 'Ohio', 'Colorado'],
['Green', 'Red', 'Green']])
frame
frame.index.names = ['key1', 'key2']
frame.columns.names = ['state', 'color']
frame
# partial column indexing
frame['Ohio']
###Output
_____no_output_____
###Markdown
MultiIndex.from_arrays([['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']], names=['state', 'color'])

Reordering and Sorting Levels
* `swaplevel` returns a new object with levels interchanged
* `sort_index` sorts the data using only values in a single level.
###Code
frame.swaplevel('key1', 'key2')
print('Sort by key2: \n')
print(frame.sort_index(level=1))
print('\n \n Sort by key1: \n ')
frame.swaplevel(0, 1).sort_index(level=0)
###Output
Sort by key2:
state Ohio Colorado
color Green Red Green
key1 key2
a 1 0 1 2
b 1 6 7 8
a 2 3 4 5
b 2 9 10 11
Sort by key1:
###Markdown
Summary Statistics by Level
###Code
# sum group by key2
print(frame.sum(level='key2'))
# sum group by color across columns
frame.sum(level='color', axis = 1) # use axis = 1 to specify it's a column level
###Output
state Ohio Colorado
color Green Red Green
key2
1 6 8 10
2 12 14 16
###Markdown
Indexing with a DataFrame's columns
* `set_index` creates a new DataFrame using one or more of its columns as the index
* `reset_index` does the opposite of `set_index`
###Code
frame = pd.DataFrame({'a': range(7), 'b': range(7, 0, -1),
'c': ['one', 'one', 'one', 'two', 'two',
'two', 'two'],
'd': [0, 1, 2, 0, 1, 2, 3]})
frame
frame2 = frame.set_index(['c', 'd'])
frame2
# keep the columns in the DataFrame while setting them to be index
frame.set_index(['c', 'd'], drop=False)
# reset
frame2.reset_index()
###Output
_____no_output_____
###Markdown
Combining and Merging Datasets
* `pd.merge` connects rows in DataFrames based on one or more keys.
* `pd.concat` concatenates or stacks together objects along an axis.
* `combine_first` enables splicing together overlapping data to fill in missing values in one object with values from another.

Database-Style DataFrame Joins
###Code
df1 = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'a', 'b'],
'data1': range(7)})
df2 = pd.DataFrame({'key': ['a', 'b', 'd'],
'data2': range(3)})
df1
df2
pd.merge(df1, df2)
pd.merge(df1, df2, on='key')
df3 = pd.DataFrame({'lkey': ['b', 'b', 'a', 'c', 'a', 'a', 'b'],
'data1': range(7)})
df4 = pd.DataFrame({'rkey': ['a', 'b', 'd'],
'data2': range(3)})
pd.merge(df3, df4, left_on='lkey', right_on='rkey')
pd.merge(df1, df2, how='outer')
df1 = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'b'],
'data1': range(6)})
df2 = pd.DataFrame({'key': ['a', 'b', 'a', 'b', 'd'],
'data2': range(5)})
df1
df2
pd.merge(df1, df2, on='key', how='left')
pd.merge(df1, df2, how='inner')
left = pd.DataFrame({'key1': ['foo', 'foo', 'bar'],
'key2': ['one', 'two', 'one'],
'lval': [1, 2, 3]})
right = pd.DataFrame({'key1': ['foo', 'foo', 'bar', 'bar'],
'key2': ['one', 'one', 'one', 'two'],
'rval': [4, 5, 6, 7]})
pd.merge(left, right, on=['key1', 'key2'], how='outer')
pd.merge(left, right, on='key1')
pd.merge(left, right, on='key1', suffixes=('_left', '_right'))
###Output
_____no_output_____
###Markdown
Merging on Index
###Code
left1 = pd.DataFrame({'key': ['a', 'b', 'a', 'a', 'b', 'c'],
'value': range(6)})
right1 = pd.DataFrame({'group_val': [3.5, 7]}, index=['a', 'b'])
left1
right1
pd.merge(left1, right1, left_on='key', right_index=True)
pd.merge(left1, right1, left_on='key', right_index=True, how='outer')
lefth = pd.DataFrame({'key1': ['Ohio', 'Ohio', 'Ohio',
'Nevada', 'Nevada'],
'key2': [2000, 2001, 2002, 2001, 2002],
'data': np.arange(5.)})
righth = pd.DataFrame(np.arange(12).reshape((6, 2)),
index=[['Nevada', 'Nevada', 'Ohio', 'Ohio',
'Ohio', 'Ohio'],
[2001, 2000, 2000, 2000, 2001, 2002]],
columns=['event1', 'event2'])
lefth
righth
pd.merge(lefth, righth, left_on=['key1', 'key2'], right_index=True)
pd.merge(lefth, righth, left_on=['key1', 'key2'],
right_index=True, how='outer')
left2 = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]],
index=['a', 'c', 'e'],
columns=['Ohio', 'Nevada'])
right2 = pd.DataFrame([[7., 8.], [9., 10.], [11., 12.], [13, 14]],
index=['b', 'c', 'd', 'e'],
columns=['Missouri', 'Alabama'])
left2
right2
pd.merge(left2, right2, how='outer', left_index=True, right_index=True)
left2.join(right2, how='outer')
left1.join(right1, on='key')
another = pd.DataFrame([[7., 8.], [9., 10.], [11., 12.], [16., 17.]],
index=['a', 'c', 'e', 'f'],
columns=['New York', 'Oregon'])
another
left2.join([right2, another])
left2.join([right2, another], how='outer')
###Output
_____no_output_____
###Markdown
Concatenating Along an Axis
###Code
arr = np.arange(12).reshape((3, 4))
arr
np.concatenate([arr, arr], axis=1)
s1 = pd.Series([0, 1], index=['a', 'b'])
s2 = pd.Series([2, 3, 4], index=['c', 'd', 'e'])
s3 = pd.Series([5, 6], index=['f', 'g'])
pd.concat([s1, s2, s3])
pd.concat([s1, s2, s3], axis=1)
s4 = pd.concat([s1, s3])
s4
pd.concat([s1, s4], axis=1)
pd.concat([s1, s4], axis=1, join='inner')
pd.concat([s1, s4], axis=1, join_axes=[['a', 'c', 'b', 'e']])
result = pd.concat([s1, s1, s3], keys=['one', 'two', 'three'])
result
result.unstack()
pd.concat([s1, s2, s3], axis=1, keys=['one', 'two', 'three'])
df1 = pd.DataFrame(np.arange(6).reshape(3, 2), index=['a', 'b', 'c'],
columns=['one', 'two'])
df2 = pd.DataFrame(5 + np.arange(4).reshape(2, 2), index=['a', 'c'],
columns=['three', 'four'])
df1
df2
pd.concat([df1, df2], axis=1, keys=['level1', 'level2'])
pd.concat({'level1': df1, 'level2': df2}, axis=1)
pd.concat([df1, df2], axis=1, keys=['level1', 'level2'],
names=['upper', 'lower'])
df1 = pd.DataFrame(np.random.randn(3, 4), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.random.randn(2, 3), columns=['b', 'd', 'a'])
df1
df2
pd.concat([df1, df2], ignore_index=True)
###Output
_____no_output_____
###Markdown
Combining Data with Overlap
###Code
a = pd.Series([np.nan, 2.5, np.nan, 3.5, 4.5, np.nan],
index=['f', 'e', 'd', 'c', 'b', 'a'])
b = pd.Series(np.arange(len(a), dtype=np.float64),
index=['f', 'e', 'd', 'c', 'b', 'a'])
b[-1] = np.nan
a
b
np.where(pd.isnull(a), b, a)
b[:-2].combine_first(a[2:])
df1 = pd.DataFrame({'a': [1., np.nan, 5., np.nan],
'b': [np.nan, 2., np.nan, 6.],
'c': range(2, 18, 4)})
df2 = pd.DataFrame({'a': [5., 4., np.nan, 3., 7.],
'b': [np.nan, 3., 4., 6., 8.]})
df1
df2
df1.combine_first(df2)
###Output
_____no_output_____
###Markdown
Reshaping and Pivoting
Reshaping with Hierarchical Indexing
###Code
data = pd.DataFrame(np.arange(6).reshape((2, 3)),
index=pd.Index(['Ohio', 'Colorado'], name='state'),
columns=pd.Index(['one', 'two', 'three'],
name='number'))
data
result = data.stack()
result
result.unstack()
result.unstack(0)
result.unstack('state')
s1 = pd.Series([0, 1, 2, 3], index=['a', 'b', 'c', 'd'])
s2 = pd.Series([4, 5, 6], index=['c', 'd', 'e'])
data2 = pd.concat([s1, s2], keys=['one', 'two'])
data2
data2.unstack()
data2.unstack()
data2.unstack().stack()
data2.unstack().stack(dropna=False)
df = pd.DataFrame({'left': result, 'right': result + 5},
columns=pd.Index(['left', 'right'], name='side'))
df
df.unstack('state')
df.unstack('state').stack('side')
###Output
_____no_output_____
###Markdown
Pivoting “Long” to “Wide” Format
###Code
data = pd.read_csv('examples/macrodata.csv')
data.head()
periods = pd.PeriodIndex(year=data.year, quarter=data.quarter,
name='date')
columns = pd.Index(['realgdp', 'infl', 'unemp'], name='item')
data = data.reindex(columns=columns)
data.index = periods.to_timestamp('D', 'end')
ldata = data.stack().reset_index().rename(columns={0: 'value'})
ldata[:10]
pivoted = ldata.pivot('date', 'item', 'value')
pivoted
ldata['value2'] = np.random.randn(len(ldata))
ldata[:10]
pivoted = ldata.pivot('date', 'item')
pivoted[:5]
pivoted['value'][:5]
unstacked = ldata.set_index(['date', 'item']).unstack('item')
unstacked[:7]
###Output
_____no_output_____
###Markdown
Pivoting “Wide” to “Long” Format
###Code
df = pd.DataFrame({'key': ['foo', 'bar', 'baz'],
'A': [1, 2, 3],
'B': [4, 5, 6],
'C': [7, 8, 9]})
df
melted = pd.melt(df, ['key'])
melted
reshaped = melted.pivot('key', 'variable', 'value')
reshaped
reshaped.reset_index()
pd.melt(df, id_vars=['key'], value_vars=['A', 'B'])
pd.melt(df, value_vars=['A', 'B', 'C'])
pd.melt(df, value_vars=['key', 'A', 'B'])
###Output
_____no_output_____ |
test/papermill_notebook.ipynb | ###Markdown
Papermill notebook test
More information in [papermill's GitHub](https://github.com/nteract/papermill).
###Code
import papermill as pm
version='1.0'
integer=5
def plus_five(num):
return num+5
result = plus_five(integer)
print(result)
pm.record("result", result)
def check_version(version):
pm_version = pm.__version__
if version < pm_version:
raise ValueError("Error")
else:
return True
checked_version = check_version(version)
print(checked_version)
pm.record("checked_version", checked_version)
pm.record("integer", integer)
###Output
_____no_output_____ |
day5_XGBoost_Hyperopt.ipynb | ###Markdown
###Code
!pip install --upgrade tables
!pip install eli5
!pip install xgboost
!pip install hyperopt
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
from hyperopt import hp, fmin, tpe, STATUS_OK, STATUS_FAIL
import eli5
from eli5.sklearn import PermutationImportance
cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car"
df = pd.read_hdf('data/car.h5')
df.shape
###Output
_____no_output_____
###Markdown
Feature Engineering
###Code
SUFFIX_CAT = '__cat'
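# For every column (list-valued columns are skipped), store the integer codes from
# factorize() in a new '<column>__cat' column; columns already ending in '__cat'
# would be overwritten in place.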
for feat in df.columns:
if isinstance(df[feat][0], list): continue
factorized_values = df[feat].factorize()[0]
if SUFFIX_CAT in feat:
df[feat] = factorized_values
else:
df[feat + SUFFIX_CAT] = factorized_values
df = df[df['price_currency'] != 'EUR']  # drop offers priced in EUR
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]))
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(x.split('cm')[0].replace(' ', '')))
def run_model(model, feats):
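    # 3-fold CV with negative mean absolute error (values closer to zero are better);
    # returns the mean and standard deviation of the scores across folds.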
X = df[feats].values
y = df['price_value'].values
scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')
return np.mean(scores), np.std(scores)
feats = [
'param_napęd__cat',
'param_rok-produkcji',
'param_stan__cat',
'param_skrzynia-biegów__cat',
'param_faktura-vat__cat',
'param_moc',
'param_marka-pojazdu__cat',
'feature_kamera-cofania__cat',
'param_typ__cat',
'param_pojemność-skokowa',
'seller_name__cat',
'feature_wspomaganie-kierownicy__cat',
'param_model-pojazdu__cat',
'param_wersja__cat',
'param_kod-silnika__cat',
'feature_system-start-stop__cat',
'feature_asystent-pasa-ruchu__cat',
'feature_czujniki-parkowania-przednie__cat',
'feature_łopatki-zmiany-biegów__cat',
'feature_regulowane-zawieszenie__cat',
]
len(feats)
xgb_params = {
'max_depth': 5,
'n_estimators': 50,
'learning_rate': 0.1,
'seed': 0
}
run_model(xgb.XGBRegressor(**xgb_params), feats)
###Output
[16:59:27] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.
[16:59:31] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.
[16:59:35] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.
###Markdown
Hyperopt
###Code
def obj_func(params):
print("Training with params: ")
print(params)
mean_mae, score_std, = run_model(xgb.XGBRegressor(**params), feats)
try:
return {'loss': np.abs(mean_mae), 'status': STATUS_OK}
except:
return {'loss': np.abs(mean_mae), 'status': STATUS_FAIL}
#space
xgb_reg_params = {
'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
'max_depth': hp.choice('max_depth', np.arange(5, 12, 1, dtype=int)),
'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
'colsample_bytree': hp.quniform('colsample_bytree', 0.6, 0.98, 0.02),
'objective': 'reg:squarederror',
'n_estimators': 100,
'seed': 0,
'tree_method': 'gpu_hist'
}
#run
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=30)
best
###Output
_____no_output_____ |
pytorch/Part 6 - Saving and Loading Models.ipynb | ###Markdown
Saving and Loading Models
In this notebook, I'll show you how to save and load models with PyTorch. This is important because you'll often want to load previously trained models to use in making predictions or to continue training on new data.
###Code
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import helper
import fc_model
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
# Download and load the training data
trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
Here we can see one of the images.
###Code
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
###Output
_____no_output_____
###Markdown
Train a network
To make things more concise here, I moved the model architecture and training code from the last part to a file called `fc_model`. Importing this, we can easily create a fully-connected network with `fc_model.Network`, and train the network using `fc_model.train`. I'll use this model (once it's trained) to demonstrate how we can save and load models.
###Code
# Create the network, define the criterion and optimizer
model = fc_model.Network(784, 10, [512, 256, 128])
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)
###Output
Epoch: 1/2.. Training Loss: 1.720.. Test Loss: 1.016.. Test Accuracy: 0.624
Epoch: 1/2.. Training Loss: 1.075.. Test Loss: 0.746.. Test Accuracy: 0.717
Epoch: 1/2.. Training Loss: 0.836.. Test Loss: 0.675.. Test Accuracy: 0.737
Epoch: 1/2.. Training Loss: 0.797.. Test Loss: 0.674.. Test Accuracy: 0.751
Epoch: 1/2.. Training Loss: 0.786.. Test Loss: 0.611.. Test Accuracy: 0.763
Epoch: 1/2.. Training Loss: 0.691.. Test Loss: 0.607.. Test Accuracy: 0.774
Epoch: 1/2.. Training Loss: 0.678.. Test Loss: 0.586.. Test Accuracy: 0.785
Epoch: 1/2.. Training Loss: 0.669.. Test Loss: 0.569.. Test Accuracy: 0.788
Epoch: 1/2.. Training Loss: 0.691.. Test Loss: 0.552.. Test Accuracy: 0.792
Epoch: 1/2.. Training Loss: 0.601.. Test Loss: 0.544.. Test Accuracy: 0.797
Epoch: 1/2.. Training Loss: 0.646.. Test Loss: 0.540.. Test Accuracy: 0.793
Epoch: 1/2.. Training Loss: 0.609.. Test Loss: 0.543.. Test Accuracy: 0.796
Epoch: 1/2.. Training Loss: 0.636.. Test Loss: 0.519.. Test Accuracy: 0.807
Epoch: 1/2.. Training Loss: 0.619.. Test Loss: 0.533.. Test Accuracy: 0.808
Epoch: 1/2.. Training Loss: 0.615.. Test Loss: 0.504.. Test Accuracy: 0.813
Epoch: 1/2.. Training Loss: 0.577.. Test Loss: 0.518.. Test Accuracy: 0.809
Epoch: 1/2.. Training Loss: 0.612.. Test Loss: 0.500.. Test Accuracy: 0.817
Epoch: 1/2.. Training Loss: 0.535.. Test Loss: 0.519.. Test Accuracy: 0.815
Epoch: 1/2.. Training Loss: 0.614.. Test Loss: 0.520.. Test Accuracy: 0.804
Epoch: 1/2.. Training Loss: 0.594.. Test Loss: 0.489.. Test Accuracy: 0.820
Epoch: 1/2.. Training Loss: 0.588.. Test Loss: 0.494.. Test Accuracy: 0.823
Epoch: 1/2.. Training Loss: 0.607.. Test Loss: 0.481.. Test Accuracy: 0.821
Epoch: 1/2.. Training Loss: 0.563.. Test Loss: 0.478.. Test Accuracy: 0.825
Epoch: 2/2.. Training Loss: 0.576.. Test Loss: 0.482.. Test Accuracy: 0.825
Epoch: 2/2.. Training Loss: 0.545.. Test Loss: 0.463.. Test Accuracy: 0.831
Epoch: 2/2.. Training Loss: 0.547.. Test Loss: 0.484.. Test Accuracy: 0.824
Epoch: 2/2.. Training Loss: 0.547.. Test Loss: 0.467.. Test Accuracy: 0.830
Epoch: 2/2.. Training Loss: 0.535.. Test Loss: 0.467.. Test Accuracy: 0.830
Epoch: 2/2.. Training Loss: 0.535.. Test Loss: 0.466.. Test Accuracy: 0.831
Epoch: 2/2.. Training Loss: 0.544.. Test Loss: 0.465.. Test Accuracy: 0.831
Epoch: 2/2.. Training Loss: 0.523.. Test Loss: 0.480.. Test Accuracy: 0.824
Epoch: 2/2.. Training Loss: 0.563.. Test Loss: 0.472.. Test Accuracy: 0.831
Epoch: 2/2.. Training Loss: 0.545.. Test Loss: 0.461.. Test Accuracy: 0.835
Epoch: 2/2.. Training Loss: 0.557.. Test Loss: 0.444.. Test Accuracy: 0.839
Epoch: 2/2.. Training Loss: 0.543.. Test Loss: 0.443.. Test Accuracy: 0.838
Epoch: 2/2.. Training Loss: 0.518.. Test Loss: 0.451.. Test Accuracy: 0.832
Epoch: 2/2.. Training Loss: 0.546.. Test Loss: 0.452.. Test Accuracy: 0.827
Epoch: 2/2.. Training Loss: 0.556.. Test Loss: 0.468.. Test Accuracy: 0.836
Epoch: 2/2.. Training Loss: 0.515.. Test Loss: 0.461.. Test Accuracy: 0.832
Epoch: 2/2.. Training Loss: 0.539.. Test Loss: 0.447.. Test Accuracy: 0.837
Epoch: 2/2.. Training Loss: 0.489.. Test Loss: 0.445.. Test Accuracy: 0.840
Epoch: 2/2.. Training Loss: 0.507.. Test Loss: 0.462.. Test Accuracy: 0.834
Epoch: 2/2.. Training Loss: 0.532.. Test Loss: 0.452.. Test Accuracy: 0.835
Epoch: 2/2.. Training Loss: 0.493.. Test Loss: 0.439.. Test Accuracy: 0.841
Epoch: 2/2.. Training Loss: 0.500.. Test Loss: 0.442.. Test Accuracy: 0.843
Epoch: 2/2.. Training Loss: 0.502.. Test Loss: 0.455.. Test Accuracy: 0.838
###Markdown
Saving and loading networks
As you can imagine, it's impractical to train a network every time you need to use it. Instead, we can save trained networks then load them later to train more or use them for predictions.
The parameters for PyTorch networks are stored in a model's `state_dict`. We can see the state dict contains the weight and bias matrices for each of our layers.
###Code
print("Our model: \n\n", model, '\n')
print("The state dict keys: \n\n", model.state_dict().keys())
###Output
Our model:
Network(
(hidden_layers): ModuleList(
(0): Linear(in_features=784, out_features=512, bias=True)
(1): Linear(in_features=512, out_features=256, bias=True)
(2): Linear(in_features=256, out_features=128, bias=True)
)
(output): Linear(in_features=128, out_features=10, bias=True)
(dropout): Dropout(p=0.5)
)
The state dict keys:
odict_keys(['hidden_layers.0.weight', 'hidden_layers.0.bias', 'hidden_layers.1.weight', 'hidden_layers.1.bias', 'hidden_layers.2.weight', 'hidden_layers.2.bias', 'output.weight', 'output.bias'])
###Markdown
The simplest thing to do is simply save the state dict with `torch.save`. For example, we can save it to a file `'checkpoint.pth'`.
###Code
torch.save(model.state_dict(), 'checkpoint.pth')
###Output
_____no_output_____
###Markdown
Then we can load the state dict with `torch.load`.
###Code
state_dict = torch.load('checkpoint.pth')
print(state_dict.keys())
###Output
odict_keys(['hidden_layers.0.weight', 'hidden_layers.0.bias', 'hidden_layers.1.weight', 'hidden_layers.1.bias', 'hidden_layers.2.weight', 'hidden_layers.2.bias', 'output.weight', 'output.bias'])
###Markdown
And to load the state dict in to the network, you do `model.load_state_dict(state_dict)`.
###Code
model.load_state_dict(state_dict)
###Output
_____no_output_____
###Markdown
Seems pretty straightforward, but as usual it's a bit more complicated. Loading the state dict works only if the model architecture is exactly the same as the checkpoint architecture. If I create a model with a different architecture, this fails.
###Code
# Try this
model = fc_model.Network(784, 10, [400, 200, 100])
# This will throw an error because the tensor sizes are wrong!
model.load_state_dict(state_dict)
###Output
_____no_output_____
###Markdown
This means we need to rebuild the model exactly as it was when trained. Information about the model architecture needs to be saved in the checkpoint, along with the state dict. To do this, you build a dictionary with all the information you need to compeletely rebuild the model.
###Code
checkpoint = {'input_size': 784,
'output_size': 10,
'hidden_layers': [each.out_features for each in model.hidden_layers],
'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
###Output
_____no_output_____
###Markdown
Now the checkpoint has all the necessary information to rebuild the trained model. You can easily make that a function if you want. Similarly, we can write a function to load checkpoints.
###Code
def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
model = fc_model.Network(checkpoint['input_size'],
checkpoint['output_size'],
checkpoint['hidden_layers'])
model.load_state_dict(checkpoint['state_dict'])
return model
model = load_checkpoint('checkpoint.pth')
print(model)
###Output
Network(
(hidden_layers): ModuleList(
(0): Linear(in_features=784, out_features=400, bias=True)
(1): Linear(in_features=400, out_features=200, bias=True)
(2): Linear(in_features=200, out_features=100, bias=True)
)
(output): Linear(in_features=100, out_features=10, bias=True)
(dropout): Dropout(p=0.5)
)
|
juliacolab1_6_new.ipynb | ###Markdown
###Code
# Installation cell
%%capture
%%shell
if ! command -v julia 3>&1 > /dev/null
then
wget -q 'https://julialang-s3.julialang.org/bin/linux/x64/1.6/julia-1.6.0-linux-x86_64.tar.gz' \
-O /tmp/julia.tar.gz
tar -x -f /tmp/julia.tar.gz -C /usr/local --strip-components 1
rm /tmp/julia.tar.gz
fi
julia -e 'using Pkg; pkg"add IJulia; precompile;"'
echo 'Done'
###Output
_____no_output_____
###Markdown
After you run the first cell (the cell directly above this text), go to Colab's menu bar, select **Edit**, and choose **Notebook settings** from the drop down. Select *Julia 1.6* as the runtime and *GPU* as the hardware accelerator. You should see something like this:
> Click on SAVE

**We are ready to get going**
###Code
VERSION
###Output
_____no_output_____
###Markdown
The main reason we are interested in running Julia on Colab is the GPU functionality. So let us benchmark the performance on the GPU.
**The next three cells are optional and are for people using it for the first time.**
Optional GPU Experiments
###Code
using Pkg
Pkg.add(["BenchmarkTools", "CUDA"])
using BenchmarkTools, CUDA
mcpu = rand(2^10, 2^10)
@benchmark mcpu*mcpu
println("The CuArray operation should take around 0.5 ms (excluding the CUDA download time, which is a one-time process), and should be much faster. If so, the GPU is working.")
mgpu = cu(mcpu)
@benchmark CUDA.@sync mgpu*mgpu
has_cuda_gpu()
CUDA.device()
###Output
_____no_output_____ |
school_tiles_download.ipynb | ###Markdown
Workflow for training dataset generation for ML UNICEF school detection
This notebook will work through a few steps we took to generate the training dataset for the machine learning work of school detection.
- Training dataset validation and cleaning by the Development Seed Data Team, a group of eight expert mappers.
- The reviewed schools were classified into three groups against the DG Vivid base satellite layer by the expert mappers:
  - confirmed schools
  - unrecognized schools
  - not-schools
- We added a few more objects, including hospitals, courthouses, marketplaces, parks, and farms, to the not-school class to balance the training classes.
- We generated tile grids with DevSeed Geokit from the school and not-school points.
- We wrote a Python function to download the tiles for both classes.

At the end these are the numbers we have:

| Tasks | Confirmed | Unrecognized | Not-schools | Total |
| ----- | --------- | ------------ | ----------- | ----- |
| Data cleaning | 6,663 | 11,774 | 2,268 | 20,705 |
| Tile generation | 5,452 | N/A | 3,953 | 9,405 |

Adding more objects from OSM
- Step one, download the [OSM Colombia dataset](http://download.geofabrik.de/south-america/colombia.html) from Geofabrik.
- Step two, split the training dataset into confirmed schools and not-schools from the validated/cleaned points.
- Step three, use [Development Seed Geokit](https://github.com/developmentseed/geokit) to extract hospitals, courthouses, marketplaces, parks and farms.
  - Using the hospital extraction from OSM as an example:
    - run 'docker run --rm -v ${PWD}:/app developmentseed/geokit osmfilter colombia.osm --keep="amenity=hospital" -o=hospital_r_colombia.osm'
    - run "docker run --rm -v ${PWD}:/app developmentseed/geokit osmtogeojson hospital_r_colombia.osm > hospitals_c.geojson"
    - from QGIS, only selected points are kept for the training dataset as hospitals_c_final.geojson.

Create tiles from points
- Merge all the geojsons. After adding all the objects, e.g. hospitals, parks, farms, courthouses, and marketplaces, you will need to merge all of the objects' geojsons into `not_schools_final.geojson`. Run 'docker run --rm -v ${PWD}:/app developmentseed/geokit geojson-merge input1.geojson input2.geojson > output.geojson'; remember to replace all the geojson names accordingly.
- Generate the not-school tile-grid from the not-school points. Run 'docker run --rm -v ${PWD}:/app developmentseed/geokit point2tile data/combined_not_schools_final.geojson --zoom=17 --buffer=0.001 > data/not_schools_tiles_1m.geojson'
- Generate the school tile-grid from the school points. Run 'docker run --rm -v ${PWD}:/app developmentseed/geokit point2tile data/confirmed_schools_final.geojson --zoom=17 --buffer=0.001 > data/schools_tiles_1m.geojson'

Download all the tiles for school and not-school
Use the following script to download the tiles.
###Code
## Remember to replace "TOKEN" in the config below with your own access token
%%file unicef_school_tiles.json
{"school": "schools_tiles_1m.geojson",
"not_school": "not_schools_tiles_1m.geojson",
"school_url":"https://a.tiles.mapbox.com/v4/digitalglobe.2lnpeioh/{z}/{x}/{y}.tif?access_token=TOKEN",
"not_school_url": "https://a.tiles.mapbox.com/v4/digitalglobe.2lnpeioh/{z}/{x}/{y}.png?access_token=TOKEN"}
import json
from urllib.parse import urlparse, parse_qs
import requests
import os
from os import makedirs, path as op
import rasterio
def get_tile(geojson, base_url):
"""
Function to download tiles for school and not-school.
The tile index was created using DevSeed Geokit with 1m buffer to the geolocation points for school and not-school classes;
:param geojson: geojson with the tile index from geokit (point2tile);
:param base_url: url to access DG Vivid, including the access token needed to download the tiles.
:return tiles: a list of tiles
"""
# open geojson and get tile index
with open(geojson, 'r') as data:
tile_geojson = json.load(data)
features = tile_geojson["features"]
# get the tile index as x, y, z formats.
xyz = [features[i]['properties']['tiles'] for i in range(len(features))]
# create tile folder
tiles_folder = op.splitext(geojson)[0].split("/")[0]
if not op.isdir(tiles_folder):
makedirs(tiles_folder)
# download and get the list of tiles
tiles = list()
for i in range(len(xyz)):
# x, y, z = str(xyz[i])
x=str(xyz[i][0])
y=str(xyz[i][1])
z=str(xyz[i][2])
url = base_url.replace('{x}', x).replace('{y}', y).replace('{z}', z)
o = urlparse(url)
_, image_format = op.splitext(o.path)
tile_bn ="{}-{}-{}{}".format(z, x, y,image_format)
r = requests.get(url)
tile= op.join(tiles_folder, tile_bn)
tiles.append(tile)
with open(tile, 'wb')as w:
w.write(r.content)
return tiles
with open("unicef_school_tiles.json", 'r') as config:
all_data = json.load(config)
school_geojson = all_data["school"]
school_turl = all_data["school_url"]
not_school_geojson= all_data["not_school"]
not_school_turl = all_data["not_school_url"]
# download all the school tiles
school_tiles = get_tile(school_geojson, school_turl)
# download all the none school tiles
not_school_tiles = get_tile(not_school_geojson, not_school_turl)
###Output
_____no_output_____ |
chemberta/visualization/ChemBERTA_dimensionaliy_reduction_BBBP.ipynb | ###Markdown
Compute transformer embeddings (using BPE tokenization)
###Code
print (len(smiles))
# compute transformer embeddings
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
return sum_embeddings / sum_mask
def gen_embeddings (model, tokenizer):
#Tokenize sentences
encoded_input = tokenizer(smiles, padding=True, truncation=True, max_length=128, return_tensors='pt')
#Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
#Perform pooling. In this case, mean pooling
return mean_pooling(model_output, encoded_input['attention_mask'])
#Load AutoModel from huggingface model repository
tokenizer = AutoTokenizer.from_pretrained("seyonec/PubChem10M_SMILES_BPE_396_250")
model = AutoModel.from_pretrained("seyonec/PubChem10M_SMILES_BPE_396_250")
tokenizer_2 = AutoTokenizer.from_pretrained("seyonec/SMILES_tokenized_PubChem_shard00_160k")
model_2 = AutoModel.from_pretrained("seyonec/SMILES_tokenized_PubChem_shard00_160k")
bpe_sentence_embeddings = gen_embeddings(model, tokenizer)
st_sentence_embeddings = gen_embeddings(model_2, tokenizer_2)
###Output
Some weights of the model checkpoint at seyonec/PubChem10M_SMILES_BPE_396_250 were not used when initializing RobertaModel: ['lm_head.decoder.bias', 'lm_head.decoder.weight', 'lm_head.bias', 'lm_head.dense.bias', 'lm_head.layer_norm.bias', 'lm_head.layer_norm.weight', 'lm_head.dense.weight']
- This IS expected if you are initializing RobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing RobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Some weights of the model checkpoint at seyonec/SMILES_tokenized_PubChem_shard00_160k were not used when initializing RobertaModel: ['lm_head.decoder.bias', 'lm_head.decoder.weight', 'lm_head.bias', 'lm_head.dense.bias', 'lm_head.layer_norm.bias', 'lm_head.layer_norm.weight', 'lm_head.dense.weight']
- This IS expected if you are initializing RobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing RobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
###Markdown
Compute ECFP descriptors
###Code
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import umap
%%time
#embed with umap
umap_model = umap.UMAP(metric = "jaccard",
n_neighbors = 25,
n_components = 2,
low_memory = False,
min_dist = 0.001)
X_umap = umap_model.fit_transform(bpe_sentence_embeddings)
bbbp["BPE_UMAP_0"], bbbp["BPE_UMAP_1"] = X_umap[:,0], X_umap[:,1]
%%time
#embed with umap
umap_model = umap.UMAP(metric = "jaccard",
n_neighbors = 25,
n_components = 2,
low_memory = False,
min_dist = 0.001)
X_umap = umap_model.fit_transform(st_sentence_embeddings)
bbbp["st_UMAP_0"], bbbp["st_UMAP_1"] = X_umap[:,0], X_umap[:,1]
# Compute descriptors and keep track of which failed to featurize
ecfp_descriptors, keep_idx = compute_ecfp_descriptors(bbbp["smiles"])
# Only keep those that sucessfully featurized
bbbp = bbbp.iloc[keep_idx]
ecfp_umap_model = umap.UMAP(metric = "jaccard",
n_neighbors = 25,
n_components = 2,
low_memory = False,
min_dist = 0.001)
ecfp_X_umap = umap_model.fit_transform(ecfp_descriptors)
bbbp["ecfp_UMAP_0"], bbbp["ecfp_UMAP_1"] = ecfp_X_umap[:,0], ecfp_X_umap[:,1]
palette = sns.color_palette(["hotpink", "dodgerblue"])
for method in ["BPE_UMAP", "st_UMAP", "ecfp_UMAP"]:
plt.figure(figsize=(8,8))
sns.scatterplot(data=bbbp,
x=f"{method}_0",
y=f"{method}_1",
hue="permeable",
alpha=0.5,
palette=palette)
plt.title(f"{method} Embedding of BBBP Dataset")
plt.show()
###Output
_____no_output_____
###Markdown
Modify to try with n_neighbors = 100, keeping the other parameters the same
###Code
%%time
#embed with umap
umap_model = umap.UMAP(metric = "jaccard",
n_neighbors = 100,
n_components = 2,
low_memory = False,
min_dist = 0.001)
X_umap = umap_model.fit_transform(bpe_sentence_embeddings)
bbbp["bpe_UMAP_0"], bbbp["bpe_UMAP_1"] = X_umap[:,0], X_umap[:,1]
%%time
#embed with umap
umap_model = umap.UMAP(metric = "jaccard",
n_neighbors = 100,
n_components = 2,
low_memory = False,
min_dist = 0.001)
X_umap = umap_model.fit_transform(st_sentence_embeddings)
bbbp["st_UMAP_0"], bbbp["st_UMAP_1"] = X_umap[:,0], X_umap[:,1]
ecfp_umap_model = umap.UMAP(metric = "jaccard",
n_neighbors = 100,
n_components = 2,
low_memory = False,
min_dist = 0.001)
ecfp_X_umap = umap_model.fit_transform(ecfp_descriptors)
bbbp["ecfp_UMAP_0"], bbbp["ecfp_UMAP_1"] = ecfp_X_umap[:,0], ecfp_X_umap[:,1]
palette = sns.color_palette(["hotpink", "dodgerblue"])
for method in ["bpe_UMAP", "st_UMAP", "ecfp_UMAP"]:
plt.figure(figsize=(8,8))
sns.scatterplot(data=bbbp,
x=f"{method}_0",
y=f"{method}_1",
hue="permeable",
alpha=0.5,
palette=palette)
plt.title(f"{method} Embedding of BBBP Dataset")
plt.show()
###Output
_____no_output_____ |
versions/0.01/numpy analsis.ipynb | ###Markdown
array([[array([[255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255], [ 0, 0, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255]], dtype=uint8), list([0, 0, 0, 0, 0, 0, 0, 0, 1]), list([0, 0, -13184, 30461, 1627, -2722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) ]], dtype=object)
###Code
import numpy
import numpy as np

a = np.array((1,2,3))
b = np.array((2,3,4))
np.column_stack((a,b))
a = numpy.array([1, 2, 3])
b = numpy.array([5, 6])
np.concatenate((a, b), axis=None)
numpy.concatenate([a,b])
import numpy as np
np_2d = np.array([[1.73, 1.68, 1.71, 1.89, 1.79],
[65.4, 59.2, 63.6, 88.4, 68.7]])
arr = np_2d
a = np.array([[1, 2, 3],
[4, 5, 6]])
b = np.array([[9, 8, 7],
[6, 5, 4]])
c=np.concatenate((a, b))
d=np.vstack((a, b))
import numpy as np
np_3d = np.array([
list([1, 2 ,3]),
list([4, 5 ,6]),
list([7, 8 ,9]),
list([8, 10 ,11])
]
)
arr = np_3d
import numpy
a = numpy.array([1, 2, 3])
b = numpy.array([5, 6])
numpy.concatenate([a,b])
###Output
_____no_output_____ |
Chapter02/Activity2.05/Activity2_05.ipynb | ###Markdown
Activity 2.01
###Code
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
data = pd.read_csv("wholesale_customers_data.csv")
data.isnull().sum()
outliers = {}
for i in range(data.shape[1]):
min_t = data[data.columns[i]].mean() - (3 * data[data.columns[i]].std())
max_t = data[data.columns[i]].mean() + (3 * data[data.columns[i]].std())
count = 0
for j in data[data.columns[i]]:
if j < min_t or j > max_t:
count += 1
outliers[data.columns[i]] = [count,data.shape[0]-count]
print(outliers)
plt.hist(data["Fresh"])
plt.show()
plt.figure(figsize=(8,8))
plt.pie(outliers["Detergents_Paper"],autopct="%.2f")
plt.show()
data_standardized = (data - data.mean())/data.std()
data_standardized.head()
###Output
_____no_output_____
###Markdown
Activity 2.02
###Code
from sklearn.cluster import KMeans
ideal_k = []
for i in range(1,21):
est_kmeans = KMeans(n_clusters=i, random_state = 0)
est_kmeans.fit(data_standardized)
ideal_k.append([i,est_kmeans.inertia_])
ideal_k = np.array(ideal_k)
plt.plot(ideal_k[:,0],ideal_k[:,1])
plt.show()
###Output
_____no_output_____
###Markdown
A number of 6 clusters was selected
###Code
est_kmeans = KMeans(n_clusters=6, random_state = 0)
est_kmeans.fit(data_standardized)
pred_kmeans = est_kmeans.predict(data_standardized)
plt.subplots(1, 2, sharex='col', sharey='row', figsize=(16,8))
plt.scatter(data.iloc[:,5], data.iloc[:,3], c=pred_kmeans, s=20)
plt.xlim([0, 20000])
plt.ylim([0,20000])
plt.xlabel('Frozen')
plt.subplot(1, 2, 1)
plt.scatter(data.iloc[:,4], data.iloc[:,3], c=pred_kmeans, s=20)
plt.xlim([0, 20000])
plt.ylim([0,20000])
plt.xlabel('Grocery')
plt.ylabel('Milk')
plt.show()
###Output
_____no_output_____
###Markdown
Activity 2.03
###Code
from sklearn.cluster import MeanShift
est_meanshift = MeanShift(bandwidth=0.4)
est_meanshift.fit(data_standardized)
pred_meanshift = est_meanshift.predict(data_standardized)
plt.subplots(1, 2, sharex='col', sharey='row', figsize=(16,8))
plt.scatter(data.iloc[:,5], data.iloc[:,3], c=pred_meanshift, s=20)
plt.xlim([0, 20000])
plt.ylim([0,20000])
plt.xlabel('Frozen')
plt.subplot(1, 2, 1)
plt.scatter(data.iloc[:,4], data.iloc[:,3], c=pred_meanshift, s=20)
plt.xlim([0, 20000])
plt.ylim([0,20000])
plt.xlabel('Grocery')
plt.ylabel('Milk')
plt.show()
###Output
_____no_output_____
###Markdown
Activity 2.04
###Code
from sklearn.cluster import DBSCAN
est_dbscan = DBSCAN(eps=0.8)
pred_dbscan = est_dbscan.fit_predict(data_standardized)
plt.subplots(1, 2, sharex='col', sharey='row', figsize=(16,8))
plt.scatter(data.iloc[:,5], data.iloc[:,3], c=pred_dbscan, s=20)
plt.xlim([0, 20000])
plt.ylim([0,20000])
plt.xlabel('Frozen')
plt.subplot(1, 2, 1)
plt.scatter(data.iloc[:,4], data.iloc[:,3], c=pred_dbscan, s=20)
plt.xlim([0, 20000])
plt.ylim([0,20000])
plt.xlabel('Grocery')
plt.ylabel('Milk')
plt.show()
###Output
_____no_output_____
###Markdown
Activity 2.05
###Code
from sklearn.metrics import silhouette_score
from sklearn.metrics import calinski_harabasz_score
kmeans_score = silhouette_score(data_standardized, pred_kmeans, metric='euclidean')
meanshift_score = silhouette_score(data_standardized, pred_meanshift, metric='euclidean')
dbscan_score = silhouette_score(data_standardized, pred_dbscan, metric='euclidean')
print(kmeans_score, meanshift_score, dbscan_score)
kmeans_score = calinski_harabasz_score(data_standardized, pred_kmeans)
meanshift_score = calinski_harabasz_score(data_standardized, pred_meanshift)
dbscan_score = calinski_harabasz_score(data_standardized, pred_dbscan)
print(kmeans_score, meanshift_score, dbscan_score)
###Output
145.73031893182392 112.90534400805596 42.45114955569689
|
notebooks/3i. Gibbs sampling.ipynb | ###Markdown
Introduction to MCMC

Takeaways and objectives from this notebook:
1. The concept of Gibbs sampling using full conditional distributions. When is Gibbs sampling suitable?
2. Combining Gibbs and Metropolis: Metropolis-within-Gibbs.

Gibbs sampling

So far we have seen the Metropolis algorithm as the workhorse of sampling from a posterior distribution. Another method of accomplishing the same is Gibbs sampling, which rests on different justifications and has different properties.

Gibbs sampling is the method of sampling from *full conditional posteriors*, so we need to compute the conditional distribution of each variable given the others and sample from that.

For round robin Gibbs, assuming that our state vector has $n$ variables $\mathbf{x}^{(t)} = [x_1^{(t)}, x_2^{(t)}, ..., x_n^{(t)}]$, the Gibbs sampler proceeds to sample $\mathbf{x}^{(t+1)}$ as follows:
1. Sample $x_1^{(t+1)}$ from $p(x_1^{(t+1)} \mid x_2^{(t)}, ..., x_n^{(t)})$,
2. Sample $x_2^{(t+1)}$ from $p(x_2^{(t+1)} \mid x_1^{(t+1)}, x_3^{(t)}, ..., x_n^{(t)})$,
3. ...
4. Sample $x_i^{(t+1)}$ from $p(x_i^{(t+1)} \mid x_1^{(t+1)}, ..., x_{i-1}^{(t+1)}, x_{i+1}^{(t)}, ..., x_n^{(t)})$,
5. ...
6. Sample $x_n^{(t+1)}$ from $p(x_n^{(t+1)} \mid x_1^{(t+1)}, ..., x_{n-1}^{(t+1)})$.

We then set $\mathbf{x}^{(t+1)} = [x_1^{(t+1)}, ..., x_n^{(t+1)}]$, as expected. So theoretically the algorithm is very simple. Programmatically we can think of the algorithm as going through the variables from first to last and updating them in-place. On the $i$-th variable, we simply use all the updated values for variables $1, ..., i-1$.

Alternatively, we can also use the *random scan Gibbs* sampling algorithm, which proceeds as follows:
1. Pick index $i$ uniformly at random from $1,...,n$,
2. Sample $x_i^{(t+1)}$ from $p(x_i^{(t+1)} \mid x_1^{(t)}, ..., x_{i-1}^{(t)}, x_{i+1}^{(t)}, ..., x_n^{(t)})$,
3. Set $\mathbf{x}^{(t+1)} = [x_1^{(t)}, ..., x_i^{(t+1)}, ..., x_n^{(t)}]$.

(A minimal generic code sketch of both scan orders is given below.)

*Blocked Gibbs sampling* treats some groups of variables in blocks and samples an update to them at the same time. This is useful for example if that group of variables is highly correlated. Considering them separately would then result in missing this structure. We will not consider blocking in this notebook.

Good introductory reading on Gibbs sampling is for example in Casella and George [2]. All of the above procedures correspond to valid Gibbs sampling algorithms, so there is considerable flexibility in the design.

A summary and comparison: Metropolis vs. Gibbs

A (biased) summary of the differences between Metropolis and Gibbs sampling could go as follows. While deriving a Gibbs sampler is more work, as a result the Gibbs sampler actually has better guidance in selecting new values for random variables. Where Metropolis performs blind guesses and relies on the accept/reject step to check if the step is good, the Gibbs sampler samples directly from the conditional distribution and thus is guided in its choice of the next value by both the data and the prior (as applicable to the current random variable).

We can also see [Gibbs sampling as a Metropolis-type proposal](https://en.wikipedia.org/wiki/Gibbs_sampling#Introduction) (or, more generally, a Metropolis-Hastings proposal) which is always accepted. There are some interesting connections between Gibbs and Metropolis that allow embedding one in the other, which will become useful later on.

How to select a strategy?

It is a natural question whether some of these strategies are better than others.
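As a concrete reference for the two scan orders described above, here is a minimal generic sketch. This is an illustration only: `sample_conditional(i, x)` is a hypothetical stand-in for a routine that draws $x_i$ from its full conditional $p(x_i \mid x_{-i})$ and is not defined in this notebook.
###Code
import numpy.random as nr

def systematic_scan_sweep(x, sample_conditional):
    # One round-robin sweep: update every coordinate once, in a fixed order,
    # reusing the already-updated values of the earlier coordinates.
    for i in range(len(x)):
        x[i] = sample_conditional(i, x)
    return x

def random_scan_step(x, sample_conditional):
    # One random-scan step: update a single, uniformly chosen coordinate.
    i = nr.randint(len(x))
    x[i] = sample_conditional(i, x)
    return x
###Markdown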
It's difficult to find information on these topics, but the following considerations seem useful.

**Blocked sampler** If some variables are highly correlated it makes sense to sample them together given the other variables. Thus if we imagine trying to use the Gibbs sampler on the 2D Gaussian with high correlation we have used previously, we see that being able to incorporate the correlation into the proposal would allow us to move diagonally instead of axis-aligned (per variable).

**Random/systematic scan** There don't seem to be results preferring one or the other, except for Andrieu 2016 [3] (!!), which shows that systematic scan is better than random scan in terms of reducing the variance of the estimator **if** the problem consists of two variables, and discusses why the proof does not hold for more variables than that.

Example: Ising model

A very popular example for the Gibbs algorithm is the [Ising model](https://en.wikipedia.org/wiki/Ising_model), which can be observed live for example on [this page](https://cs.stanford.edu/people/karpathy/visml/ising_example.html) [1].

A simple version of an Ising model is a two-dimensional lattice of binary variables $x_i \in \{-1,+1\}$ that are horizontally and vertically connected using pairwise factor potentials.

Image source: https://jgtechnologysolutions.org

We may also apply a potential field $\bf{b}$, which attracts some elements of the Ising model toward 1 or toward -1. The total *energy* of the field is given by the Hamiltonian and can be written as:

$$H(\textbf{x}) = - J \sum_{(i,j) \in E} x_i x_j - J_b \sum_{i \in V} b_i x_i,$$

where $J$ is the strength of the interactions, $J_b$ is the strength of the external field and $b_i$ are the desired values. The first sum is over the edges $E$ connecting the elements (see figure below) and the other is over the elements $V$ themselves.

We define the (unnormalized) distribution over states to be:

$$\pi(\textbf{x}) = \exp (- H(\textbf{x})).$$

The $\pi(x)$ distribution is our posterior that we wish to sample from (we didn't explicitly build any priors or data likelihood - let's assume that the function above is the result of such considerations). Let's break down how the probability depends on the value of a fixed element $x_i$.

Henceforth we will denote by $E_i(x_i)$ the part of the energy $E$ that depends on $x_i$, and by $E_{-i}$ the rest of the function. In Gibbs sampling we select one (or more) variables that we want to update and the rest of the model (here represented by $E_{-i}$) remains unchanged. For concreteness, we only select a single variable to update here. Note that $E = E_i(x_i) + E_{-i}$.

For the purposes of analyzing the update, we will denote the neighbouring nodes $x_a, x_b, x_c, x_d$. For Gibbs sampling, we need to compute $\pi(x_i \mid x_{-i})$, which can be computed as

$$ \pi(x_i = v \mid x_{-i}) = \frac{\pi(x_i=v, x_{-i})}{\pi(x_i=+1, x_{-i}) + \pi(x_i=-1, x_{-i})},$$

where $v \in \{ -1, +1 \}$.

**Note** that here we casually sidestep the normalization problem. Normalization is intractable for the entire model $\pi(x)$, but since here we restricted our analysis to only one variable, it's easy and is solved by the formula above, which returns a valid conditional distribution $\pi(x_i = v \mid x_{-i})$ even if $\pi(x)$ is not normalized.

These considerations lead us to consider $E_i(x_i)$ and $E_{-i}$.
Rewriting the conditional in terms of $E$, we get$$ \pi(x_i = +1 \mid x_{-i}) = \frac{\exp(E_i(x_i=+1) + E_{-i})}{\exp(E_i(x_i=+1)+E_{-i}) + \exp(E_i(x_i=-1)+E_{-i}) },$$from which we can factor and cancel out $E_{-i}$ to obtain$$ \pi(x_i = +1 \mid x_{-i}) = \frac{\exp(E_i(x_i=+1))}{\exp(E_i(x_i=+1)) + \exp(E_i(x_i=-1))}.$$**Note** that this results in a huge computational efficiency gain: we can evaluate the conditional with respect to only the neighbouring nodes, as the state of $x_i$ is conditionally independent of the rest of the model given its neighbors. In effect, we can compute the conditional in $O(1)$ time and update the entire state in $O(N)$ time, where $N$ is the number of elements. We thus have$$E_i(x_i) = J(x_ix_a + x_ix_b + x_ix_c + x_ix_d) + J_bb_ix_i,$$which factorizes nicely as$$E_i(x_i) = x_i \left ( J (x_a + x_b + x_c + x_d) + J_bb_i \right ),$$which makes the value extremely simple to compute for both options $x_i=+1$ and $x_i=-1$. Setting it up Below we build the force field that will act on the system, a random state generator and a function that computes the Hamiltonian for us.
###Code
import numpy as np
import numpy.random as nr
# rows and columns
rows, cols = 8, 8
b = np.array([[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, +1, -1, +1, +1, -1, -1],
[-1, -1, +1, +1, +1, +1, -1, -1],
[-1, -1, +1, -1, +1, -1, -1, -1],
[-1, -1, +1, +1, +1, +1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1]])
def random_state(prob_one):
    X = np.where(nr.uniform(size=(rows, cols)) < 1. - prob_one, -1, +1)
return X
X_init = random_state(0.5)
X_init
def compute_hamiltonian(X, b, J, Jb):
H = 0.0
for r in range(rows):
for c in range(cols):
            # accumulate the contribution of element (r, c): the field term plus the edges to its upper and left neighbours (so each edge is counted exactly once)
# sum over elements
H -= Jb * b[r,c] * X[r,c]
# sum over edges
if r > 0:
H -= J * X[r-1,c] * X[r,c]
if c > 0:
H -= J * X[r, c-1] * X[r,c]
return H
H = compute_hamiltonian(X_init, b, 1., 1.)
pi = np.exp(-H)
H, pi
###Output
_____no_output_____
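###Markdown
As a quick sanity check of the normalization formula above, the single-site conditional can also be written as a logistic sigmoid of the difference of the two Hamiltonians, $\pi(x_i=-1 \mid x_{-i}) = 1/(1+\exp(-(H_{+1}-H_{-1})))$. The sketch below verifies this numerically using the brute-force `compute_hamiltonian` (the site $(2, 1)$ and $J = J_b = 1$ are arbitrary choices); it is not the efficient update derived above.
###Code
# brute-force check: ratio form vs. sigmoid form of the single-site conditional
X_tmp = np.copy(X_init)
X_tmp[2, 1] = +1
H_plus = compute_hamiltonian(X_tmp, b, 1., 1.)
X_tmp[2, 1] = -1
H_minus = compute_hamiltonian(X_tmp, b, 1., 1.)
p_minus1_ratio = np.exp(-H_minus) / (np.exp(-H_minus) + np.exp(-H_plus))
p_minus1_sigmoid = 1. / (1. + np.exp(-(H_plus - H_minus)))
p_minus1_ratio, p_minus1_sigmoid
###Output
_____no_output_____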
###Markdown
Exercise: derive the full conditional A key part of developing the Gibbs sampler is the function that computes the conditional distribution of $x_i$ given $x_{-i}$, i.e. given all the other states. In the following, write the function that computes this efficiently. We supply a function that computes the value by definition but has terrible time complexity. To derive the more efficient function, analyze how a change in the value of $x_i$ affects the posterior and what remains unchanged; only the part that changes needs to be evaluated.
###Code
def compute_conditional_xrc_is_minus1_bad(X,b,r,c,J,Jb):
memory = X[r,c]
# compute conditional from definition
X[r,c] = +1.
pi_x_plus1 = np.exp(-compute_hamiltonian(X, b, J, Jb))
X[r,c] = -1
pi_x_minus1 = np.exp(-compute_hamiltonian(X, b, J, Jb))
return pi_x_minus1 / (pi_x_minus1 + pi_x_plus1)
def compute_conditional_xrc_is_minus1(X,b,r,c,J,Jb):
    # EXERCISE: compute a value proportional to p(x_[r,c] = -1 | all other variables)
    p_xrc_minus1 = 0.5
    # EXERCISE: compute a value proportional to p(x_[r,c] = +1 | all other variables)
    p_xrc_plus1 = 0.5
return p_xrc_minus1 / (p_xrc_minus1 + p_xrc_plus1)
###Output
_____no_output_____
###Markdown
Testing your functionThe two functions below must give the same output given the same inputs.
###Code
compute_conditional_xrc_is_minus1(X_init, b, 2, 1, 1., 0.5), compute_conditional_xrc_is_minus1_bad(X_init,b,2,1,1.,0.5)
###Output
_____no_output_____
###Markdown
Let's run the samplerThe function `run_gibbs_systematic_scan` below performs systematic scan Gibbs sampling. Using your function, run the sampler a few times and examine the visualizations to see what happens under various field and connection strengths.
###Code
def run_gibbs_systematic_scan(X, b, J, Jb, steps=1):
# note that X is modified in place
for s in range(steps):
# pre-sample a bunch of uniform random numbers
random_buffer = np.random.uniform(size=(rows, cols))
for r in range(rows):
for c in range(cols):
cond_p_xi_minus1 = compute_conditional_xrc_is_minus1(X, b, r, c, J, Jb)
X[r,c] = -1. if random_buffer[r,c] < cond_p_xi_minus1 else +1.
Xstate = np.copy(X_init)
run_gibbs_systematic_scan(Xstate, b, 1., 0.5)
Xstate
###Output
_____no_output_____
###Markdown
Visualizing samplesBelow we run the Gibbs sampler iteratively and display every second sample.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
steps_per_iter = 2
plt.figure(figsize=(16,16))
Xstate = random_state(0.1)
for step in range(36):
plt.subplot(6,6,step+1)
plt.imshow(Xstate)
plt.xticks([])
plt.yticks([])
    plt.title('$H({\\bf x})=$%g [%d]' % (compute_hamiltonian(Xstate, b, 0.8, 1.6), step*steps_per_iter))
run_gibbs_systematic_scan(Xstate, b, 0.8, 1.6, steps=steps_per_iter)
# let's generate 1000 samples
N = 1000
X = random_state(0.5)
Xs = np.zeros((N, rows, cols))
for i in range(N):
# variable is modified in place
run_gibbs_systematic_scan(X, b, 0.3, 0.6)
Xs[i,:,:] = X
plt.figure(figsize=(12,8))
plt.subplot(1,2,1)
plt.imshow(np.mean(Xs, axis=0))
plt.title('Expected values')
plt.colorbar(orientation='horizontal')
plt.clim([-1,1])
plt.subplot(1,2,2)
plt.imshow(np.std(Xs, axis=0))
plt.colorbar(orientation='horizontal')
plt.title('Standard deviations');
###Output
_____no_output_____
###Markdown
Exercise Build a `run_gibbs_random_scan` function to perform random scan Gibbs sampling, run it in place of `run_gibbs_systematic_scan` and observe whether there are any changes in the output. Metropolis-within-Gibbs The Gibbs algorithm requires that we can derive and evaluate conditional probabilities for all variables. This is impractical in many situations. In such cases, it's possible to sample with a design called Metropolis-within-Gibbs, where we combine Gibbs and Metropolis sampling. Extra reading on designing hybrid sampling systems is for example in Tierney [4]. Here we only note that there is significant flexibility in constructing hybrid samplers and the following is just one example. Let's solve a simple mixture problem: a mixture of two Gaussians that we want to fit to a set of points $d_i, i=1,...,40$ in one dimension.$$\begin{array}{rcl} \mu_0 &\sim& {\cal N}(0,10^2) \\ \mu_1 &\sim& {\cal N}(0,10^2) \\ Z_i &\sim& \text{Bernoulli}(0.5) \\ d_i &\sim& {\cal N}(\mu_0(1-Z_i) + \mu_1 Z_i, 1) \\\end{array}$$(the broad ${\cal N}(0,10^2)$ priors match the `log_prior` implementation below). Writing a Metropolis sampler for this model should be a breeze, but this time we will do something different: write a hybrid sampler that uses different methods of proposing different variables:- $\mu_0, \mu_1$ using Metropolis random walk (with accept/reject) and- $Z$ using Gibbs sampling (always accepted).If we cycle these two proposals, we have a valid hybrid sampler.
###Code
def generate_data(mu1, mu2, N):
return np.hstack([nr.randn(N) + mu1, nr.randn(N) + mu2])
def log_normal(x, mu, sd):
return - np.log(sd) - 0.5 * (x - mu)**2/sd**2
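# note: log_normal above is the Gaussian log-density only up to an additive constant
# (the -0.5*log(2*pi) term is dropped); that is fine for MCMC, since constants cancel
# in the accept/reject ratio and do not affect the Gibbs conditionals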
def log_prior(v):
    return log_normal(v['mu0'], 0, 10) + log_normal(v['mu1'], 0, 10) + len(v['Z']) * np.log(0.5)
def log_likelihood(v, data):
Z = v['Z']
return np.sum(log_normal(data, v['mu0'] * (1-Z) + v['mu1'] * Z, 1.0))
def log_posterior(v, data):
# add a factor potential that forces v['mu0'] <= v['mu1']
ordering_potential = -np.inf if v['mu0'] > v['mu1'] else 0.
return log_prior(v) + log_likelihood(v, data) + ordering_potential
# Let's make the problem easy to visualize
N = 20
true_mu0, true_mu1 = 2.0, 6.0
data = generate_data(true_mu0, true_mu1, N)
import seaborn as sns
sns.distplot(data[:N], hist=False, rug=True)
sns.distplot(data[N:], hist=False, rug=True)
plt.title('Density of underlying data');
data
v_init = {
'mu0': nr.randn() * 10,
'mu1': nr.randn() * 10,
'Z' : np.where(nr.rand(2*N) < 0.5, 0, 1)
}
# ensure ordering holds
if v_init['mu1'] < v_init['mu0']:
v_init['mu0'], v_init['mu1'] = v_init['mu1'], v_init['mu0']
v_init, log_posterior(v_init, data)
log_prior(v_init), log_likelihood(v_init, data)
def plot_state(v):
global data
Z=v['Z']
plt.plot([v['mu0']], [0.1], 'go')
plt.plot([v['mu1']], [0.1], 'ro')
sns.distplot(data[Z==0], hist=False, rug=True)
sns.distplot(data[Z==1], hist=False, rug=True)
plot_state(v_init)
plt.title('Initial state');
def propose_mus(v, logp):
v_prime = { 'mu0' : v['mu0'] + nr.randn() * 0.4,
'mu1' : v['mu1'] + nr.randn() * 0.4,
'Z' : v['Z']
}
logp_prime = log_posterior(v_prime, data)
if logp_prime - logp > np.log(nr.rand()):
return v_prime, logp_prime, True
else:
return v, logp, False
def propose_Z_metropolis(v, logp):
Z = v['Z']
flips = nr.uniform(size=Z.shape) < 0.1
Z_prime = flips * (1-Z) + (1-flips)*Z
v_prime = {
'mu0' : v['mu0'],
'mu1' : v['mu1'],
'Z' : Z_prime
}
logp_prime = log_posterior(v_prime, data)
if logp_prime - logp > np.log(nr.rand()):
return v_prime, logp_prime, True
else:
return v, logp, False
def propose_Z_gibbs(v, logp):
mu0, mu1, Z = v['mu0'], v['mu1'], v['Z']
# 1. compute for each Z_i P(Z_i|all other variables)
# 2. sample from this conditional to obtain Znew, the new values for Z
# not a good guess, replace with Gibbs sampler
Z_new = np.zeros_like(v['Z'])
v_prime = {
'mu0' : v['mu0'],
'mu1' : v['mu1'],
'Z' : Z_new
}
return v_prime, log_posterior(v_prime, data)
###Output
_____no_output_____
###Markdown
ExerciseThe sampler below uses two Metropolis steps in sequence to sample from the $\mu_{0,1}$ and from $Z$. There is a block that is commented out with the function `propose_Z_gibbs` that you should uncomment (after writing the function that is).1. Compare the number of steps required for convergence using Metropolis and using the hybrid sampler. The number of samples is intentionally set to a low number (100) to display the differences.2. Run the `propose_Z_gibbs` function a few times yourself while printing out the state and note that the proposed `Z` values match the data very well compared to the random guesses in `propose_Z_metropolis`.
###Code
def metropolis_within_gibbs(v_init, n, status_period = None):
global data
v, logp = v_init, log_posterior(v_init, data)
states, states_logp = [v_init], [logp]
was_accept1, was_accept2 = [], []
if status_period is None:
status_period = n // 10
for i in range(1,n):
# propose mu1 and mu2 via a Metropolis step
v1, logp_v1, accept1 = propose_mus(v, logp)
was_accept1.append(accept1)
# EXERCISE: the two blocks below sample Z, currently we have an inefficient Metropolis sampler,
# but we would like to have a sleek Gibbs sampler - uncomment the gibbs block below and comment
# the Metropolis block when you have your Gibbs proposal.
        # now v1, logp_v1 are either v, logp if a reject occurred, or a new state
v2, logp_v2, accept2 = propose_Z_metropolis(v1, logp_v1)
was_accept2.append(accept2)
#v2, logp_v2 = propose_Z_gibbs(v1, logp_v1)
#was_accept2.append(True)
# append whatever came out
states.append(v2)
states_logp.append(logp_v2)
v, logp = v2, logp_v2
if i > 1 and i % status_period == 0:
print('Stats @ %d: accept1_ratio=%g accept2_ratio=%g avg_logp=%g' %
(i, float(np.sum(was_accept1[-status_period:]))/status_period,
float(np.sum(was_accept2[-status_period:]))/status_period,
np.mean(states_logp[-status_period:])))
return states, float(np.sum(was_accept1)) / n, float(np.sum(was_accept2)) / n
states, ar1, ar2 = metropolis_within_gibbs(v_init, 100, status_period=10)
mu0s = np.array([s['mu0'] for s in states])
mu1s = np.array([s['mu1'] for s in states])
Zs = np.vstack([s['Z'] for s in states])
burn_in = 0
import seaborn as sns
plt.figure(figsize=(16,9))
plt.subplot(3,2,1)
sns.kdeplot(mu0s[burn_in:]);
plt.ylabel('$\\mu_0$')
plt.subplot(3,2,2)
plt.plot(mu0s)
plt.subplot(3,2,3)
sns.kdeplot(mu1s[burn_in:]);
plt.ylabel('$\\mu_1$')
plt.subplot(3,2,4)
plt.plot(mu1s)
plt.subplot(3,1,3)
plt.errorbar(np.arange(2*N), np.mean(Zs, axis=0), yerr=np.std(Zs, axis=0))
plt.xlim([-1,2*N])
plt.xticks(range(2*N))
plt.ylabel('Z');
print('mu0: %g +- %g [true value: %g]' % (np.mean(mu0s[burn_in:]), np.std(mu0s[burn_in:]), true_mu0))
print('mu1: %g +- %g [true value: %g]' % (np.mean(mu1s[burn_in:]), np.std(mu1s[burn_in:]), true_mu1))
###Output
_____no_output_____ |
OPEN_AI+Jokes+Analysis.ipynb | ###Markdown
rnn model
###Code
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM, SimpleRNN
from keras.layers.wrappers import TimeDistributed
import argparse
# Parsing arguments for Network definition
ap = argparse.ArgumentParser()
ap.add_argument('-data_dir', default='./data/test.txt')
ap.add_argument('-batch_size', type=int, default=50)
ap.add_argument('-layer_num', type=int, default=2)
ap.add_argument('-seq_length', type=int, default=50)
ap.add_argument('-hidden_dim', type=int, default=500)
ap.add_argument('-generate_length', type=int, default=500)
ap.add_argument('-nb_epoch', type=int, default=20)
ap.add_argument('-mode', default='train')
ap.add_argument('-weights', default='')
args = vars(ap.parse_args([]))  # pass [] so argparse falls back to the defaults when run inside a notebook
DATA_DIR = args['data_dir']
BATCH_SIZE = args['batch_size']
HIDDEN_DIM = args['hidden_dim']
SEQ_LENGTH = args['seq_length']
WEIGHTS = args['weights']
GENERATE_LENGTH = args['generate_length']
LAYER_NUM = args['layer_num']
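# NOTE: VOCAB_SIZE is assumed to be provided by the (omitted) data-loading /
# vocabulary-building step, e.g. the number of distinct characters found in DATA_DIR.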
# Creating and compiling the Network
model = Sequential()
model.add(LSTM(HIDDEN_DIM, input_shape=(None, VOCAB_SIZE), return_sequences=True))
for i in range(LAYER_NUM - 1):
model.add(LSTM(HIDDEN_DIM, return_sequences=True))
model.add(TimeDistributed(Dense(VOCAB_SIZE)))
model.add(Activation('softmax'))
model.compile(loss="categorical_crossentropy", optimizer="rmsprop") # optimiser = adam will also do the work
###Output
_____no_output_____ |
module3/reference_notebook_part2.ipynb | ###Markdown
Module 3: Image Segmentation - Part 2: Edge Detection
###Code
%matplotlib inline
import matplotlib
from plantcv import plantcv as pcv
from skimage import img_as_ubyte
from skimage.morphology import disk
matplotlib.rcParams["figure.max_open_warning"] = False
pcv.params.debug = "plot"
pcv.params.text_size = 30
pcv.params.text_thickness = 20
pcv.params.line_thickness = 10
pcv.__version__
###Output
_____no_output_____
###Markdown
Edge detection
###Code
# Open image file
color_img, imgpath, imgname = pcv.readimage(filename="./images/VIS_SV_0_z1_h1_g0_e65_v500_190106_0.png")
###Output
_____no_output_____
###Markdown
Excess Green IndexIn Part 1 we created a grayscale image for thresholding by using colorspace properties (e.g. RGB, HSV, LAB). Here we will use the [Excess Green vegetation index](https://plantcv.readthedocs.io/en/stable/spectral_index/egi), which combines RGB information into a single index.
###Code
# Use RGB values to calculate the Excess Green Index
egi = pcv.spectral_index.egi(rgb_img=color_img)
# Use the automatic Otsu threshold method to segment the EGI image
egi_thresh = pcv.threshold.otsu(gray_img=img_as_ubyte(egi.array_data), max_value=255, object_type="light")
###Output
_____no_output_____
###Markdown
Thresholding the EGI image works well but misses some leaf areaSome leaves or parts of leaves are not segmented. In the image we can clearly see the delineation between plant and background, regardless of color, because there is sharp contrast between the darker plant and the bright background. Edge detection can be used to help us detect these transitions
###Code
# Use edge detection to detect outlines in the image
edges = pcv.canny_edge_detect(img=color_img)
###Output
_____no_output_____
###Markdown
Combine the thresholded image and edges to create boundaries around the segmented pixels
###Code
# Combine the edges and binary mask
mask = pcv.logical_or(bin_img1=egi_thresh, bin_img2=edges)
###Output
_____no_output_____
###Markdown
Some leaves have gaps but are bound by the edges, closing these gaps fills in the missing parts
###Code
# Close holes in the leaves
filled_edges = pcv.closing(gray_img=mask, kernel=disk(4))
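# (Sketch) A possible follow-up step: apply the cleaned-up mask to the original
# image to visualize the segmented plant. `apply_mask` is a PlantCV helper; the
# mask_color value used here is an arbitrary choice.
masked = pcv.apply_mask(img=color_img, mask=filled_edges, mask_color="white")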
###Output
_____no_output_____ |
modules/module-02/module2-strings.ipynb | ###Markdown
Module 2.2 Strings, SummarizedCreated By: Matthew HallEach code block is designed to be an independent program for ease of use!---***Disclaimer***> Copyright (c) 2020 Matthew Hall> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:> The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Part 1: Fundamentals of Strings* Declaring Strings* Printing Out Strings* Addition Operator & Strings* Converting Numerical Data into Strings* Escape Character `\`* Inserting Variables into Strings Using `.format()` Declaring StringsStrings can be declared into variables using the standard assignment operator `=`
###Code
name = "Matt"
city = "New York"
###Output
_____no_output_____
###Markdown
Printing Out StringsWe use the built in function `print()` to get output from our program!
###Code
intro = 'My name is'
name = 'Aneli'
print(intro, name)
###Output
My name is Aneli
###Markdown
Addition Operator & StringsWe can use the addition operator `+` to concatenate two strings:
###Code
a = "abc"
b = "def"
c = a + b
print(c)
###Output
abcdef
###Markdown
Converting Numerical Data into StringsWe can convert numerical data into strings for better output using `str()` !
###Code
# ### This one errors out, cannot add numerical data and a string together!
sample_mean = 3.567
print("The sample mean is: " + sample_mean)
# ### This one works, converted to string!
sample_mean = 3.567
print("The sample mean is: " + str(sample_mean))
###Output
The sample mean is: 3.567
###Markdown
Escape CharacterWe can include special characters into our code using the escape character `\` before it.
###Code
# ### This one errors out, as the quotation marks confuse the string declaration
escape_example = 'I don't know who that is'
print(escape_example)
# ### This one works with the escape character!
escape_example = 'I don\'t know who that is'
print(escape_example)
###Output
I don't know who that is
###Markdown
Inserting Variables into Strings Easily We can use the `.format()` method of a string to easily insert variables. (And we don't need to worry about converting numerical data into strings!)
###Code
feet = 6
inches = 3
statement = 'I am {} feet {} inches tall.'
print(statement.format(feet, inches))
###Output
I am 6 feet 3 inches tall.
###Markdown
Part 2: Essential String Methods & Functions **Essential Functions** * `len()` **Essential Methods** * `.find()`* `.replace()`* `.split()` **Formatting Methods** * `.upper()`* `.lower()`* `.title()` These are some of the essential string methods to be familiar with, but not all! See Python's documentation for a complete list. Read the docs: https://docs.python.org/3/library/stdtypes.html#string-methods
###Code
statement = "We are coding in Python!"
print(len(statement))
###Output
24
###Markdown
Find Starting Index of Substring with `.find()`
###Code
quote = "Can you see me now?"
print(quote.find('see'))
###Output
8
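###Markdown
If the substring is not present, `.find()` returns `-1` instead of raising an error:
###Code
quote = "Can you see me now?"
print(quote.find('hear'))
###Output
-1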
###Markdown
Find and Replace Substrings using `.replace()`This will find and replace all instances of the substring!
###Code
sentence = "How hot is it outside?"
substring_to_find = "o"
substring_to_replace = "$"
print(sentence.replace(substring_to_find, substring_to_replace))
###Output
H$w h$t is it $utside?
###Markdown
Split String Using `.split()`Split returns a list of the individual words, split at the spaces.
###Code
quote = "Can you see me now?"
print(quote.split())
###Output
['Can', 'you', 'see', 'me', 'now?']
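###Markdown
`.split()` also accepts a separator argument, which is handy for comma-separated data:
###Code
csv_line = "red,green,blue"
print(csv_line.split(','))
###Output
['red', 'green', 'blue']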
###Markdown
Some Formatting Methods for StringsThese are easy to show by example:* `.upper()`* `.lower()`* `.title()`
###Code
case_string = "Breaking news: Stocks hit record Highs this afternoon!"
print(case_string.upper())
print(case_string.lower())
print(case_string.title())
###Output
BREAKING NEWS: STOCKS HIT RECORD HIGHS THIS AFTERNOON!
breaking news: stocks hit record highs this afternoon!
Breaking News: Stocks Hit Record Highs This Afternoon!
|
case_study/case_study.ipynb | ###Markdown
Homework for the lecture "Statistics. Practice" Task 1 Let's return to the [video game dataset](https://github.com/obulygin/pyda_homeworks/blob/master/stat_case_study/vgsales.csv). Answer the following questions: 1) How do critics rate sports games? 2) Do critics prefer games on PC or on PS4? 3) Do critics prefer shooters or strategy games? For each question: - formulate the null and alternative hypotheses; - choose a threshold level of statistical significance; - describe the results of the statistical test.
###Code
import pandas as pd
import numpy as np
from scipy import stats
import re
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import gensim
from gensim import corpora
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
vg_df = pd.read_csv('https://raw.githubusercontent.com/obulygin/pyda_homeworks/master/stat_case_study/vgsales.csv')
vg_df.head()
scores = vg_df[np.logical_not(vg_df['Critic_Score'].isna())]['Critic_Score']
scores.mean()
# 1) How do critics rate sports games?
# Critics mostly give games above-average scores (more than 69 points)
# H0 - the mean critic score of games is <= 69
# HA - the mean critic score of games is > 69
# alpha = 0.05 is the threshold significance level
alpha = 0.05
result = stats.ttest_1samp(scores, 69.0, alternative='greater')
print(result)
if result.pvalue < alpha:
    print('Reject the null hypothesis: the mean critic score of games is above 69 points')
else:
    print('Fail to reject the null hypothesis: there is no evidence that the mean critic score exceeds 69 points')
# 2) Do critics prefer games on PC or on PS4?
# Is the mean critic score the same for PC and PS4 games?
# H0: the mean critic score is the same for PC and PS4 games
# H1: the mean critic score differs between PC and PS4 games
# alpha = 0.05 is the threshold significance level
PC_scores = vg_df[np.logical_and(np.logical_not(vg_df['Critic_Score'].isna()),
vg_df['Platform'] == 'PC')]['Critic_Score']
PS4_scores = vg_df[np.logical_and(np.logical_not(vg_df['Critic_Score'].isna()),
vg_df['Platform'] == 'PS4')]['Critic_Score']
PC_scores.head()
PS4_scores.head()
result = stats.ttest_ind(PC_scores, PS4_scores, equal_var=False)
print(result)
if (result.pvalue < alpha):
    print('Reject the null hypothesis: the mean critic score differs between PC and PS4 games')
else:
    print('Fail to reject the null hypothesis: the mean critic score is the same for PC and PS4 games')
# 3) Do critics prefer shooters or strategy games?
# Is the mean critic score the same for shooters and strategy games?
# H0 - the mean critic score is the same for shooters and strategy games
# HA - the mean critic score differs between shooters and strategy games
# alpha = 0.05 is the threshold significance level
shoter_scores = vg_df[np.logical_and(np.logical_not(vg_df['Critic_Score'].isna()),
vg_df['Genre'] == 'Shooter')]['Critic_Score']
strategy_scores = vg_df[np.logical_and(np.logical_not(vg_df['Critic_Score'].isna()),
vg_df['Genre'] == 'Strategy')]['Critic_Score']
shoter_scores.head()
strategy_scores.head()
result = stats.ttest_ind(shoter_scores, strategy_scores, equal_var=False)
print(result)
if (result.pvalue < alpha):
    print('Reject the null hypothesis: the mean critic score differs between shooters and strategy games')
else:
    print('Fail to reject the null hypothesis: the mean critic score is the same for shooters and strategy games')
###Output
Ttest_indResult(statistic=-2.2972408230640315, pvalue=0.021938989522304823)
Reject the null hypothesis: the mean critic score differs between shooters and strategy games
###Markdown
Task 2 Implement a basic logistic regression model for classifying text messages (the data used is [here](https://github.com/obulygin/pyda_homeworks/blob/master/stat_case_study/spam.csv)) as spam or not. To do this: 1) Convert all text to lower case; 2) Remove junk characters; 3) Remove stop words; 4) Reduce all words to their normal (lemmatized) form; 5) Convert all messages into TF-IDF vectors. The following code will help: ```from sklearn.feature_extraction.text import TfidfVectorizertfidf = TfidfVectorizer()tfidf_matrix = tfidf.fit_transform(df.Message)names = tfidf.get_feature_names()tfidf_matrix = pd.DataFrame(tfidf_matrix.toarray(), columns=names)```You can experiment with the parameters of [TfidfVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html); 6) Split the data into test and training sets in a 30/70 ratio, specifying `random_state=42`. Use [train_test_split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html); 7) Build a [logistic regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) model, specify `random_state=42`, and evaluate its accuracy on the test data; 8) Describe the results using [confusion_matrix](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html?highlight=confusion_matrixsklearn.metrics.confusion_matrix); 9) Build a DataFrame that contains all the original message texts that were classified incorrectly (showing both the actual and the predicted label).
###Code
df = pd.read_csv('https://raw.githubusercontent.com/obulygin/pyda_homeworks/master/stat_case_study/spam.csv')
df.head()
# 1. Convert all text to lower case
df['Message'] = df['Message'].str.lower()
df.head()
# 2. Remove junk characters
df['words'] = df['Message'].map(lambda x: re.sub('[\W_]+',' ', x))
df.head()
# 3. Remove stop words
df['words'] = df['words'].map(lambda x: x.split())
df.head()
stopwords_set = set(stopwords.words('english'))
df['no_stopwords'] = df['words'].map(lambda x: [word for word in x if word not in stopwords_set] )
df.head()
# 4. Reduce all words to their normal (lemmatized) form
lemmatizer = WordNetLemmatizer()
df['lemmatized'] = df['no_stopwords'].map(lambda x: [lemmatizer.lemmatize(word) for word in x] )
df.head()
df['result_message'] = df['lemmatized'].str.join(sep=' ')
df.head()
# 5. Convert all messages into TF-IDF vectors
df = df.drop(columns=['Message', 'words', 'no_stopwords', 'lemmatized'])
df.head()
tfidf = TfidfVectorizer()
tfidf_matrix = tfidf.fit_transform(df['result_message'])
names = tfidf.get_feature_names()
matrix = pd.DataFrame(tfidf_matrix.toarray(), columns=names)
matrix.head()
# 6. Split the data into test and training sets in a 30/70 ratio, specify random_state=42
df['is_spam'] = (df['Category'] == 'spam') * 1
df.head()
X_train, X_test, y_train, y_test = train_test_split(matrix, df['is_spam'], test_size=0.30, random_state=42)
# 7. Build a logistic regression model, specify random_state=42, and evaluate its accuracy on the test data
logreg = LogisticRegression(random_state=42)
logreg.fit(X_train, y_train)
# Accuracy
accuracy_score(y_test, logreg.predict(X_test))
# 8. Describe the results using confusion_matrix
confusion_arr = confusion_matrix(y_test, logreg.predict(X_test))
confusion_arr
print(f'Correctly predicted {confusion_arr[0,0] + confusion_arr[1,1]} values')
print(f'Errors: {confusion_arr[0,1] + confusion_arr[1,0]}')
# 9. Build a DataFrame that contains all the original message texts that were classified incorrectly (showing both the actual and the predicted label)
errors_df = pd.concat(
[df[df.index.isin(y_test.index)].reset_index(),
     pd.Series(logreg.predict(X_test),
               np.arange(len(logreg.predict(X_test))),
name = 'predict')], axis=1)
errors_df = errors_df[errors_df['is_spam'] != errors_df['predict']]
errors_df
###Output
_____no_output_____ |
examples/1. Introduction.ipynb | ###Markdown
ORBIT IntroductionORBIT is organized into two different types of modules: design and installation. Design modules are intended to model the sizing and cost of offshore wind subcomponents and installation modules simulate the installation of these subcomponents in a discrete event simulation framework. The easiest way to start working with ORBIT is to look at one module. This tutorial will look at the monopile design module and the next tutorial will look at the monopile installation module.
###Code
# To import a design module:
from ORBIT.phases.design import MonopileDesign
# Each module has a property `.expected_config` that gives hints as to how to configure the module properly.
# This property returns a nested dictionary with all of the inputs (including optional ones) that can be used
# to configure this module.
# For example:
MonopileDesign.expected_config
# For now, lets ignore the optional inputs in the 'monopile_design' subdict and just look at the required inputs:
config_unfilled = {
'site': { # Inputs are grouped into subdicts, eg. site, plant, etc.
'depth': 'm', # The value represents the unit where applicable
'mean_windspeed': 'm/s'
},
'plant': {
'num_turbines': 'int'
},
'turbine': {
'rotor_diameter': 'm',
'hub_height': 'm',
'rated_windspeed': 'm/s'
}
}
# Filling out the config for a simple fixed bottom project:
config = {
'site': {
'depth': 25,
'mean_windspeed': 9.5
},
'plant': {
'num_turbines': 50
},
'turbine': {
'rotor_diameter': 220,
'hub_height': 120,
'rated_windspeed': 13
}
}
# To run the module, create an instance by passing the config into the module and then use module.run()
module = MonopileDesign(config)
module.run()
print(f"Total Substructure Cost: {module.total_cost/1e6:.2f} M")
# If a required input is missing, an error message will be raised with the input and its location within the configuration.
# This error message used 'dot-notation' to show the structure of the dictionary. Each "." represents a lower level in the dictionary.
# "site.depth" indicates that it is the 'depth' input in the 'site' subdict.
# In the example below, the 'site' inputs have been removed.
# The following inputs will be missing: '['site.depth', 'site.mean_windspeed']'
from copy import deepcopy

tmp = deepcopy(config)
_ = tmp.pop("site")
module = MonopileDesign(tmp)
###Output
_____no_output_____
###Markdown
Optional Inputs
###Code
# Now lets add more optional inputs:
config = {
'site': {
'depth': 25,
'mean_windspeed': 9.5
},
'plant': {
'num_turbines': 50
},
'turbine': {
'rotor_diameter': 220,
'hub_height': 120,
'rated_windspeed': 13
},
# --- New Inputs ---
'monopile_design': {
'monopile_steel_cost': 3500, # USD/t
'tp_steel_cost': 4500 # USD/t
}
}
module = MonopileDesign(config)
module.run()
print(f"Total Substructure Cost: {module.total_cost/1e6:.2f} M")
# To look at more detailed results:
module.design_result
###Output
_____no_output_____ |
examples/vision/python_unet/unet_inference.ipynb | ###Markdown
Import MIGraphX Python Library
###Code
import migraphx
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Fetch U-NET ONNX Model
###Code
!wget -nc https://www.dropbox.com/s/3ntkhyk30x05uuv/unet_13_256.onnx
###Output
_____no_output_____
###Markdown
Load ONNX Model
###Code
model = migraphx.parse_onnx("unet_13_256.onnx")
model.compile(migraphx.get_target("gpu"))
###Output
_____no_output_____
###Markdown
Print model parameters
###Code
print(model.get_parameter_names())
print(model.get_parameter_shapes())
def preprocess(pil_img, newW, newH):
w, h = pil_img.size
assert newW > 0 and newH > 0, 'Scale is too small'
pil_img = pil_img.resize((newW, newH))
img_nd = np.array(pil_img)
if len(img_nd.shape) == 2:
img_nd = np.expand_dims(img_nd, axis=2)
# HWC to CHW
img_print = pil_img
img_trans = img_nd.transpose((2, 0, 1))
if img_trans.max() > 1:
img_trans = img_trans / 255
img_trans = np.expand_dims(img_trans, 0)
return img_trans, img_print
def plot_img_and_mask(img, mask):
classes = mask.shape[0] if len(mask.shape) > 3 else 1
print(classes)
fig, ax = plt.subplots(1, classes + 1)
ax[0].set_title('Input image')
ax[0].imshow(img)
if classes > 1:
for i in range(classes):
ax[i+1].set_title(f'Output mask (class {i+1})')
ax[i+1].imshow(mask[:, :, i])
else:
ax[1].set_title(f'Output mask')
ax[1].imshow(mask[0,0])
plt.xticks([]), plt.yticks([])
plt.show()
img = Image.open("./car1.jpeg")
img, imPrint = preprocess(img, 256, 256)
input_im = np.zeros((1,3,256,256),dtype='float32')
np.lib.stride_tricks.as_strided(input_im, shape=img.shape, strides=input_im.strides)[:] = img #getting correct stride
print(input_im.strides)
print(input_im.shape)
imPrint.show()
mask = model.run({'inputs':input_im}) # Your first inference would take longer than the following ones.
output_mask = np.array(mask[0])
print(output_mask.shape)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
probs = sigmoid(output_mask)
full_mask = probs > 0.996
plot_img_and_mask(imPrint, full_mask)
###Output
_____no_output_____ |
Py3K Tutorial.ipynb | ###Markdown
Moving to Py3KThere are a fair number of differences between Python 2.7 and Py3k. Some of them we have encountered already through the `from __future__ import ...`, and a few through `six`. A quick refresher: * `print` is now a function (`from __future__ import print_function`)* Division between integers results in a float (`from __future__ import division`). Use `//` to retain old behavior.
###Code
%%python2
from __future__ import print_function
print(1 / 2)
print(1 / 2.0)
print(1 // 2)
print(1 // 2.0)
###Output
_____no_output_____
###Markdown
* `range(5)` returns a range object, not a list. Range objects behave a lot like a list. Effectively, only thing you can't do with it that you can do with a list is mutate it.
###Code
%%python2
from __future__ import print_function
a = range(5)
print(a)
a[3] = 1000
print(a)
###Output
_____no_output_____
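###Markdown
If you do need a mutable sequence in Python 3, just wrap the range object in `list()`:
###Code
a = list(range(5))
a[3] = 1000
print(a)  # [0, 1, 2, 1000, 4]
###Output
_____no_output_____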
###Markdown
* String literals are now always unicode (`from __future__ import unicode_literals`). This can cause odd behavior in some places when mixing old data with new code (or vice versa). Some data loaders may return a "bytes object" instead of a "text object" (which is what strings are nowadays).
###Code
%%python2
from __future__ import print_function
some_text_data = b"bar" # regular string in py2, bytes object in py3
if some_text_data == "bar":
print("They are the same!")
else:
print("They are not the same!")
###Output
_____no_output_____
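###Markdown
Under Python 3 the comparison above quietly evaluates to `False`, because `bytes` and `str` never compare equal. The usual fix is an explicit decode:
###Code
some_text_data = b"bar"
print(some_text_data.decode('utf-8') == "bar")  # True
###Output
_____no_output_____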
###Markdown
* floats now round-trip through text representation
###Code
%%python2
from __future__ import print_function
print(float(str(1000.0123456789)) == 1000.0123456789)
###Output
_____no_output_____
###Markdown
* Can't compare or sort mixed types
###Code
%%python2
from __future__ import print_function
print(sorted([4, '2', 3]))
###Output
_____no_output_____
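###Markdown
Running the same `sorted` call under Python 3 raises a `TypeError`. If you really do need to order mixed types, pass an explicit key so everything is compared in the same domain, for example as strings:
###Code
print(sorted([4, '2', 3], key=str))  # ['2', 3, 4]
###Output
_____no_output_____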
###Markdown
For a fuller examination of the kinds of changes to expect, see https://portingguide.readthedocs.io/ Where are the .pyc files? You are probably used to seeing .pyc files appearing in your directories, shadowing your python modules whenever you import them. In Py3k, the .pyc files are placed in a `__pycache__` directory. Numerical literal tweak (3.6) You can now use underscores as digit separators pretty much wherever you like when writing out a number (they just cannot be consecutive or sit at either end of the literal). Typically, one would use them for thousands groupings, but it can be done anywhere. Note, not all editors properly recognize this and so you may get funky syntax coloring. This can also be done for hexadecimals and octals.
###Code
a_num = 100_000.5
print(a_num)
a_num = 0.000_123_05
print(a_num)
###Output
_____no_output_____
###Markdown
This is also recognized for string to numbers conversion:
###Code
a_num = float("10_000.888_88")
print(a_num)
###Output
_____no_output_____
###Markdown
And, you can even have it included in text output for machine-readable output that is easier for humans to read as well.
###Code
a_num = 10000.88888
str_comma = "{a_num:,}".format(a_num=a_num)
str_underscore = "{a_num:_}".format(a_num=a_num)
print(str_comma)
print(str_underscore)
float(str_underscore)
###Output
_____no_output_____
###Markdown
f-strings (3.6)This gives you a feature that some have been used to having in languages like Perl and Bash. If you prepend an `f` to a quoted string, then it is basically like doing `.format(**all_the_things)` to that string automatically.
###Code
name = "Bob"
age = 23
sentence = f"{name} is {age} years old"
print(sentence)
###Output
_____no_output_____
###Markdown
Simple statement evaluationThis sort of stuff is available *only* in f-strings, not in regular strings that are being `.format()`-ed.
###Code
print(f"{name} will be {age + 5} years old in 5 years")
###Output
_____no_output_____
###Markdown
debug f-string (3.8)Nifty convenience feature just added in python 3.8.
###Code
print(f"{name=} {age=}")
###Output
_____no_output_____
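###Markdown
f-strings also accept the usual format specifications after a colon, which covers things like precision and the separators shown earlier:
###Code
pi_ish = 3.14159265
print(f"{pi_ish:.2f}")       # 3.14
print(f"{10000.88888:,}")    # 10,000.88888
###Output
_____no_output_____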
###Markdown
Optional type annotations (3.0)In python, you don't need to specify that a variable is a list or an integer, unlike in languages like C/C++. This allows us a lot of ease in just simply writing algorithms rather than focusing on ensuring we got all of the types exactly right. However, there are times when it would be good to know what types are expected where. So, Python introduced syntax for type annotations (a.k.a., type hints).
###Code
def my_add(a: int, b: int) -> int:
return a + b
###Output
_____no_output_____
###Markdown
Note, this is *not* enforced. It is strictly considered to be an annotation.
###Code
my_add("foo", "bar")
###Output
_____no_output_____
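###Markdown
Annotations are not limited to built-in scalar types: the `typing` module describes containers, and since Python 3.6 you can annotate variables as well. A small sketch:
###Code
from typing import List

def mean(values: List[float]) -> float:
    total: float = 0.0
    for v in values:
        total += v
    return total / len(values)

mean([1.0, 2.0, 3.0])
###Output
_____no_output_____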
###Markdown
But, one can use a python linter to try and detect mistakes in a codebase using this feature. This feature is still being developed and slowly getting adopted by the wider community. The one place that is having trouble adopting it is the SciPy community, largely because we have not yet adopted rich semantics for numpy arrays. Breakpoints! (3.7)You don't need to do `import pdb; pdb.set_trace()` as your way of setting a breakpoint. What is really nice is that the `breakpoint()` function is actually extensible, so tools like Spyder and Jupyter can have integrated debuggers start up for you!
###Code
def foo():
a = 1
breakpoint()
b = 2
return a + b
foo()
###Output
_____no_output_____
###Markdown
Walrus Operator `:=` (3.8)In python 3.8, you can use a special kind of assignment operator that works a lot like C/C++'s regular assignment operator for those who are familiar with that. Essentially, in Python, the regular assignment operator can only be used by itself. You can't have it be in an if clause, or a list comprehension, or some other construct. But, in py3.8, the `:=` was introduced to allow those situations to be possible.A simple example:
###Code
foo = "bar"
print(foo)
###Output
_____no_output_____
###Markdown
Can now be expressed as:
###Code
print(foo := "baz")
###Output
_____no_output_____
###Markdown
Now, where could this be useful, you ask? I find this particularly useful for conditional list comprehensions and generators:
###Code
def some_expensive_func(a, b):
return a + b
###Output
_____no_output_____
###Markdown
Suppose you need a list of values from this function, but only those greater than 15.
###Code
[some_expensive_func(x1, x2)
for x1, x2 in zip(range(10), range(20, 0, -2))
if some_expensive_func(x1, x2) > 15]
###Output
_____no_output_____
###Markdown
Which means you have to compute it twice for each iteration! There are a few other alternative approaches to avoid doing twice the number of function calls, but none of them are very clean or readable. But with a walrus operator, you can do this:
###Code
[val
for x1, x2 in zip(range(10), range(20, 0, -2))
if (val := some_expensive_func(x1, x2)) > 15]
###Output
_____no_output_____
###Markdown
Be superfluous with your parentheses. The above won't work without the parens around the assignment operation.WARNING: While it isn't a syntax error to use this assignment operation just about anywhere, it may still be logically incorrect:
###Code
[(val := some_expensive_func(x1, x2))
for x1, x2 in zip(range(10), range(20, 0, -2))
if val > 15]
###Output
_____no_output_____
###Markdown
Datetime changes In Py3k, a `datetime.timezone` class was added, along with a `utc` timezone instance. So, from now on, avoid using `datetime.utcnow()` and `datetime.utcfromtimestamp()`. They are a bit misleading because they will produce naive datetime objects. It is now very easy to create a UTC-aware datetime object that will always do the right thing no matter how it is used:
###Code
from datetime import datetime, timezone
dt_now = datetime.now(tz=timezone.utc)
print(dt_now)
dt_ts = datetime.fromtimestamp(1571595618, timezone.utc)
print(dt_ts)
dt = datetime(2020, 6, 4, 10, 30, tzinfo=timezone.utc)
print(dt)
###Output
_____no_output_____
###Markdown
This is further complicated by the fact that in Py3k (3.6, I think), some operations on naive datetime objects will now implicitly assume that the naive datetime object represents a time in your system's timezone (previously, they would raise an error). So, it may now be better practice to always specify the UTC timezone when we know that we are referencing a time in the UTC timezone, rather than ignoring the timezone altogether. Function argument enhancementsTraditionally, arguments to a function can be thought of as positional and keyword arguments:
###Code
def foo(a, b=20):
return a * b
###Output
_____no_output_____
###Markdown
In `foo()`, both `a` and `b` can be supplied via positional arguments:`foo(1, 2)`or via keyword (named) arguments:`foo(b=2, a=1)`or a careful mix of the two:`foo(1, b=2)`And this has served the Python community well for many years and is certainly better than function argument handling in other languages. But, for those who maintain long-lived libraries, this makes API changes tricky because subtle changes may accidentally break people's code. So, in Python 3.0, keyword-only arguments were introduced, and in Python 3.8, positional-only arguments were introduced. All this means is that there is now a way to specify that particular arguments can only be supplied positionally or via keyword. Keyword-only arguments (3.0) Any arguments with a default value (or `**kwargs`) that come after the `*` entity are considered to be "keyword-only" arguments. Note that arguments without a default value can still come after the `*`, which makes them required keyword arguments.
###Code
def foo(a, *, b=20):
return a * b
###Output
_____no_output_____
###Markdown
Now, if you want to supply the `b` argument, you _have_ to name it:
###Code
foo(1, b=2)
###Output
_____no_output_____
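###Markdown
And if you try to pass `b` positionally anyway, the call is rejected with a `TypeError`:
###Code
foo(1, 2)
###Output
_____no_output_____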
###Markdown
Positional-only arguments (3.8)Any positional arguments that come before the `/` entity are considered "positional-only" arguments. Note that arguments with default values can still come before the `/`, which makes them optional positional arguments.
###Code
def foo(a, /, b=20):
return a * b
###Output
_____no_output_____
###Markdown
Now, `a` is only known as `a` within the function. It cannot be referenced as such outside the function. It is only known as "the first argument".
###Code
foo(a=1, b=2)
###Output
_____no_output_____
###Markdown
Matrix multiplication operator `@` (3.5)
###Code
import numpy as np
a = np.array([1.3, 1.5, 1.7])
b = np.array([[2.5, 3.1], [0.2, 0.3], [1.7, -2.0]])
###Output
_____no_output_____
###Markdown
Traditionally, matrix multiplication was done like so:
###Code
np.matmul(a, b)
###Output
_____no_output_____
###Markdown
or
###Code
a.dot(b)
###Output
_____no_output_____
###Markdown
But now you can do:
###Code
a @ b
###Output
_____no_output_____
###Markdown
Unpacking Fun!Unpacking is the term used for taking the parts of a collection and assigning them to other variables. We have encountered unpacking before in various forms:
###Code
def foo(a, b=20):
return a * b
baz = {'b': 14, 'a': 1}
foo(**baz)
bar = [10, 15]
foo(*bar)
a, b = bar
print(a, b)
###Output
_____no_output_____
###Markdown
But now, we can do more! Sequence unpacking - Basic (3.0)
###Code
bar = [10, 11, 12, 13, 14, 15]
a, b, *c = bar
print(a)
print(b)
print(c)
a, *b, c = bar
print(a)
print(b)
print(c)
*_, a, b = bar
print(a)
print(b)
ranges = [range(3), range(4)]
for a, *b in ranges:
print(a, b)
###Output
_____no_output_____
###Markdown
Sequence unpacking - Additional (3.5)We also now have new ways to build a list:
###Code
a = [1, 2, 3]
b = [7, 6, 5, 4]
c = [*a, *b]
d = [*b, 10, 11, *a]
print(c)
print(d)
###Output
_____no_output_____
###Markdown
Set unpacking (3.5)We can do something similar for sets. It doesn't matter whether the source is a list or a set.
###Code
a = [1, 2, 3, 4, 5]
b = {4, 5, 6, 7}
c = {*a, *b}
d = {*b, 10, 14, 4, *a}
print(c)
print(d)
###Output
_____no_output_____
###Markdown
Dictionary unpacking (3.5)Similarly to how we can now create a list using `*` unpacking, we can also create a dictionary using `**` unpacking:
###Code
a = {'foo': 1, 'bar': 2}
b = {'bar': 3, 'baz': 4}
c = {**a, **b}
d = {**a, 'foo': 10, 'baz': 14, **b}
print(c)
print(d)
###Output
_____no_output_____ |
Aula 12/EDA- Line Charts.ipynb | ###Markdown
###Code
# Uploading files from your local file system
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
###Output
_____no_output_____
###Markdown
1.0 Representation of Data So far, we've mostly been manipulating and working with data that are represented as tables. Microsoft Excel, the pandas library in Python, and the CSV file format for datasets were all developed around this representation. Because a table neatly organizes values into rows and columns, we can easily look up specific values at the intersection of a row value and a column value. Unfortunately, it's very difficult to explore a dataset to uncover patterns when it's represented as a table, especially when that dataset contains many values. We need a different representation of data that can help us identify patterns more easily.In this mission, we'll learn the basics of **data visualization**, a discipline that focuses on the visual representation of data. As humans, our brains have evolved to develop powerful visual processing capabilities. We can quickly find patterns in the visual information we encounter, which was incredibly important from a survivability standpoint. Unfortunately, when data is represented as tables of values, we can't really take advantage of our visual pattern matching capabilities. This is because our ability to quickly process symbolic values (like numbers and words) is very poor. Data visualization focuses on transforming data from table representations visual ones.In this lesson, named **Exploratory Data Analysis**, we'll focus on data visualization techniques to explore datasets and help us uncover patterns. In this mission, we'll use a specific type of data visualization to understand U.S. unemployment data. 2.0 Introduction to the Data The **United States Bureau of Labor Statistics (BLS)** surveys and calculates the monthly unemployment rate. The unemployment rate is the percentage of individuals in the labor force without a job. While unemployment rate isn't perfect, it's a commonly used proxy for the health of the economy. You may have heard politicians and reporters state the unemployment rate when commenting on the economy. You can read more about how the BLS calculates the unemployment rate [here](http://www.bls.gov/cps/cps_htgm.htm).The BLS releases monthly unemployment data available for download as an Excel file, with the **.xlsx** file extension. While the pandas library can read in XLSX files, it relies on an external library for actually parsing the format. Let's instead download the same dataset as a CSV file from the website of the [Federal Reserve Bank of St. Louis](https://www.stlouisfed.org/). We've downloaded the monthly unemployment rate as a CSV from January 1948 to August 2016, saved it as **unrate.csv**, and made it available in this mission.To download this dataset on your own, head to the Federal Reserve Bank of St. Louis's [website](https://fred.stlouisfed.org/series/UNRATE/downloaddata), select **Text, Comma Separated** as the **File Format**, make sure the **Date Range** field starts at **1948-01-01** and ends at **2016-08-01**.Before we get into visual representations of data, let's first read this CSV file into pandas to explore the table representation of this data. The dataset we'll be working with is a [time series](https://en.wikipedia.org/wiki/Time_series) dataset, which means the data points (monthly unemployment rates) are ordered by time. Here's a preview of the dataset:When we read the dataset into a DataFrame, pandas will set the data type of the **DATE** column as a text column. Because of how pandas reads in strings internally, this column is given a data type of **object**. 
We need to convert this column to the **datetime** type using the [pandas.to_datetime()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html) function, which returns a Series object with the **datetime** data type that we can assign back to the DataFrame:```pythonimport pandas as pddf['col'] = pd.to_datetime(df['col'])```**Exercise****Description**:1. Read **unrate.csv** into a DataFrame and assign to **unrate**.2. Use the [pandas.to_datetime()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html) function to convert the **DATE** column into a series of **datetime** values.3. Display the first 12 rows in unrate.
###Code
# put your code here
import pandas as pd
unrate = pd.read_csv("unrate.csv")
unrate["DATE"] = pd.to_datetime(unrate["DATE"])
unrate.head(12)
###Output
_____no_output_____
###Markdown
3.0 Table representation The dataset contains 2 columns:- DATE: date, always the first of the month. Here are some examples: - 1948-01-01: January 1, 1948. - 1948-02-01: February 1, 1948. - 1948-03-01: March 1, 1948. - 1948-12-01: December 1, 1948.- VALUE: the corresponding unemployment rate, in percent.The first 12 rows reflect the unemployment rate from January 1948 to December 1948:Take a minute to visually scan the table and observe how the monthly unemployment rate has changed over time. When you're finished, head to the next cell in this notebook. 4.0 Observation from the table representation We can make the following observations from the table:- In 1948: - monthly unemployment rate ranged between **3.4** and **4.0**. - highest unemployment rate was reached in both March and December. - lowest unemployment rate was reached in January.- From January to March, unemployment rate trended up.- From March to May, unemployment rate trended down.- From May to August, unemployment rate trended up.- From August to October, unemployment rate trended down.- From October to December, unemployment rate trended up.Because the table only contained the data from 1948, it didn't take too much time to identify these observations. If we scale up the table to include all 824 rows, it would be very time-consuming and painful to understand. Tables shine at presenting information precisely at the intersection of rows and columns and allow us to perform quick lookups when we know the row and column we're interested in. In addition, problems that involve comparing values between adjacent rows or columns are well suited for tables. Unfortunately, many problems you'll encounter in data science require comparisons that aren't possible with just tables.For example, one thing we learned from looking at the monthly unemployment rates for 1948 is that every few months, the unemployment rate switches between trending up and trending down. It's not switching direction every month, however, and this could mean that there's a seasonal effect. **Seasonality** is when a pattern is observed on a regular, predictable basis for a specific reason. A simple example of seasonality would be a large increase textbook purchases every August every year. Many schools start their terms in August in north hemisphere and this spike in textbook sales is directly linked.We need to first understand if there's any seasonality by comparing the unemployment trends across many years so we can decide if we should investigate it further. The faster we're able to assess our data, the faster we can perform high-level analysis quickly. If we're reliant on just the table to help us figure this out, then we won't be able to perform a high level test quickly. Let's see how a visual representation of the same information can be more helpful than the table representation. 5.0 Visual representation Instead of representing data using text like tables do, visual representations use visual objects like dots, shapes, and lines on a grid. [Plots](https://en.wikipedia.org/wiki/Plot_%28graphics%29) are a category of visual representations that allow us to easily understand the relationships between variables. There are many types of plots and selecting the right one is an important skill that you'll hone as you create data visualizations. Because we want to compare the unemployment trends across time, we should use line charts. 
Here's an overview of **line charts** using 4 sample data points:Line charts work best when there is a logical connection between adjacent points. In our case, that connection is the flow of time. Between 2 reported monthly unemployment values, the unemployment rate is fluctuating and time is passing. To emphasize how the visual representation of the line chart helps us observe trends easily, let's look at the same 12 data points from 1948 as a line chart.We can reach the same observations about the data from the line chart as we did from the table representation:In the rest of this mission, we'll explore how to recreate this line chart in Python. In the next mission, we'll explore how to create multiple line charts to help us compare unemployment trends. 6. Introduction to matplotlib To create the line chart, we'll use the [matplotlib](http://matplotlib.org/) library, which allows us to:- quickly create common plots using high-level functions- extensively tweak plots- create new kinds of plots from the ground upTo help you become familiar with matplotlib, we'll focus on the first 2 use cases. When working with commonly used plots in matplotlib, the general workflow is:- create a plot using data- customize the appearance of the plot- display the plot- edit and repeat until satisfiedThis interactive style aligns well with the exploratory workflow of data visualization because we're asking questions and creating data visualizations to help us get answers. The pyplot module provides a high-level interface for matplotlib that allows us to quickly create common data plots and perform common tweaks to them.The pyplot module is commonly imported as **plt** from **matplotlib**:```pythonimport matplotlib.pyplot as plt```Using the different pyplot functions, we can create, customize, and display a plot. For example, we can use 2 functions to :```pythonplt.plot()plt.show()```Because we didn't pass in any arguments, the [plot()](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.plot) function would generate an empty plot with just the axes and ticks and the [show()](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.show) function would display that plot. You'll notice that we didn't assign the plot to a variable and then call a method on the variable to display it. We instead called 2 functions on the pyplot module directly.This is because every time we call a pyplot function, the module maintains and updates the plot internally (also known as state). When we call **show()**, the plot is displayed and the internal state is destroyed. While this workflow isn't ideal when we're writing functions that create plots on a repeated basis as part of a larger application, it's useful when exploring data.Let's run this code to see the default properties matplotlib uses. If you'd like to follow along on your own computer, we recommend installing matplotlib using Anaconda: **conda install matplotlib**. We recommend working with matplotlib using Jupyter Notebook because it can render the plots in the notebook itself. You will need to run the following Jupyter magic in a code cell each time you open your notebook: **%matplotlib inline**. Whenever you call **show()**, the plots will be displayed in the output cell. You can read more [here](http://ipython.readthedocs.io/en/stable/interactive/plotting.html).**Exercise****Description**:1. Generate an empty plot using **plt.plot()** and display it using **plt.show()**.
###Code
# put your code here
import matplotlib.pyplot as plt
plt.plot()
plt.show()
###Output
_____no_output_____
###Markdown
7. Adding data By default, Matplotlib displayed a coordinate grid with:- the x-axis and y-axis values ranging from **-0.06** to **0.06**- no grid lines- no dataEven though no data was plotted, the x-axis and y-axis ticks corresponding to the **-0.06** to **0.06** value range. The axis ticks consist of tick marks and tick labels. Here's a focused view of the x-axis tick marks and x-axis tick labels:To create a line chart of the unemployment data from 1948, we need:- the x-axis to range from **01-01-1948** to **12-01-1948** (which corresponds to the first and last months in 1948)- the y-axis to range from **3.4** to **4.0** (which correspond to the minimum and maximum unemployment values)Instead of manually updating the ticks, drawing each marker, and connecting the markers with lines, we can just specify the data we want plotted and let matplotlib handle the rest. To generate the line chart we're interested in, we pass in the list of x-values as the first parameter and the list of y-values as the second parameter to [plot()](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.plot):```pythonplt.plot(x_values, y_values)```Matplotlib will accept any iterable object, like NumPy arrays and **pandas.Series** instances.**Exercise****Description**:1. Generate a line chart that visualizes the unemployment rates from 1948: - x-values should be the first 12 values in the **DATE** column - y-values should be the first 12 values in the **VALUE** column2. Display the plot.
###Code
# put your code here
plt.plot(unrate.loc[:11, "DATE"], unrate.loc[:11, "VALUE"])
plt.show()
###Output
_____no_output_____
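Before moving on to fixing the ticks, note that the `unrate` DataFrame used in the exercise above is assumed to have been loaded earlier in the mission, and matplotlib handles the x-axis best when the **DATE** column holds real datetime values rather than strings. A minimal loading sketch (the file name here is an assumption, not taken from this notebook):

```python
import pandas as pd

# Hypothetical loading step; adjust the file name to wherever the data lives.
unrate = pd.read_csv('unrate.csv')
unrate['DATE'] = pd.to_datetime(unrate['DATE'])  # parse dates so the x-axis is a time axis
print(unrate.head(12))
```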
###Markdown
8. Fixing axis ticks While the y-axis looks fine, the x-axis **tick labels** are too close together and can be unreadable. The line charts from earlier in the mission suggest a better way to display the x-axis tick labels.We can rotate the x-axis tick labels by 90 degrees so they don't overlap. The **xticks()** function within pyplot lets you customize the behavior of the x-axis ticks. If you head over to the [documentation for that function](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.xticks), it's not immediately obvious the arguments it takes:```pythonmatplotlib.pyplot.xticks(*args, **kwargs)```In the documentation for the function, you'll see a link to the matplotlib [Text](http://matplotlib.org/api/text_api.htmlmatplotlib.text.Text) class, which is what pyplot uses to represent the x-axis tick labels. You'll notice that there's a **rotation** parameter that accepts degrees of rotation as a parameter. We can specify degrees of rotation using a float or integer value.As a side note, if you read the documentation for [pyplot](http://matplotlib.org/api/pyplot_api.html), you'll notice that many functions for tweaking the x-axis have matching functions for the y-axis. For example, the y-axis counterpart to the [xticks()](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.xticks) function is the yticks() function.Use what we've discussed so far to rotate the x-axis tick labels by 90 degrees.**Exercise****Description**:1. Generate the same line chart from the last screen that visualizes the unemployment rates from 1948: - x-values should be the first 12 values in the **DATE** column - y-values should be the first 12 values in the **VALUE** column2. Use **pyplot.xticks()** to rotate the x-axis tick labels by **90** degrees.3. Display the plot.
###Code
# put your code here
plt.plot(unrate.loc[:11, "DATE"], unrate.loc[:11, "VALUE"])
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
9. Adding axis label and a title Let's now finish tweaking this plot by adding axis labels and a title. Always adding axis labels and a title to your plot is a good habit to have, and is especially useful when we're trying to keep track of multiple plots down the road.Here's an overview of the pyplot functions we need to tweak the axis labels and the plot title:- [xlabel()](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.xlabel): accepts a string value, which gets set as the x-axis label.- [ylabel()](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.ylabel): accepts a string value, which is set as the y-axis label.- [title()](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.title): accepts a string value, which is set as the plot title.**Exercise****Description**:1. Generate the same line chart from the last screen that visualizes the unemployment rates from 1948: - x-values should be the first 12 values in the **DATE** column - y-values should be the first 12 values in the **VALUE** column - Rotate the x-axis tick labels by **90** degrees.2. Set the x-axis label to **"Month"**.3. Set the y-axis label to **"Unemployment Rate"**.4. Set the plot title to **"Monthly Unemployment Trends, 1948"**.5. Display the plot.
###Code
# put your code here
plt.plot(unrate.loc[:11, "DATE"], unrate.loc[:11, "VALUE"])
plt.xticks(rotation=90)
plt.xlabel("Month")
plt.ylabel("Unemployment Rate")
plt.title("Monthly Unemployment Trends, 1948")
plt.show()
###Output
_____no_output_____ |
features/.ipynb_checkpoints/Extraction - Nearest Neighbors 17.05-checkpoint.ipynb | ###Markdown
src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/lemmatized/doc2vec/'q1_vec = np.load(src + 'train_q1_doc2vec_vectors_trainquora.npy')q2_vec = np.load(src + 'train_q2_doc2vec_vectors_trainquora.npy')full_vec = np.concatenate([q1_vec, q2_vec])
###Code
# Imports used throughout this checkpoint (not present in the notebook as saved); added for completeness.
import gc
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
from annoy import AnnoyIndex

feats_src2 = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/NER/'
q1_vec = np.load(feats_src2 + 'q1train_NER_128len.npy')
q2_vec = np.load(feats_src2 + 'q2train_NER_128len.npy')
q1_vect = np.load(feats_src2 + 'q1test_NER_128len.npy')
q2_vect = np.load(feats_src2 + 'q2test_NER_128len.npy')
full_vec = np.concatenate([q1_vec, q2_vec, q1_vect, q2_vect])
src_train = 'df_train_spacylemmat_fullclean.csv'
src_test = 'df_test_spacylemmat_fullclean.csv'
df_train = pd.read_csv(src_train)[['question1', 'question2']]
df_train.fillna('NULL', inplace = True)
df_test = pd.read_csv(src_test)[['question1', 'question2']]
df_test.fillna('NULL', inplace = True)
df_full = pd.concat([df_train, df_test], ignore_index = True)
full_questions = df_full.question1.tolist() + df_full.question2.tolist()
del df_full, q1_vec, q2_vec, q2_vect, q1_vect
gc.collect()
f = 128
t = AnnoyIndex(f)
start_time = time.time()
for i in tqdm(range(len(full_vec))):
t.add_item(i, full_vec[i])
t.build(50)
t.save('train_doc2vec_annoymodel_50trees_angular.ann')
print('Time it took:', time.time() - start_time)
f = 128
t = AnnoyIndex(f)
t.load('train_doc2vec_annoymodel_50trees_angular.ann')
f = 128
t2 = AnnoyIndex(f, metric = 'euclidean')
start_time = time.time()
for i in tqdm(range(len(full_vec))):
t2.add_item(i, full_vec[i])
t2.build(50)
t2.save('train_doc2vec_annoymodel_50trees_euclidean.ann')
print('time it took:', time.time() - start_time)
dists_angular = []
for i in tqdm(range(len(full_vec))):
dists_angular.append(t.get_nns_by_item(i, 10, include_distances = True)[1])
dists_euclidean = []
for i in tqdm(range(len(full_vec))):
dists_euclidean.append(t2.get_nns_by_item(i, 10, include_distances = True)[1])
def get_count_ang02(i):
return len(i[i > 0.2])
def get_count_ang04(i):
return len(i[i > 0.4])
def get_count_ang05(i):
return len(i[i > 0.5])
def get_count_euc100(i):
return len(i[i > 100])
def get_count_euc500(i):
return len(i[i > 500])
def get_count_euc1000(i):
return len(i[i > 1000])
def get_count_euc5000(i):
return len(i[i > 5000])
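# Optional refactor (a sketch, not used below): the seven threshold-count helpers above
# can be generated from a single parameterized function, which keeps the
# np.apply_along_axis calls below unchanged if you bind the threshold first.
def get_count_above(threshold):
    def counter(row):
        return len(row[row > threshold])
    return counter

# Example: np.apply_along_axis(get_count_above(0.2), axis=1, arr=dists_angular_train_q1)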
np.save('full_annoy10NN_distances_angular', np.array(dists_angular))
np.save('full_annoy10NN_distances_euclidean', np.array(dists_euclidean))
dists_angular_train_q1 = np.array(dists_angular[:df_train.shape[0]])
dists_angular_train_q2 = np.array(dists_angular[df_train.shape[0]:df_train.shape[0] * 2])
dists_euclidean_train_q1 = np.array(dists_euclidean[:df_train.shape[0]])
dists_euclidean_train_q2 = np.array(dists_euclidean[df_train.shape[0]:df_train.shape[0] * 2])
print(dists_angular_train_q1.shape, dists_angular_train_q2.shape,
dists_euclidean_train_q1.shape, dists_euclidean_train_q2.shape)
dists_angular_test_q1 = np.array(dists_angular[df_train.shape[0] * 2:df_train.shape[0] * 2 + df_test.shape[0]])
dists_angular_test_q2 = np.array(dists_angular[df_train.shape[0] * 2 + df_test.shape[0]:])
dists_euclidean_test_q1 = np.array(dists_euclidean[df_train.shape[0] * 2:df_train.shape[0] * 2 + df_test.shape[0]])
dists_euclidean_test_q2 = np.array(dists_euclidean[df_train.shape[0] * 2 + df_test.shape[0]:])
print(dists_angular_test_q1.shape, dists_angular_test_q2.shape,
dists_euclidean_test_q1.shape, dists_euclidean_test_q2.shape)
ang_02q1 = np.apply_along_axis(get_count_ang02, axis = 1, arr = dists_angular_train_q1)
ang_04q1 = np.apply_along_axis(get_count_ang04, axis = 1, arr = dists_angular_train_q1)
ang_05q1 = np.apply_along_axis(get_count_ang05, axis = 1, arr = dists_angular_train_q1)
euc_100q1 = np.apply_along_axis(get_count_euc100, axis = 1, arr = dists_euclidean_train_q1)
euc_500q1 = np.apply_along_axis(get_count_euc500, axis = 1, arr = dists_euclidean_train_q1)
euc_1000q1 = np.apply_along_axis(get_count_euc1000, axis = 1, arr = dists_euclidean_train_q1)
euc_5000q1 = np.apply_along_axis(get_count_euc5000, axis = 1, arr = dists_euclidean_train_q1)
ang_02q2 = np.apply_along_axis(get_count_ang02, axis = 1, arr = dists_angular_train_q2)
ang_04q2 = np.apply_along_axis(get_count_ang04, axis = 1, arr = dists_angular_train_q2)
ang_05q2 = np.apply_along_axis(get_count_ang05, axis = 1, arr = dists_angular_train_q2)
euc_100q2 = np.apply_along_axis(get_count_euc100, axis = 1, arr = dists_euclidean_train_q2)
euc_500q2 = np.apply_along_axis(get_count_euc500, axis = 1, arr = dists_euclidean_train_q2)
euc_1000q2 = np.apply_along_axis(get_count_euc1000, axis = 1, arr = dists_euclidean_train_q2)
euc_5000q2 = np.apply_along_axis(get_count_euc5000, axis = 1, arr = dists_euclidean_train_q2)
x = pd.DataFrame()
x['train_10nn_min_angular_q1'] = np.min(dists_angular_train_q1, axis = 1)
x['train_10nn_mean_angular_q1'] = np.mean(dists_angular_train_q1, axis = 1)
x['train_10nn_max_angular_q1'] = np.max(dists_angular_train_q1, axis = 1)
x['train_10nn_min_angular_q2'] = np.min(dists_angular_train_q2, axis = 1)
x['train_10nn_mean_angular_q2'] = np.mean(dists_angular_train_q2, axis = 1)
x['train_10nn_max_angular_q2'] = np.max(dists_angular_train_q2, axis = 1)
x['train_10nn_min_euclidean_q1'] = np.min(dists_euclidean_train_q1, axis = 1)
x['train_10nn_mean_euclidean_q1'] = np.mean(dists_euclidean_train_q1, axis = 1)
x['train_10nn_max_euclidean_q1'] = np.max(dists_euclidean_train_q1, axis = 1)
x['train_10nn_min_euclidean_q2'] = np.min(dists_euclidean_train_q2, axis = 1)
x['train_10nn_mean_euclidean_q2'] = np.mean(dists_euclidean_train_q2, axis = 1)
x['train_10nn_max_euclidean_q2'] = np.max(dists_euclidean_train_q2, axis = 1)
x['10nn_ang02_neighbors_q1'] = ang_02q1
x['10nn_ang04_neighbors_q1'] = ang_04q1
x['10nn_ang05_neighbors_q1'] = ang_05q1
x['10nn_euc100_neighbors_q1'] = euc_100q1
x['10nn_euc500_neighbors_q1'] = euc_500q1
x['10nn_euc1000_neighbors_q1'] = euc_1000q1
x['10nn_euc5000_neighbors_q1'] = euc_5000q1
x['10nn_ang02_neighbors_q2'] = ang_02q2
x['10nn_ang04_neighbors_q2'] = ang_04q2
x['10nn_ang05_neighbors_q2'] = ang_05q2
x['10nn_euc100_neighbors_q2'] = euc_100q2
x['10nn_euc500_neighbors_q2'] = euc_500q2
x['10nn_euc1000_neighbors_q2'] = euc_1000q2
x['10nn_euc5000_neighbors_q2'] = euc_5000q2
x.to_csv('train_annoy10NN_distances.csv', index = False)
ang_02q1 = np.apply_along_axis(get_count_ang02, axis = 1, arr = dists_angular_test_q1)
ang_04q1 = np.apply_along_axis(get_count_ang04, axis = 1, arr = dists_angular_test_q1)
ang_05q1 = np.apply_along_axis(get_count_ang05, axis = 1, arr = dists_angular_test_q1)
euc_100q1 = np.apply_along_axis(get_count_euc100, axis = 1, arr = dists_euclidean_test_q1)
euc_500q1 = np.apply_along_axis(get_count_euc500, axis = 1, arr = dists_euclidean_test_q1)
euc_1000q1 = np.apply_along_axis(get_count_euc1000, axis = 1, arr = dists_euclidean_test_q1)
euc_5000q1 = np.apply_along_axis(get_count_euc5000, axis = 1, arr = dists_euclidean_test_q1)
ang_02q2 = np.apply_along_axis(get_count_ang02, axis = 1, arr = dists_angular_test_q2)
ang_04q2 = np.apply_along_axis(get_count_ang04, axis = 1, arr = dists_angular_test_q2)
ang_05q2 = np.apply_along_axis(get_count_ang05, axis = 1, arr = dists_angular_test_q2)
euc_100q2 = np.apply_along_axis(get_count_euc100, axis = 1, arr = dists_euclidean_test_q2)
euc_500q2 = np.apply_along_axis(get_count_euc500, axis = 1, arr = dists_euclidean_test_q2)
euc_1000q2 = np.apply_along_axis(get_count_euc1000, axis = 1, arr = dists_euclidean_test_q2)
euc_5000q2 = np.apply_along_axis(get_count_euc5000, axis = 1, arr = dists_euclidean_test_q2)
x = pd.DataFrame()
x['test_10nn_min_angular_q1'] = np.min(dists_angular_test_q1, axis = 1)
x['test_10nn_mean_angular_q1'] = np.mean(dists_angular_test_q1, axis = 1)
x['test_10nn_max_angular_q1'] = np.max(dists_angular_test_q1, axis = 1)
x['test_10nn_min_angular_q2'] = np.min(dists_angular_test_q2, axis = 1)
x['test_10nn_mean_angular_q2'] = np.mean(dists_angular_test_q2, axis = 1)
x['test_10nn_max_angular_q2'] = np.max(dists_angular_test_q2, axis = 1)
x['test_10nn_min_euclidean_q1'] = np.min(dists_euclidean_test_q1, axis = 1)
x['test_10nn_mean_euclidean_q1'] = np.mean(dists_euclidean_test_q1, axis = 1)
x['test_10nn_max_euclidean_q1'] = np.max(dists_euclidean_test_q1, axis = 1)
x['test_10nn_min_euclidean_q2'] = np.min(dists_euclidean_test_q2, axis = 1)
x['test_10nn_mean_euclidean_q2'] = np.mean(dists_euclidean_test_q2, axis = 1)
x['test_10nn_max_euclidean_q2'] = np.max(dists_euclidean_test_q2, axis = 1)
x['10nn_ang02_neighbors_q1'] = ang_02q1
x['10nn_ang04_neighbors_q1'] = ang_04q1
x['10nn_ang05_neighbors_q1'] = ang_05q1
x['10nn_euc100_neighbors_q1'] = euc_100q1
x['10nn_euc500_neighbors_q1'] = euc_500q1
x['10nn_euc1000_neighbors_q1'] = euc_1000q1
x['10nn_euc5000_neighbors_q1'] = euc_5000q1
x['10nn_ang02_neighbors_q2'] = ang_02q2
x['10nn_ang04_neighbors_q2'] = ang_04q2
x['10nn_ang05_neighbors_q2'] = ang_05q2
x['10nn_euc100_neighbors_q2'] = euc_100q2
x['10nn_euc500_neighbors_q2'] = euc_500q2
x['10nn_euc1000_neighbors_q2'] = euc_1000q2
x['10nn_euc5000_neighbors_q2'] = euc_5000q2
x.to_csv('test_annoy10NN_distances.csv', index = False)
x
###Output
_____no_output_____ |
Model3_transferLearning.ipynb | ###Markdown
Both datasets 1 and 2 are imbalanced, since the number of non-clickbait tweets in datasets 1 and 2 is 2.1 and 3.1 times larger than the number of clickbait tweets, respectively.
###Code
#change df_trn and df_val
Tdf["label"] = Tdf.iloc[:,0].apply(clean_up)
l = []
for i in range(len(Tdf)):
l.append(int(Tdf.iloc[:,0][i]))
Tdf["label"] = l
t = []
for i in range(len(Tdf)):
t.append(Tdf.iloc[:,1][i][0])
Tdf["text"] = t
Tdf.head()
df_trn, df_val = Tdf, df
df_trn.shape, df_val.shape
# Language model data
data_lm = TextLMDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "")
# Classifier model data
data_clas = TextClasDataBunch.from_df(path = "", train_df = df_trn, valid_df = df_val, vocab=data_lm.train_ds.vocab, bs=32)
learn = language_model_learner(data_lm, pretrained_model=URLs.WT103_1, drop_mult=0.3)
# train the learner object with learning rate = 1e-2
learn.fit_one_cycle(1, 1e-2)
#Like a computer vision model, we can then unfreeze the model and fine-tune it.
learn.unfreeze()
learn.fit_one_cycle(1, 1e-3)
#Finally we save the encoder to be able to use it for classification in the next section.
learn.save_encoder('ft_enc')
learn = text_classifier_learner(data_clas, drop_mult=0.5)
learn.load_encoder('ft_enc')
learn.fit_one_cycle(2, slice(1e-3,1e-2))
learn.freeze_to(-2)
learn.fit_one_cycle(1, slice(5e-3/2., 5e-3))
preds, targets = learn.get_preds()
predictions = np.argmax(preds, axis = 1)
pd.crosstab(predictions, targets,rownames=['Preds'], colnames=['targets'])
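# Given the class imbalance noted above, accuracy alone can be misleading; a quick
# precision/recall check from the same predictions (a sketch, assuming scikit-learn is available):
from sklearn.metrics import classification_report
print(classification_report(np.asarray(targets), np.asarray(predictions)))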
###Output
_____no_output_____ |
reports/Project Proposal.ipynb | ###Markdown
DATA1030 Project Proposal -- Hand Gesture RecognitionCangcheng(Joseph) TangBanner ID: B01628536[GitHub Link](https://github.com/tangcc35/DATA1030-Project-Cangcheng); [Data Link](https://archive.ics.uci.edu/ml/datasets/Motion+Capture+Hand+Postures)--- 1.Obejective* The target variable is different "Class" for gestures and "User". This project's main purpose is to train a model to recognize hand gestures based on the coordinates of 12 joints of the hand. There is also an exploratory objective: identifying users. * As the target variable is categorical, not continuous, classification methods and models will be used to solve the problems above. * Hand gesture recognition is a very powerful tool in interaction with an electronic device. Such technology allows hands-free control over smartphones, computers, or even smart household appliances. So remote controls will no longer be needed. Also, accessibility features with hand gesture recognition can do more than reading things very loudly. Many smartphone companies, including LG and Google, have already been doing this, and it is going to be a trend in human-computer interaction. 2. Dataset Description* There are 78095 data points and 38 features: 5 types of hand postures from 14 users were recorded using unlabeled markers on fingers of a glove in a motion capture environment. Due to resolution and occlusion, missing values are common. * Data collection: A rigid pattern of markers on the back of a glove was used to establish a local coordinate system for the hand, and 11 other markers were attached to the thumb and fingers of the glove. 3 markers were attached to the thumb with one above the thumbnail and the other two on the knuckles. 2 markers were attached to each finger with one above the fingernail and the other on the joint between the proximal and middle phalanx. * Features Information: * 'Class' - Integer. The class ID of the given record. Ranges from 1 to 5 with 1=Fist(with thumb out), 2=Stop(hand flat), 3=Point1(point with pointer finger), 4=Point2(point with pointer and middle fingers), 5=Grab(fingers curled as if to grab). * 'User' - Integer. The ID of the user that contributed the record. No meaning other than as an identifier. * 'Xi' - Real. The x-coordinate of the i-th marker position. 'i' ranges from 0 to 11. * 'Yi' - Real. The y-coordinate of the i-th marker position. 'i' ranges from 0 to 11. * 'Zi' - Real. The z-coordinate of the i-th marker position. 'i' ranges from 0 to 11. i = 1 Pinky Finger (Joint); i = 2 Pinky Finger (Nail); i = 3 Ring Finger (Joint); i = 4 Ring Finger (Nail); i = 5 Middle Finger (Joint); i = 6 Middle Finger (Nail); i = 7 Pointer Finger (Joint); i = 8 Pointer Finger (Nail); i = 9 Thumb (Metacarpophalangeal Joint); i = 10 Thumb (Interphalangeal Joint); i = 11 Thumb (Nail) * Public Paper * _[A. Gardner, J. Kanno, C. A. Duncan, and R. Selmic. 'Measuring distance between unordered sets of different sizes,' in 2014 IEEE Conference on Computer Vision and Pattern Recognition(CVPR), June 2014, pp. 137-143.](http://openaccess.thecvf.com/content_cvpr_2014/papers/Gardner_Measuring_Distance_Between_2014_CVPR_paper.pdf)_ In this paper, the authors introduced a new parameterizable metric, which measures the distance between unordered sets. This metric can be useful in premetrics, multisets, and multiple minimum-cost mappings from each set's perspective. Hand gesture recognition is used to demonstrate the function's usage in machine learning and pattern recognition. * _[Gardner, A., Duncan, C.A., Kanno, J. 
and Selmic, R., 2014, October. 3d hand posture recognition from small unlabeled point sets. In 2014 IEEE International Conference on Systems, Man, and Cybernetics (SMC) (pp. 164-169). IEEE.](https://ieeexplore.ieee.org/document/6973901)_ This paper compared the performance of various classification models when recognizing static hand postures. The researchers found that aggregate feature classifiers have balanced performance among different users, but their accuracy is not the best. And pseudo-rasterization is the one performed best among all the classification models tested. The difficulty of user classification is also stated. 3. Data Preprocess* For the coordinate data, MinMaxScaler is used, because there are clear boundaries in coordinates. As the size of human's hands are limited. * For the target variables "Class" and "User", LabelEncoder is used. * When predicting "Class", "User" will be dropped, and "Class" will be the target variable. Train, cv, and test will be split based on different Users. * When predicting "User", "Class" will be dropped, and "User" will be the target variable. Train, cv, and test will be split based on different "Class" of gestures. Unprocessed data|Class|User|X0 |Y0 |Z0 |X1 |...||:----|:---|:--------|:--------|:---------|:--------|:--||1 |0 |54.263880|71.466776|-64.807709|76.895635|...||1 |0 |56.527558|72.266609|-61.935252|39.135978|...||1 |0 |55.849928|72.469064|-62.562788|37.988804|...||1 |0 |55.329647|71.707274|-63.688956|36.561862|...||1 |0 |55.142400|71.435607|-64.177303|36.175817|...| Preprocessed data for predicting gestures"User" column is dropped. MinMaxScaler is used to preprocess coordinate data |Class|X0 |Y0 |Z0 |X1 |Y1 |...||:----|:--------|:--------|:--------|:--------|:--------|:--||0 |0.5453203|0.6346098|0.2580551|0.6278134|0.5203832|...||0 |0.5529020|0.6376009|0.2700179|0.5021060|0.6708459|...||0 |0.5506325|0.6383580|0.2674044|0.4982869|0.6711944|...||0 |0.5488899|0.6355092|0.2627143|0.4935364|0.6683312|...||0 |0.5482628|0.6344933|0.2606805|0.4922512|0.6671603|...| Preprocessed data for predicting users"Class" column is dropped. MinMaxScaler is used to preprocess coordinate data |User|X0 |Y0 |Z0 |X1 |Y1 |...||:---|:--------|:--------|:--------|:--------|:--------|:--||0 |0.5453203|0.6346098|0.2580551|0.6278134|0.5203832|...||0 |0.5529020|0.6376009|0.2700179|0.5021060|0.6708459|...||0 |0.5506325|0.6383580|0.2674044|0.4982869|0.6711944|...||0 |0.5488899|0.6355092|0.2627143|0.4935364|0.6683312|...||0 |0.5482628|0.6344933|0.2606805|0.4922512|0.6671603|...| Code Part, to be deleted in PDF
###Code
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, MinMaxScaler, StandardScaler, LabelEncoder
df = pd.read_csv('Original Postures Data.csv')
df = df.drop(0) # Dropping initial dummy record
df = df.replace('?', np.NaN) # Replacing missing value with NaN
df.head()
###Output
_____no_output_____
###Markdown
###Code
# Use MinMaxScaler to preprocess coordinate data
scaler_minmax = MinMaxScaler()
le = LabelEncoder()
coord_column = df.columns[2:]
df[coord_column] = scaler_minmax.fit_transform(df[coord_column])
df["Class"] = le.fit_transform(df["Class"])
# Dropping User for predicting Class
df_pred_class_processed = df.drop(columns = "User")
df_pred_class_processed.to_csv("Class Prediction Processed.csv")
df_pred_class_processed.head()
# Dropping Class for predicting User
df_pred_user_processed = df.drop(columns = "Class")
df_pred_user_processed.to_csv("User Prediction Processed.csv")
df_pred_user_processed.head()
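# The proposal above calls for splitting train/cv/test by user (and by gesture class for the
# user-prediction task). A sketch of a user-grouped split with scikit-learn's GroupKFold:
from sklearn.model_selection import GroupKFold

X_class = df_pred_class_processed.drop(columns="Class")
y_class = df_pred_class_processed["Class"]
groups = df["User"]  # group labels: no user appears in more than one fold
gkf = GroupKFold(n_splits=5)
for train_idx, test_idx in gkf.split(X_class, y_class, groups=groups):
    pass  # fit and evaluate a model per fold here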
###Output
_____no_output_____ |
notebooks/artwork_ld.ipynb | ###Markdown
Pairwise LD for functional SNPsThis notebook computes and plots pairwise LD between interesting functional SNPs. Setup
###Code
%run setup.ipynb
%matplotlib inline
# load in selected missense variants
tbl_variants_selected = etl.frompickle('../data/tbl_variants_missense_selected.pkl')
tbl_variants_selected.nrows()
# load in haplotypes
callset_haps = np.load('../data/haps_phase1.npz')
haps = allel.HaplotypeArray(callset_haps['haplotypes'])
pos = allel.SortedIndex(callset_haps['POS'])
pos.shape, haps.shape
def lewontin_d_prime(h, i, j, a=1, b=1):
"""Compute LD between a pair of alleles.
Parameters
----------
h : array
Haplotype array.
i : int
First variant index.
j : int
Second variant index.
a : int
First variant allele.
b : int
Second variant allele.
Returns
-------
ld : float
"""
# setup
h = allel.HaplotypeArray(h)
n_a = n_b = 0 # allele counts
n_ab = 0 # haplotype counts
n = 0 # allele number (i.e., number of calls)
# iterate over haplotypes, counting alleles and haplotypes
for k in range(h.n_haplotypes):
# access alleles
allele_ik = h[i, k]
allele_jk = h[j, k]
# only count if allele non-missing at both sites
if allele_ik < 0 or allele_jk < 0:
continue
# accumulate
if allele_ik == a:
n_a += 1
if allele_jk == b:
n_b += 1
if allele_ik == a and allele_jk == b:
n_ab += 1
n += 1
# log('D_prime counts:', 'i', i, 'j', j, 'a', a, 'b', b, 'n', n, 'n_a', n_a, 'n_b', n_b)
# bail out if no data or either allele is absent or fixed
if n == 0 or n_a == 0 or n_b == 0 or n == n_a or n == n_b:
return None
# N.B., compute D prime using counts rather than frequencies to avoid floating-point errors
# N.B., preserve the sign of D prime to retain information about linkage versus repulsion
# compute coefficient of linkage disequilibrium * n**2
D_ab = (n * n_ab) - (n_a * n_b)
# compute normalisation coefficient * n**2
if D_ab >= 0:
D_max = min(n_a * (n - n_b), (n - n_a) * n_b)
else:
D_max = min(n_a * n_b, (n - n_a) * (n - n_b))
# compute D prime
D_prime = D_ab / D_max
# log('D_prime', D_prime, i, j, a, b, n, n_a, n_b, D_ab, D_max)
# if np.isnan(D_prime):
# log('nan')
# log(D_prime, i, j, a, b, n, n_a, n_b, D_ab, D_max)
return D_prime
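# Quick sanity check of the implementation on tiny toy haplotypes (2 variants x 4 haplotypes):
# perfect coupling of the two alternate alleles gives D' = 1, perfect repulsion gives D' = -1.
h_test = allel.HaplotypeArray([[0, 1, 0, 1],
                               [0, 1, 0, 1]])
assert lewontin_d_prime(h_test, 0, 1) == 1.0
h_test = allel.HaplotypeArray([[0, 1, 0, 1],
                               [1, 0, 1, 0]])
assert lewontin_d_prime(h_test, 0, 1) == -1.0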
pos_selected = allel.SortedIndex(sorted(tbl_variants_selected.values('POS').set()))
pos_selected
tbl_variants_selected
pos_selected.shape
loc_selected = pos.locate_keys(pos_selected)
np.count_nonzero(loc_selected)
haps_selected = haps[loc_selected]
haps_selected
ac = haps_selected.count_alleles()
ac.displayall()
###Output
_____no_output_____
###Markdown
Compute and plot allele frequencies
###Code
def compute_allele_af(ax=None):
global allele_af
recs = list(tbl_variants_selected.records())
n = len(recs)
allele_af = np.zeros(n, dtype='f8')
for i in range(n):
i_pos = recs[i].POS
i_allele = recs[i].ALTIX + 1
i_vidx = pos_selected.locate_key(i_pos)
# log('row', i, i_vidx, i_pos, i_allele)
x = ac[i_vidx, i_allele] * 100 / haps_selected.shape[1]
allele_af[i] = x
compute_allele_af()
###Output
_____no_output_____
###Markdown
Compute and plot pairwise LD
###Code
def compute_ld():
global ld
recs = list(tbl_variants_selected.records())
n = len(recs)
ld = np.zeros((n, n), dtype='f8')
for i in range(n):
i_pos = recs[i].POS
i_allele = recs[i].ALTIX + 1
i_vidx = pos_selected.locate_key(i_pos)
# log('row', i, i_vidx, i_pos, i_allele)
for j in range(i+1, n):
j_pos = recs[j].POS
j_allele = recs[j].ALTIX + 1
j_vidx = pos_selected.locate_key(j_pos)
# log('col', j, j_vidx, j_pos, j_allele)
v = lewontin_d_prime(haps_selected, i_vidx, j_vidx, i_allele, j_allele)
# log('D_prime', v)
ld[i, j] = v
ld[j, i] = v
compute_ld()
ld[11]
def plot_allele_af(ax=None, **kwargs):
n = len(allele_af)
if ax is None:
fig, ax = plt.subplots(figsize=(7, 2))
left = np.arange(n) + 0.2
ax.bar(left, allele_af, align='edge', width=0.6, **kwargs)
ax.set_ylabel('Allele frequency (%)')
ax.set_xlim(0, n)
ax.set_xticks([])
ax.set_yticks(range(0, 60, 10))
ax.set_xticklabels([])
plot_allele_af()
def fig_pw_ld():
fig = plt.figure(figsize=(7, 7.3), dpi=120)
gs = mpl.gridspec.GridSpec(2, 2, height_ratios=[1.3, 6], width_ratios=[7, .5])
# sns.despine(ax=ax, offset=5)
#sns.heatmap(ld, vmin=-1, vmax=1, center=0, square=True, ax=ax, cmap='Blues', cbar_kws=dict(ticks=[-1, -.5, 0, .5, 1]))
ax = fig.add_subplot(gs[0, 0])
sns.despine(ax=ax)
plot_allele_af(ax, color='k')
ax = fig.add_subplot(gs[1, 0])
im = ax.pcolormesh(ld, vmin=-1, vmax=1, cmap='Blues', shading='flat', edgecolors='gray', linewidths=.5, antialiased=True)
labels = ['%s:%s>%s %s' % (rec.POS, rec.REF, rec.ALT, rec['AGAP004707-RA'].rjust(6))
for rec in tbl_variants_selected.records()]
# ax.invert_yaxis()
ticks = np.arange(ld.shape[0]) + .5
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(labels, rotation=90, ha='center', va='top', fontdict=dict(family='monospace'))
ax.set_yticklabels(labels, rotation=0, va='center', ha='right', fontdict=dict(family='monospace'));
ax.set_xlim(0, ld.shape[0])
ax.set_ylim(0, ld.shape[0])
ax.xaxis.set_tick_params(length=0)
ax.yaxis.set_tick_params(length=0)
for i in range(ld.shape[0] + 1):
ax.add_patch(plt.Rectangle((i-1, i-1), 1, 1, color='gray'))
cax = fig.add_subplot(gs[1, 1])
fig.colorbar(im, cax=cax, )
# cax.set_title("Linkage disequilibrium (D')", loc='left')
cax.set_ylabel("Linkage disequilibrium (D')", va='top')
fig.tight_layout(pad=0.1)
fig.savefig('../artwork/fig_ld.png', dpi=300, bbox_inches='tight')
fig_pw_ld()
###Output
_____no_output_____ |
*Machine_Learning/Trees/XGBoost.ipynb | ###Markdown
XGBoost Credit:
###Code
import warnings
warnings.filterwarnings("always")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
import xgboost as xgb
# python version
import sys
print('python vers:', sys.version[:31])
###Output
python vers: 3.6.5 |Anaconda custom (64-bit)
###Markdown
Load Data
###Code
from sklearn.datasets import load_boston
boston = load_boston()
data = pd.DataFrame(boston.data)
data.columns = boston.feature_names
data['PRICE'] = boston.target
data.head()
print(boston.keys())
print(boston.data.shape)
print(boston.feature_names)
# Separate the target variable and rest of the variables
X, y = data.iloc[:,:-1],data.iloc[:,-1]
# convert the dataset into an optimized data structure called Dmatrix that XGBoost supports
data_dmatrix = xgb.DMatrix(data=X,label=y)
data_dmatrix
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
xg_reg = xgb.XGBRegressor(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.1,
max_depth = 5, alpha = 10, n_estimators = 10)
xg_reg.fit(X_train,y_train)
preds = xg_reg.predict(X_test)
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(y_test, preds))
print("RMSE: %f" % (rmse))
###Output
RMSE: 10.868649
###Markdown
Cross Validation
###Code
params = {"objective":"reg:linear",'colsample_bytree': 0.3,'learning_rate': 0.1,
'max_depth': 5, 'alpha': 10}
cv_results = xgb.cv(dtrain=data_dmatrix, params=params, nfold=3,
num_boost_round=50,early_stopping_rounds=10,metrics="rmse", as_pandas=True, seed=123)
cv_results.head()
print((cv_results["test-rmse-mean"]).tail(1))
###Output
49 4.031162
Name: test-rmse-mean, dtype: float64
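Since `early_stopping_rounds=10` can stop the run before all 50 boosting rounds, it can be useful to read the best round directly from `cv_results` (a small sketch):

```python
best_round = cv_results["test-rmse-mean"].idxmin() + 1
print("Best number of boosting rounds:", best_round)
print("Best mean test RMSE:", cv_results["test-rmse-mean"].min())
```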
###Markdown
Viz
###Code
xg_reg = xgb.train(params=params, dtrain=data_dmatrix, num_boost_round=10)
import matplotlib.pyplot as plt
xgb.plot_tree(xg_reg,num_trees=0)
plt.rcParams['figure.figsize'] = [500, 100]
plt.show()
###Output
_____no_output_____
###Markdown
Feature Importance
###Code
xgb.plot_importance(xg_reg)
plt.rcParams['figure.figsize'] = [5, 5]
plt.show()
###Output
_____no_output_____ |
Folder 4 - Delivery & Acceptance/Final_Project.ipynb | ###Markdown
Credit Card Fraud Detection Rationale Statement Although the increasing digitalization of payments and money transfer services has indisputable benefits, it has some disadvantages as well. Certainly, one of these is the growth of fraud. According to the Nilson Report, a publication covering the card and mobile payment industry, payment card fraud losses worldwide reached US\$27.85 billion in 2018 and are expected to rise to \$35.67 billion in 2024. As transaction times become faster and fraud schemes continuously evolve, it is almost impossible to identify, predict and counteract fraudulent operations without a robust automated process. Thus, trying to reduce their loss payouts, payment card issuers and merchants have been adopting increasingly sophisticated tools. Machine learning, for instance, is one of the most efficient and, therefore, is progressively being used. Hence, in line with market trends, this project aims to build a machine learning model that, based on labeled historical data, detects fraudulent operations. Data Requirements The dataset, gathered from Kaggle.com, comprises 284,807 credit card operations, 284,315 of them non-fraudulent and 492 fraudulent, and is therefore highly unbalanced. There are a total of 31 features, all of them numerical. Due to confidentiality reasons, 28 of the features were transformed with Principal Component Analysis. The only features which have not been transformed are Time, Amount and Class. The first refers to the time elapsed between each transaction and the first one that occurred in the dataset. The second is the transaction amount. Lastly, the third is a binary variable, which has value 1 for fraudulent transactions and 0 otherwise. Assumptions, Limitations and Constraints The first challenge is to handle the unbalanced data. In this case, in order to verify the quality of the model, we cannot rely only on its accuracy. In other words, we will need additional metrics, such as precision and recall, to be certain of its quality. Considering the trade-off between false positive and false negative predictions, as we are dealing with fraud, it is better to try to minimize the rate of false negatives, that is, fraudulent transactions classified as non-fraudulent. Another critical question is the wide range of the variable ‘Amount’, which goes from \$0 to \$25,691.16. Since, depending on the model being used, the great difference in scales between variables might bias it towards one feature, we should opt for feature scaling. Test Process To guarantee the quality of the work, three machine learning models will be fitted and tested: logistic regression, decision tree and random forest. Before doing so, however, we will identify and eventually replace missing values; similar treatment will be applied to the outliers. Finally, in order to avoid overfitting, the k-fold cross-validation technique will be used.
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import warnings
import matplotlib.pyplot as plt
import scipy.stats as stats
import seaborn as sns
plt.style.use('ggplot')
warnings.filterwarnings('ignore')
df = pd.read_csv('creditcard.csv')
df.info()
df.head()
pd.set_option('precision', 3)
df.loc[:, ['Amount']].describe()
###Output
_____no_output_____
###Markdown
Highly unbalanced dataset. Associated risk: models tend to perform poorly at identifying the minority class.
###Code
count_classes = pd.value_counts(df['Class'], sort = True).sort_index()
count_classes
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12,6))
s = sns.boxplot(ax = ax1, x="Class", y="Amount", hue="Class",data=df, palette="PRGn",showfliers=True)
s = sns.boxplot(ax = ax2, x="Class", y="Amount", hue="Class",data=df, palette="PRGn",showfliers=False)
plt.show()
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
ax1 = sns.distplot(df['Time'], ax=ax1, color='y')
ax2 = sns.distplot(df['Amount'], ax=ax2, color='r')
ax1.set_title('Distribution of Time', fontsize=13)
ax2.set_title('Distribution of Amount', fontsize=13)
corr = df.corr(method='kendall')
plt.figure(figsize=(25,15))
sns.heatmap(corr, annot=True)
df.columns
###Output
_____no_output_____
###Markdown
We see high correlation between some features. Therefore, we might use PCA in order to reduce complexity and increase performance.
###Code
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler2 = StandardScaler()
#scaling time
scaled_time = scaler.fit_transform(df[['Time']])
flat_list1 = [item for sublist in scaled_time.tolist() for item in sublist]
scaled_time = pd.Series(flat_list1)
#scaling the amount column
scaled_amount = scaler2.fit_transform(df[['Amount']])
flat_list2 = [item for sublist in scaled_amount.tolist() for item in sublist]
scaled_amount = pd.Series(flat_list2)
#concatenating newly created columns w original df
df = pd.concat([df, scaled_amount.rename('scaled_amount'), scaled_time.rename('scaled_time')], axis=1)
df.sample(5)
#manual train test split using numpy's random.rand
mask = np.random.rand(len(df)) < 0.9
train = df[mask]
test = df[~mask]
print('Train Shape: {}\nTest Shape: {}'.format(train.shape, test.shape))
from sklearn.preprocessing import StandardScaler
df['normalizedAmount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1,1))
df = df.drop(['Amount'],axis=1)
train.reset_index(drop=True, inplace=True)
test.reset_index(drop=True, inplace=True)
#how many random samples from normal transactions do we need?
no_of_frauds = train.Class.value_counts()[1]
print('There are {} fraudulent transactions in the train data.'.format(no_of_frauds))
###Output
There are 432 fraudulent transactions in the train data.
###Markdown
Undersampling. Disadvantage: it discards potentially useful information from the majority class. (An oversampling alternative is sketched after the undersampling code below.)
###Code
#randomly selecting 442 random non-fraudulent transactions
non_fraud = train[train['Class'] == 0]
fraud = train[train['Class'] == 1]
selected = non_fraud.sample(no_of_frauds)
selected.head()
#concatenating both into a subsample data set with equal class distribution
selected.reset_index(drop=True, inplace=True)
fraud.reset_index(drop=True, inplace=True)
subsample = pd.concat([selected, fraud])
len(subsample)
new_counts = subsample.Class.value_counts()
plt.figure(figsize=(8,6))
sns.barplot(x=new_counts.index, y=new_counts)
plt.title('Count of Fraudulent vs. Non-Fraudulent Transactions In Subsample')
plt.ylabel('Count')
plt.xlabel('Class (0:Non-Fraudulent, 1:Fraudulent)')
#shuffling our data set
subsample = subsample.sample(frac=1).reset_index(drop=True)
subsample.head(10)
###Output
_____no_output_____
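As noted above, undersampling throws away most of the non-fraudulent records. A commonly used alternative is to oversample the minority class instead, for example with SMOTE. This is only a sketch and is not used in the rest of the notebook; it assumes the `imbalanced-learn` package is installed (recent versions expose `fit_resample`, older ones `fit_sample`):

```python
from imblearn.over_sampling import SMOTE

X_train_full = train.drop('Class', axis=1)
y_train_full = train['Class']
X_res, y_res = SMOTE(random_state=0).fit_resample(X_train_full, y_train_full)
print(pd.Series(y_res).value_counts())  # classes are now balanced
```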
###Markdown
Removing outliers
###Code
Q1 = subsample.quantile(0.25)
Q3 = subsample.quantile(0.75)
IQR = Q3 - Q1
df2 = subsample[~((subsample < (Q1 - 2.5 * IQR)) |(subsample > (Q3 + 2.5 * IQR))).any(axis=1)]
len_after = len(df2)
len_before = len(subsample)
len_difference = len(subsample) - len(df2)
print('We reduced our data size from {} transactions by {} transactions to {} transactions.'.format(len_before, len_difference, len_after))
df2.head()
#df = df.drop(['Time'],axis=1)
len(df2)
X = df2.drop('Class', axis=1)
y = df2['Class']
###Output
_____no_output_____
###Markdown
Using PCA It removes the redundancy, or correlation, that can exist when describing the data with a large number of features. Here, the number of features is reduced from 32 to 20.
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.3, random_state=0)
from sklearn.decomposition import PCA
pca=PCA(n_components = 20)
pca.fit(X_train)
X_pca = pca.transform(X)
X_pca.shape
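# NOTE: X_pca is not used by the classifiers below, which are fit on the original X_train.
# To actually train on the 20 PCA components, one could transform the split data first, e.g.:
# X_train_pca = pca.transform(X_train)
# X_test_pca = pca.transform(X_test)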
###Output
_____no_output_____
###Markdown
DECISION TREE
###Code
from sklearn.tree import DecisionTreeClassifier
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train,y_train)
y_pred = decision_tree.predict(X_test)
decision_tree.score(X_test,y_test)
import matplotlib.pyplot as plt
import itertools
from sklearn import svm, datasets
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
cnf_matrix = confusion_matrix(y_test,y_pred)
plot_confusion_matrix(cnf_matrix,classes=[0,1])
from sklearn.metrics import classification_report
print(classification_report(y_test,y_pred))
y_pred = decision_tree.predict(X)
y_expected = pd.DataFrame(y)
cnf_matrix = confusion_matrix(y_expected,y_pred.round())
plot_confusion_matrix(cnf_matrix,classes=[0,1])
plt.show()
###Output
Confusion matrix, without normalization
[[382 7]
[ 12 212]]
###Markdown
RANDOM FOREST
###Code
from sklearn.ensemble import RandomForestClassifier
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train,y_train)
y_pred = random_forest.predict(X_test)
random_forest.score(X_test,y_test)
import matplotlib.pyplot as plt
import itertools
from sklearn import svm, datasets
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
cnf_matrix = confusion_matrix(y_test,y_pred)
plot_confusion_matrix(cnf_matrix,classes=[0,1])
print(classification_report(y_test,y_pred))
###Output
precision recall f1-score support
0 0.92 0.99 0.95 116
1 0.98 0.85 0.91 68
accuracy 0.94 184
macro avg 0.95 0.92 0.93 184
weighted avg 0.94 0.94 0.94 184
###Markdown
1. accuracy: (tp + tn)/(tp + tn + fp + fn) 2. precision: tp/(tp + fp) 3. recall: tp/(tp + fn) 4. f1-score: 2\*precision\*recall/(precision + recall)
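As a worked example, plugging the decision-tree confusion matrix shown earlier ([[382, 7], [12, 212]], with class 1 = fraud) into these formulas:

```python
tn, fp, fn, tp = 382, 7, 12, 212           # values from the decision-tree matrix above
precision = tp / (tp + fp)                 # ~0.968
recall    = tp / (tp + fn)                 # ~0.946
f1        = 2 * precision * recall / (precision + recall)  # ~0.957
accuracy  = (tp + tn) / (tp + tn + fp + fn)                 # ~0.969
print(precision, recall, f1, accuracy)
```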
###Code
y_pred = random_forest.predict(X)
cnf_matrix = confusion_matrix(y,y_pred.round())
plot_confusion_matrix(cnf_matrix,classes=[0,1])
###Output
Confusion matrix, without normalization
[[388 1]
[ 10 214]]
###Markdown
LOGISTIC REGRESSION
###Code
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
y_pred = logmodel.predict(X_test)
logmodel.score(X_test,y_test)
cnf_matrix = confusion_matrix(y_test,y_pred)
plot_confusion_matrix(cnf_matrix,classes=[0,1])
from sklearn.metrics import classification_report
print(classification_report(y_test,y_pred))
# NOTE: this cell reuses random_forest (likely a copy-paste slip); for the logistic-regression matrix use logmodel.predict(X)
y_pred = random_forest.predict(X)
cnf_matrix = confusion_matrix(y,y_pred)
plot_confusion_matrix(cnf_matrix,classes=[0,1])
###Output
Confusion matrix, without normalization
[[388 1]
[ 10 214]]
|
python/politician/facebook.ipynb | ###Markdown
Politician Activity on FacebookThe parameters in the cell below can be adjusted to explore other Politicians and time frames. How to explore other politicians?The ***politician_id*** is an internal identifier that connects the different social media accounts. You can [use this other notebook](../Politicians.ipynb?autorun=true) to get other the identifiers of other Politicians.***Alternatively***, you can direcly use the [Politicians API](http://mediamonitoring.gesis.org/api/Politicians/swagger/), or access it with the [SMM Wrapper](https://pypi.org/project/smm-wrapper/). A. Set Up parameters
###Code
# Parameters:
politician_id = 1274143252682258
from_date = '2017-09-01'
to_date = '2018-12-31'
aggregation = 'week'
###Output
_____no_output_____
###Markdown
B. Using the SMM Politician API
###Code
# Create an instance to the smm wrapper
from smm_wrapper import SMMPoliticians
smm = SMMPoliticians()
# using the api to get the posts and comments activity
posts = smm.api.posts_by(_id=politician_id, from_date=from_date, to_date=to_date, aggregate_by=aggregation)
comments = smm.api.comments_by(_id=politician_id, from_date=from_date, to_date=to_date, aggregate_by=aggregation)
###Output
_____no_output_____
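If you want to analyse the numbers beyond plotting, the responses can be put into a DataFrame. This is only a sketch and assumes the response is a dictionary of equal-length lists with the keys used in the plots below (`labels`, `values`, `replies`, `shares`, `reactions`, `likes` for posts):

```python
import pandas as pd

posts_df = pd.DataFrame({
    'date': posts['labels'],
    'posts': posts['values'],
    'replies': posts['replies'],
    'shares': posts['shares'],
    'reactions': posts['reactions'],
    'likes': posts['likes'],
})
posts_df.head()
```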
###Markdown
C. Plotting C.1 Plot Facebook Post Activity
###Code
import plotly
from plotly import graph_objs as go
plotly.offline.init_notebook_mode(connected=True)
#plot for facebook posts activity
plotly.offline.iplot({
"data": [go.Scatter(x=posts['labels'], y=posts['values'], name='Posts', line_shape='spline'),
go.Scatter(x=posts['labels'], y=posts['replies'], name='Replies', line_shape='spline'),
go.Scatter(x=posts['labels'], y=posts['shares'], name='Shares', line_shape='spline'),
go.Scatter(x=posts['labels'], y=posts['reactions'], name='Reactions', line_shape='spline'),
go.Scatter(x=posts['labels'], y=posts['likes'], name='Likes', line_shape='spline')],
"layout": go.Layout(title='Facebook (Posts Activity)', yaxis=dict(title='N'))
})
###Output
_____no_output_____
###Markdown
C.2 Plot Facebook Comment Activity
###Code
# plot for facebook comments activity
plotly.offline.iplot({
"data": [go.Scatter(x=comments['labels'], y=comments['values'], name='Comments', line_shape='spline'),
go.Scatter(x=comments['labels'], y=comments['replies'], name='Replies', line_shape='spline'),
go.Scatter(x=comments['labels'], y=comments['likes'], name='Likes', line_shape='spline')],
"layout": go.Layout(title='Facebook (Comments Activity)', yaxis=dict(title='N'))
})
###Output
_____no_output_____ |
Deep GAN.ipynb | ###Markdown
**INITIALIZATION:**- I use these three lines of code at the top of each of my notebooks because they help prevent problems when reloading the same project. The third line enables inline visualization within the notebook.
###Code
#@ INITIALIZATION:
%reload_ext autoreload
%autoreload 2
%matplotlib inline
###Output
_____no_output_____
###Markdown
**DOWNLOADING LIBRARIES AND DEPENDENCIES:**- I have downloaded all the libraries and dependencies required for the project in one particular cell.
###Code
#@ DOWNLOADING THE LIBRARIES AND DEPENDENCIES:
# !pip install -U d2l
from d2l import torch as d2l
import warnings
import torch
import torchvision
from torch import nn
###Output
_____no_output_____
###Markdown
**THE POKEMON DATASET:**- The dataset is a collection of Pokemon sprites obtained from [**PokemonDB**](https://pokemondb.net/sprites). I will download, extract and load the dataset.
###Code
#@ GETTING THE DATASET:
d2l.DATA_HUB["pokemon"] = (d2l.DATA_URL + 'pokemon.zip',
'c065c0e2593b8b161a2d7873e42418bf6a21106c') # Path to Dataset.
data_dir = d2l.download_extract("pokemon") # Downloading and Extracting the Dataset.
pokemon = torchvision.datasets.ImageFolder(data_dir) # Initializing DataLoader.
###Output
_____no_output_____
###Markdown
- I will resize each image into 64X64 and normalize the data with 0.5 mean and 0.5 standard deviation.
###Code
#@ PREPARING THE DATASET:
batch_size = 256 # Initialization.
transformer = torchvision.transforms.Compose([ # Initializing Compose Instance.
torchvision.transforms.Resize((64, 64)), # Resizing Images.
torchvision.transforms.ToTensor(), # Converting into Tensors.
torchvision.transforms.Normalize(0.5, 0.5)]) # Normalizing the Data.
pokemon.transform = transformer # Transforming the Pokemon Dataset.
data_iter = torch.utils.data.DataLoader(
pokemon, batch_size=batch_size, shuffle=True,
num_workers=2) # Initializing Data Iterations.
#@ VISUALIZING THE DATASET:
warnings.filterwarnings("ignore")
d2l.set_figsize((5, 5))
for X, y in data_iter:
imgs = X[0:20, :, :, :].permute(0, 2, 3, 1) / 2 + 0.5 # Getting Images.
d2l.show_images(imgs, num_rows=4, num_cols=5) # Visualizing Images.
break
###Output
_____no_output_____
###Markdown
**THE GENERATOR:**- The Generator needs to map the noise variable to an RGB image. I will use transposed convolutional layers to enlarge the input image. The basic block of the Generator contains a transposed convolution layer followed by batch normalization and a ReLU activation function.
###Code
#@ DEFINING THE GENERATOR:
class G_Block(nn.Module): # Initializing Generator.
def __init__(self, out_channels, in_channels=3, kernel_size=4, strides=2,
padding=1, **kwargs): # Initializing Constructor Function.
super(G_Block, self).__init__(**kwargs)
self.conv2d_trans = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, strides, padding,
bias=False) # Initializing Transposed Convolution Layer.
self.batch_norm = nn.BatchNorm2d(out_channels) # Initializing Batch Normalization Layer.
self.activation = nn.ReLU() # Initializing RELU Activation Layer.
def forward(self, X): # Forward Propagation Layer.
return self.activation(self.batch_norm(self.conv2d_trans(X))) # Implementation of Layers.
#@ INSPECTING THE IMPLEMENTATION:
X = torch.zeros((2, 3, 16, 16)) # Initializing Tensor.
G_block = G_Block(20) # Implementation.
G_block(X).shape # Inspecting Shape.
###Output
_____no_output_____
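The 16 → 32 shape change follows from the output-size formula of `nn.ConvTranspose2d` (with dilation 1 and no output padding): n_out = (n_in - 1) * stride - 2 * padding + kernel_size. A quick check:

```python
def conv_transpose_out(n_in, kernel_size=4, stride=2, padding=1):
    # Output size of nn.ConvTranspose2d with dilation=1 and output_padding=0.
    return (n_in - 1) * stride - 2 * padding + kernel_size

print(conv_transpose_out(16))                                     # 32, as in the shape check above
print(conv_transpose_out(1, kernel_size=4, stride=1, padding=0))  # 4, the first generator block below
```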
###Markdown
- The Generator consists of four blocks that increase the height and width of the input from 1 to 32. A final transposed convolution layer is used to generate the output, and the tanh activation function projects the output values into the range of -1 to 1.
###Code
#@ INITIALIZING THE GENERATOR:
n_G = 64 # Initialization.
net_G = nn.Sequential(G_Block(in_channels=100, out_channels=n_G*8,
strides=1, padding=0), # Output: 64*8, 4, 4.
G_Block(in_channels=n_G*8, out_channels=n_G*4), # Output: 64*4, 8, 8.
G_Block(in_channels=n_G*4, out_channels=n_G*2), # Output: 64*2, 16, 16.
G_Block(in_channels=n_G*2, out_channels=n_G), # Output: 64, 32, 32.
nn.ConvTranspose2d(in_channels=n_G,out_channels=3,
kernel_size=4, stride=2,
padding=1, bias=False), # Implementation of Transposed Convolution.
nn.Tanh()) # Implementation of Tanh Activation.
#@ IMPLEMENTATION OF GENERATOR:
X = torch.zeros((1, 100, 1, 1,)) # Initializing Tensor.
net_G(X).shape # Inspecting Shape of Output.
###Output
_____no_output_____
###Markdown
**THE DISCRIMINATOR:**- The basic block of the Discriminator is a convolution layer followed by a batch normalization layer and a Leaky ReLU activation function. Leaky ReLU is a nonlinear function that gives a non-zero output for a negative input. It aims to fix the dying-ReLU problem: a neuron whose input is always negative outputs 0 and cannot make any progress, since the gradient of ReLU is 0 there.
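Concretely, with negative slope α the activation is f(x) = x for x ≥ 0 and f(x) = αx for x < 0; here α = 0.2 (the `alpha` argument below), so negative inputs are scaled down rather than zeroed out, and the gradient for x < 0 is α instead of 0.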
###Code
#@ DEFINING THE DISCRIMINATOR:
class D_Block(nn.Module): # Initializing Discriminator.
def __init__(self, out_channels, in_channels=3, kernel_size=4, strides=2,
padding=1, alpha=0.2, **kwargs): # Initializing Constructor Function.
super(D_Block, self).__init__(**kwargs)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
strides, padding, bias=False) # Initializing Convolution Layer.
self.batch_norm = nn.BatchNorm2d(out_channels) # Initializing Batch Normalization Layer.
self.activation = nn.LeakyReLU(alpha, inplace=True) # Initializing Leaky RELU Activation.
def forward(self, X): # Initializing Forward Propagation.
return self.activation(self.batch_norm(self.conv2d(X))) # Implementation of Layers.
#@ IMPLEMENTATION OF THE DISCRIMINATOR:
X = torch.zeros((2, 3, 16, 16)) # Initializing a Tensor.
d_block = D_Block(20) # Initializing Discriminator.
d_block(X).shape # Implementation of Discriminator.
#@ INITIALIZING THE DISCRIMINATOR:
n_D = 64 # Initialization.
net_D = nn.Sequential(D_Block(n_D), # Output: 64, 32, 32.
D_Block(in_channels=n_D, out_channels=n_D*2), # Output: 64*2, 16, 16.
D_Block(in_channels=n_D*2,out_channels=n_D*4), # Output: 64*4, 8, 8.
D_Block(in_channels=n_D*4,out_channels=n_D*8), # Output: 64*8, 4, 4.
nn.Conv2d(in_channels=n_D*8, out_channels=1,
kernel_size=4, bias=False)) # Implementation of Convolution Layer.
#@ IMPLEMENTATION:
X = torch.zeros((1, 3, 64, 64)) # Initializing Tensor.
net_D(X).shape # Inspecting the Shape.
###Output
_____no_output_____
###Markdown
**TRAINING:**- I will be using the same learning rate for both the generator and the discriminator since the networks are similar to each other. I will change β1 in Adam from 0.9 to 0.5. This decreases the smoothness of the momentum (the exponentially weighted moving average of past gradients) to cope with the rapidly changing gradients that arise because the generator and the discriminator fight with each other. The randomly generated noise Z is a 4D tensor.
###Code
#@ TRAINING THE MODEL:
def train(net_D, net_G, data_iter, num_epochs, lr, latent_dim,
device=d2l.try_gpu()): # Function for Training.
loss = nn.BCEWithLogitsLoss(reduction="sum") # Initializing Cross Entropy Loss Function.
for w in net_D.parameters(): # Discriminator.
nn.init.normal_(w, 0, 0.02) # Normal Initialization.
for w in net_G.parameters(): # Generator.
nn.init.normal_(w, 0, 0.02) # Normal Initialization.
net_D, net_G = net_D.to(device), net_G.to(device) # Enabling GPU.
trainer_hp = {"lr": lr, "betas": [0.5, 0.999]} # Initializing Optimization Parameters.
trainer_D = torch.optim.Adam(net_D.parameters(), **trainer_hp) # Adam Optimizer for Discriminator.
trainer_G = torch.optim.Adam(net_G.parameters(), **trainer_hp) # Adam Optimizer for Generator.
animator = d2l.Animator(xlabel="epoch", ylabel="loss",
xlim=[1, num_epochs],nrows=2,figsize=(5, 5),
legend=["discriminator", "generator"]) # Initializing Animator.
animator.fig.subplots_adjust(hspace=0.3) # Initializing Subplots.
for epoch in range(1, num_epochs + 1):
timer = d2l.Timer() # Initializing Timer.
metric = d2l.Accumulator(3) # Initializing Accumulator.
for X, _ in data_iter:
batch_size = X.shape[0] # Initializing Batch Size.
Z = torch.normal(0, 1, size=(batch_size, latent_dim, 1, 1)) # Initializing Tensor.
X, Z = X.to(device), Z.to(device) # Enabling GPU.
metric.add(d2l.update_D(X, Z, net_D, net_G, loss, trainer_D), # Updating Discriminator.
d2l.update_G(Z, net_D, net_G, loss, trainer_G), # Updating Generator.
batch_size) # Acccumulating Updates.
Z = torch.normal(0, 1, size=(21, latent_dim, 1, 1), device=device) # Initializing Tensor.
fake_x = net_G(Z).permute(0, 2, 3, 1) / 2 + 0.5 # Normalizing Synthetic Data.
imgs = torch.cat([torch.cat([fake_x[i*7 + j].cpu().detach() \
for j in range(7)], dim=1) \
for i in range(len(fake_x) // 7)], dim=0)
animator.axes[1].cla()
animator.axes[1].imshow(imgs)
loss_D, loss_G = metric[0] / metric[2], metric[1] / metric[2] # Getting Discriminator and Generator Loss.
animator.add(epoch, (loss_D, loss_G))
print(f"loss_D {loss_D:.3f}, loss_G {loss_G:.3f}, "
f"{metric[2]/timer.stop():.1f} examples/sec on {str(device)}")
#@ TRAINING THE MODEL:
latent_dim, lr, num_epochs = 100, 0.005, 25 # Initializing Parameters.
train(net_D, net_G, data_iter, num_epochs, lr, latent_dim) # Training the Model.
###Output
loss_D 0.134, loss_G 8.398, 538.7 examples/sec on cuda:0
|
notebooks/introduction_to_tensorflow/labs/intro_logistic_regression_TF2.0.ipynb | ###Markdown
Introduction to Logistic Regression Using TF 2.0**Learning Objectives**1. Build a neural network that classifies images.2. Train this neural network.3. Evaluate the accuracy of the model. Introduction This short introduction uses [Keras](https://keras.io/), a high-level API to build and train models in TensorFlow. In this lab, you load and prepare the MNIST dataset, convert the samples from integers to floating-point numbers, build and train a neural network that classifies images, and then evaluate the accuracy of the model. Each learning objective will correspond to a __TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/intro_logistic_regression_TF2.0.ipynb) -- try to complete that notebook first before reviewing this solution notebook. Load necessary libraries We will start by importing the necessary libraries for this lab.
###Code
import tensorflow as tf
print("TensorFlow version: ", tf.version.VERSION)
###Output
TensorFlow version: 2.1.0
###Markdown
Load and prepare the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). Convert the samples from integers to floating-point numbers:
###Code
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
###Output
_____no_output_____
###Markdown
**Lab Task 1:** Build the `tf.keras.Sequential` model by stacking layers. Choose an optimizer and loss function for training:
###Code
model = # TODO 1 -- Your code here.
###Output
_____no_output_____
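One possible completion of TODO 1 (a sketch, not necessarily the official solution): a `Flatten` layer followed by a hidden `Dense` layer, dropout, and a final `Dense(10)` layer with no softmax, so the model outputs logits as the following cells assume:

```python
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])
```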
###Markdown
For each example the model returns a vector of "[logits](https://developers.google.com/machine-learning/glossarylogits)" or "[log-odds](https://developers.google.com/machine-learning/glossarylog-odds)" scores, one for each class.
###Code
predictions = model(x_train[:1]).numpy()
predictions
###Output
WARNING:tensorflow:Layer flatten_2 is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.
If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.
To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.
###Markdown
The `tf.nn.softmax` function converts these logits to "probabilities" for each class:
###Code
tf.nn.softmax(predictions).numpy()
###Output
_____no_output_____
###Markdown
Note: It is possible to bake this `tf.nn.softmax` in as the activation function for the last layer of the network. While this can make the model output more directly interpretable, this approach is discouraged as it's impossible to provide an exact and numerically stable loss calculation for all models when using a softmax output. The `losses.SparseCategoricalCrossentropy` loss takes a vector of logits and a `True` index and returns a scalar loss for each example. **Lab Task 2:** Usage of losses.SparseCategoricalCrossentropy with logits vectors and a True index.
###Code
loss_fn = # TODO 2 -- Your code here.
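# A possible sketch (an assumption, not the official lab solution): the model above
# outputs raw logits, so pass from_logits=True:
#   loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)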
###Output
_____no_output_____
###Markdown
This loss is equal to the negative log probability of the true class:It is zero if the model is sure of the correct class.This untrained model gives probabilities close to random (1/10 for each class), so the initial loss should be close to `-tf.log(1/10) ~= 2.3`.
###Code
loss_fn(y_train[:1], predictions).numpy()
model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
The `Model.fit` method adjusts the model parameters to minimize the loss:
###Code
model.fit(x_train, y_train, epochs=5)
###Output
Train on 60000 samples
Epoch 1/5
60000/60000 [==============================] - 4s 74us/sample - loss: 0.2948 - accuracy: 0.9159
Epoch 2/5
60000/60000 [==============================] - 4s 68us/sample - loss: 0.1449 - accuracy: 0.9575
Epoch 3/5
60000/60000 [==============================] - 4s 67us/sample - loss: 0.1086 - accuracy: 0.9669
Epoch 4/5
60000/60000 [==============================] - 4s 67us/sample - loss: 0.0890 - accuracy: 0.9722
Epoch 5/5
60000/60000 [==============================] - 4s 67us/sample - loss: 0.0760 - accuracy: 0.9761
###Markdown
The `Model.evaluate` method checks the model's performance, usually on a "[Validation-set](https://developers.google.com/machine-learning/glossary#validation-set)" or "[Test-set](https://developers.google.com/machine-learning/glossary#test-set)".
###Code
model.evaluate(x_test, y_test, verbose=2)
###Output
10000/10000 - 0s - loss: 0.0789 - accuracy: 0.9762
###Markdown
The image classifier is now trained to ~98% accuracy on this dataset. To learn more, read the [TensorFlow tutorials](https://www.tensorflow.org/tutorials/). If you want your model to return a probability, you can wrap the trained model, and attach the softmax to it:
###Code
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
probability_model(x_test[:5])
###Output
_____no_output_____ |
gmt-testing/gmt-setup.ipynb | ###Markdown
GMT/Python Installation on Linux/MacThe [instructions from the official documentation](https://www.gmtpython.xyz/latest/install.html) need to be modified slightly to prevent `conda` configuration problems, and additional steps are required in order to use GMT/Python in Jupyter notebook.Because the GMT/Python library installation process has not been fully implemented for the `conda` package manager, installation requires the use of a virtual environment so that components of the library can be installed with `pip` inside an isolated environment without interfering with the system Python installation. To learn about virtual environments, check out [this tutorial](https://medium.freecodecamp.org/why-you-need-python-environments-and-how-to-manage-them-with-conda-85f155f4353c). 1. Cleanup from previous installation attemptFollowing [these instructions from the documentation](https://www.gmtpython.xyz/latest/install.html) creates some configuration problems, so first we need to do some cleanup from the previous installation attempt, and then start over from scratch. a) Delete previously created virtual environmentClose any open Terminals, then open a new Terminal (this will be in the `conda` root environment by default). At the prompt, run the following command to delete the previously created virtual environment:```conda env remove --name gmt-python``` b) Reset `conda` channel configurationThe instructions in the documentation have you change the `conda` configuration globally (not just for installing the `gmt-python` library). This means that for any other libraries you install, `conda` will first look for the library in the `conda-forge`, and then `conda-forge/label/dev` channels before it looks in the `default` channel. This is very likely not the configuration that you want.To fix it, in your home directory, open the file called `.condarc` (it may be hidden). It will probably look like this:```channels: - conda-forge - conda-forge/label/dev - defaults```Delete the lines ` - conda-forge` and ` - conda-forge/label/dev` so that the file looks like:```channels: - defaults```Save and close. This resets `conda` configuration to the proper default channels. 2. Configure JupyterIn the Terminal (still in `conda` root environment), run the following command:```conda install nb_conda```This library allows Jupyter notebooks to use `conda` virtual environments as kernels. 3. Create virtual environment and install GMT/PythonCreate a new text file and paste the following into it:```name: gmt-pythonchannels:- conda-forge- conda-forge/label/devdependencies:- python=3.6- gmt=6.0.0*- pip- ipython- ipykernel- numpy- pandas- xarray- packaging - pytestam- pytest-mpl- sphinx- jinja2- docutils```Save the file as `gmt-python-environment.yml` in whatever directory you like - it could be in your main user directory, or you could create a sub-directory and save it there. This file is only needed temporarily, to create the virtual environment. Once the environment is created, you can delete the file, or keep it in case you want to use it later (e.g. on another computer).In the Terminal, change the working directory to wherever you saved the `.yml` file, and run the following command:```conda env create --file gmt-python-environment.yml```Once it completes, you'll have a new virtual environment called `gmt-python`, which should appear in the list of environments when you run the command:```conda env list``` 4. 
Update GMT/Python and configure kernel for JupyterIn the Terminal (in any directory, doesn't matter which one you're in), activate `gmt-python` virtual environment with the command:```source activate gmt-python```The command prompt should change to show the environment name in parentheses at the start, similar to this:```(gmt-python) jennifer@firefly-X1:~$```Now install the latest GMT/Python source code from GitHub with the following command:```pip install https://github.com/GenericMappingTools/gmt-python/archive/master.zip```Next, run the following command so that the `gmt-python` environment will appear in the list of available kernels in JupyterLab / Jupyter notebook:```python -m ipykernel install --user --name gmt-python --display-name "Python (gmt-python)"```Finally, deactivate the `gmt-python` virtual environment with:```source deactivate```This will return you to the `conda` root environment. 5. Test GMT/Python in JupyterLabLaunch JupyterLab from Anaconda Navigator or from the Terminal (in the `conda` root environment). In the Launcher screen of JupyterLab, there should now be a second icon listed under "Notebook" which shows the label `Python (gmt-python)` when you hover over it. Click this icon to create a new notebook with the `gmt-python` virtual environment as its kernel. In the notebook, you should see the kernel name `Python (gmt-python)` displayed in the top right corner.Now you can run the test suite and create a figure to try out the library:
###Code
import gmt
gmt.test()
fig = gmt.Figure()
fig.coast(region=[-90, -70, 0, 20], projection='M6i', land='chocolate', frame=True)
fig.show()
###Output
_____no_output_____ |
CS110 PCW 17 Randomly built BSTs.ipynb | ###Markdown
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).Note that this Pre-class Work is estimated to take **43 minutes**.Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
###Code
NAME = "Jeong woo Park"
COLLABORATORS = ""
###Output
_____no_output_____
###Markdown
--- CS110 Pre-class Work - Randomly built BSTs Part A. Average number of comparisons when searching Question 0 [time estimate: 1 minute] Paste in the working code from last session's PCW for the class Node, which contains the methods for insertion, searching, and deletion.
###Code
class Node:
def __init__(self, val):
self.l_child = None
self.r_child = None
self.parent = None
self.data = val
def insert(self, node):
"""inserts a node into a *non-empty* tree rooted at the node, returns
the root"""
if self.data > node.data:
if self.l_child is None:
self.l_child = node
node.parent = self
else:
self.l_child.insert(node)
else:
if self.r_child is None:
self.r_child = node
node.parent = self
else:
self.r_child.insert(node)
return self
def minimum(self):
node = self
while node.l_child != None:
node = node.l_child
return node
def search_data(self, value):
"""searches a *non-empty* tree rooted at the node for a node with
data = value, returns the value if found, None otherwise"""
node = self.search(value)
if node:
return node.data
else:
return node
def to_string(self):
print('self.data', self.data)
root=self
if not root:
return 'Nil'
else:
r = root.r_child.to_string() if root.r_child else 'Nil'
l = root.l_child.to_string() if root.l_child else 'Nil'
return 'Node(' + str(root.data) + ' L: ' + l + ' R: ' + r + ')'
    def search(self, value):
        if self.data == value:
            return self
        elif self.data > value:
            # descend left; return None when the value is not in the tree
            return self.l_child.search(value) if self.l_child else None
        else:
            return self.r_child.search(value) if self.r_child else None
def delete(self, value):
if self.data == value:
self.l_child = None
self.r_child = None
return self.parent
elif self.data > value:
return self.l_child.delete(value)
else :
return self.r_child.delete(value)
    def inorder(self):
        """Returns the nodes of the subtree rooted at this node, in sorted (in-order) order."""
        nodes = []
        if self.l_child:
            nodes.extend(self.l_child.inorder())
        nodes.append(self)
        if self.r_child:
            nodes.extend(self.r_child.inorder())
        return nodes
###Output
_____no_output_____
###Markdown
Question 1 [time estimate: 5 minutes]Complete the following function that computes the depth of a given node in a BST identified by its root. Use the test below to make sure your code works properly (Optional: Why does the test work?)
###Code
def depth(root, node):
"""
Finds the depth of the node in a BST. depth of root is 0.
Parameters
----------
root
A node, the root of the BST
node
A node to compute the depth of
Returns
-------
d : int
Distance from node to root
"""
d = 0
while node.parent != None:
d += 1
node = node.parent
return d
# Testing code
import math
bst = None
nodes = [Node(15), Node(6), Node(18), Node(3), Node(7),
Node(17), Node(20), Node(2), Node(4)]
for node in nodes:
if not bst:
bst = node
else:
bst.insert(node)
# insert(bst, node)
for i in range(len(nodes)):
assert(depth(bst, nodes[i]) == int(math.log(i+1,2)))
# Please ignore this cell. This cell is for us to implement the tests
# to see if your code works properly.
###Output
_____no_output_____
###Markdown
Question 2 [time estimate: 5 minutes]Complete the following function, making use of `depth`, to calculate the average number of comparisons required to search for a randomly chosen element of a standard BST. For example, the following tree: 12 \ 14will have an average number of comparisons of 1.5. This is because 50% of the time we will be searching for 12 (1 comparison), and 50% of the time we will be searching for 14 (2 comparisons).You can test your function with this test case by constructing the tree using the code from the last session's pre-class work. (*Hint*: the number of comparisons required to search for node a is `depth(root, a) + 1`.)
###Code
def avg_cmp(bst):
"""
Finds the average number of comparisons required
to search for a randomly chosen element of a standard BST.
Parameters
----------
bst
A Node, the root of the BST
Returns
-------
n : float
Average number of comparisons
"""
    lst = []
    for node in bst.inorder():
        # searching for a node costs one comparison per level on its path, i.e. depth + 1
        lst.append(depth(bst, node) + 1)
    return sum(lst) / len(lst)
# Please ignore this cell. This cell is for us to implement the tests
# to see if your code works properly.
###Output
_____no_output_____
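A quick sanity check for `avg_cmp`, assuming the `Node` class and `depth` function defined above, using the two-node example from the prompt:
```python
root = Node(12)
root.insert(Node(14))
print(avg_cmp(root))  # expected 1.5: searching for 12 costs 1 comparison, searching for 14 costs 2
```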
###Markdown
Part B. Depth of randomly-built treesThe average number of comparisons for a randomly chosen element within the tree is related to the “average depth” of the tree. Here the average depth of a tree is defined to be the average of the depths of all the nodes in the tree. Question 1 [time estimate: 10 minutes]Complete the following function to find the maximum depth of a node in a given BST. The function must run in $\mathrm{O}(N)$ time and use $\mathrm{O}(h)$ space (where $N$ is the number of elements in the BST and $h$ is the height of the tree). Also, note that the maximum depth is equal to the height of the tree.
###Code
def max_depth(bst):
"""
Finds the maximum depth of node in a BST.
Parameters
----------
bst
A Node, the root of the BST
Returns
-------
h : int
The maximum depth in a BST
"""
    # Recursive post-order traversal: O(N) time and O(h) call-stack space.
    left = max_depth(bst.l_child) if bst.l_child else -1
    right = max_depth(bst.r_child) if bst.r_child else -1
    return 1 + max(left, right)
# Please ignore this cell. This cell is for us to implement the tests
# to see if your code works properly.
###Output
_____no_output_____
###Markdown
Question 2 [time estimate: 5 minutes]Using the `avg_cmp` function, complete the following function to return the average depth of the tree. The average depth is related to the average number of comparisons as `average depth = (average comparisons - 1)`, since the root has depth zero.
###Code
def avg_depth(bst):
"""
Computes the average depth of a BST
Parameters
----------
bst
A Node, root of the BST
Returns
-------
avg_d : float
Average depth of the BST
"""
    return avg_cmp(bst) - 1
# Please ignore this cell. This cell is for us to implement the tests
# to see if your code works properly.
###Output
_____no_output_____
###Markdown
Question 3 [time estimate: 10 minutes]Now, insert randomly shuffled lists into BSTs, and measure the average depth and the maximum depth. How do these statistics scale as you increase $N$, the number of nodes? Make sure to give a compelling argument that motivates this scaling behaviour (you do not need to provide a technical derivation). For a randomly built BST, both statistics grow logarithmically on average: the average depth of a node is $O(\log N)$ (roughly $2\ln N$), and the maximum depth is also $O(\log N)$ with a larger constant, so the total number of comparisons over all $N$ nodes grows like $O(N\log N)$. An unlucky insertion order (e.g., an already-sorted list) can still degrade the tree to the $O(N)$ worst case, but random shuffling makes such long chains very unlikely because each inserted key tends to split the remaining range in a roughly balanced way. Question 4 [time estimate: 7 minutes]Produce a plot showing the scaling behavior that you saw of both the average depth and the maximum depth as a function of the length of the shuffled list. For a meaningful figure, be sure to scale out to a list of size 10,000 and average the timings 50 times. (You don't have to sample each value from 1 to 10,000!). Is this plot in agreement with the theoretical result you obtained in the previous question? Explain.
###Code
import numpy as np
import random
import matplotlib.pyplot as plt

Ns = np.arange(1, 101)          # tree sizes to test
max_depths = []
avg_depths = []
for n in Ns:
    X = list(range(n))
    random.shuffle(X)           # shuffles in place; random.shuffle returns None
    bst = None                  # root of the BST built from the shuffled list
    for node in [Node(v) for v in X]:
        if not bst:
            bst = node
        else:
            bst = bst.insert(node)
    max_depths.append(max_depth(bst))
    avg_depths.append(avg_depth(bst))

plt.plot(Ns, max_depths, color='red', label='max depth', linewidth=1.0)
plt.plot(Ns, avg_depths, color='blue', label='average depth', linewidth=1.0)
plt.xlabel('$N$', fontsize=10)
plt.ylabel('depth', fontsize=10)
plt.legend()
plt.show()
###Output
_____no_output_____ |
Code/utils/logging_utils.ipynb | ###Markdown
|等級 | 等級數值 | 輸出函數 | 說明 ||----------|----------|--------------------|:--------:||NOTSET |0 |無對應的輸出函數 |未設定 ||DEBUG |10 |logging.debug() |除錯 ||INFO |20 |logging.info() |訊息 ||WARNING |30 |logging.warning() |警告 ||ERROR |40 |logging.error() |錯誤 ||CRITICAL |50 |logging.critical() |嚴重錯誤 | ***This module(Logging) will write a log in model.log (which is a log file). When model.log exceeded the capacity limit which is module variable, model.log will rename to model.log.1 and so on. Eventually, if we set backupCount=3 than we will get model.log, model.log.1, and model.log.2. They record the newest to the oldest log.*****Rotating File Handler (backupCount=3):**If the log file exceeds the capacity limit, pass the path to another stage. new record 📃 ↓ (write in) ↓ model.log → → model.log.1 (rename) ↓ ↓ (rename) model.log.3 ← ← model.log.2 ↓ (rename) (drop) ↓ 🗑️
###Code
import logging
import logging.handlers
import os

# loglevel: the minimum severity level that will be recorded
def _get_logger(logdir, logname, loglevel=logging.INFO):
fmt = '[%(asctime)s] %(levelname)s: %(message)s'
formatter = logging.Formatter(fmt)
handler = logging.handlers.RotatingFileHandler(
filename=os.path.join(logdir, logname),
        maxBytes=10*1024*1024, # keep each file under 10 MB; once exceeded, the log file is rotated into a backup,
        backupCount=10) # and a new log file is created for subsequent messages (keep at most 10 backups)
handler.setFormatter(formatter)
logger = logging.getLogger('')
logger.addHandler(handler)
logger.setLevel(loglevel)
return logger
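# Example usage (illustrative sketch; the directory and file name are assumptions):
#   logger = _get_logger(logdir=".", logname="model.log")
#   logger.info("training started")   # recorded (INFO >= the default loglevel)
#   logger.debug("skipped")           # not recorded at the default INFO level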
# convert notebook.ipynb to a .py file
!jupytext --to py logging_utils.ipynb
###Output
[jupytext] Reading logging_utils.ipynb in format ipynb
[jupytext] Writing logging_utils.py (destination file replaced)
|
Examples/regional_raster_average.ipynb | ###Markdown
Regional average of raster valuesHere an average of raster values is desired over the context of some region. A RegionMask object is created, which then allows raster data to be warped onto the RegionMask's characteristics (as in the RegionMask's resolution, extent, and spatial reference system). This create a numpy matrix, from which only the pixels which lie in the region can be easily extracted and operated on.*last tested: version 1.0.2*
###Code
from os.path import join
import geokit as gk
# (Here a shape file of Aachen, Germany will be used)
regionPath = join(gk._test_data_, "aachenShapefile.shp")
# Create a region mask from a region definition
rm = gk.RegionMask.fromVector(regionPath)
# Warp a raster file onto the RegionMask's context (Here, a simple elevation raster file is used)
rasterFile = join(gk._test_data_, "elevation.tif")
warpedMatrix = rm.warp(rasterFile)
# Compute the average of pixels the region mask
meanValue = warpedMatrix[ rm.mask ].mean()
print("Mean raster value:", meanValue)
###Output
Mean raster value: 307.869
|
LAB3/1_NB_Classifier_Whether.ipynb | ###Markdown
**Aim: Implement Naive Bayes classifier: Weather Example** Step 1: Import necessary libraries. We will use the preprocessing and Naive Bayes modules of sklearn
###Code
from sklearn import preprocessing
from sklearn.naive_bayes import GaussianNB, MultinomialNB
import pandas
import numpy as np
###Output
_____no_output_____
###Markdown
Step 2: Prepare the dataset. Create feature sets for weather and temperature, and the class label play.
###Code
weather = ['Sunny', 'Sunny', 'Overcast', 'Rainy', 'Rainy','Rainy', 'Overcast',
'Sunny', 'Sunny', 'Rainy', 'Sunny', 'Overcast', 'Overcast', 'Rainy']
temp = ['Hot','Hot','Hot','Mild','Cool','Cool','Cool','Mild',
'Cool','Mild','Mild','Mild','Hot','Mild']
play=['No','No','Yes','Yes','Yes','No','Yes','No','Yes',
'Yes','Yes','Yes','Yes','No']
###Output
_____no_output_____
###Markdown
Step 3: Digitize the data set using encoding
###Code
#creating labelEncoder
le = preprocessing.LabelEncoder()
# Converting string labels into numbers.
weather_encoded=le.fit_transform(weather)
print("Weather:" ,weather_encoded)
temp_encoded=le.fit_transform(temp)
label=le.fit_transform(play)
print("Temp:",temp_encoded)
print("Play:",label)
###Output
Temp: [1 1 1 2 0 0 0 2 0 2 2 2 1 2]
Play: [0 0 1 1 1 0 1 0 1 1 1 1 1 0]
###Markdown
Step 4: Merge different features to prepare dataset
###Code
#Combinig weather and temp into single listof tuples
features=tuple(zip(weather_encoded,temp_encoded))
print("Features:",features)
###Output
Features: ((2, 1), (2, 1), (0, 1), (1, 2), (1, 0), (1, 0), (0, 0), (2, 2), (2, 0), (1, 2), (2, 2), (0, 2), (0, 1), (1, 2))
###Markdown
Step 5: Train ’Naive Bayes Classifier’
###Code
#Create a Classifier
model=MultinomialNB()
# Train the model using the training sets
model.fit(features,label)
###Output
_____no_output_____
###Markdown
Step 6: Predict Output for new data
###Code
#Predict Output
predicted= model.predict([[0,2]]) # 0:Overcast, 2:Mild
print("Predicted Value:", predicted)
predicted= model.predict([[0,1]]) # 0:Overcast, 1:Hot
print("Predicted Value:", predicted)
predicted= model.predict([[2,2]]) # 2:Sunny, 2:Mild
print("Predicted Value:", predicted)
###Output
Predicted Value: [1]
###Markdown
Exercise:**Manually calculate output for the following cases and compare it with system’s output.**(1) Will you play if the temperature is 'Hot' and weather is 'overcast'?(2) Will you play if the temperature is 'Mild' and weather is 'Sunny'?
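As a worked sketch of the manual calculation (using plain categorical Naive Bayes counts from the 14 rows above; the `MultinomialNB` model fitted on label-encoded integers may score these differently): for (Overcast, Hot) the "Yes" score is P(Yes)·P(Overcast|Yes)·P(Hot|Yes) = (9/14)(4/9)(2/9) ≈ 0.063, while the "No" score is 0 because Overcast never occurs together with No, so the prediction is to play. For (Sunny, Mild) the "Yes" score is (9/14)(2/9)(4/9) ≈ 0.063 and the "No" score is (5/14)(3/5)(2/5) ≈ 0.086, so the higher score predicts not to play.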
###Code
# p(y=1/overcast,hot)=p(overcast,hot/y=1)*p(y=1)/p(overcast,hot)
class customNB:
def __init__(self,features,label):
self.features=np.array(features)
self.label=np.array(label)
def prior_prob(self,on_which):
tot=self.label.shape[0]
on_which_tot=np.sum(self.label==on_which)
return on_which_tot/tot
def conditional_prob(self,feature_col,feature_val,on_which):
new_features=self.features[self.label==on_which]
numerator=np.sum(new_features[:,feature_col]==feature_val)
return numerator/len(new_features)
def getVal(self,on_which,feature_col,feature_val):
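        # Returns the unnormalized posterior: P(label) * prod_i P(feature_i = value_i | label).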
prob=self.prior_prob(on_which)
for col,val in zip(feature_col,feature_val):
prob=prob*self.conditional_prob(col,val,on_which)
return prob
custom_model=customNB(features,label)
yes_prob,no_prob=custom_model.getVal(1,[0,1],[0,1]),custom_model.getVal(0,[0,1],[0,1])
print(yes_prob,no_prob)
predicted=model.predict([[0,1]])
print(predicted)
yes_prob,no_prob=custom_model.getVal(1,[0,1],[2,2]),custom_model.getVal(0,[0,1],[2,2])
print(yes_prob,no_prob)
predicted=model.predict([[2,2]])
print(predicted)
###Output
[1]
|
Clothing_Classifier.ipynb | ###Markdown
Fashion MNIST Clothing ClassificationThe Fashion-MNIST dataset is proposed as a more challenging replacement dataset for the MNIST dataset.It is a dataset comprised of 60,000 small square 28×28 pixel grayscale images of items of 10 types of clothing, such as shoes, t-shirts, dresses, and more. The mapping of all 0-9 integers to class labels is listed below.0: T-shirt/top1: Trouser2: Pullover3: Dress4: Coat5: Sandal6: Shirt7: Sneaker8: Bag9: Ankle bootIt is a more challenging classification problem than MNIST and top results are achieved by deep learning convolutional neural networks with a classification accuracy of about 90% to 95% on the hold out test dataset.The example below loads the Fashion-MNIST dataset using the Keras API and creates a plot of the first nine images in the training dataset.
###Code
# example of loading the fashion mnist dataset
from matplotlib import pyplot
from keras.datasets import fashion_mnist
# load dataset
(trainX, trainy), (testX, testy) = fashion_mnist.load_data()
# summarize loaded dataset
print('Train: X=%s, y=%s' % (trainX.shape, trainy.shape))
print('Test: X=%s, y=%s' % (testX.shape, testy.shape))
# plot first few images
for i in range(9):
# define subplot
pyplot.subplot(330 + 1 + i)
# plot raw pixel data
pyplot.imshow(trainX[i], cmap=pyplot.get_cmap('gray'))
# show the figure
pyplot.show()
###Output
/Users/Chanti/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
/Users/Chanti/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:455: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/Users/Chanti/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:456: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/Users/Chanti/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:457: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/Users/Chanti/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:458: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/Users/Chanti/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:459: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/Users/Chanti/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:462: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
###Markdown
Running the example loads the Fashion-MNIST train and test dataset and prints their shape.We can see that there are 60,000 examples in the training dataset and 10,000 in the test dataset and that images are indeed square with 28×28 pixels.A plot of the first nine images in the dataset is also created showing that indeed the images are grayscale photographs of items of clothing. Model Evaluation MethodologyThe Fashion MNIST dataset was developed as a response to the wide use of the MNIST dataset, that has been effectively “solved” given the use of modern convolutional neural networks.Fashion-MNIST was proposed to be a replacement for MNIST, and although it has not been solved, it is possible to routinely achieve error rates of 10% or less. Like MNIST, it can be a useful starting point for developing and practicing a methodology for solving image classification using convolutional neural networks.Instead of reviewing the literature on well-performing models on the dataset, we can develop a new model from scratch.The dataset already has a well-defined train and test dataset that we can use.In order to estimate the performance of a model for a given training run, we can further split the training set into a train and validation dataset. Performance on the train and validation dataset over each run can then be plotted to provide learning curves and insight into how well a model is learning the problem.The Keras API supports this by specifying the “validation_data” argument to the model.fit() function when training the model, that will, in turn, return an object that describes model performance for the chosen loss and metrics on each training epoch.In order to estimate the performance of a model on the problem in general, we can use k-fold cross-validation, perhaps 5-fold cross-validation. This will give some account of the model’s variance with both respect to differences in the training and test datasets and the stochastic nature of the learning algorithm. The performance of a model can be taken as the mean performance across k-folds, given with the standard deviation, that could be used to estimate a confidence interval if desired.We can use the KFold class from the scikit-learn API to implement the k-fold cross-validation evaluation of a given neural network model. There are many ways to achieve this, although we can choose a flexible approach where the KFold is only used to specify the row indexes used for each split.
###Code
# load dataset
(trainX, trainY), (testX, testY) = fashion_mnist.load_data()
# reshape dataset to have a single channel
trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
testX = testX.reshape((testX.shape[0], 28, 28, 1))
from keras.utils.np_utils import to_categorical
# one hot encode target values
trainY = to_categorical(trainY)
testY = to_categorical(testY)
# load train and test dataset
def load_dataset():
# load dataset
(trainX, trainY), (testX, testY) = fashion_mnist.load_data()
# reshape dataset to have a single channel
trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
testX = testX.reshape((testX.shape[0], 28, 28, 1))
# one hot encode target values
trainY = to_categorical(trainY)
testY = to_categorical(testY)
return trainX, trainY, testX, testY
###Output
_____no_output_____
###Markdown
Prepare Pixel DataWe know that the pixel values for each image in the dataset are unsigned integers in the range between black and white, or 0 and 255.We do not know the best way to scale the pixel values for modeling, but we know that some scaling will be required.A good starting point is to normalize the pixel values of grayscale images, e.g. rescale them to the range [0,1]. This involves first converting the data type from unsigned integers to floats, then dividing the pixel values by the maximum value.
###Code
# convert from integers to floats
train_norm = trainX.astype('float32')
test_norm = testX.astype('float32')
# normalize to range 0-1
train_norm = train_norm / 255.0
test_norm = test_norm / 255.0
# scale pixels
def prep_pixels(train, test):
# convert from integers to floats
train_norm = train.astype('float32')
test_norm = test.astype('float32')
# normalize to range 0-1
train_norm = train_norm / 255.0
test_norm = test_norm / 255.0
# return normalized images
return train_norm, test_norm
###Output
_____no_output_____
###Markdown
Define ModelNext, we need to define a baseline convolutional neural network model for the problem.The model has two main aspects: the feature extraction front end comprised of convolutional and pooling layers, and the classifier backend that will make a prediction.For the convolutional front-end, we can start with a single convolutional layer with a small filter size (3,3) and a modest number of filters (32) followed by a max pooling layer. The filter maps can then be flattened to provide features to the classifier.Given that the problem is a multi-class classification, we know that we will require an output layer with 10 nodes in order to predict the probability distribution of an image belonging to each of the 10 classes. This will also require the use of a softmax activation function. Between the feature extractor and the output layer, we can add a dense layer to interpret the features, in this case with 100 nodes.All layers will use the ReLU activation function and the He weight initialization scheme, both best practices.We will use a conservative configuration for the stochastic gradient descent optimizer with a learning rate of 0.01 and a momentum of 0.9. The categorical cross-entropy loss function will be optimized, suitable for multi-class classification, and we will monitor the classification accuracy metric, which is appropriate given we have the same number of examples in each of the 10 classes.The define_model() function below will define and return this model.
###Code
# define cnn model
def define_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))
# compile model
opt = SGD(lr=0.01, momentum=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
###Output
_____no_output_____
###Markdown
Evaluate ModelAfter the model is defined, we need to evaluate it.The model will be evaluated using 5-fold cross-validation. The value of k=5 was chosen to provide a baseline for both repeated evaluation and to not be too large as to require a long running time. Each test set will be 20% of the training dataset, or about 12,000 examples, close to the size of the actual test set for this problem.The training dataset is shuffled prior to being split and the sample shuffling is performed each time so that any model we evaluate will have the same train and test datasets in each fold, providing an apples-to-apples comparison.We will train the baseline model for a modest 10 training epochs with a default batch size of 32 examples. The test set for each fold will be used to evaluate the model both during each epoch of the training run, so we can later create learning curves, and at the end of the run, so we can estimate the performance of the model. As such, we will keep track of the resulting history from each run, as well as the classification accuracy of the fold.The evaluate_model() function below implements these behaviors, taking the training dataset as arguments and returning a list of accuracy scores and training histories that can be later summarized.
###Code
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
scores, histories = list(), list()
# prepare cross validation
kfold = KFold(n_folds, shuffle=True, random_state=1)
# enumerate splits
for train_ix, test_ix in kfold.split(dataX):
# define model
model = define_model()
# select rows for train and test
trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
# fit model
history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
# evaluate model
_, acc = model.evaluate(testX, testY, verbose=0)
print('> %.3f' % (acc * 100.0))
# append scores
scores.append(acc)
histories.append(history)
return scores, histories
###Output
_____no_output_____
###Markdown
Present ResultsOnce the model has been evaluated, we can present the results.There are two key aspects to present: the diagnostics of the learning behavior of the model during training and the estimation of the model performance. These can be implemented using separate functions.First, the diagnostics involve creating a line plot showing model performance on the train and test set during each fold of the k-fold cross-validation. These plots are valuable for getting an idea of whether a model is overfitting, underfitting, or has a good fit for the dataset.We will create a single figure with two subplots, one for loss and one for accuracy. Blue lines will indicate model performance on the training dataset and orange lines will indicate performance on the hold out test dataset. The summarize_diagnostics() function below creates and shows this plot given the collected training histories.
###Code
# plot diagnostic learning curves
def summarize_diagnostics(histories):
for i in range(len(histories)):
# plot loss
pyplot.subplot(211)
pyplot.title('Cross Entropy Loss')
pyplot.plot(histories[i].history['loss'], color='blue', label='train')
pyplot.plot(histories[i].history['val_loss'], color='orange', label='test')
# plot accuracy
pyplot.subplot(212)
pyplot.title('Classification Accuracy')
pyplot.plot(histories[i].history['accuracy'], color='blue', label='train')
pyplot.plot(histories[i].history['val_accuracy'], color='orange', label='test')
pyplot.show()
###Output
_____no_output_____
###Markdown
Next, the classification accuracy scores collected during each fold can be summarized by calculating the mean and standard deviation. This provides an estimate of the average expected performance of the model trained on this dataset, with an estimate of the average variance in the mean. We will also summarize the distribution of scores by creating and showing a box and whisker plot.The summarize_performance() function below implements this for a given list of scores collected during model evaluation.
###Code
# summarize model performance
def summarize_performance(scores):
# print summary
print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
# box and whisker plots of results
pyplot.boxplot(scores)
pyplot.show()
# run the test harness for evaluating a model
def run_test_harness():
# load dataset
trainX, trainY, testX, testY = load_dataset()
# prepare pixel data
trainX, testX = prep_pixels(trainX, testX)
# evaluate model
scores, histories = evaluate_model(trainX, trainY)
# learning curves
summarize_diagnostics(histories)
# summarize estimated performance
summarize_performance(scores)
###Output
_____no_output_____
###Markdown
Complete Example
###Code
# baseline cnn model for fashion mnist
from numpy import mean
from numpy import std
from matplotlib import pyplot
from sklearn.model_selection import KFold
from keras.datasets import fashion_mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD
# load train and test dataset
def load_dataset():
# load dataset
(trainX, trainY), (testX, testY) = fashion_mnist.load_data()
# reshape dataset to have a single channel
trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
testX = testX.reshape((testX.shape[0], 28, 28, 1))
# one hot encode target values
trainY = to_categorical(trainY)
testY = to_categorical(testY)
return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
# convert from integers to floats
train_norm = train.astype('float32')
test_norm = test.astype('float32')
# normalize to range 0-1
train_norm = train_norm / 255.0
test_norm = test_norm / 255.0
# return normalized images
return train_norm, test_norm
# define cnn model
def define_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))
# compile model
opt = SGD(lr=0.01, momentum=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
scores, histories = list(), list()
# prepare cross validation
kfold = KFold(n_folds, shuffle=True, random_state=1)
# enumerate splits
for train_ix, test_ix in kfold.split(dataX):
# define model
model = define_model()
# select rows for train and test
trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
# fit model
history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
# evaluate model
_, acc = model.evaluate(testX, testY, verbose=0)
print('> %.3f' % (acc * 100.0))
# append scores
scores.append(acc)
histories.append(history)
return scores, histories
# plot diagnostic learning curves
def summarize_diagnostics(histories):
for i in range(len(histories)):
# plot loss
pyplot.subplot(211)
pyplot.title('Cross Entropy Loss')
pyplot.plot(histories[i].history['loss'], color='blue', label='train')
pyplot.plot(histories[i].history['val_loss'], color='orange', label='test')
# plot accuracy
pyplot.subplot(212)
pyplot.title('Classification Accuracy')
pyplot.plot(histories[i].history['acc'], color='blue', label='train')
pyplot.plot(histories[i].history['val_acc'], color='orange', label='test')
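		# Note: newer tf.keras versions store these history keys as 'accuracy'/'val_accuracy' instead of 'acc'/'val_acc'.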
pyplot.show()
# summarize model performance
def summarize_performance(scores):
# print summary
print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
# box and whisker plots of results
pyplot.boxplot(scores)
pyplot.show()
# run the test harness for evaluating a model
def run_test_harness():
# load dataset
trainX, trainY, testX, testY = load_dataset()
# prepare pixel data
trainX, testX = prep_pixels(trainX, testX)
# evaluate model
scores, histories = evaluate_model(trainX, trainY)
# learning curves
summarize_diagnostics(histories)
# summarize estimated performance
summarize_performance(scores)
# entry point, run the test harness
run_test_harness()
###Output
> 91.192
> 90.825
> 91.442
> 91.358
> 90.742
|
MintNFT.ipynb | ###Markdown
Requirements Python
###Code
import sys
python_version = (
f"{sys.version_info.major}."
f"{sys.version_info.minor}."
f"{sys.version_info.micro}"
)
print(f"Python version {python_version}")
###Output
Python version 3.8.12
###Markdown
Choose any following methods to install dependcies* poetry (recommend) `poetry install` * pip `pip install -r requirements.txt` IPFS CLIFollow this URL https://docs.ipfs.io/install/command-line/official-distributions Import modules
###Code
import brownie
from brownie import accounts, project, config, network
# Compile smart contracts
p = project.load('.', name="pynft")
p.load_config()
###Output
_____no_output_____
###Markdown
Connect to chain
###Code
import os
print("Available networks")
print(os.popen("brownie networks list").read())
###Output
Available networks
Brownie v1.17.0 - Python development framework for Ethereum
The following networks are declared:
Ethereum
[0;1;30m ├─[0;mMainnet (Infura): [0;32mmainnet[0;m
[0;1;30m ├─[0;mRopsten (Infura): [0;32mropsten[0;m
[0;1;30m ├─[0;mRinkeby (Infura): [0;32mrinkeby[0;m
[0;1;30m ├─[0;mGoerli (Infura): [0;32mgoerli[0;m
[0;1;30m ├─[0;mKovan (Infura): [0;32mkovan[0;m
[0;1;30m ├─[0;mmainnet-fork-2: [0;32mmainnet-fork-2[0;m
[0;1;30m ├─[0;mmainnet-fork-3: [0;32mmainnet-fork-3[0;m
[0;1;30m ├─[0;mmainnet-fork-4: [0;32mmainnet-fork-4[0;m
[0;1;30m └─[0;mmainnet-fork-5: [0;32mmainnet-fork-5[0;m
Ethereum Classic
[0;1;30m ├─[0;mMainnet: [0;32metc[0;m
[0;1;30m └─[0;mKotti: [0;32mkotti[0;m
Arbitrum
[0;1;30m └─[0;mMainnet: [0;32marbitrum-main[0;m
Binance Smart Chain
[0;1;30m ├─[0;mTestnet: [0;32mbsc-test[0;m
[0;1;30m └─[0;mMainnet: [0;32mbsc-main[0;m
Fantom Opera
[0;1;30m ├─[0;mTestnet: [0;32mftm-test[0;m
[0;1;30m └─[0;mMainnet: [0;32mftm-main[0;m
Harmony
[0;1;30m └─[0;mMainnet (Shard 0): [0;32mharmony-main[0;m
Polygon
[0;1;30m ├─[0;mMainnet (Infura): [0;32mpolygon-main[0;m
[0;1;30m └─[0;mMumbai Testnet (Infura): [0;32mpolygon-test[0;m
XDai
[0;1;30m ├─[0;mMainnet: [0;32mxdai-main[0;m
[0;1;30m └─[0;mTestnet: [0;32mxdai-test[0;m
Eherium
[0;1;30m └─[0;mganache-local: [0;32mganache-local[0;m
Etherium
[0;1;30m └─[0;mganache-local-2: [0;32mganache-local-2[0;m
cronos
[0;1;30m └─[0;mcronos-testnet: [0;32mcronos-testnet[0;m
Development
[0;1;30m ├─[0;mGanache-CLI: [0;32mdevelopment[0;m
[0;1;30m ├─[0;mGeth Dev: [0;32mgeth-dev[0;m
[0;1;30m ├─[0;mHardhat: [0;32mhardhat[0;m
[0;1;30m ├─[0;mHardhat (Mainnet Fork): [0;32mhardhat-fork[0;m
[0;1;30m ├─[0;mGanache-CLI (Mainnet Fork): [0;32mmainnet-fork[0;m
[0;1;30m ├─[0;mGanache-CLI (BSC-Mainnet Fork): [0;32mbsc-main-fork[0;m
[0;1;30m ├─[0;mGanache-CLI (FTM-Mainnet Fork): [0;32mftm-main-fork[0;m
[0;1;30m ├─[0;mGanache-CLI (Polygon-Mainnet Fork): [0;32mpolygon-main-fork[0;m
[0;1;30m ├─[0;mGanache-CLI (XDai-Mainnet Fork): [0;32mxdai-main-fork[0;m
[0;1;30m ├─[0;mlocal: [0;32mlocal[0;m
[0;1;30m ├─[0;mmainnet-fork-dev: [0;32mmainnet-fork-dev[0;m
[0;1;30m └─[0;mcronos-mainnet-fork: [0;32mcronos-mainnet-fork[0;m
###Markdown
Connect
###Code
NETWORK = "rinkeby"
# network.connect(NETWORK) # Main net fork
# network.connect(NETWORK) # Main net
network.connect(NETWORK) # Test net rinkeby
###Output
_____no_output_____
###Markdown
Account
###Code
!brownie accounts list
if NETWORK == "mainnet" and False:
from scripts.helpful_script import get_account
account = get_account()
else:
from brownie import accounts
accounts.load("eth-main") # Main net
account = accounts[0]
print(account)
###Output
0xf6EfbD8142A18E741360b41301eDFdbD2719D03C
###Markdown
Dump variables
###Code
for key, value in p.__dict__.items():
print(key)
brownie.__dict__[key] = p.__dict__[key]
###Output
_path
_envvars
_structure
_build_path
_name
_active
_sources
_build
_compiler_config
interface
_containers
PYNFT
ERC721
Strings
__all__
_namespaces
###Markdown
Deploy contracts
###Code
from brownie import PYNFT
pynft = PYNFT.deploy({"from": account})
###Output
Transaction sent: [0;1;34m0xa2a16103b7ce13a0015421ea2cfdbb3319f6617d16e65c6175a50ea57a09bfab[0;m
Gas price: [0;1;34m1.705944452[0;m gwei Gas limit: [0;1;34m1419476[0;m Nonce: [0;1;34m29[0;m
PYNFT.constructor confirmed Block: [0;1;34m10057857[0;m Gas used: [0;1;34m1290433[0;m ([0;1;34m90.91%[0;m)
PYNFT deployed at: [0;1;34m0xeee906fAE06C0456b57A11cBd5Cec4628fBDAAf2[0;m
###Markdown
Upload file to IPFS Assets
###Code
prefix = "assets/"
def upload_to_ipfs(filepath: str) -> str:
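    # Shells out to the IPFS CLI; `ipfs add -q` prints only the content hash (CID) of the added file.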
return os.popen(f"ipfs add -q {filepath}").read().strip()
def is_an_image(filename: str):
    """
    Description:
        Check if a file is an image
    params:
        filename:
            (str) filename, should be with its extension
    returns:
        (bool) True if the filename has a recognized image extension
    """
    for extension in [".png", ".jpg"]:
        if filename.endswith(extension):
            return True
    return False
def set_ipfs_share_link(prefix: str, asset: str) -> str:
"""
description:
Run IPFS upload process and return share link URL
params:
prefix:
(str) Local prefix file
asset:
(str) Filename
returns:
(str) share link url
"""
IPFS_PREFIX = "https://ipfs.io/ipfs"
qid = upload_to_ipfs(f"{prefix}{asset}".replace(" ", "\ "))
asset = asset.replace(" ", "-")
return f"{IPFS_PREFIX}/{qid}?filename={asset}"
class Attribute:
pass
attribute = Attribute()
for asset in os.listdir(prefix):
if asset.startswith(".") or asset == "metadata.json":
# Skip
continue
ipfs_share_link = set_ipfs_share_link(prefix, asset)
if is_an_image(asset):
setattr(attribute, "image_url", ipfs_share_link)
else:
setattr(attribute, "external_url", ipfs_share_link)
print(ipfs_share_link)
###Output
https://ipfs.io/ipfs/Qmda8x41dArU36LKveHzSN3NPJjWxFgyeNNLM8uUMqtMNr?filename=my-first-python-code.png
https://ipfs.io/ipfs/QmUoGY6GbvzSQi2gP7YHtVMRocNW1Xs16ZUHxWcTgWHwJb?filename=fruit-list.py
###Markdown
Set up metadata Render metadata from template
###Code
metadata = {
"description": """It was in 2015, I was an engineer intern.
I was assigned to code in Python for satellite signal processing.
At that time, I had no experience with it, but I had completed C#, C and Matlab courses.
It inspired me to transfer from electrical to software engineer career path.
Now, I partially succeed in software engineering, machine learning and A.I implementation.
This NFT determines where I started.
Moreover, I minting this NFT with my Solidity code I created by myself.
No, it's not my first Solidity project :D
But it's my first Solidity project deployed in `production`.
Which means this is the starting point for my smart contract development.
Smart contract repository is located in https://github.com/batprem/pynft
""",
"external_url": attribute.external_url,
"image": attribute.image_url,
"name": "My first Python code",
"attributes": [],
}
###Output
_____no_output_____
###Markdown
Save metadata into JSON
###Code
import json
metadata_filename = "metadata.json"
with open(f"assets/{metadata_filename}", "w") as json_file:
json.dump(metadata, json_file)
metadata_share_link = set_ipfs_share_link(prefix, metadata_filename)
###Output
_____no_output_____
###Markdown
Assign to smart contract Mint
###Code
PYNFT[-1].createCollectable({"from": account}).wait(1)
###Output
Transaction sent: [0;1;34m0xce49cf20c4a9a7b70408ef8ccd9449ef6001c7a300f679af65d29cee4fe851f2[0;m
Gas price: [0;1;34m1.721482323[0;m gwei Gas limit: [0;1;34m102072[0;m Nonce: [0;1;34m30[0;m
PYNFT.createCollectable confirmed Block: [0;1;34m10057863[0;m Gas used: [0;1;34m92793[0;m ([0;1;34m90.91%[0;m)
PYNFT.createCollectable confirmed Block: [0;1;34m10057863[0;m Gas used: [0;1;34m92793[0;m ([0;1;34m90.91%[0;m)
###Markdown
Set URI
###Code
PYNFT[-1].setBaseURI(metadata_share_link, {"from": account}).wait(1)
PYNFT[-1].tokenURI(0)
PYNFT[-1].tokenCounter()
###Output
_____no_output_____ |
jupyter/TPS_tanks.ipynb | ###Markdown
Inputs:
###Code
P_tanks = 1.379e+6 # Pressure in tanks, Pascals (200 PSI)
# Pressure_Tanks = 101325*2; # Pascals (2ATM)
D_tanks = in_to_m(10) # Diameter of tanks, meters (12 inches)
T_cryo = 90.15 # Kevlin *CHANGE ME* ACCORDING TO WHICH CRYOGRENIC FUEL YOU WANT TO EXAMINE
T3 = 270 # Kelvin * CHANGE ME* ACCORDING TO VEHICLE SIZING
###Output
_____no_output_____
###Markdown
Constants:
###Code
simgay_al = 324e+6 # Tensile_Strength_Yield_Al_6061T, Pascals (~47,000 PSI) @ 77.15 K -196 Celsius
# Tensile Chosen because structure will be in tension.
# http://www.matweb.com/search/datasheet_print.aspx?matguid=1b8c06d0ca7c456694c7777d9e10be5b
K_CFRP = 7.0 # CFRP Thermal Conductivity, Watts/Meter Kelvin
K_PU = 0.025 # Polyurethane_Thermal_Conductivity, Watts/Meter Kelvin
T_ambient = 299.15 # Kelvin
H = 35 # Convective Heat Transfer Coefficient, Watts/SQR Meter Kelvin
FS = 1.5 # Safety Factor
###Output
_____no_output_____
###Markdown
Calculations:
###Code
R1 = D_tanks /2
t = Symbol('t')
R = Symbol('R')
t_al = solve((P_tanks*D_tanks)/(4*t) - simgay_al, t) # thickness of aluminum, meters
t_al = float(t_al[0]) # convert to floating point number
t_al = 0.00635
R2 = R1 + 1.5 * t_al # Meters
T2 = T_cryo # Kelvin Assumption: WORST CASE
L = 1.0
R_soln = solve(2*np.pi*R*L*H*(T_ambient-T3) - ((2*pi*L)*K_PU*(T3-T2)/log(R/R2)), R)
print('Thickness Aluminum:', m_to_in(t_al), 'in')
print('Radius3:', m_to_in(R_soln[0]), 'in')
print('Thickness of Polyurethane:', m_to_in(R_soln[0]-R2), 'in')
###Output
Thickness Aluminum: 0.250000135 in
Radius3: 5.54581854038781 in
Thickness of Polyurethane: 0.170818337887813 in
|
machine_learning/lecture/week_1/ii_linear_regression_with_one_variable_week_1/.ipynb_checkpoints/abtesting-overview-udacity-checkpoint.ipynb | ###Markdown
A/B Testing is the test that we want to test for particular product. Usually A/B testing works for testing changes in elements in the web page. A/B testing framework is following sequence:* Design a research question.* Choose test statistics method or metrics to evaluate experiment.* Designing the control group and experiment group.* Analyzing results, and draw valid conclusions.A/B testing is used to validate whether the changes that we have applied in our product is significantly affected our users instead of relying solely on the expert opinion. *Screenshot taken from [Udacity](https://www.udacity.com/course/viewer!/c-ud257/l-4018018619/m-40043986740) 0:03* When to use A/B testing  A/B Testing can be used to make a convergence to global minimum, but not useful for comparing two global minimum. It also not particulary useful to make overall testing.Consider example above:1. This example can't be used for A/B testing. It tries to answer vague question, and it's too general. We don't which specific metric/method to use to answer this. Hence the only way we can do is to test for specific product.2. It's not design to test premium service of your site. Suppose that you have a bunch of premium features and decide to do some A/B testing and divide control and experiment group. This group would not going to be roughly equal since there will be users that opt for premium, or not. And it will not affect overall users. So we could only gather knowledge, but not for full blown test.3. The third one is where A/B testing can be shine. It will affect all users, so can divide them to both groups, and we have clear metrics to test the algorithm, for example by ranking.4. The fourth one is also where A/B testing can be useful. We have all set of product that we want to test, has some metrics that we can test, as long we have both computing power. Let's take another examples *Screenshot taken from [Udacity](https://www.udacity.com/course/viewer!/c-ud257/l-4018018619/e-4004398678/m-4004398680) 0:51* In the first part, we know that things like cars can be sold for a very long time. A/B testing only collect data that occurs in small window (at least for us to analyze). People comes to the website maybe 6 months to 1 year in the future to buy. And it maybe not by the website, but by other referrals. We couldn't wait that long for testing purposes, and the data won't be enough. The second part, also relates to the first reason. Updating company's logo, will take time until customer reacts. So it won't be good for A/B testing. The final part, we have clear control and experiment groups, and also clear metrics. So A/B testing does useful for this situation. A/B testing is used as a general method online to test features, decide audience control and experiment set, and which is better. A/B testing is used to find the global maximum significant of one changes, between control group and experiment group. It doesn't too useful however to compare two changes that already at its best. Amazon personal recommendation can be increased with A/B testing, one hundres of millisecond delay at page view can be tested by A/B testing, as it always decrease 1% revenue.A/B testing is really tricky to measure, as we need good metric to analyse. A/B testing can tell different about one changes, but not for overall changes. That's why when we usually have one big experiment, we have multiple A/B testing across multiple experiments. 
Other techniquesWell then if we can't use A/B testing, what other techniques can be used to test changes in our product? A/B testing can be used to observe users log, make it as observational studies when the hypothesis changes, use behaviour randomized restrospective analysis. Restropective analysis give you small, but deep qualitative data, but A/B testing give you form of quantitative data.The difference for quantitative vs qualitative is also applied when you do online vs traditional experiment. In traditional experiment, you know each of the people in your group. Not only their health status if you test new medicine for example, but also their habbit, their occupation, their family, their hobby. You know them deeply by interacting with them. But in an online experiment, all of this are gone. You only new time and possibily some ip and user agent, but that's it. You can get millions of users in an online testing, but not so deeply as small qualitative group. History of A/B testingThere was no official record stated about the origin of A/B testing. But it was long applied in the aggriculture field, where farmers divide section and apply various techniques and observe which is better for distinct crop. *Screenshot taken from [Udacity](https://www.udacity.com/course/viewer!/c-ud257/l-4018018619/m-4004398683) 1:56* We can take a web company, called Udacity for example, that want to apply changes like in the experiment in the image above stated. Usually in every digital web company, they have some funnel analysis. That is the number of users, from homepage visits, to gain actual conversion, that is final act that we actually care about. Either it's creating an account, or maybe complete a purchase. This funnel describe how number of users can be decreased down as we move to deeper layer. The idea is, if we apply this change in homepage visits, it should be increase more users to one deeper layer, 'Exploring the site'. If it's not giving significance increase, we don't want to launch it. And we absolutely don't want to launch it if the change even decrease the number of users to the next layer. Ideally if we can increase number of users to one deeper layer, then it would also have an increase to even deeper layer, that got actual conversion. *Screenshot taken from [Udacity](https://www.udacity.com/course/viewer!/c-ud257/l-4018018619/m-4004398684) 2:19* After we have design a research question, like in the previous paragraph, we want to have test statistics that fit in our experiment. One popular alternative is CTR, click-through-rate that measures number of clicks for each particular users. But for our particular case, CTR not the best case scenario. What we actually want is CTP, click-through-probability. CTR won't give you number of unique users, it instead give number of clicks. If for example we have two person, as described above, with first person, never click, and second person have 5 clicks(probably because lagging and he's rapid clicking the button). CTR will give you clicks/person = 5/2 = 2.5 CTP will give you atleastone/person = 0.5 So you see in our case, CTP is the right decision to choose, and we revise our Hypothesis with CTP as test statistics. So when do we use CTR, and when do we use CTP? CTR usually measure the visibility (button in our example), while CTP measures the impact. CTP will avoid us the problem when users click more than once. CTP can be acquired when working with engineers, to capture number of click every change, and get CTR with at least one click. 
*Screenshot taken from [Udacity](https://www.udacity.com/course/viewer!/c-ud257/l-4018018619/e-4004398686/m-4004398688) 0:22* With a large number of samples (1000), 150 is even further away once you account for the standard error. Click-through probability has only two possible outcomes per trial, and our metric is a probability, which means we want to use the binomial distribution, with success being a click and failure being no click.If you want to refresh your binomial distribution, check out my other [blog](http://napitupulu-jon.appspot.com/posts/distribution-coursera-statistics-ud827.htmlBinomial-Distribution). *Screenshot taken from [Udacity](https://www.udacity.com/course/viewer!/c-ud257/l-4018018619/e-4004398690/m-4004398692) 1:32* 1. Dependent trials.2. Independent trials with two mutually exclusive outcomes.3. Same user, same search result, so the trials are not independent.4. Could be binomial, with one event being one user completing one course; a user can't finish the same course twice or more.5. One user can make multiple purchases. Since each trial has only two possible outcomes, the raw outcome distribution has just two spikes: click and no click. When we talk about the probability, especially under the law of large numbers, the probability becomes a proportion: p for success (click) and (1 - p) for failure (no click). For the binomial to be well approximated by the normal distribution, the sample should contain at least 5 clicks and at least 5 non-clicks. If the normal approximation holds, then for a 95% confidence level the margin of error is ME = 1.96 x standard error (a 99% level uses z* of about 2.58, as computed in the code below).If you need to refresh confidence intervals for a proportion, check out my other [blog](http://napitupulu-jon.appspot.com/posts/ht-ci-categorical-coursera-statistics.htmlConfidence-Interval). So for example, if the sample size is 2000 and the number of clicks is 300, the 99% confidence interval gives us....
###Code
%load_ext rpy2.ipython
%%R
c = 300                  # number of clicks
n = 2000                 # sample size
pe = c / n               # point estimate of the click-through probability
CL = 0.99                # confidence level
SE = sqrt(pe*(1-pe)/n)   # standard error of a proportion
z_star = round(qnorm((1-CL)/2, lower.tail=F), digits=2)  # critical value (about 2.58 for 99%)
ME = z_star * SE         # margin of error
c(pe-ME, pe+ME)          # confidence interval
###Output
_____no_output_____ |
assignment2/BatchNormalization_2017.ipynb | ###Markdown
Batch NormalizationOne way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].The idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.The authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.It is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.[3] Sergey Ioffe and Christian Szegedy, "Batch Normalization: Accelerating Deep Network Training by ReducingInternal Covariate Shift", ICML 2015.
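As a rough illustration of the training-time behavior described above (not the assignment's required implementation, which also maintains running averages and a cache for the backward pass), a minimal NumPy sketch might look like this:

```python
import numpy as np

def batchnorm_forward_sketch(x, gamma, beta, eps=1e-5):
    """Hypothetical simplified forward pass: x is (N, D); gamma, beta are (D,)."""
    mu = x.mean(axis=0)                     # per-feature minibatch mean
    var = x.var(axis=0)                     # per-feature minibatch variance
    x_hat = (x - mu) / np.sqrt(var + eps)   # zero-mean, unit-variance features
    return gamma * x_hat + beta             # learnable scale and shift
```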
###Code
# As usual, a bit of setup
from __future__ import print_function
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.items():
print('%s: ' % k, v.shape)
###Output
X_train: (49000, 3, 32, 32)
y_train: (49000,)
X_val: (1000, 3, 32, 32)
y_val: (1000,)
X_test: (1000, 3, 32, 32)
y_test: (1000,)
###Markdown
Batch normalization: ForwardIn the file `cs231n/layers.py`, implement the batch normalization forward pass in the function `batchnorm_forward`. Once you have done so, run the following to test your implementation.
###Code
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
np.random.seed(231)
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print('Before batch normalization:')
print(' means: ', a.mean(axis=0))
print(' stds: ', a.std(axis=0))
# Means should be close to zero and stds close to one
print('After batch normalization (gamma=1, beta=0)')
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print(' mean: ', a_norm.mean(axis=0))
print(' std: ', a_norm.std(axis=0))
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print('After batch normalization (nontrivial gamma, beta)')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
np.random.seed(231)
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
for t in range(50):
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
###Output
After batch normalization (test-time):
means: [-0.03927354 -0.04349152 -0.10452688]
stds: [1.01531428 1.01238373 0.97819988]
###Markdown
Batch Normalization: backwardNow implement the backward pass for batch normalization in the function `batchnorm_backward`.To derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.Once you have finished, run the following to numerically check your backward pass.
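For reference, one staged backward pass consistent with the computation-graph approach described above is sketched below (hypothetical signature; the assignment version reads these intermediates from the cache instead of recomputing them from `x`):

```python
import numpy as np

def batchnorm_backward_sketch(dout, x, gamma, eps=1e-5):
    N = x.shape[0]
    mu = x.mean(axis=0)
    var = x.var(axis=0)
    std_inv = 1.0 / np.sqrt(var + eps)
    x_hat = (x - mu) * std_inv

    dgamma = np.sum(dout * x_hat, axis=0)
    dbeta = np.sum(dout, axis=0)

    # Backprop through the normalization; gradients that reach the mean and
    # variance nodes from multiple branches are summed.
    dx_hat = dout * gamma
    dvar = np.sum(dx_hat * (x - mu) * -0.5 * std_inv**3, axis=0)
    dmu = np.sum(-dx_hat * std_inv, axis=0) + dvar * np.mean(-2.0 * (x - mu), axis=0)
    dx = dx_hat * std_inv + dvar * 2.0 * (x - mu) / N + dmu / N
    return dx, dgamma, dbeta
```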
###Code
# Gradient check batchnorm backward pass
np.random.seed(231)
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, a, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma.copy(), dout)
db_num = eval_numerical_gradient_array(fb, beta.copy(), dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
###Output
dx error: 1.7029241291468676e-09
dgamma error: 7.420414216247087e-13
dbeta error: 2.8795057655839487e-12
###Markdown
Batch Normalization: alternative backward (OPTIONAL, +3 points extra credit)In class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.Surprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function `batchnorm_backward_alt` and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.NOTE: This part of the assignment is entirely optional, but we will reward 3 points of extra credit if you can complete it.
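If the staged gradients are collapsed on paper, they reduce to a single expression; a hedged sketch of the simplified form (assuming `x_hat` and `std_inv` were cached by the forward pass) is:

```python
import numpy as np

def batchnorm_backward_alt_sketch(dout, x_hat, gamma, std_inv):
    N = dout.shape[0]
    dgamma = np.sum(dout * x_hat, axis=0)
    dbeta = np.sum(dout, axis=0)
    dx = (gamma * std_inv / N) * (
        N * dout - np.sum(dout, axis=0) - x_hat * np.sum(dout * x_hat, axis=0)
    )
    return dx, dgamma, dbeta
```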
###Code
np.random.seed(231)
N, D = 500, 5000
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
print('dx difference: ', rel_error(dx1, dx2))
print('dgamma difference: ', rel_error(dgamma1, dgamma2))
print('dbeta difference: ', rel_error(dbeta1, dbeta2))
print('speedup: %.2fx' % ((t2 - t1) / (t3 - t2)))
###Output
dx difference: 1.6485297612864814e-09
dgamma difference: 0.0
dbeta difference: 0.0
speedup: 2.51x
###Markdown
Fully Connected Nets with Batch NormalizationNow that you have a working implementation for batch normalization, go back to your `FullyConnectedNet` in the file `cs231n/classifiers/fc_net.py`. Modify your implementation to add batch normalization. Concretely, when the flag `use_batchnorm` is `True` in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation. HINT: You might find it useful to define an additional helper layer similar to those in the file `cs231n/layer_utils.py`. If you decide to do so, do it in the file `cs231n/classifiers/fc_net.py`.
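One possible shape for the helper layer hinted at above (a sketch only, assuming the `affine_*`, `relu_*`, and `batchnorm_*` functions from `cs231n/layers.py` and the conventions of `cs231n/layer_utils.py`):

```python
from cs231n.layers import (affine_forward, affine_backward, relu_forward,
                           relu_backward, batchnorm_forward, batchnorm_backward)

def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param):
    # affine -> batch norm -> ReLU, with a combined cache for the backward pass
    a, fc_cache = affine_forward(x, w, b)
    a_bn, bn_cache = batchnorm_forward(a, gamma, beta, bn_param)
    out, relu_cache = relu_forward(a_bn)
    return out, (fc_cache, bn_cache, relu_cache)

def affine_bn_relu_backward(dout, cache):
    fc_cache, bn_cache, relu_cache = cache
    da_bn = relu_backward(dout, relu_cache)
    da, dgamma, dbeta = batchnorm_backward(da_bn, bn_cache)
    dx, dw, db = affine_backward(da, fc_cache)
    return dx, dw, db, dgamma, dbeta
```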
###Code
np.random.seed(231)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print('Running check with reg = ', reg)
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64,
use_batchnorm=True)
loss, grads = model.loss(X, y)
print('Initial loss: ', loss)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
if reg == 0: print()
###Output
Running check with reg = 0
Initial loss: 2.2611955101340957
W1 relative error: 1.10e-04
W2 relative error: 2.85e-06
W3 relative error: 4.05e-10
b1 relative error: 2.22e-07
b2 relative error: 2.31e-08
b3 relative error: 1.01e-10
beta1 relative error: 7.33e-09
beta2 relative error: 1.89e-09
gamma1 relative error: 6.96e-09
gamma2 relative error: 1.96e-09
Running check with reg = 3.14
Initial loss: 6.996533220108303
W1 relative error: 1.98e-06
W2 relative error: 2.29e-06
W3 relative error: 2.79e-08
b1 relative error: 1.07e-08
b2 relative error: 7.99e-07
b3 relative error: 2.10e-10
beta1 relative error: 6.65e-09
beta2 relative error: 4.23e-09
gamma1 relative error: 6.27e-09
gamma2 relative error: 5.28e-09
###Markdown
Batchnorm for deep networksRun the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.
###Code
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
solver.train()
###Output
(Iteration 1 / 200) loss: 2.340975
(Epoch 0 / 10) train acc: 0.107000; val_acc: 0.115000
(Epoch 1 / 10) train acc: 0.314000; val_acc: 0.266000
(Epoch 2 / 10) train acc: 0.385000; val_acc: 0.279000
(Epoch 3 / 10) train acc: 0.494000; val_acc: 0.308000
(Epoch 4 / 10) train acc: 0.531000; val_acc: 0.307000
(Epoch 5 / 10) train acc: 0.574000; val_acc: 0.313000
(Epoch 6 / 10) train acc: 0.634000; val_acc: 0.338000
(Epoch 7 / 10) train acc: 0.689000; val_acc: 0.325000
(Epoch 8 / 10) train acc: 0.770000; val_acc: 0.331000
(Epoch 9 / 10) train acc: 0.804000; val_acc: 0.335000
(Epoch 10 / 10) train acc: 0.769000; val_acc: 0.322000
(Iteration 1 / 200) loss: 2.302332
(Epoch 0 / 10) train acc: 0.129000; val_acc: 0.131000
(Epoch 1 / 10) train acc: 0.283000; val_acc: 0.250000
(Epoch 2 / 10) train acc: 0.316000; val_acc: 0.277000
(Epoch 3 / 10) train acc: 0.373000; val_acc: 0.282000
(Epoch 4 / 10) train acc: 0.390000; val_acc: 0.310000
(Epoch 5 / 10) train acc: 0.434000; val_acc: 0.300000
(Epoch 6 / 10) train acc: 0.535000; val_acc: 0.345000
(Epoch 7 / 10) train acc: 0.530000; val_acc: 0.304000
(Epoch 8 / 10) train acc: 0.628000; val_acc: 0.339000
(Epoch 9 / 10) train acc: 0.661000; val_acc: 0.340000
(Epoch 10 / 10) train acc: 0.726000; val_acc: 0.318000
###Markdown
Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
###Code
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
###Output
/home/maxis/anaconda3/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
warnings.warn(message, mplDeprecation, stacklevel=1)
###Markdown
Batch normalization and initializationWe will now run a small experiment to study the interaction of batch normalization and weight initialization.The first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second cell will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.
###Code
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gca().set_ylim(1.0, 3.5)
plt.gcf().set_size_inches(10, 15)
plt.show()
###Output
_____no_output_____ |
module2-regression-2/Marilyn_L_E_Assignment_Regression_Classification_2.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2 AssignmentYou'll continue to **predict how much it costs to rent an apartment in NYC,** using the dataset from renthop.com.- [ ] Do train/test split. Use data from April & May 2016 to train. Use data from June 2016 to test.- [ ] Engineer at least two new features. (See below for explanation & ideas.)- [ ] Fit a linear regression model with at least two features.- [ ] Get the model's coefficients and intercept.- [ ] Get regression metrics RMSE, MAE, and $R^2$, for both the train and test data.- [ ] What's the best test MAE you can get? Share your score and features used with your cohort on Slack!- [ ] As always, commit your notebook to your fork of the GitHub repo. [Feature Engineering](https://en.wikipedia.org/wiki/Feature_engineering)> "Some machine learning projects succeed and some fail. What makes the difference? Easily the most important factor is the features used." — Pedro Domingos, ["A Few Useful Things to Know about Machine Learning"](https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf)> "Coming up with features is difficult, time-consuming, requires expert knowledge. 'Applied machine learning' is basically feature engineering." — Andrew Ng, [Machine Learning and AI via Brain simulations](https://forum.stanford.edu/events/2011/2011slides/plenary/2011plenaryNg.pdf) > Feature engineering is the process of using domain knowledge of the data to create features that make machine learning algorithms work. Feature Ideas- Does the apartment have a description?- How long is the description?- How many total perks does each apartment have?- Are cats _or_ dogs allowed?- Are cats _and_ dogs allowed?- Total number of rooms (beds + baths)- Ratio of beds to baths- What's the neighborhood, based on address or latitude & longitude? Stretch Goals- [ ] If you want more math, skim [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 3.1, Simple Linear Regression, & Chapter 3.2, Multiple Linear Regression- [ ] If you want more introduction, watch [Brandon Foltz, Statistics 101: Simple Linear Regression](https://www.youtube.com/watch?v=ZkjP5RJLQF4)(20 minutes, over 1 million views)- [ ] Add your own stretch goal(s) !
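As a hedged illustration of a couple of the feature ideas listed above (the column names `description` and `bathrooms` are assumptions about the raw data; `df` refers to the dataframe loaded in the cells below):

```python
# Hypothetical extra features; run after df is loaded.
df['description_length'] = df['description'].fillna('').str.len()   # how long is the description?
df['has_description'] = (df['description_length'] > 0).astype(int)  # does it have one at all?
df['total_rooms'] = df['bedrooms'] + df['bathrooms']                # beds + baths
```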
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
import numpy as np
import pandas as pd
# Read New York City apartment rental listing data
df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv')
assert df.shape == (49352, 34)
# Remove the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
(df['price'] <= np.percentile(df['price'], 99.5)) &
(df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
(df['latitude'] < np.percentile(df['latitude'], 99.95)) &
(df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
(df['longitude'] <= np.percentile(df['longitude'], 99.95))]
# Exploring the data
df.head()
# Checking for null values
df.isnull().sum()
# Dropping NaN and checking
df = df.dropna()
df.isnull().sum()
df.dtypes
# Engineer at least two new features.
# Are cats or dogs allowed?
# Creating feature cats_or_dogs
df['cats_or_dogs'] = (df.cats_allowed.eq(1)) | (df.dogs_allowed.eq(1))
df.head()
# Are cats and dogs allowed?
# Creating feature cats_and_dogs
df['cats_and_dogs'] = (df['cats_allowed']== 1) & (df['dogs_allowed']== 1)
df.head()
df['cats_or_dogs']= df['cats_or_dogs'].replace({False:int(0),True:int(1)})
df['cats_and_dogs'] = df['cats_and_dogs'].replace({False:int(0),True:int(1)})
df.sample(5)
# Do train/test split. Use data from April & May 2016 to train. Use data from June 2016 to test.
train = df[df['created'].str.contains('2016-04|2016-05')]
test = df[df['created'].str.contains('2016-06')]
train.head()
test.head()
train.shape, test.shape
# Fit a linear regression model with at least two features.
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
model = LinearRegression()
# Arrange y target vectors
target = 'price'
y_train = train[target]
y_test = test[target]
# Arrange X features matrices
features = ['bedrooms','cats_and_dogs']
X_train = train[features]
X_test = test[features]
print(f'Linear Regression, dependent on: {features}')
# Fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
# Get regression metrics RMSE, MAE, and R2 # train data.
mse = mean_squared_error(y_train, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
print(f'Train Mean Squared Error: {mse:.2f}')
print(f'Train Root Mean Squared Error: {rmse:.2f}')
print(f'Train Mean Absolute Error: {mae:.2f}')
print(f'Train R^2 Score: {r2:.2f}')
# Apply the model to new data
y_pred = model.predict(X_test)
# Get regression metrics RMSE, MAE, and R2 # test data.
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f'Test Mean Squared Error: {mse:.2f}')
print(f'Test Root Mean Squared Error: {rmse:.2f}')
print(f'Test Mean Absolute Error: {mae:.2f}')
print(f'Test R^2 Score: {r2:.2f}')
# Get the model's coefficients and intercept.
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, features)
print(coefficients.to_string())
# Stretch Goals
# 3D scatterplot of the relationship between 2 features and the target, with the fitted regression plane
import itertools
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
def regression_3d(df, x, y, z, num=100, **kwargs):
"""
Visualize linear regression in 3D: 2 features + 1 target
df : Pandas DataFrame
x : string, feature 1 column in df
y : string, feature 2 column in df
z : string, target column in df
num : integer, number of quantiles for each feature
"""
# Plot data
fig = px.scatter_3d(df, x, y, z, **kwargs)
# Fit Linear Regression
features = [x, y]
target = z
model = LinearRegression()
model.fit(df[features], df[target])
# Define grid of coordinates in the feature space
xmin, xmax = df[x].min(), df[x].max()
ymin, ymax = df[y].min(), df[y].max()
xcoords = np.linspace(xmin, xmax, num)
ycoords = np.linspace(ymin, ymax, num)
coords = list(itertools.product(xcoords, ycoords))
# Make predictions for the grid
predictions = model.predict(coords)
Z = predictions.reshape(num, num).T
# Plot predictions as a 3D surface (plane)
fig.add_trace(go.Surface(x=xcoords, y=ycoords, z=Z))
return fig
regression_3d(
train,
x='bedrooms',
    y='cats_and_dogs',
    z='price',
title='Rent an apartment in NYC'
)
###Output
_____no_output_____ |
Economics_Milestone5_Causal_ipynb.ipynb | ###Markdown
Data Preparation Story Background: https://www.gemini.com/cryptopedia/the-dao-hack-makerdao Event: July 20, 2016, at block 1,920,000 [Data Metrics](https://github.com/coinmetrics-io/data/blob/master/csv/metrics.csv)
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
import the data for Ethereum
###Code
df_eth=pd.read_csv("https://raw.githubusercontent.com/coinmetrics-io/data/master/csv/eth.csv")
df_eth.head()
df_eth['time']=pd.to_datetime(df_eth['time'])
df_eth.dtypes
df_eth['Type'] = 'eth'
df_eth.head()
###Output
_____no_output_____
###Markdown
Import the Data for Ethereum Classic
###Code
df_etc=pd.read_csv("https://raw.githubusercontent.com/coinmetrics-io/data/master/csv/etc.csv")
df_etc.head()
### change to datetime
df_etc['time']=pd.to_datetime(df_etc['time'])
df_etc.dtypes
df_etc['Type'] = 'etc'
df_etc.head()
###Output
_____no_output_____
###Markdown
Regression Discontinuity https://youtu.be/TfKwgGT2fSM Create the identifier variable for the DAO hack
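Written out (my notation, matching the model fitted below), the regression around the hack-date cutoff is roughly: $$ \text{PriceUSD}_t = \beta_0 + \beta_1\,\text{AfterDaoHack}_t + \beta_2\,\text{days}_t + \beta_3\,(\text{AfterDaoHack}_t \times \text{days}_t) + \beta_4\,\text{TxTfrValAdjUSD}_t + \varepsilon_t $$ where $\beta_1$ captures the jump in price at the cutoff and $\beta_3$ the change in trend after it.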
###Code
from datetime import date
df_eth['After_DaoHack'] = df_eth['time'].apply(lambda x: 1 if x >= date(2016,7,20) else 0)
df_eth[df_eth.time.dt.date>=date(2016,7,18)].head()
df_eth['days']=df_eth.index
df_eth.head()
###Output
_____no_output_____
###Markdown
normalize (re-center) the day counter on the hack date
###Code
# Row index of the hack date (2016-07-20); used below to center the day counter
df_eth['days'][df_eth.time.dt.date==date(2016,7,20)]
# Shift so that day 0 corresponds to the hack date (index 356 found above)
df_eth['days']=df_eth['days']-356
df_eth[df_eth.time.dt.date>=date(2016,7,18)].head()
###Output
_____no_output_____
###Markdown
Run the regression with statsmodels https://www.statsmodels.org/stable/index.html
###Code
import statsmodels.api as sm
import statsmodels.formula.api as smf
results = smf.ols('PriceUSD ~ 1+ After_DaoHack +days+TxTfrValAdjUSD ', data=df_eth).fit()
print(results.summary())
results = smf.ols('PriceUSD ~ 1+ After_DaoHack + days +After_DaoHack*days +TxTfrValAdjUSD', data=df_eth).fit()
print(results.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PriceUSD R-squared: 0.703
Model: OLS Adj. R-squared: 0.702
Method: Least Squares F-statistic: 1262.
Date: Tue, 15 Jun 2021 Prob (F-statistic): 0.00
Time: 14:51:30 Log-Likelihood: -15327.
No. Observations: 2138 AIC: 3.066e+04
Df Residuals: 2133 BIC: 3.069e+04
Df Model: 4
Covariance Type: nonrobust
======================================================================================
coef std err t P>|t| [0.025 0.975]
--------------------------------------------------------------------------------------
Intercept 11.5152 33.840 0.340 0.734 -54.848 77.878
After_DaoHack 13.0086 37.019 0.351 0.725 -59.588 85.605
days 0.0385 0.169 0.229 0.819 -0.292 0.369
After_DaoHack:days 0.2216 0.169 1.309 0.191 -0.110 0.554
TxTfrValAdjUSD 1.007e-07 1.88e-09 53.502 0.000 9.7e-08 1.04e-07
==============================================================================
Omnibus: 2501.011 Durbin-Watson: 1.002
Prob(Omnibus): 0.000 Jarque-Bera (JB): 2321078.664
Skew: -5.171 Prob(JB): 0.00
Kurtosis: 164.084 Cond. No. 3.06e+10
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 3.06e+10. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
Difference-in-Differences https://towardsdatascience.com/causal-inference-101-difference-in-differences-1fbbb0f55e85 Prepare the datasets
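For reference (my notation), the specification fitted below follows the difference-in-differences pattern with a continuous time trend in place of a post-period dummy: $$ \text{PriceUSD}_{it} = \beta_0 + \beta_1\,\text{etc}_i + \beta_2\,\text{days}_t + \beta_3\,(\text{etc}_i \times \text{days}_t) + \gamma' X_{it} + \varepsilon_{it} $$ where `etc` marks the treated (hard-forked) chain, $X_{it}$ collects the activity controls, and the interaction coefficient $\beta_3$ is the quantity of interest.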
###Code
Columns =["time","PriceUSD","TxTfrValAdjUSD","FeeMedUSD","RevUSD","GasLmtTxMean","Type"]
df_eth =df_eth[Columns]
df_etc =df_etc[Columns]
df_etc =df_etc.dropna()
df_etc.head()
df_eth =df_eth[df_eth.time.dt.date>=date(2016,7,25)]
df_etc =df_etc[df_etc.time.dt.date>=date(2016,7,25)]
df_eth=df_eth.reset_index()
df_etc=df_etc.reset_index()
df_eth["days"]=df_eth.index
df_eth.head()
df_etc["days"]=df_etc.index
df_etc.head()
df = pd.concat([df_eth, df_etc], axis=0)
df.head()
df["etc"]=df["Type"].apply(lambda x: 1 if x=="etc" else 0)
df.head()
###Output
_____no_output_____
###Markdown
Run the regression. Treatment: the ETC hard fork
###Code
import statsmodels.api as sm
import statsmodels.formula.api as smf
results = smf.ols('PriceUSD ~ 1+ etc +days+TxTfrValAdjUSD+FeeMedUSD+RevUSD+GasLmtTxMean', data=df).fit()
print(results.summary())
results = smf.ols('PriceUSD ~ 1+ etc +days +etc*days+TxTfrValAdjUSD+FeeMedUSD+RevUSD+GasLmtTxMean', data=df).fit()
print(results.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PriceUSD R-squared: 0.978
Model: OLS Adj. R-squared: 0.978
Method: Least Squares F-statistic: 2.260e+04
Date: Tue, 15 Jun 2021 Prob (F-statistic): 0.00
Time: 14:59:42 Log-Likelihood: -20294.
No. Observations: 3572 AIC: 4.060e+04
Df Residuals: 3564 BIC: 4.065e+04
Df Model: 7
Covariance Type: nonrobust
==================================================================================
coef std err t P>|t| [0.025 0.975]
----------------------------------------------------------------------------------
Intercept -111.3056 3.908 -28.480 0.000 -118.968 -103.643
etc 107.9392 4.980 21.676 0.000 98.176 117.702
days 0.1478 0.004 39.728 0.000 0.141 0.155
etc:days -0.1459 0.005 -29.593 0.000 -0.156 -0.136
TxTfrValAdjUSD 4.288e-09 6.74e-10 6.359 0.000 2.97e-09 5.61e-09
FeeMedUSD -129.8503 1.586 -81.879 0.000 -132.960 -126.741
RevUSD 5.7e-05 3.36e-07 169.506 0.000 5.63e-05 5.77e-05
GasLmtTxMean -1.37e-05 8.93e-06 -1.535 0.125 -3.12e-05 3.8e-06
==============================================================================
Omnibus: 3818.674 Durbin-Watson: 0.562
Prob(Omnibus): 0.000 Jarque-Bera (JB): 2184150.593
Skew: -4.561 Prob(JB): 0.00
Kurtosis: 123.797 Cond. No. 1.62e+10
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.62e+10. This might indicate that there are
strong multicollinearity or other numerical problems.
|
USF_Web_Scraping_Project.ipynb | ###Markdown
Web-Scraping Project Preparing for web scraping on the following sites: https://admin.nber.org/xsearch?q=early+childhood+development+OR+Education&whichsearch=ftpub&restrict_papers=yes&fullresults=1&datefilter=&b=search+again https://openknowledge.worldbank.org/discover https://www.unicef.org/search/search.php?querystring_en=%28%22early+childhood+development%22+OR+%22early+development%22+OR+%22early+child+care%22+OR+%22ecd%22%29+AND+%28%22literacy%22+OR+%22cognition%22+OR+%22education%22+OR+%22school*%22%29&hits=&type=&navigation=&Go.x=0&Go.y=0
###Code
# Import Dependencies
# !pip install beautifulsoup4
# !pip install pandas
# !pip install splinter
from splinter import Browser
from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
import random
###Output
_____no_output_____
###Markdown
Mac Users
###Code
# https://splinter.readthedocs.io/en/latest/drivers/chrome.html
!which chromedriver
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
###Output
_____no_output_____
###Markdown
Windows Users
###Code
executable_path = {'executable_path': './chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
# Using googlechrom drive open test explorer
url = 'https://openknowledge.worldbank.org/discover'
browser.visit(url)
# Retrieve page from the request module
response = requests.get(url)
# Create BeautifulSoup Object; parse with 'html.parser'
soup = BeautifulSoup(response.text, 'html.parser')
# Print the main page html
print(soup.prettify())
# results are returned as an iterable list
headers = soup.find_all('h4')
# Check the list of the headers
print(headers[0])
print(headers[1])
print(headers[3])
print(headers[9])
# Loop through the headers on the main page
t = []
titles = []
for i in range(0, 10):
try:
#Look for the title on the main page under 'h4' tag
title = soup.find_all('h4')[i].text
# append the title name into an empty list, 't'
if (title):
t.append(title)
# print(title)
# Remove the new line split '\n' from the title name
name = t[i].splitlines()
titles.append(name[1])
# print(titles)
except AttributeError as e:
print(e)
# Creating a click action on the book title
browser.click_link_by_text(titles[2])
# Create loop to click on each report link
for i in range(0, 10):
browser.click_link_by_text(titles[i])
html = browser.html
reports_soup = BeautifulSoup(html, 'html.parser')
slide_element = reports_soup.select_one('div.main-content')
publish_date = slide_element.find('div', class_='simple-item-view-other word-break').get_text()
print(publish_date)
browser.back()
print('page' + str(i+1))
browser.is_element_present_by_css('a[class="no-decor"]')
report_link = browser.find_by_css('a[class="no-decor"]')
report_link[0].click()
# Go back to the previous page
browser.back()
# Find the next page button and click the link to the next page
# page = soup.find('a', class_='next-page-link')
# print(page)
browser.is_element_present_by_css('a[class="next-page-link"]')
next_page = browser.find_by_css('a[class="next-page-link"]')
print(next_page[1])
next_page[1].click()
# Find the first title and click the link
browser.is_element_present_by_css('a[class="no-decor"]')
report_link = browser.find_by_css('a[class="no-decor"]')
report_link.click()
print(report_link)
# Create report soup for extracting data from the report page
html = browser.html
reports_soup = BeautifulSoup(html, 'html.parser')
# Create slide element to find the information to store
slide_element = reports_soup.select_one('div.main-content')
print(slide_element)
# Target the title header text
slide_element.find('h2', class_='ds-div-head')
# Store the journal title, summary, citation, link, publish date, and author
title = [slide_element.find('h2', class_='ds-div-head').get_text()]
summary = [slide_element.find('div', class_='okr-item-page-field-wrapper abstract').get_text().splitlines()]
citation = [slide_element.find('div', class_='citation').get_text().splitlines()]
link_path = slide_element.find('div', class_='okr-item-page-field-wrapper uri').get_text()
link = [link_path[4:].splitlines()]
publish_date = [slide_element.find('div', class_='simple-item-view-other word-break').get_text()]
author = [slide_element.find('div', class_='authorprofile-item-view-link').get_text().splitlines()]
print(title)
print(summary)
print(citation)
print(link)
print(publish_date)
print(author)
# Go back to the previous page
browser.back()
# Loop through the headers on the main page
titles = []
report_title = []
report_summary = []
report_citation = []
report_link = []
report_publish_date = []
report_author = []
for i in range(0, 10):
try:
#Look for the title on the main page under 'h4' tag
name = soup.find_all('h4')[i].text
# Remove the new line split '\n' from the title name
if (name):
t = name.splitlines()
titles.append(t[1])
# print(titles)
# Click on the book title link and nevigate to the report page
browser.click_link_by_text(titles[i])
# Creating the BeautifulSoup parser to extract the information from the report page
html = browser.html
reports_soup = BeautifulSoup(html, 'html.parser')
report = reports_soup.select_one('div.main-content')
# Extracting the information from the report page
title = report.find('h2', class_='ds-div-head').get_text()
summary = report.find('div', class_='okr-item-page-field-wrapper abstract').get_text().splitlines()
citation = report.find('div', class_='citation').get_text().splitlines()
link_path = report.find('div', class_='okr-item-page-field-wrapper uri').get_text()
link = link_path[4:].splitlines()
publish_date = report.find('div', class_='simple-item-view-other word-break').get_text()
author = report.find('div', class_='authorprofile-item-view-link').get_text().splitlines()
# Append the information into the empty lists
report_title.append(title)
report_summary.append(summary[1])
report_citation.append(citation[1])
report_link.append(link[1])
report_publish_date.append(publish_date)
report_author.append(author[1])
# Go back to the main page after extracting the information
browser.back()
except AttributeError as e:
print(e)
print(report_title)
print(report_link)
# print the popup windown html
html = browser.html
page_soup = BeautifulSoup(html, 'html.parser')
print(page_soup.prettify())
# Click the "No, thanks." button on the popup window
browser.click_link_by_text('No, thanks.')
# Create a loop that can navigate to the next page
report_title = []
report_summary = []
report_citation = []
report_link = []
report_publish_date = []
report_author = []
executable_path = {'executable_path': './chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url = 'https://openknowledge.worldbank.org/discover'
browser.visit(url)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
# Loop through the first 5 pages
for j in range(0, 5):
titles = []
for i in range(0, 10):
try:
html = browser.html
page_soup = BeautifulSoup(html, 'html.parser')
if browser.is_text_present('No, thanks.') == True:
browser.click_link_by_text('No, thanks.')
#Look for the title on the main page under 'h4' tag
browser.is_element_present_by_css('h4')
name = page_soup.find_all('h4')[i].text
# Remove the new line split '\n' from the title name
if (name):
t = name.splitlines()
titles.append(t[1])
# Click on the book title link and nevigate to the report page
browser.is_element_present_by_text(titles[i], wait_time=2)
browser.click_link_by_text(titles[i])
# Creating the BeautifulSoup parser to extract the information from the report page
html = browser.html
reports_soup = BeautifulSoup(html, 'html.parser')
report = reports_soup.select_one('div.main-content')
# Extracting the information from the report page
title = report.find('h2', class_='ds-div-head').get_text()
summary = report.find('div', class_='okr-item-page-field-wrapper abstract').get_text().splitlines()
citation = report.find('div', class_='citation').get_text().splitlines()
link_path = report.find('div', class_='okr-item-page-field-wrapper uri').get_text()
link = link_path[4:].splitlines()
publish_date = report.find('div', class_='simple-item-view-other word-break').get_text()
author = report.find('div', class_='authorprofile-item-view-link').get_text().splitlines()
# Append the information into the empty lists
report_title.append(title)
report_summary.append(summary[1])
report_citation.append(citation[1])
report_link.append(link[1])
report_publish_date.append(publish_date)
report_author.append(author[1])
# Go back to the main page after extracting the information
browser.back()
# Stop the loop for 2 seconds to make sure catching the popup window
time.sleep(2)
except AttributeError as e:
print(e)
# Create a click action to navigate to the next page
browser.is_element_present_by_css('a[class="next-page-link"]', wait_time=1)
next_page = browser.find_by_css('a[class="next-page-link"]')
next_page[1].click()
time.sleep(2)
print(report_link)
print(len(report_summary))
print(len(report_link))
print(len(report_publish_date))
print(report_summary[0])
# Test to extract title from the second main page
# Create page_soup when the browser is at the second main page
html = browser.html
page_soup = BeautifulSoup(html, 'html.parser')
name = page_soup.find_all('h4')[1].text
print(name)
report_abstract = []
for i in report_summary:
abstract = i[8:]
# print(abstract)
report_abstract.append(abstract)
print(report_abstract)
# Storing the variables into a dataframe
data_df = pd.DataFrame(list(zip(report_title, report_abstract, report_link, report_publish_date, report_author)),
columns=["title", "summary", "link", "publish_date", "author"])
# Replace the "\n" in the dataframe
data_df = data_df.replace('\n',' ', regex=True)
data_df.head()
# Saving the dataframe into CSV
data_df.to_csv("data.csv", index=False, encoding='utf-8')
# Scraping NEBR
url = "https://admin.nber.org/xsearch?q=early+childhood+development+OR+Education&whichsearch=ftpub&restrict_papers=yes&fullresults=1&datefilter=&b=search+again"
browser.visit(url)
# Retrieve page from the request module
response = requests.get(url)
# Create BeautifulSoup Object; parse with 'html.parser'
soup = BeautifulSoup(response.text, 'html.parser')
# Print the main page html
print(soup.prettify())
# header results are returned as an iterable list
headers = soup.find_all('a', class_="resultTitle")
# Check the list of the headers
print(len(headers))
print(headers[0].get_text())
print(headers[1].get_text())
print(headers[3].get_text())
print(headers[9].get_text())
# publish date
publish_date = soup.find_all('span', class_='searchResultNiceDate')
print(len(publish_date))
print(publish_date[0].get_text())
# Authors
authors = soup.find_all('span', class_='searchResultAuthor')
print(len(authors))
print(authors[0].get_text())
# Summary
summary = soup.find_all('div', class_='searchResultAbstract')
print(len(summary))
print(summary[0].get_text())
# URL
link = soup.find_all('p', class_='url')
print(len(link))
print(link[0].get_text())
# Find the next page button and click on it
browser.is_text_present('More results.')
browser.click_link_by_text('More results.')
# Create a new soup for the new page
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# Creating Loop for browsing on NBER web site
executable_path = {'executable_path': './chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url = "https://admin.nber.org/xsearch?q=early+childhood+development+OR+Education&whichsearch=ftpub&restrict_papers=yes&fullresults=1&datefilter=&b=search+again"
browser.visit(url)
for i in range(0, 10):
try:
html = browser.html
page_soup = BeautifulSoup(html, 'html.parser')
# results are returned as an iterable list
headers = soup.find_all('a', class_="resultTitle")
publish_date = soup.find_all('span', class_='searchResultNiceDate')
authors = soup.find_all('span', class_='searchResultAuthor')
summary = soup.find_all('div', class_='searchResultAbstract')
link = soup.find_all('p', class_='url')
print(len(headers))
t = random.randint(5, 15)
browser.is_element_present_by_text('More results', wait_time=t)
browser.click_link_by_text('More results.')
except AttributeError as e:
print(e)
random.randint(5, 15)
###Output
_____no_output_____ |
templates/.ipynb_checkpoints/issues_template-checkpoint.ipynb | ###Markdown
Issue Analysis
###Code
import psycopg2
import pandas as pd
import sqlalchemy as salc
import matplotlib
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
import datetime
warnings.filterwarnings('ignore')
dbschema='augur_data' # Searches left-to-right
engine = salc.create_engine(
'postgres+psycopg2://augur:[email protected]:5433/augur_zephyr',
connect_args={'options': '-csearch_path={}'.format(dbschema)})
###Output
_____no_output_____
###Markdown
Repository Filter
###Code
## List of repository IDs for the report
repo_set = {25158}
###Output
_____no_output_____
###Markdown
Identifying the Longest Running Issues. Getting the Data
###Code
issues_all = pd.DataFrame()
for repo_id in repo_set:
issue_query = salc.sql.text(f"""
SELECT
issues.issue_id,
issues.issue_state,
repo.repo_id,
repo.repo_name,
date_part('year', issues.created_at::DATE) AS created_year,
date_part('year', issues.closed_at::DATE) AS closed_year,
date_part('month', issues.created_at::DATE) AS created_month,
date_part('month', issues.closed_at::DATE) AS closed_month,
issues.created_at,
issues.closed_at,
msg_timestamp,
M.cntrb_id
FROM
repo,
issues
LEFT OUTER JOIN issue_message_ref K ON issues.issue_id = K.issue_id
LEFT OUTER JOIN message M ON K.msg_id = M.msg_id
WHERE
pull_request IS NULL
AND issues.repo_id = repo.repo_id
AND issues.repo_id = {repo_id}
ORDER BY
created_month
""")
issues_a = pd.read_sql(issue_query, con=engine)
if not issues_all.empty:
df = pd.concat([issues_all, issues_a])
else:
# first repo
df = issues_a
months_df = pd.DataFrame()
months_query = salc.sql.text(f"""
SELECT
*
FROM
(
SELECT
date_part( 'year', created_month :: DATE ) AS created_year,
date_part( 'month', created_month :: DATE ) AS created_month
FROM
(SELECT * FROM ( SELECT created_month :: DATE FROM generate_series (TIMESTAMP '2017-01-01', TIMESTAMP '2020-04-30', INTERVAL '1 month' ) created_month ) d ) x
) y
""")
months_df = pd.read_sql(months_query, con=engine)
display(df.head())
df.dtypes
display(months_df)
issues_open = df.loc[df['issue_state'] != 'closed']
issues_closed = df.loc[df['issue_state'] == 'closed']
issues_closed[['created_month', 'created_year', 'closed_month', 'closed_year']] = issues_closed[['created_month', 'created_year', 'closed_month', 'closed_year']].astype(int).astype(str)
issues_closed['created_yearmonth'] = issues_closed['created_month'] + '/' + issues_closed['created_year']
issues_closed[ 'created_yearmonth'] = pd.to_datetime(issues_closed['created_yearmonth'])
issues_closed['closed_yearmonth'] = issues_closed['closed_month'] + '/' + issues_closed['closed_year']
issues_closed[ 'closed_yearmonth'] = pd.to_datetime(issues_closed['closed_yearmonth'])
begin_date = '2018-01-01'
end_date = '2019-04-30'
months_df[['created_month', 'created_year']] = months_df[['created_month', 'created_year']].astype(int).astype(str)
months_df['created_yearmonth'] = months_df['created_month'] + '/' + months_df['created_year']
months_df['created_yearmonth'] = pd.to_datetime(months_df['created_yearmonth'])
issues_closed['comments'] = 1
df['comments'] = 1
display(issues_closed)
#months_df = months_df[(months_df['created_yearmonth'] > start_date) & (months_df['created_yearmonth'] < end_date)]
df.dtypes
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from bokeh.models import Label, LabelSet, ColumnDataSource, Legend, TableColumn, DateFormatter, DataTable
from bokeh.palettes import mpl, magma, viridis, Colorblind
from bokeh.transform import dodge
def vertical_bar_chart(input_df, months_df,repo_name='', group_by='month', contributor_type = 'All', y_max=None, y_axis='new_contributors', title="{}: {} {} Time Contributors Per {}", save_file=False, rank = 1):
output_notebook()
driver_df = input_df.copy()
if repo_name:
driver_df = driver_df.loc[driver_df['repo_name'] == repo_name]
else:
repo_name = "All repos"
#mask = (driver_df['yearmonth'] < begin_date)
#driver_df= driver_df[~driver_df['cntrb_id'].isin(driver_df.loc[mask]['cntrb_id'])]
#mask = (driver_df['yearmonth'] < end_date)
#driver_df = driver_df.loc[mask]
#driver_df = driver_df.loc[driver_df['rank'] == rank]
#adds all months to driver_df so the lists of dates will include all months and years
driver_df = pd.concat([driver_df, months_df])
data = pd.DataFrame()
if group_by == 'year':
#x-axis dates
data['dates'] = driver_df['created_year'].unique()
data['dates'] = data['dates'].dropna()
display(data['dates'])
#average issues each month
data['average_comments_per_issue'] = driver_df.groupby(['created_year']).mean().reset_index()['comments'] / driver_df.groupby(['created_year', 'issue_id']).count().reset_index()['comments']
#display(driver_df.groupby(['created_year']).mean().reset_index()['comments'])
display(driver_df.groupby(['issue_id']).count().reset_index()['comments'])
#used to format x-axis and title
group_by_format_string = "Year"
elif group_by == 'month':
#x-axis dates
dates = np.datetime_as_string(driver_df['yearmonth'], unit='M')
dates = np.unique(dates)
data['dates'] = dates
display(data['dates'])
#new contributor counts for y-axis
data['average_comments_per_issue'] = driver_df.groupby(['yearmonth']).sum().reset_index()[y_axis]
#used to format x-axis and title
group_by_format_string = "Month"
if len(data['average_comments_per_issue']) >= 15:
plot_width = 46 * len(data['average_comments_per_issue'])
else:
plot_width = 670
p = figure(x_range=data['dates'], plot_height=400, plot_width = plot_width, title='Title',
toolbar_location=None, y_range=(0, max(data['average_comments_per_issue'])* 1.15), margin = (0, 0, 200, 0))
p.vbar(x=data['dates'], top=data['average_comments_per_issue'], width=0.8)
source = ColumnDataSource(data=dict(dates=data['dates'], average_comments_per_issue=data['average_comments_per_issue']))
labels = LabelSet(x='dates', y='average_comments_per_issue', text='average_comments_per_issue', y_offset=4,
text_font_size="13pt", text_color="black",
source=source, text_align='center')
p.add_layout(labels)
caption = 'caption'
caption = Label(x=-10, y=-120, x_units='screen', y_units='screen',
text='{}'.format(caption), render_mode='css',
background_fill_color='white', text_font_size = '15pt')
p.add_layout(caption)
p.xgrid.grid_line_color = None
p.y_range.start = 0
p.axis.minor_tick_line_color = None
p.outline_line_color = None
p.title.align = "center"
p.title.text_font_size = "18px"
p.yaxis.axis_label = 'Average Comments per Issues per Year'
p.xaxis.axis_label = group_by_format_string
p.xaxis.axis_label_text_font_size = "18px"
p.yaxis.axis_label_text_font_size = "16px"
p.xaxis.major_label_text_font_size = "16px"
p.xaxis.major_label_orientation = 45.0
p.yaxis.major_label_text_font_size = "16px"
show(p)
vertical_bar_chart(issues_closed, months_df, group_by = 'year')
###Output
_____no_output_____ |
01_Convolution_Layer/Convolutional Layer.ipynb | ###Markdown
Convolutional Layer Visualize four filtered outputs (a.k.a. feature maps) of a convolutional layer. Import the image. Import the Necessary Packages
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# image path
img_path = 'images/AA.JPG'
# load color image (cv2: bgr)
bgr_img = cv2.imread(img_path)
# convert to gray
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize scale to [0,1]
gray_img = gray_img.astype('float32')/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
###Output
_____no_output_____
###Markdown
Create 4-Filters (vertical and horizontal)
###Code
# used 4x4 filter
filter_vals = np.ones((4,4))
filter_vals[:,0:2] *= -1
# print(filter_vals)
print('Filter shape: ', filter_vals.shape)
# make four different filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# visualize four filters
fig = plt.figure(figsize=(10,5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter {}'.format(str(i+1)))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]),
xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y] < 0 else 'black')
###Output
Filter shape: (4, 4)
###Markdown
Define a Network ArchitectureFor CNN* Convolutional layers
###Code
# CNN with a single convolutional layer with four filters
class Net(nn.Module):
def __init__(self, weight):
super(Net, self).__init__()
# initializes the weights of the convolutional layer to be the weights of the 4 defined filters
k_height, k_width = weight.shape[2:]
# 1 input image channel(grayscale), 4 output channels/feature maps (filter)
# 4x4 square convolution kernel
self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
self.conv.weight = torch.nn.Parameter(weight)
# define the feedforward behavior
def forward(self, x):
conv_x = self.conv(x)
activated_x = F.relu(conv_x)
# return both layers
return conv_x, activated_x
# instantiate the model ans set the weights
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
print(weight.shape)
model = Net(weight)
print(model)
###Output
torch.Size([4, 1, 4, 4])
Net(
(conv): Conv2d(1, 4, kernel_size=(4, 4), stride=(1, 1), bias=False)
)
###Markdown
Visualize the output of each filter
###Code
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters= 4):
fig = plt.figure(figsize=(20, 20))
for i in range(n_filters):
ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])
# grab layer outputs
ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')
ax.set_title('Output %s' % str(i+1))
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
# visualize the output of an activated conv layer
viz_layer(activated_layer)
###Output
_____no_output_____ |
vector_repr.ipynb | ###Markdown
document-term matrix =- each **col** represents a word and- each **row** represents a document- the value in every cell can represent a few things: - Most traditionally, it is a count of how many times a word appears in a doc. - It can also be a boolean: does this word appear at all. - TF-IDF: term-frequency inverse document-frequency Bag of Words = counting the appearance of words to try to predict something.Bag of words can be good for classification problems.- Why is bag of words good for classification?
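A toy illustration of a document-term matrix, using a hypothetical three-document corpus (not the headline data used below):

```python
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd

toy_docs = ["the cat sat", "the dog sat", "the dog barked"]
vect = CountVectorizer()
dtm = vect.fit_transform(toy_docs)   # rows = documents, columns = words
print(pd.DataFrame(dtm.todense(), columns=vect.get_feature_names()))
```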
###Code
import spacy
# load NN sm, md, lg etc.
nlp = spacy.load("en_core_web_lg")
tokens = []
# Tokenize Corpus
for doc in nlp.pipe(corpus['Top1'], batch_size=500):
doc_tokens = []
for token in doc:
if (token.is_stop == False) & (token.is_punct == False) & (token.is_space == False):
doc_tokens.append(token.text.lower())
tokens.append(doc_tokens)
# Create New Tokens Column in Corpus DF
corpus.insert(3, 'tokens', tokens)
corpus.head(2)
###Output
_____no_output_____
###Markdown
But... we **won't be using Tokenization** with the Vectorizers Count Vectorizer Vectorizer does NOT mean Word Embeddings-> Vectorizers are things that build Document Term Matrices- One of 3 BOW methods
###Code
# Building a Document Term Matrix
from sklearn.feature_extraction.text import CountVectorizer
# --- Key Arguments/Parameters for Count Vectorizor ---
# max_features -> sets limit on vocabulary size aka Limit Features
# - Only most common words are made into features.
# max_df/min_df -> Limit Features by Document Frequency
# -> Ignores terms that show up in greater than 90% of docs
# --> Or less than 2% of docs for example
# ngram_range -> range of n-values for different n-gram
# or char n-grams to be extracted
# You can also override scikit-learn's default tokenization.
# Scikit-learn's default tokenization is not as good as spaCy's.
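# Small aside (hypothetical instantiations, not used below): max_df/min_df accept
# either floats, read as a proportion of documents, or ints, read as an absolute
# document count.
# CountVectorizer(max_df=0.90, min_df=0.02)  # drop terms in >90% or <2% of docs
# CountVectorizer(max_df=100, min_df=5)      # drop terms in >100 or fewer than 5 docs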
# Instantiate the Transformer
vect = CountVectorizer(stop_words=nlp.Defaults.stop_words,
max_features=1000)
# Build Vocab
# We simply pass an iterable of docs...
# It 'tokenizes on its own...'
# AKA it Builds the Vocab on its own
# Tokenization -> One way to Build Vocab I think...
# ?- Can this be improved using lemmatization? -?
vect.fit(corpus['Top1'])
# transform text - Count Vocab
# Build the Matrix Using the Vocab Determined during the fit Command
dtm = vect.transform(corpus['Top1'])
# Print Out Words in Vocabulary
print(vect.get_feature_names()[0:50])
# The DTM is Values and their Location in Coordinates
print('Type:', type(dtm))
print('Shape:', dtm.shape)
print('First 2 Values:')
print(dtm[2]) # These are coordinates of the matrix :)
# dtm sparce to df
# Get Word Counts for Each Document
dtm = pd.DataFrame(dtm.todense(), columns=vect.get_feature_names())
print(dtm.shape)
dtm.head()
# Examine Distribution of Doc Lengths
# AKA Distribution of Headline Lengths
doc_lengths = [len(doc) for doc in corpus['Top1']]
pd.Series(doc_lengths).hist()
###Output
_____no_output_____
###Markdown
-> Think about it... Word counts are affected by document size. Enter -> TF-IDF TF-IDF -> Term Frequency Inverse Document Frequency Helps Control for Different Document Lengths in the Same Corpus. Term Frequency = the share of a document's words accounted for by each word. Document Frequency = a penalty for the word existing in a high number of documents. The purpose of TF-IDF is to find what is unique to each document. It penalizes the term frequencies of words that are common across all documents, which allows each document's most distinctive terms to rise to the top. (A rough formula sketch opens the next code cell.) Document Term Matrix with TF-IDF
###Code
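# Rough formula aside (reflecting scikit-learn's default smooth_idf / L2-norm
# settings, to the best of my understanding):
#   tf(t, d)    = raw count of term t in document d
#   idf(t)      = ln((1 + n_docs) / (1 + df(t))) + 1
#   tfidf(t, d) = tf(t, d) * idf(t), then each document row is L2-normalized
# A term appearing in nearly every document gets an idf near 1 (little weight);
# a rare term gets a larger idf, so document-specific terms stand out.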
from sklearn.feature_extraction.text import TfidfVectorizer
# ?-- What percentage of overall corpus size
# should max_features be? --?
# Instantiate Vectorizer Object
tfidf = TfidfVectorizer(stop_words='english', max_features=2500)
# Create a vocabulary and get word counts per document.
# Similar to fit_predict
dtm = tfidf.fit_transform(corpus['Top1'])
# Print Word Counts
# Get feature names to use as dataframe column headers.
dtm = pd.DataFrame(dtm.todense(), columns=tfidf.get_feature_names())
# View Feature Matrix as DataFrame
print(dtm.shape)
dtm.head()
###Output
(1989, 2500)
###Markdown
We want to trim down our DTM by reducing noise while improving signal. We will use spaCy tokenization, stop-words, n-grams, and statistical trimming to help us refine the results of our dtm.
###Code
def tokenize(document):
# Return Lemmas
doc = nlp(document)
return [token.lemma_.strip() for token in doc if (token.is_stop != True) and (token.is_punct != True)]
# n-grams = multi-word phrases
# bi-gram = 2 words
# tri-gram = 3 words
# sklearn will search through all the valid combinations of words
# and return all those possible phrases as they appear in text....
# This results in WAYYY more words in a vocabulary as you have all the words
# singular ANDDD the words as combinations SO...
# When specifying ngram_range -> ALWAYS limit the features!
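# Toy aside (not part of the original flow): ngram_range=(1, 2) keeps unigrams AND
# bigrams, which is why the vocabulary explodes. Roughly:
# CountVectorizer(ngram_range=(1, 2)).fit(["the cat sat"]).get_feature_names()
# -> ['cat', 'cat sat', 'sat', 'the', 'the cat']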
print(len(corpus['Top1']))
# NOTE
# Tuning Parameters
# Instantiate Vectorizor Object
tfidf = TfidfVectorizer(stop_words='english',
ngram_range=(1, 2),
# Term or N-Gram Must Appear in no more than 97 docs.
max_df=97,
                        # Term or N-Gram Must Appear in at least 4 Docs
min_df=4,
max_features=2000,
# spaCy Tokenization from Func above
tokenizer=tokenize)
# Create a Vocabulary and Get Word Counts per Document
dtm = tfidf.fit_transform(corpus['Top1']) # Similar to Fit-Predict
# Print Word Counts
# Get Feature Names to Use as DF Column Headers
dtm = pd.DataFrame(dtm.todense(), columns=tfidf.get_feature_names())
# View Feature Matrix as DataFrame
print(dtm.shape)
dtm.head()
###Output
(1989, 1534)
###Markdown
Cosine Similarity
###Code
# Calculate Distance of TF-IDF Vectors
# cosine similarity == the cosine of the angle between two vectors
#                      (their dot product divided by the product of their norms)
# for L2-normalized vectors (like these TF-IDF rows) it is monotonically
# related to Euclidean (straight-line) distance
from sklearn.metrics.pairwise import cosine_similarity
# We are finding the distance between each document's TF-IDF vector
dist_matrix = cosine_similarity(dtm)
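# Optional sanity check (an aside): recompute one entry of the matrix by hand,
# assuming neither row is all zeros. The dot product divided by the product of
# the norms should match dist_matrix[0, 1] up to floating-point error.
import numpy as np
v0, v1 = dtm.iloc[0].values, dtm.iloc[1].values
manual_cos = np.dot(v0, v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
print(manual_cos, dist_matrix[0, 1])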
# Let's find a headline similar to the first row of Top1.
corpus['Top1'][0]
# Turn the Dist Matrix into a Dataframe
dist = pd.DataFrame(dist_matrix)
# Similarity Accross DF
print(dist.shape)
dist.head()
# Each row is the similarity of one document to all other documents.
dist[0][0:15]
# Recommendation Engine
# Find all rows that are not 1 (aka, identical to headline)
# Order them by most similar.
dist[dist[11] < 1][11].sort_values(ascending=False).index[0]
# We can see the similarity.
print(corpus['Top1'][11])
print(' ')
print(corpus['Top1'][848])
def find_top_similarity(index_value):
print(corpus['Top1'][index_value])
# Find all rows that are not 1 (aka, identical to headline)
    # Order them by most similar -> Get Top One
similar = dist[dist[index_value] < 1]\
[index_value].sort_values(ascending=False).index[0]
print('\n')
print(corpus['Top1'][similar])
find_top_similarity(20)
###Output
'A French judge has ordered two branches of Scientologists and their leaders to stand trial for fraud '
Airliner crashes in French Alps
###Markdown
Cosine Similarity is too computationally expensive to work in the majority of situations. Besides cosine similarity / euclidean distance, there are 2 main structures for storing similarity:- KD Trees- Ball Trees. These are alternatives to storing the distance between every combination of vectors. (Both are sketched in a code cell below.)
###Code
# Document Term Matrix with TF-IDF as Values
print(dtm.shape)
dtm.head(3)
from sklearn.neighbors import NearestNeighbors
# Fit on DTM
# Specify we want 5 neighbors per doc
nn = NearestNeighbors(n_neighbors=5, algorithm='kd_tree')
nn.fit(dtm)
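# Aside (alternative index structures fit on the same dtm, not used below): a ball
# tree copes with higher-dimensional data somewhat better than a KD tree, and in
# scikit-learn metric='cosine' requires the brute-force algorithm.
nn_ball = NearestNeighbors(n_neighbors=5, algorithm='ball_tree').fit(dtm)
nn_cosine = NearestNeighbors(n_neighbors=5, metric='cosine', algorithm='brute').fit(dtm)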
dtm.iloc[0]
# We pass our model a doc vector to get the model to point us to it's
# K- Nearest Neighbors
nn.kneighbors([dtm.iloc[0]])
# Query Using Kneighbors
nn.kneighbors([dtm.iloc[14]])
corpus['Top1'][14]
# Looks Like Euclidean distance was more effective...
print(corpus['Top1'][14])
print(' ')
print(corpus['Top1'][606])
print(' ')
print(corpus['Top1'][385])
# Let's try again with a random number:
from numpy import random
randint = random.randint(len(corpus['Top1']))
print("Random Integer", randint, "\n")
kneighbors_query = nn.kneighbors([dtm.iloc[randint]])
print(kneighbors_query, "\n")
print(corpus['Top1'][randint], "\n")
print(corpus['Top1'][kneighbors_query[1][0][1]], "\n")
print(corpus['Top1'][kneighbors_query[1][0][2]])
# From seeing a few examples I get the sense that the system is biased
# towards shorter headlines... I wonder why??
# ?-- TODO --? The Following Show Up Disproportionately:
# 'What a surprise...' [385]
# 'Scenes from the recession' [152]
# 'June 4th, 1989' [205]
###Output
Random Integer 1794
(array([[0., 1., 1., 1., 1.]]), array([[1794, 205, 606, 1472, 385]]))
Canadian mining company Barrick Gold leaks a million liters of cyanide into river in Argentina
'June 4th, 1989'
Punjab(Pakistan) governor Salman Taseer assassinated in Islamabad
###Markdown
Making a match with an outside text source.
###Code
new_doc = ["There is a New President."]
# Query for Sim of Random Doc to Our Reddit Headlines
# Create a DTM Row (With TDIF Values) for the new doc.
# -> Relative the Reddit Headline Corpus
new = tfidf.transform(new_doc)
new # LOOK -> Only 2 stored elements! Only 2 words from the sample new doc are
# included in the Term Set.. a good indicator that this headline might not
# work as well with the present model.
###Output
_____no_output_____
###Markdown
We could decide on a threshold for `number of stored elements` under which we refuse to provide a recommendation, in order to reduce the amount of impractical, inaccurate results. (A sketch of this check opens the next code cell.)
###Code
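# Sketch of the thresholding idea from the note above; the cutoff of 2 stored
# elements is an arbitrary, hypothetical choice. `new` is still a sparse matrix here.
MIN_STORED_ELEMENTS = 2
if new.getnnz() < MIN_STORED_ELEMENTS:
    print("Too few known terms in this doc - a recommendation would be unreliable.")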
# Turn the Sparse Matrix into a Dense Matrix
new = new.todense()
# Now we have a new doc/row that is expressed in a dense TF-IDF Term Vector
# and we are going to plug that row into our KNN model to get an output of our
# K- nearest neighbors
nn.kneighbors(new)
corpus['Top1'][192]
###Output
_____no_output_____
###Markdown
The limitation exists in that... docs that have few tokens that are included in the vocabulary of the Doc-Term Matrix will be extra sparse (aka many zeros).--> Other vectors that also contain a high proportion of zeros will be identified by the KNN model as 'similar', even though the only similarity the 2 docs have in common is their lack of tokens that are a part of the term set. --> That's why the KNN model might be biased to favor shorter headlines when comparing a headline that does not include many high TF-IDF values. How would JC set a distance threshold for recommendations from my KNN model?1. Select a sample of query articles (say 10).2. Then he would get the knn recommendations for those 10 articles, ~5 recommendations each.3. For each of the ~50 recs, he would label them as useful or not useful.4. Take one of the following values as the threshold: - Max distance of the useful labelled recs - 75th percentile distance of the useful labelled recs - Min distance of the not-useful labelled recs. Then after he has his model in production, he would run a similar experiment using A/B testing. Word Embeddings
###Code
nlp = spacy.load('en_core_web_lg')
doc = nlp("Two bananas in pyjamas")
# Get the vector for the token "bananas"
# ?-- How does this spaCy vector differ from the vectors of the DTM? --?
# For each term-document pair, the TF-IDF DTM ascribes only 1 TF-IDF Value
# ?-- In this case we would see a matrix for each document-term pair..
# or a matrix for each term/token? --?
# -- SOLVED -> It is querying from a complex data-structure that represents
# the english language. The vector of a doc represents the location of the doc
# among this "english map"...
bananas_vector = doc.vector
# These values are un-interpretable.
# For ALL Docs of ANY Length, a vector of length 300 will be constructed to
# represent said doc.
# ?-- New Question -> How are the multiple tokens that form a doc aggregated
# to always have a vector of length 300? Are they averaged? --?
print(len(bananas_vector))
print(bananas_vector[:50])
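# Quick check of the question above: for spaCy models that ship static word vectors,
# doc.vector is (to my understanding) the average of the token vectors.
import numpy as np
token_mean = np.mean([token.vector for token in doc], axis=0)
print(np.allclose(doc.vector, token_mean))  # expected True if doc.vector is the token average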
from sklearn.decomposition import PCA
def get_word_vectors(words):
# Converts a list of words into their word vectors.
# Returns a List of Word Vectors
return [nlp(word).vector for word in words]
words = ['car', 'truck', 'suv', 'race', 'elves', 'dragon', 'sword', 'king',
'queen', 'prince', 'horse', 'fish', 'lion', 'tiger', 'lynx', 'potato',
'baking', 'textbook', 'student', 'homework', 'studying', 'fear']
# Initialize pca model and tell it to project data down onto 2 dimensions.
pca = PCA(n_components=2)
# fit the pca model to our 300-D data, this will work out which is the best way
# to project the data down that will best maintain the relative distances
# between data points.
# It will store these instructions on how to transform the data.
pca.fit(get_word_vectors(words))
# Tell our (fitted) pca model to transform our 300D data down onto 2D using the
# instructions it learned during the fit phase.
word_vecs_2d = pca.transform(get_word_vectors(words))
# let's look at our new 2D word vectors
word_vecs_2d
print("Length of Embeddings List: ", len(get_word_vectors(words)))
print("Size of Original Embedding Vector: ", len(get_word_vectors(words)[0]))
print("Size of Reduced Embedding Vector: ", len(word_vecs_2d[0]))
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 15))
# plot the scatter plot of where the words will be
plt.scatter(word_vecs_2d[:, 0], word_vecs_2d[:,1])
# for each word and coordinate pair: draw the text on the plot
for word, coord in zip(words, word_vecs_2d):
x, y = coord
plt.text(x, y, word, size=15)
# show the plot
plt.show()
# Most popular word embeddings today are 'BERT' or 'ELMo'?
###Output
_____no_output_____ |
Fairness_Survey/ALGORITHMS/EO/LawSchool.ipynb | ###Markdown
INSTALLATION
###Code
!pip install aif360
!pip install fairlearn
!apt-get install default-jre
!java -version
!pip install h2o
!pip install xlsxwriter
###Output
Collecting xlsxwriter
  Downloading XlsxWriter-3.0.1-py3-none-any.whl (148 kB)
Installing collected packages: xlsxwriter
Successfully installed xlsxwriter-3.0.1
###Markdown
IMPORTS
###Code
import numpy as np
from mlxtend.feature_selection import ExhaustiveFeatureSelector
from xgboost import XGBClassifier
# import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import openpyxl
import xlsxwriter
from openpyxl import load_workbook
import shap
#suppress setwith copy warning
pd.set_option('mode.chained_assignment',None)
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest, SelectFwe, SelectPercentile,SelectFdr, SelectFpr, SelectFromModel
from sklearn.feature_selection import chi2, mutual_info_classif
# from skfeature.function.similarity_based import fisher_score
import aif360
import matplotlib.pyplot as plt
from aif360.metrics.classification_metric import ClassificationMetric
from aif360.algorithms.postprocessing import EqOddsPostprocessing
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.datasets import StandardDataset , BinaryLabelDataset
from sklearn.preprocessing import MinMaxScaler
MM= MinMaxScaler()
import h2o
from h2o.automl import H2OAutoML
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import sys
sys.path.append("../")
import os
h2o.init()
###Output
Checking whether there is an H2O instance running at http://localhost:54321 ..... not found.
Attempting to start a local H2O server...
Java Version: openjdk version "11.0.11" 2021-04-20; OpenJDK Runtime Environment (build 11.0.11+9-Ubuntu-0ubuntu2.18.04); OpenJDK 64-Bit Server VM (build 11.0.11+9-Ubuntu-0ubuntu2.18.04, mixed mode, sharing)
Starting server from /usr/local/lib/python3.7/dist-packages/h2o/backend/bin/h2o.jar
Ice root: /tmp/tmp4mp_vv6k
JVM stdout: /tmp/tmp4mp_vv6k/h2o_unknownUser_started_from_python.out
JVM stderr: /tmp/tmp4mp_vv6k/h2o_unknownUser_started_from_python.err
Server is running at http://127.0.0.1:54321
Connecting to H2O server at http://127.0.0.1:54321 ... successful.
###Markdown
**************************LOADING DATASET*******************************
###Code
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
for i in range(1,51,1):
train_url=r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/Law/Train'
train_path= os.path.join(train_url ,("Train"+ str(i)+ ".csv"))
train= pd.read_csv(train_path).drop(['region_first'], axis=1)
test_url=r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/Law/Test'
test_path= os.path.join(test_url ,("Test"+ str(i)+ ".csv"))
test= pd.read_csv(test_path).drop(['region_first'], axis=1)
# normalization of train and test sets
Fitter= MM.fit(train)
transformed_train=Fitter.transform(train)
train=pd.DataFrame(transformed_train, columns= train.columns)
#test normalization
transformed_test=Fitter.transform(test)
test=pd.DataFrame(transformed_test, columns= test.columns)
# ********************SETTING TO H20 FRAME AND MODEL TRAINING*******************************
x = list(train.columns)
y = "first_pf"
x.remove(y)
Train=h2o.H2OFrame(train)
Test= h2o.H2OFrame(test)
Train[y] = Train[y].asfactor()
Test[y] = Test[y].asfactor()
aml = H2OAutoML(max_models=10, nfolds=10, include_algos=['GBM'] , stopping_metric='AUTO') #verbosity='info',,'GBM', 'DRF'
aml.train(x=x, y=y, training_frame=Train)
best_model= aml.leader
# a.model_performance()
#**********************REPLACE LABELS OF DUPLICATED TRAIN AND TEST SET WITH 0.5 THRESHOLDED PREDICT PROBA****************************
#predicted proba for train labels
gbm_Predictions_train= best_model.predict(Train)
gbm_Predictions_train= gbm_Predictions_train.as_data_frame()
train_label= (gbm_Predictions_train.p1>0.5).astype(int)
predicted_df_train= train.copy()
predicted_df_train['first_pf']= train_label
#predicted proba for test labels
gbm_Predictions_test= best_model.predict(Test)
gbm_Predictions_test= gbm_Predictions_test.as_data_frame()
test_label= (gbm_Predictions_test.p1>0.5).astype(int)
predicted_df_test= test.copy()
predicted_df_test['first_pf']= test_label
# *************CHECKING FAIRNESS IN DATASET**************************
## ****************CONVERTING TO BLD FORMAT******************************
'''the EO's fit method takes as input the original data (ground truth) and the predicted dataset with the predicted dataset having
  same features but different labels (the predicted label is the thresholded predict proba), since TPR and FPR require both \hat{Y} and Y.
  Below, the two required datasets are converted to the binary-label format accepted by EO.
'''
#Transforming the Ground truth Train Test to BLD
advantagedGroup= [{'race':1}]
disadvantagedGroup= [{'race':0}]
class Train(StandardDataset):
def __init__(self,label_name= 'first_pf',
favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
super(Train, self).__init__(df=train , label_name=label_name ,
favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
privileged_classes=privileged_classes ,
)
BLD_Train= Train(protected_attribute_names= ['race'],
privileged_classes= [[1]])
class Test(StandardDataset):
def __init__(self,label_name= 'first_pf',
favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
super(Test, self).__init__(df=test , label_name=label_name ,
favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
privileged_classes=privileged_classes ,
)
BLD_Test= Test(protected_attribute_names= ['race'],
privileged_classes= [[1]])
#**************************************Predicted Train Test BLD*****************************************
class PreTrain(StandardDataset):
def __init__(self,label_name= 'first_pf',
favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
super(PreTrain, self).__init__(df=predicted_df_train , label_name=label_name ,
favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
privileged_classes=privileged_classes ,
)
BLD_PredictedTrain= PreTrain(protected_attribute_names= ['race'],
privileged_classes= [[1]])
class PreTest(StandardDataset):
def __init__(self,label_name= 'first_pf',
favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
super(PreTest, self).__init__(df=predicted_df_test , label_name=label_name ,
favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
privileged_classes=privileged_classes ,
)
BLD_PredictedTest= PreTest(protected_attribute_names= ['race'],
privileged_classes= [[1]])
  #****************************************Equalized Odds Instance*******************************
Equalizer= EqOddsPostprocessing(unprivileged_groups= disadvantagedGroup , privileged_groups= advantagedGroup )
#uses ground truth and unfair predictions of classifier to determine probabilities with which unfair output labels are changed to satisfy EO
Equalizer.fit(BLD_Train,BLD_PredictedTrain )
#predicting the new labels assigned by the EO engine
BLD_PredictedTest= Equalizer.predict(BLD_PredictedTest)
# ********************COMPUTE DISCRIMINATION*****************************
excelBook= load_workbook('/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/EqualOdds/EO_gbm.xlsx')
Law= excelBook['Law']
data= Law.values
# Get columns
columns = next(data)[0:]
  # Create a DataFrame based on the second and subsequent lines of data
OldDF = pd.DataFrame(data, columns=columns)
ClassifierBias = ClassificationMetric( BLD_Test,BLD_PredictedTest , unprivileged_groups= disadvantagedGroup, privileged_groups= advantagedGroup)
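  # Reference aside for a few of the fairness metrics computed below (standard
  # AIF360 definitions, stated here only as reader notes):
  #   statistical_parity_difference = P(y_hat=1 | unprivileged) - P(y_hat=1 | privileged)
  #   disparate_impact              = P(y_hat=1 | unprivileged) / P(y_hat=1 | privileged)
  #   true_positive_rate_difference (equal opportunity) = TPR_unpriv - TPR_priv
  #   average_odds_difference = mean of the TPR and FPR differences between groups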
Accuracy= ClassifierBias.accuracy()
TPR= ClassifierBias.true_positive_rate()
TNR= ClassifierBias.true_negative_rate()
NPV= ClassifierBias.negative_predictive_value()
PPV= ClassifierBias.positive_predictive_value()
SP=ClassifierBias .statistical_parity_difference()
IF=ClassifierBias.consistency()
DI=ClassifierBias.disparate_impact()
EOP=ClassifierBias.true_positive_rate_difference()
EO=ClassifierBias.average_odds_difference()
FDR= ClassifierBias.false_discovery_rate(privileged=False)- ClassifierBias.false_discovery_rate(privileged=True)
NPV_diff=ClassifierBias.negative_predictive_value(privileged=False)-ClassifierBias.negative_predictive_value(privileged=True)
FOR=ClassifierBias.false_omission_rate(privileged=False)-ClassifierBias.false_omission_rate(privileged=True)
PPV_diff=ClassifierBias.positive_predictive_value(privileged=False) -ClassifierBias.positive_predictive_value(privileged=True)
BGE = ClassifierBias.between_group_generalized_entropy_index()
WGE = ClassifierBias.generalized_entropy_index()-ClassifierBias.between_group_generalized_entropy_index()
BGTI = ClassifierBias.between_group_theil_index()
WGTI = ClassifierBias.theil_index() -ClassifierBias.between_group_theil_index()
EDF= ClassifierBias.differential_fairness_bias_amplification()
newdf= pd.DataFrame(index = [0], data= { 'ACCURACY': Accuracy,'TPR': TPR, 'PPV':PPV, 'TNR':TNR,'NPV':NPV,'SP':SP,'CONSISTENCY':IF,'DI':DI,'EOP':EOP,'EO':EO,'FDR':FDR,'NPV_diff':NPV_diff,
'FOR':FOR,'PPV_diff':PPV_diff,'BGEI':BGE,'WGEI':WGE,'BGTI':BGTI,'WGTI':WGTI,'EDF':EDF})
newdf=pd.concat([OldDF,newdf])
pathway= r"/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/EqualOdds/EO_gbm.xlsx"
with pd.ExcelWriter(pathway, engine='openpyxl') as writer:
#load workbook base as for writer
writer.book= excelBook
writer.sheets=dict((ws.title, ws) for ws in excelBook.worksheets)
newdf.to_excel(writer, sheet_name='Law', index=False)
# newdf.to_excel(writer, sheet_name='Adult', index=False)
print('Accuracy', Accuracy)
###Output
Parse progress: |█████████████████████████████████████████████████████████| 100%
Parse progress: |█████████████████████████████████████████████████████████| 100%
AutoML progress: |████████████████████████████████████████████████████████| 100%
gbm prediction progress: |████████████████████████████████████████████████| 100%
gbm prediction progress: |████████████████████████████████████████████████| 100%
###Markdown
LOGISTIC REGRESSION
###Code
for i in range(1,51,1):
train_url=r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/Law/Train'
train_path= os.path.join(train_url ,("Train"+ str(i)+ ".csv"))
train= pd.read_csv(train_path).drop(['region_first'], axis=1)
test_url=r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/Law/Test'
test_path= os.path.join(test_url ,("Test"+ str(i)+ ".csv"))
test= pd.read_csv(test_path).drop(['region_first'], axis=1)
# normalization of train and test sets
Fitter= MM.fit(train)
transformed_train=Fitter.transform(train)
train=pd.DataFrame(transformed_train, columns= train.columns)
#test normalization
transformed_test=Fitter.transform(test)
test=pd.DataFrame(transformed_test, columns= test.columns)
# ********************SETTING TO H20 FRAME AND MODEL TRAINING*******************************
x = list(train.columns)
y = "first_pf"
x.remove(y)
Train=h2o.H2OFrame(train)
Test= h2o.H2OFrame(test)
Train[y] = Train[y].asfactor()
Test[y] = Test[y].asfactor()
LogReg = H2OGeneralizedLinearEstimator(family= "binomial", lambda_ = 0)
LogReg.train(x=x, y=y, training_frame=Train)
#**********************REPLACE LABELS OF DUPLICATED TRAIN AND TEST SET WITH 0.5 THRESHOLDED PREDICT PROBA****************************
#predicted proba for train labels
lr_Predictions_train= LogReg.predict(Train)
lr_Predictions_train= lr_Predictions_train.as_data_frame()
train_label= (lr_Predictions_train.p1>0.5).astype(int)
predicted_df_train= train.copy()
predicted_df_train['first_pf']= train_label
#predicted proba for test labels
lr_Predictions_test= LogReg.predict(Test)
lr_Predictions_test= lr_Predictions_test.as_data_frame()
test_label= (lr_Predictions_test.p1>0.5).astype(int)
predicted_df_test= test.copy()
predicted_df_test['first_pf']= test_label
# *************CHECKING FAIRNESS IN DATASET**************************
## ****************CONVERTING TO BLD FORMAT******************************
'''the EO's fit method takes as input the original data (ground truth) and the predicted dataset with the predicted dataset having
  same features but different labels (the predicted label is the thresholded predict proba), since TPR and FPR require both \hat{Y} and Y.
  Below, the two required datasets are converted to the binary-label format accepted by EO.
'''
#Transforming the Ground truth Train Test to BLD
advantagedGroup= [{'race':1}]
disadvantagedGroup= [{'race':0}]
class Train(StandardDataset):
def __init__(self,label_name= 'first_pf',
favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
super(Train, self).__init__(df=train , label_name=label_name ,
favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
privileged_classes=privileged_classes ,
)
BLD_Train= Train(protected_attribute_names= ['race'],
privileged_classes= [[1]])
class Test(StandardDataset):
def __init__(self,label_name= 'first_pf',
favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
super(Test, self).__init__(df=test , label_name=label_name ,
favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
privileged_classes=privileged_classes ,
)
BLD_Test= Test(protected_attribute_names= ['race'],
privileged_classes= [[1]])
#**************************************Predicted Train Test BLD*****************************************
class PreTrain(StandardDataset):
def __init__(self,label_name= 'first_pf',
favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
super(PreTrain, self).__init__(df=predicted_df_train , label_name=label_name ,
favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
privileged_classes=privileged_classes ,
)
BLD_PredictedTrain= PreTrain(protected_attribute_names= ['race'],
privileged_classes= [[1]])
class PreTest(StandardDataset):
def __init__(self,label_name= 'first_pf',
favorable_classes= [1],protected_attribute_names=['race'], privileged_classes=[[1]], ):
super(PreTest, self).__init__(df=predicted_df_test , label_name=label_name ,
favorable_classes=favorable_classes , protected_attribute_names=protected_attribute_names ,
privileged_classes=privileged_classes ,
)
BLD_PredictedTest= PreTest(protected_attribute_names= ['race'],
privileged_classes= [[1]])
  #****************************************Equalized Odds Instance*******************************
Equalizer= EqOddsPostprocessing(unprivileged_groups= disadvantagedGroup , privileged_groups= advantagedGroup )
#uses ground truth and unfair predictions of classifier to determine probabilities with which unfair output labels are changed to satisfy EO
Equalizer.fit(BLD_Train,BLD_PredictedTrain )
#predicting the new labels based on the fitted EO engine
BLD_PredictedTest= Equalizer.predict(BLD_PredictedTest)
# ***************************COMPUTE DISCRIMINATION********************************
excelBook= load_workbook("/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/EqualOdds/EO_LogReg.xlsx")
Law= excelBook['Law']
data= Law.values
# Get columns
columns = next(data)[0:]
  # Create a DataFrame based on the second and subsequent lines of data
OldDF = pd.DataFrame(data, columns=columns)
ClassifierBias = ClassificationMetric( BLD_Test,BLD_PredictedTest , unprivileged_groups= disadvantagedGroup, privileged_groups= advantagedGroup)
Accuracy= ClassifierBias.accuracy()
TPR= ClassifierBias.true_positive_rate()
TNR= ClassifierBias.true_negative_rate()
NPV= ClassifierBias.negative_predictive_value()
PPV= ClassifierBias.positive_predictive_value()
SP=ClassifierBias .statistical_parity_difference()
IF=ClassifierBias.consistency()
DI=ClassifierBias.disparate_impact()
EOP=ClassifierBias.true_positive_rate_difference()
EO=ClassifierBias.average_odds_difference()
FDR= ClassifierBias.false_discovery_rate(privileged=False)- ClassifierBias.false_discovery_rate(privileged=True)
NPV_diff=ClassifierBias.negative_predictive_value(privileged=False)-ClassifierBias.negative_predictive_value(privileged=True)
FOR=ClassifierBias.false_omission_rate(privileged=False)-ClassifierBias.false_omission_rate(privileged=True)
PPV_diff=ClassifierBias.positive_predictive_value(privileged=False) -ClassifierBias.positive_predictive_value(privileged=True)
BGE = ClassifierBias.between_group_generalized_entropy_index()
WGE = ClassifierBias.generalized_entropy_index()-ClassifierBias.between_group_generalized_entropy_index()
BGTI = ClassifierBias.between_group_theil_index()
WGTI = ClassifierBias.theil_index() -ClassifierBias.between_group_theil_index()
EDF= ClassifierBias.differential_fairness_bias_amplification()
newdf= pd.DataFrame(index = [0], data= { 'ACCURACY': Accuracy,'TPR': TPR, 'PPV':PPV, 'TNR':TNR,'NPV':NPV,'SP':SP,'CONSISTENCY':IF,'DI':DI,'EOP':EOP,'EO':EO,'FDR':FDR,'NPV_diff':NPV_diff,
'FOR':FOR,'PPV_diff':PPV_diff,'BGEI':BGE,'WGEI':WGE,'BGTI':BGTI,'WGTI':WGTI,'EDF':EDF})
newdf=pd.concat([OldDF,newdf])
pathway= r"/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/EqualOdds/EO_LogReg.xlsx"
with pd.ExcelWriter(pathway, engine='openpyxl') as writer:
#load workbook base as for writer
writer.book= excelBook
writer.sheets=dict((ws.title, ws) for ws in excelBook.worksheets)
newdf.to_excel(writer, sheet_name='Law', index=False)
# newdf.to_excel(writer, sheet_name='Adult', index=False)
print('Accuracy', Accuracy)
###Output
_____no_output_____ |
python/1. CFAS Distribution and Correlation Figures.ipynb | ###Markdown
Extracting data from files loaded for two centres (Cambridge and Newcastle) using both Neuropathology and Clinical datasets
###Code
# load clinical and pathological dataframes with datasets
clin = pd.read_csv('../data/CFAS_ClinicalData.csv',header = 0) # read clinical dataset
patho = pd.read_excel('../data/CFAS_NeuropathologyData May 2018.xlsx', header = 21) # load pathological dataset
patho.dropna(subset=['Case'], inplace=True) # drop missing values
clin.rename(columns={'labno':'Case'}, inplace=True) # renaming a feature
# Create Seperate dataframes by centre
# Cambridge
# Clinical dataset
clinCambridge = clin['Case'].str.startswith('RH')
clinCambridge = clin[clinCambridge].copy()
# Pathological dataset
pathoCambridge = patho['Case'].str.startswith('RH')
pathoCambridge = patho[pathoCambridge].copy()
short = pd.to_numeric(clinCambridge.loc[:,'Case'].str[2:])
clinCambridge.loc[:,'Case'] = short
clinCambridge.sort_values(by=['Case'], inplace = True)
clinCambridge.head()
short = pathoCambridge.loc[:,'Case'].str[2:]
pathoCambridge.loc[:,'Case'] = pd.to_numeric(short)
pathoCambridge.sort_values(by=['Case'],inplace =True)
cambridge = pathoCambridge.merge(clinCambridge, how = 'left', on = ['Case'])
cambridge.head()
# Newcastle
clinNewcastle = clin['Case'].str.startswith('NA')
clinNewcastle = clin[clinNewcastle].copy()
pathoNewcastle = patho['Case'].str.startswith('NA')
pathoNewcastle = patho[pathoNewcastle].copy()
short = pd.to_numeric(clinNewcastle.loc[:,'Case'].str[2:].str.replace("/",""))
clinNewcastle.loc[:,'Case'] = short
clinNewcastle.sort_values(by=['Case'], inplace = True)
short = pathoNewcastle.loc[:,'Case'].str[2:].str.replace("/","")
pathoNewcastle.loc[:,'Case'] = pd.to_numeric(short)
pathoNewcastle.sort_values(by=['Case'],inplace =True)
newcastle = pathoNewcastle.merge(clinNewcastle, how = 'left', on = ['Case'])
# newcastle.head()
# Creates master data set containing both cambridge and newcastle data sets
master = cambridge.append(newcastle).copy()
master = master[(master[['dem_nver4']] != 0).all(axis=1)]
master.to_csv(r'../data/master.csv',mode = 'w',index=False)
patho.to_csv(r'../data/patho.csv',mode = 'w',index=False)
# count controls and dementia
# count cluster memberships
dementias=master.dem_nver4.value_counts()
print('No dementia : {} \nDementia : {}'.format(dementias[0], dementias[1]))
print('Samples : {} \nFeatures : {}'.format(master.shape[0], master.shape[1]))
# extract neuropathology dataset
patho_features = pd.read_csv('../data/Neuropathology Features.csv',header = 0).Features
master = pd.read_csv('../data/master.csv',header = 0)
master.rename(columns={'aged':'age', 'brainwgt':'brain weight'}, inplace=True)
patho_data = master[patho_features].copy()
patho_data['dem_nver4'] = master['dem_nver4'].copy()
patho_data.columns
# group feature values into categorical ranges by calling the functions defined above
patho_data["age"] = patho_data.apply(ageCat,axis=1)
patho_data["brain weight"] = patho_data.apply(brainwgt_Cat,axis=1)
patho_data["CAAAreas"] = patho_data.apply(CAAAreas_Cat,axis=1)
patho_data["CAATotalSev"] = patho_data.apply(CAATotalSev_Cat,axis=1)
patho_data["CAAParenc"] = patho_data.apply(CAAParenc_Cat,axis=1)
patho_data["CAAMeningeal"] = patho_data.apply(CAAMeningeal_Cat,axis=1)
from natsort import natsorted
df = patho_data.age.value_counts().sort_index().reset_index()
adf = df['index'].values
if adf.dtype != 'float64':
adf = natsorted(adf)
print(adf)
else:
print('good')
patho_feature_order = pd.DataFrame()
patho_feature_order['Features'] = ('Braak stage',
'BrainNet tau stage',
'age',
'CAA type',
'CAA meningeal',
'brain weight',
'Thal phase',
'CAA parenchymal',
'CAA total severity',
'subpial TSA in mesial temporal lobe',
'subpial brainstem',
'CAA areas',
'TSA-any',
'CAA parietal',
'CAA hippocampus',
'CAA occipital',
'Subpial mesial temporal',
'CAA temporal',
'CAA frontal',
'subpial TSA in brainstem',
'CAA cerebellum',
'Aβ stage typical',
'hippocampal tau stage',
'Temporal microinfarct',
'frontal microinfarct',
'TSA-total',
'subcortical stage',
'PART-all',
'cortical stage',
'PART-definite',
'Occipital microinfarct',
'microinfarct stage',
'subpial TSA in expanded cortex',
'Argyrophilic grains',
'parietal microinfarct',
'Tufted astrocytes',
'dem_nver4')
# add the class label
patho_features = patho_feature_order.iloc[:-1,:]
patho_data.rename(columns={'BraakStage':'Braak stage',
'BrainNetStage':'BrainNet tau stage',
# 'aged':'age',
'CAATotalSev':'CAA total severity',
'CAAMeningeal':'CAA meningeal',
# 'brainwgt':'brain weight',
'CAAType':'CAA type',
'ThalStage':'Thal phase',
'CAAAreas':'CAA areas',
'SubpialBrainstem':'subpial brainstem',
'CAAParenc':'CAA parenchymal',
'TSATotal':'TSA-total',
'TSAAny':'TSA-any',
'CAAParietal':'CAA parietal',
'CAAHippocampus':'CAA hippocampus',
'CAAFrontal':'CAA frontal',
'CAAOccipital':'CAA occipital',
'CAACerebellum':'CAA cerebellum',
'AbStageTypical':'Aβ stage typical',
'HippocTauStage':'hippocampal tau stage',
'CxSPETSA':'subpial TSA in expanded cortex',
'MTSPETSA':'subpial TSA in mesial temporal lobe',
'BSSPETSA':'subpial TSA in brainstem',
'SubcorticalStage':'subcortical stage',
'CorticalStage':'cortical stage',
'SubpialMesTemp':'Subpial mesial temporal',
'PARTall':'PART-all',
'MicroinfarctStage':'microinfarct stage',
'ArgyrGrains':'Argyrophilic grains',
'OccipMicroing':'Occipital microinfarct',
'ParMicrin':'parietal microinfarct',
'PARTdefinite':'PART-definite',
'FrontalMicroin':'frontal microinfarct',
'TempMicroinf':'Temporal microinfarct',
'CAATemp':'CAA temporal',
'TuftedAst':'Tufted astrocytes'} , errors="raise", inplace=True)
patho_features = patho_data.columns
patho_data = patho_data[patho_feature_order.Features]
patho_features
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from natsort import natsorted
import textwrap
sns.set_theme(style="ticks", color_codes=True)
x_axis=range(5)
fig, axes =plt.subplots(6,6, figsize=(16,18), sharey=False) #
axes = axes.flatten()
for ax, col in zip(axes, patho_data.columns):
x, y, hue = col, "proportion", "dem_nver4"
hue_order = ["No dementia", "Dementia"]
order = pd.DataFrame(patho_data[x].value_counts().sort_index().reset_index())
order= order['index'].values
if order.dtype != 'float64':
order = natsorted(order)
g = (patho_data[x]
.groupby(patho_data[hue])
.value_counts(normalize=True)#.sort_index()
.rename(y)
.reset_index()
.pipe((sns.barplot, "data"), x=x, y=y, hue=hue, order=order, ax=ax))
g.legend([],[], frameon=False)
plt.setp(ax.get_xticklabels(), rotation=45, size='14', horizontalalignment="right")
plt.setp(ax.get_yticklabels(), size='14')
ax.xaxis.get_label().set_fontsize(14)
ax.yaxis.get_label().set_fontsize(14)
for ax in axes:
ax.set_ylabel('')
axes[0].set_ylabel('proportion', size='16')
axes[6].set_ylabel('proportion', size='16')
axes[12].set_ylabel('proportion', size='16')
axes[18].set_ylabel('proportion', size='16')
axes[24].set_ylabel('proportion', size='16')
axes[30].set_ylabel('proportion', size='16')
plt.legend(loc='best');
l = plt.legend()
l.set_title('Status')
plt.setp(ax.get_legend().get_texts(), fontsize='14') # for legend text
plt.setp(ax.get_legend().get_title(), fontsize='14') # for legend title
plt.tight_layout()
plt.subplots_adjust(wspace=0.28, hspace=.7)
# plt.xticks(wrap=True)
fig.savefig('Figures/Neuropathology_Distrbution.png',dpi=300, bbox_inches="tight")
fig.savefig('Figures/Neuropathology_Distrbution.pdf',dpi=300, bbox_inches="tight")
plt.show()
# load clinical and pathological dataframes with datasets
features = pd.read_csv('../data/neuropathology_features.csv',header = 0).features
feats= pd.DataFrame(features)
feats.replace({'features':{'aged':'age', 'brainwgt':'brain weight'}}, inplace=True)
feats = feats.squeeze()
patho = master[feats]
patho['age']=master.age
#For Preprocess and Analysis of Selected Features
patho_selected_features = patho.copy()
patho_with_AB = patho_selected_features
patho.rename(columns={'BraakStage':'Braak stage',
'BrainNetStage':'BrainNet tau stage',
'CAATotalSev':'CAA total severity',
'CAAMeningeal':'CAA meningeal',
'CAAType':'CAA type',
'ThalStage':'Thal phase',
'CAAAreas':'CAA areas',
'SubpialBrainstem':'subpial brainstem',
'CAAParenc':'CAA parenchymal',
'TSATotal':'TSA-total',
'TSAAny':'TSA-any',
'CAAParietal':'CAA parietal',
'CAAHippocampus':'CAA hippocampus',
'CAAFrontal':'CAA frontal',
'CAAOccipital':'CAA occipital',
'CAACerebellum':'CAA cerebellum',
'AbStageTypical':'Aβ stage typical',
'HippocTauStage':'hippocampal tau stage',
'CxSPETSA':'subpial TSA in expanded cortex',
'MTSPETSA':'subpial TSA in mesial temporal lobe',
'BSSPETSA':'subpial TSA in brainstem',
'SubcorticalStage':'subcortical stage',
'CorticalStage':'cortical stage',
'SubpialMesTemp':'Subpial mesial temporal',
'PARTall':'PART-all',
'MicroinfarctStage':'microinfarct stage',
'ArgyrGrains':'Argyrophilic grains',
'OccipMicroing':'Occipital microinfarct',
'ParMicrin':'parietal microinfarct',
'PARTdefinite':'PART-definite',
'FrontalMicroin':'frontal microinfarct',
'TempMicroinf':'Temporal microinfarct',
'CAATemp':'CAA temporal',
'TuftedAst':'Tufted astrocytes'} , errors="raise", inplace=True)
# compute correlations
corr = patho.corr('spearman')
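# Optional aside: list the most strongly (in absolute value) correlated feature
# pairs from the Spearman matrix above, using only the upper triangle so each
# pair appears once.
import numpy as np
upper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
top_pairs = upper.stack().abs().sort_values(ascending=False).head(10)
print(top_pairs)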
# plot correlation
fig=plt.figure(figsize=(15,13))
# plt.figure(figsize=(15,10))
plt.rcParams['savefig.facecolor']='white'
sns.set_theme(style="ticks", color_codes=True)
ax = sns.heatmap(
corr,
vmin=-1, vmax=1,
center=0,
cmap=sns.diverging_palette(30, 245, n=100, as_cmap=True),#'RdBu_r', #
square=True,
cbar_kws={'label': 'Spearman Coefficients', "shrink": .9})
ax.figure.axes[-1].yaxis.label.set_size(14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig('Figures/spearman_correlation_heatmap.png',dpi=300, bbox_inches="tight")
plt.savefig('Figures/spearman_correlation_heatmap.pdf',dpi=300, bbox_inches="tight")
(master.groupby(['dem_nver4', 'sex']).agg({'age': ['mean', 'count', 'median']}))
###Output
_____no_output_____ |