path | concatenated_notebook
---|---
Generative_Adversarial_Neural_Nets.ipynb | ###Markdown
###Code
# Download and image-grid helpers for the dataset
import os
import zipfile
import requests
import math
import numpy as np
from PIL import Image
from tqdm import tqdm
DEBUG = False
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination, chunk_size=32*1024):
total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
# tqdm iterates over chunks, so scale the byte total into a chunk count
for chunk in tqdm(response.iter_content(chunk_size),
total=math.ceil(total_size / chunk_size) if total_size else None,
unit='B', unit_scale=chunk_size, desc=destination):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def download_file_from_google_drive(id, destination):
print("Downloading into ./data/... Please wait.")
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={ 'id': id }, stream=True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
print("Done.")
def download_celeb_a():
dirpath = './data'
data_dir = 'celebA'
if os.path.exists(os.path.join(dirpath, data_dir)):
print('Found Celeb-A - skip')
return
filename, drive_id = "img_align_celeba.zip", "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
save_path = os.path.join(dirpath, filename)
if os.path.exists(save_path):
print('[*] {} already exists'.format(save_path))
else:
download_file_from_google_drive(drive_id, save_path)
if not DEBUG:
zip_dir = ''
with zipfile.ZipFile(save_path) as zf:
zip_dir = zf.namelist()[0]
zf.extractall(dirpath)
os.remove(save_path)
os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, data_dir))
def images_square_grid(images, mode='RGB'):
# Get maximum size for square grid of images
save_size = math.floor(math.sqrt(images.shape[0]))
# Scale to 0-255
images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)
# Put images in a square arrangement
images_in_square = np.reshape(
images[:save_size*save_size],
(save_size, save_size, images.shape[1], images.shape[2], images.shape[3]))
# Combine images to grid image
new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))
for col_i, col_images in enumerate(images_in_square):
for image_i, image in enumerate(col_images):
im = Image.fromarray(image, mode)
new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))
return new_im
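# Minimal usage sketch (assumes `image_batch` is a hypothetical (N, H, W, 3) array,
# e.g. a batch returned by get_batch below): the helper tiles the first
# floor(sqrt(N))**2 images into one PIL image that can be shown or saved.
# grid_img = images_square_grid(image_batch)
# grid_img.save('sample_grid.png')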
# python spyder notebook
# @author:sunkara
data_dir = './data'  # must match the directory used by download_celeb_a()
download_celeb_a()
%matplotlib inline
import os
from glob import glob
from matplotlib import pyplot
from PIL import Image
import numpy as np
# Image configuration
IMAGE_HEIGHT = 28
IMAGE_WIDTH = 28
data_files = glob(os.path.join(data_dir, 'celebA/*.jpg'))
shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, 3
def get_image(image_path, width, height, mode):
image = Image.open(image_path)
'''
if image.size != (width, height):
# Remove most pixels that aren't part of a face
face_width = face_height = 108
j = (image.size[0] - face_width) // 2
i = (image.size[1] - face_height) // 2
image = image.crop([j, i, j + face_width, i + face_height])
image = image.resize([width, height], Image.BILINEAR)
'''
return np.array(image.convert(mode))
def get_batch(image_files, width, height, mode='RGB'):
"""
Read a batch of image files into a single float32 array
"""
data_batch = np.array(
[get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)
# Make sure the images are in 4 dimensions
if len(data_batch.shape) < 4:
data_batch = data_batch.reshape(data_batch.shape + (1,))
return data_batch
def get_batches(batch_size):
"""
Generate batches
"""
IMAGE_MAX_VALUE = 255
current_index = 0
while current_index + batch_size <= shape[0]:
data_batch = get_batch(
data_files[current_index:current_index + batch_size],
*shape[1:3])
current_index += batch_size
yield data_batch / IMAGE_MAX_VALUE - 0.5
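# Usage sketch (illustrative only, not needed for training): iterate over
# normalized batches, whose values lie in [-0.5, 0.5].
# for batch in get_batches(16):
#     print(batch.shape, batch.min(), batch.max())
#     break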
test_images = get_batch(glob(os.path.join(data_dir, 'celebA/*.jpg'))[:10], 56, 56)
pyplot.imshow(images_square_grid(test_images))
import tensorflow as tf
def model_inputs(image_width, image_height, image_channels, z_dim):
inputs_real = tf.placeholder(tf.float32, shape=(None, image_width, image_height, image_channels), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
return inputs_real, inputs_z, learning_rate
def generator(z, out_channel_dim, is_train=True):
alpha = 0.2
with tf.variable_scope('generator', reuse=not is_train):
# First fully connected layer
x_1 = tf.layers.dense(z, 2*2*512)
# Reshape it to start the convolutional stack
deconv_2 = tf.reshape(x_1, (-1, 2, 2, 512))
batch_norm2 = tf.layers.batch_normalization(deconv_2, training=is_train)
lrelu2 = tf.maximum(alpha * batch_norm2, batch_norm2)
# Deconv 1
deconv3 = tf.layers.conv2d_transpose(lrelu2, 256, 5, 2, padding='VALID')
batch_norm3 = tf.layers.batch_normalization(deconv3, training=is_train)
lrelu3 = tf.maximum(alpha * batch_norm3, batch_norm3)
# Deconv 2
deconv4 = tf.layers.conv2d_transpose(lrelu3, 128, 5, 2, padding='SAME')
batch_norm4 = tf.layers.batch_normalization(deconv4, training=is_train)
lrelu4 = tf.maximum(alpha * batch_norm4, batch_norm4)
# Output layer
logits = tf.layers.conv2d_transpose(lrelu4, out_channel_dim, 5, 2, padding='SAME')
out = tf.tanh(logits)
return out
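# Shape walk-through for the generator above (batch dimension omitted), assuming
# the 28x28 targets configured later in this notebook:
#   dense + reshape                      -> 2 x 2 x 512
#   conv2d_transpose(256, 5, 2, VALID)   -> 7 x 7 x 256    ((2 - 1) * 2 + 5 = 7)
#   conv2d_transpose(128, 5, 2, SAME)    -> 14 x 14 x 128
#   conv2d_transpose(out, 5, 2, SAME)    -> 28 x 28 x out_channel_dim, tanh in [-1, 1]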
def discriminator(images, reuse=False):
alpha = 0.2
with tf.variable_scope('discriminator', reuse=reuse):
# using 4 layer network as in DCGAN Paper
# Conv 1
conv1 = tf.layers.conv2d(images, 64, 5, 2, 'SAME')
lrelu1 = tf.maximum(alpha * conv1, conv1)
# Conv 2
conv2 = tf.layers.conv2d(lrelu1, 128, 5, 2, 'SAME')
batch_norm2 = tf.layers.batch_normalization(conv2, training=True)
lrelu2 = tf.maximum(alpha * batch_norm2, batch_norm2)
# Conv 3
conv3 = tf.layers.conv2d(lrelu2, 256, 5, 1, 'SAME')
batch_norm3 = tf.layers.batch_normalization(conv3, training=True)
lrelu3 = tf.maximum(alpha * batch_norm3, batch_norm3)
# Flatten (with the 28x28 inputs used here the feature map is 7x7x256)
flat = tf.reshape(lrelu3, (-1, 7*7*256))
# Logits
logits = tf.layers.dense(flat, 1)
# Output
out = tf.sigmoid(logits)
return out, logits
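# Shape walk-through for the discriminator above with 28x28x3 inputs:
#   conv(64, 5, stride 2, SAME)   -> 14 x 14 x 64
#   conv(128, 5, stride 2, SAME)  -> 7 x 7 x 128
#   conv(256, 5, stride 1, SAME)  -> 7 x 7 x 256, flattened to 7*7*256 before the logits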
# loss function
def model_loss(input_real, input_z, out_channel_dim):
label_smoothing = 0.9
g_model = generator(input_z, out_channel_dim)
d_model_real, d_logits_real = discriminator(input_real)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_model_real) * label_smoothing))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_model_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_model_fake) * label_smoothing))
return d_loss, g_loss
# optimization
def model_opt(d_loss, g_loss, learning_rate, beta1):
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
g_vars = [var for var in t_vars if var.name.startswith('generator')]
# Optimize
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)
return d_train_opt, g_train_opt
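# Note: wrapping the optimizers in tf.control_dependencies(UPDATE_OPS) forces the
# batch-normalization moving mean/variance updates to run on every training step.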
def show_generator_output(sess, n_images, input_z, out_channel_dim):
z_dim = input_z.get_shape().as_list()[-1]
example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])
samples = sess.run(
generator(input_z, out_channel_dim, False),
feed_dict={input_z: example_z})
pyplot.imshow((samples[0] + 1) / 2)  # rescale tanh output from [-1, 1] to [0, 1]
pyplot.show()
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape):
input_real, input_z, _ = model_inputs(data_shape[1], data_shape[2], data_shape[3], z_dim)
d_loss, g_loss = model_loss(input_real, input_z, data_shape[3])
d_opt, g_opt = model_opt(d_loss, g_loss, learning_rate, beta1)
steps = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epoch_count):
for batch_images in get_batches(batch_size):
# values range from -0.5 to 0.5, therefore scale to range -1, 1
batch_images = batch_images * 2
steps += 1
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))
_ = sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_opt, feed_dict={input_real: batch_images, input_z: batch_z})
if steps % 400 == 0:
# Every 400 steps, get the losses and print them out
train_loss_d = d_loss.eval({input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(epoch_i+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
_ = show_generator_output(sess, 1, input_z, data_shape[3])
batch_size = 16
z_dim = 100
learning_rate = 0.0002
beta1 = 0.5
epochs = 2
with tf.Graph().as_default():
train(epochs, batch_size, z_dim, learning_rate, beta1, get_batches, shape)
###Output
_____no_output_____ |
notebook/object_tracking/object_tracking.ipynb | ###Markdown
Object tracking: a sample that specifies the tracking target (by passing a bbox) and then retrieves the target's bbox in each frame.
- Video used: https://pixabay.com/ja/videos/%E3%83%88%E3%83%A9%E3%83%99%E3%83%AB-%E9%81%93-%E8%BB%8A-%E9%A2%A8%E6%99%AF-47901/
- Reference link: OpenCV Object Tracking - a site (in English) that explains an object-tracking implementation block by block: https://www.pyimagesearch.com/2018/07/30/opencv-object-tracking/
###Code
from pathlib import Path
import cv2
import sys
import os
sys.path.append(str(Path(os.getcwd()).parents[1] / 'src'))
from lib.image import Video, draw_bbox, plt_imshow
# Load the video
cwd = Path(os.getcwd())
video_path = cwd / 'car_driving.mp4'
video = Video(video_path)
print(video.confs)
# Specify the tracking target
init_bbox = [380,225,72,75] # initial position checked by hand
bboxes = []
bboxes.append(init_bbox)
init_frame = video.read_frame_as_array(0)
img_box = draw_bbox(init_frame, bboxes[0], clr=(0,0,255), thickness=2)
plt_imshow(img_box, is_bgr=True)
# Tracking
tracker = cv2.TrackerCSRT_create()
tracker.init(init_frame, init_bbox) # initial tracker setup
# Read frames and run the tracker
for id_frame in range(1,video.confs['frame_count']-100):
frame = video.read_frame_as_array(id_frame) # read the frame
is_valid_bbox, bbox = tracker.update(frame) # update the tracker on this frame
bboxes.append(bbox)
# Draw the bbox
img_box = draw_bbox(frame, bbox, clr=(0,0,255), thickness=2)
# Periodically check the tracking result
if (id_frame % 200) == 1:
plt_imshow(img_box, is_bgr=True)
print(bbox)
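# For reference, a minimal equivalent using only OpenCV (no project helpers); a
# sketch, not meant to run here. The file name and initial bbox are the same
# assumptions as above.
# cap = cv2.VideoCapture(str(video_path))
# ok, frame0 = cap.read()
# trk = cv2.TrackerCSRT_create()
# trk.init(frame0, tuple(init_bbox))
# while True:
#     ok, frame = cap.read()
#     if not ok:
#         break
#     found, box = trk.update(frame)
# cap.release()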
###Output
(379, 223, 73, 76)
(333, 182, 48, 50)
(356, 203, 39, 41)
|
Deep_Learning_with_TensorFlow/1.4.0/Chapter06/LeNet-5/LeNet5_train.ipynb | ###Markdown
1. Define the neural-network parameters
###Code
# Imports needed by this notebook (the inference graph lives in the companion
# LeNet5_infernece module from this chapter)
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import LeNet5_infernece
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 6000
MOVING_AVERAGE_DECAY = 0.99
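# Illustration only: the training step defined below uses a staircase exponential
# decay schedule, lr(step) = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step // decay_steps),
# where decay_steps = num_training_examples / BATCH_SIZE (550 for the 55,000-image
# MNIST training set). E.g. after 1100 steps: 0.01 * 0.99**2 ~= 0.0098.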
###Output
_____no_output_____
###Markdown
2. Define the training process
###Code
def train(mnist):
# Define a placeholder for the 4-D input tensor
x = tf.placeholder(tf.float32, [
BATCH_SIZE,
LeNet5_infernece.IMAGE_SIZE,
LeNet5_infernece.IMAGE_SIZE,
LeNet5_infernece.NUM_CHANNELS],
name='x-input')
y_ = tf.placeholder(tf.float32, [None, LeNet5_infernece.OUTPUT_NODE], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
y = LeNet5_infernece.inference(x,False,regularizer)
global_step = tf.Variable(0, trainable=False)
# Define the loss function, learning rate, moving-average op, and training step.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY,
staircase=True)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variables_averages_op]):
train_op = tf.no_op(name='train')
# Initialize the TensorFlow persistence class (Saver).
saver = tf.train.Saver()
with tf.Session() as sess:
tf.global_variables_initializer().run()
for i in range(TRAINING_STEPS):
xs, ys = mnist.train.next_batch(BATCH_SIZE)
reshaped_xs = np.reshape(xs, (
BATCH_SIZE,
LeNet5_infernece.IMAGE_SIZE,
LeNet5_infernece.IMAGE_SIZE,
LeNet5_infernece.NUM_CHANNELS))
_, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
if i % 1000 == 0:
print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
###Output
_____no_output_____
###Markdown
3. Main program entry point
###Code
def main(argv=None):
mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
train(mnist)
if __name__ == '__main__':
main()
###Output
Extracting ../../../datasets/MNIST_data/train-images-idx3-ubyte.gz
Extracting ../../../datasets/MNIST_data/train-labels-idx1-ubyte.gz
Extracting ../../../datasets/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting ../../../datasets/MNIST_data/t10k-labels-idx1-ubyte.gz
After 1 training step(s), loss on training batch is 5.95725.
After 1001 training step(s), loss on training batch is 0.664706.
After 2001 training step(s), loss on training batch is 0.670048.
After 3001 training step(s), loss on training batch is 0.638539.
After 4001 training step(s), loss on training batch is 0.743027.
After 5001 training step(s), loss on training batch is 0.638279.
|
02.Code/02.Baseline Embeddings & LGBM.ipynb | ###Markdown
Evaluating
###Code
df_test = pd.read_csv(os.path.join(DATA_PATH,'test_preprocessed.csv'))
df_test['target'] = -1
COLUMN_NAME = 'product_name'
test_dataset = BNPParibasText(df_test,MAX_LENGTH,tokenizer,COLUMN_NAME)
model = Roberta_Model(pretrained_model=PRETRAINED)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size = 32,
pin_memory = True,
num_workers = 72
)
emb_sentence_test = get_embedding(test_loader, model, 'cuda')
df_test[[f'emb_{COLUMN_NAME}_{i}' for i in range(emb_sentence_test.shape[1])]] = emb_sentence_test
COLUMN_NAME = 'ingredients_text'
test_dataset = BNPParibasText(df_test,MAX_LENGTH,tokenizer,COLUMN_NAME)
model = Roberta_Model(pretrained_model=PRETRAINED)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size = 32,
pin_memory = True,
num_workers = 72
)
emb_sentence_test = get_embedding(test_loader, model, 'cuda')
df_test[[f'emb_{COLUMN_NAME}_{i}' for i in range(emb_sentence_test.shape[1])]] = emb_sentence_test
COLUMN_NAME = 'countries_en'
test_dataset = BNPParibasText(df_test,MAX_LENGTH,tokenizer,COLUMN_NAME)
model = Roberta_Model(pretrained_model=PRETRAINED)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size = 32,
pin_memory = True,
num_workers = 72
)
emb_sentence_test = get_embedding(test_loader, model, 'cuda')
df_test[[f'emb_{COLUMN_NAME}_{i}' for i in range(emb_sentence_test.shape[1])]] = emb_sentence_test
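# The three blocks above could be written more compactly as a loop (sketch; relies
# on the same BNPParibasText, Roberta_Model and get_embedding helpers defined
# earlier in this notebook):
# for col in ['product_name', 'ingredients_text', 'countries_en']:
#     ds = BNPParibasText(df_test, MAX_LENGTH, tokenizer, col)
#     loader = torch.utils.data.DataLoader(ds, batch_size=32, pin_memory=True, num_workers=72)
#     emb = get_embedding(loader, Roberta_Model(pretrained_model=PRETRAINED), 'cuda')
#     df_test[[f'emb_{col}_{i}' for i in range(emb.shape[1])]] = emb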
df_test = apply_label_encoder(df_test,dict_le,drop_original = True, missing_new_cat = True)
probs = 0
for i in models:
probs = probs + (i.predict(df_test[feature_list]))
print('fin_predict')
y_test_pred = probs/5.0
print('Real:', math.sqrt(mean_squared_error(y_test_pred, df_test['target'].values)))
y_submission['target'] = y_test_pred
y_submission.head()
# Submit the results
#apiquery.submit_api(y_submission,
# competition_name='food',
# subname='test_v2', # You can change this without any problem; use whatever name you want.
# holdout_key='None',
# update_ldb=True,
# username="Insight ML - DD" # Put your team name as a string.
# The best of your submissions is the one that will appear on the leaderboard.
#)
###Output
requests number 1
200
{'Date': 'Tue, 18 May 2021 20:58:56 GMT', 'Content-Type': 'application/json', 'Content-Length': '495', 'Connection': 'keep-alive', 'X-Request-ID': '9VDYQEXOTIL4RSGH', 'Access-Control-Allow-Origin': '*', 'Access-Control-Allow-Methods': 'POST', 'Access-Control-Allow-Headers': 'authorization,content-type'}
|
GenMod2.0/General_Simulation_Model_2_Create_GSM_NGWM.ipynb | ###Markdown
Create a General Simulation Model from a model_grid.csv and ibound.tif
Standard package imports
###Code
%matplotlib notebook
import os
import datetime as dt
import pickle, joblib
# Standard data science libraries
import pandas as pd
import numpy as np
import scipy.stats as ss
import scipy.optimize as so
import scipy.interpolate as si
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-notebook')
# Options for pandas
pd.options.display.max_columns = 20
pd.options.display.max_rows = 50
# Display all cell outputs
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
from IPython.display import Image
from IPython.display import Math
###Output
_____no_output_____
###Markdown
Package imports specific to this notebook
###Code
import flopy as fp
import shutil
import Genmod_Utilities as gmu
import RTD_util6 as rtd_ut
from matplotlib import colors
from scipy.ndimage import distance_transform_edt
from argparse import Namespace
import json
###Output
_____no_output_____
###Markdown
Set scenario-specific model values and map hydrologic properties to columns in `model_grid`. These values are stored in a Python dictionary and saved for use in later notebooks.
* **num_surf_layers** : int, number of surficial (unconsolidated) aquifer layers
**Note that there should be at least 2 bedrock layers for the interpolation used in this method to work**
* **num_bdrk_layers** : int, number of bedrock layers
* **K_surf** : str, column in `model_grid` to map to surficial hydraulic conductivity
* **K_bdrk** : str, column in `model_grid` to map to bedrock hydraulic conductivity
* **ibound** : str, column in `model_grid` to map to idomain
* **GHB** : bool, whether to include a general head boundary in lake cells on the model boundary
* **GHB_sea** : bool, whether to correct head at the general head boundary for density
* **K_lakes** : float, hydraulic conductivity to set for bodies of open water (for example, lakes)
* **k33overk** : float, ratio of vertical to horizontal hydraulic conductivity
* **min_thk** : float, minimum thickness for the sum of all surficial layers
* **stream_bed_thk** : float, thickness of streambed used in calculating conductance
* **bedrock_thk** : float, thickness of bedrock
* **stream_bed_kadjust** : float, fraction of cell hydraulic conductivity used to calculate conductance in streams
* **coastal_sed_thk** : float, thickness of coastal sediments used in calculating conductance in the coastal GHB
* **coastal_sed_kadjust** : float, fraction of cell hydraulic conductivity used to calculate conductance in the coastal GHB
* **sea_level** : float, mean annual sea level
* **den_salt** : float, density of salt water
* **den_fresh** : float, density of fresh water
* **NPER** : int, number of stress periods
* **err_tol** : float, the watertable elevation +/- err_tol is used to compute the objective function
###Code
gsm_metadata = dict(
num_surf_layers = 2,
num_bdrk_layers = 3,
K_surf = 'surf_K',
K_bdrk = 'bed_K',
ibound = 'ibound',
GHB = True,
GHB_sea = False,
K_lakes = 3000.,
k33overk = 0.1,
min_thk = 3.,
stream_bed_thk = 0.3,
surf_thk = 'thickness_Shang',
bedrock_thk = 100.,
stream_bed_kadjust = 1.0,
coastal_sed_thk = 1.5,
coastal_sed_kadjust = 15.,
sea_level = 0 ,
den_salt = 1022 ,
den_fresh = 1000 ,
NPER = 1,
err_tol = 1.
)
dst = os.path.join('model_ws', 'gsm_metadata.json')
with open(dst, 'w') as f:
json.dump(gsm_metadata, f, indent=4)
meta = Namespace(**gsm_metadata)
with open('GenMod_metadata.txt') as json_file:
metadata = json.load(json_file)
###Output
_____no_output_____
###Markdown
Create model workspace directory `model_ws`
###Code
if os.path.exists('model_ws'):
shutil.rmtree('model_ws')
os.makedirs('model_ws')
else:
os.makedirs('model_ws')
###Output
_____no_output_____
###Markdown
Read `model_grid.csv` that was created in the first notebook
###Code
model_file = os.path.join(metadata['gis_dir'], 'model_grid.csv')
model_grid = pd.read_csv(model_file)
model_grid.fillna(0, inplace=True)
model_grid.loc[model_grid[meta.K_bdrk] == 0, meta.ibound] = 0
model_grid.loc[model_grid[meta.K_surf] == 0, meta.ibound] = 0
###Output
_____no_output_____
###Markdown
Map `model_grid` (created with Notebook 1) to MODFLOW6 arrays
###Code
grid = os.path.join(metadata['gis_dir'], 'ibound.tif')
grid_raster = gmu.SourceProcessing(np.nan)
grid_raster.read_raster(grid)
NROW = grid_raster.nrow
NCOL = grid_raster.ncol
num_cells = NROW * NCOL
delr = np.abs(grid_raster.gt[1])
delc = np.abs(grid_raster.gt[5])
###Output
_____no_output_____
###Markdown
Model grid geometry
###Code
ibound = model_grid[meta.ibound].values.reshape(NROW, NCOL)
inactive = (ibound == 0)
top = model_grid.top.values.reshape(NROW, NCOL)
thick = model_grid.thickness_Shang.values.reshape(NROW, NCOL)
###Output
_____no_output_____
###Markdown
K for surficial and bedrock units.
###Code
surf_k = model_grid[meta.K_surf].values.reshape(NROW, NCOL)
bdrk_k = model_grid[meta.K_bdrk].values.reshape(NROW, NCOL)
###Output
_____no_output_____
###Markdown
Process boundary condition information
Recharge
###Code
recharge = model_grid.recharge.values.reshape(NROW, NCOL)
###Output
_____no_output_____
###Markdown
Drains
Create a dictionary of stream information for the drain or river package. River package input also needs the elevation of the river bed. Don't use both packages. The choice is made by commenting/uncommenting sections of the modflow function. Replace segment_len (segment length) with the conductance. The river package has not been tested.
###Code
drn_data = model_grid[(model_grid.order != 0) &
(model_grid.ibound == 1)].copy()
# adjust streambed K based on cell K and stream_bed_kadjust
drn_data['dcond'] = drn_data[meta.K_surf] * meta.stream_bed_kadjust * \
drn_data.reach_len * drn_data.width / meta.stream_bed_thk
drn_data['iface'] = 6
drn_data = drn_data.reindex(
['lay', 'row', 'col', 'stage', 'dcond', 'iface'], axis=1)
drn_data.rename(columns={'lay': 'k', 'row': 'i',
'col': 'j', 'stage': 'stage'}, inplace=True)
drn_data = drn_data[drn_data.dcond > 0]
# Convert to MODFLOW6 format
cellid = list(zip(drn_data.k, drn_data.i, drn_data.j))
drn_data6 = pd.DataFrame({'cellid': cellid, 'stage': drn_data.stage, 'dcond': drn_data.dcond, 'iface': drn_data.iface})
drn_recarray6 = drn_data6.to_records(index=False)
drn_dict6 = {0 : drn_recarray6}
###Output
_____no_output_____
###Markdown
General head boundary (GHB)
Create a dictionary of information for the general-head boundary package. Similar to the above cell.
###Code
if (model_grid.ghb_sea.sum() > 0) & meta.GHB:
ghb_flag = model_grid.ghb == 1
ghb_data = model_grid.loc[ghb_flag, :].copy()
ghb_data['cond'] = ghb_data[meta.K_surf] * delc * delr / meta.stream_bed_thk
ghb_data['iface'] = 6
ghb_data = ghb_data.reindex(['lay', 'row', 'col', 'ned', 'cond', 'iface'], axis=1)
ghb_data.rename(columns={'lay': 'k', 'row': 'i', 'col': 'j', 'ned': 'stage'}, inplace=True)
ghb_data.dropna(axis='index', inplace=True)
ghb_recarray = ghb_data.to_records(index=False)
ghb_dict = {0 : ghb_recarray}
###Output
_____no_output_____
###Markdown
Marine general head boundary
Create a dictionary for the marine general-head boundary.
###Code
# if model_grid.ghb_sea.sum() > 0:
# #currently the marine ghb would overwrite any existing ghb, therefore write an alert
# if GHB & GHB_sea:
# GHB = False
# print("Code doesn't support multiple ghb's. Marine ghb will be implemented.")
# ghb_flag = model_grid.ghb_sea == 1
# ghb_sea_data = model_grid.loc[ghb_flag, ['lay', 'row', 'col', 'fresh_head', 'segment_len', meta.K_surf]]
# ghb_sea_data.columns = ['k', 'i', 'j', 'stage', 'segment_len', meta.K_surf]
# gcond = ghb_sea_data[meeta.K_surf] * L * L / coastal_sed_thk / coastal_sed_kadjust
# ghb_sea_data['segment_len'] = gcond
# ghb_sea_data.rename(columns={'segment_len' : 'cond'}, inplace=True)
# ghb_sea_data.drop(meta.K_surf, axis=1, inplace=True)
# ghb_sea_data.dropna(axis='index', inplace=True)
# ghb_sea_data.insert(ghb_sea_data.shape[1], 'iface', 6)
# ghb_sea_recarray = ghb_sea_data.to_records(index=False)
# ghb_sea_dict = {0 : ghb_sea_recarray}
###Output
_____no_output_____
###Markdown
Create 1-layer model to get initial top-of-aquifer on which to drape subsequent layering
Get starting heads from top elevations. The top is defined as the model-cell-mean NED elevation except in streams, where it is interpolated between MaxElevSmo and MinElevSmo in the NHD (called 'stage' in model_grid). Make them a little higher than land so that drains don't accidentally go dry too soon.
Modify the bedrock surface, ensuring that it is always at least min_thk below the top elevation. This calculation will be revisited for the multi-layer case.
Define a function to create and run MODFLOW6
###Code
def modflow(md, mfpth6, model_ws, nlay=1, top=top, strt=top, nrow=NROW, ncol=NCOL, botm=(top - thick),
ibound=ibound, hk=surf_k, rech=recharge, stream_dict=drn_dict6, delr=delr, delc=delc,
hnoflo=-9999., hdry=-8888., iphdry=1, vani=meta.k33overk):
# Create the Flopy simulation object
sim = fp.mf6.MFSimulation(sim_name=md, exe_name=mfpth6,
version='mf6', sim_ws=model_ws)
# Create the Flopy temporal discretization object
tdis = fp.mf6.modflow.mftdis.ModflowTdis(sim, pname='tdis', time_units='DAYS',
nper=1, perioddata=[(1.0E+05, 1, 1.0)])
# Create the Flopy groundwater flow (gwf) model object
model_nam_file = '{}.nam'.format(md)
gwf = fp.mf6.ModflowGwf(sim, modelname=md, newtonoptions='UNDER_RELAXATION',
model_nam_file=model_nam_file, save_flows=True)
# Create the Flopy iterative model solver (ims) Package object
ims = fp.mf6.modflow.mfims.ModflowIms(
sim, pname='ims', complexity='COMPLEX')
# Create the discretization package
dis = fp.mf6.modflow.mfgwfdis.ModflowGwfdis(gwf, pname='dis', nlay=nlay, nrow=NROW, ncol=NCOL, length_units='METERS',
delr=delr, delc=delc, top=top, botm=botm, idomain=ibound)
# Create the initial conditions package
ic = fp.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname='ic', strt=strt)
# Create the node property flow package
npf = fp.mf6.modflow.mfgwfnpf.ModflowGwfnpf(gwf, pname='npf', icelltype=1, k=hk, k33=vani,
k33overk=True, save_flows=True)
rch = fp.mf6.modflow.mfgwfrcha.ModflowGwfrcha(
gwf, recharge=rech, save_flows=True)
drn = fp.mf6.modflow.mfgwfdrn.ModflowGwfdrn(
gwf, stress_period_data=drn_dict6, save_flows=True)
# Create the output control package
headfile = '{}.hds'.format(md)
head_filerecord = [headfile]
budgetfile = '{}.cbb'.format(md)
budget_filerecord = [budgetfile]
saverecord = [('HEAD', 'ALL'),
('BUDGET', 'ALL')]
printrecord = [('HEAD', 'LAST')]
oc = fp.mf6.modflow.mfgwfoc.ModflowGwfoc(gwf, pname='oc', saverecord=saverecord,
head_filerecord=head_filerecord,
budget_filerecord=budget_filerecord,
printrecord=None)
# Write the datasets
sim.write_simulation(silent=False)
# Run the simulation
success, buff = sim.run_simulation(silent=False)
if success:
print('\nSuccess is sweet')
print(" Your {:0d} layer model ran successfully\n\n".format(nlay))
else:
print('\nThat sucks')
print(" Your {:0d} layer model didn't converge\n\n".format(nlay))
return sim
###Output
_____no_output_____
###Markdown
Run 1-layer MODFLOW
Use the function to run MODFLOW for 1 layer to get an approximate top-of-aquifer elevation.
###Code
sim = modflow(metadata['HUC8_name'], metadata['modflow_path'], 'model_ws', nlay=1, top=top * 1.2, strt=top * 1.05, nrow=NROW, ncol=NCOL, botm=(top - thick - meta.bedrock_thk),
ibound=ibound, hk=surf_k, rech=recharge, stream_dict=drn_dict6, delr=delr, delc=delc, iphdry=0, vani=meta.k33overk)
###Output
_____no_output_____
###Markdown
Read the head file and calculate new layer top (wt) and bottom (bot) elevations based on the estimatedwater table (wt) being the top of the top layer. Divide the surficial layer into NLAY equally thick layers between wt and the bedrock surface elevation (as computed using minimum surficial thickness). Make new model with (possibly) multiple layers. If there are dry cells in the 1 layer model, they are converted to NaN (not a number). The minimum function in the first line returns NaN if the element of either input arrays is NaN. In that case, replace NaN in modeltop with the top elevation. The process is similar to the 1 layer case. Thickness is estimated based on modeltop and bedrock and is constrained to be at least min_thk (set in gen_mod_dict.py). This thickness is divided into num_surf_layers number of layers. The cumulative thickness of these layers is the distance from the top of the model to the bottom of the layers. This 3D array of distances (the same for each layer) is subtracted from modeltop. Using the estimated water table as the new top-of-aquifer elevations sometimes leads to the situation, in usually a very small number of cells, that the drain elevation is below the bottom of the cell. The following procedure resets the bottom elevation to one meter below the drain elevation if that is the case. * If add_bedrock = True in gen_mod_dict.py, add a layer to the bottom and increment NLAY by 1.* Assign the new bottom-most layer an elevation equal to the elevation of the bottom of the lowest surficial layer minus bedrock_thk, which is specified in rock_riv_dict (in gen_mod_dict.py).* Concatenate the new bottom-of-bedrock-layer to the bottom of the surficial bottom array.* Compute the vertical midpoint of each cell. Make an array (bedrock_index) that is True if the bedrock surface is higher than the midpoint and False if it is not.* lay_extrude replaces the old lay_extrude to account for the new bedrock layer. It is not used in this cell, but is used later to extrude other arrays. Extrude all arrays to NLAY number of layers. Create a top-of-aquifer elevation (fake_top) that is higher (20% in this case) than the simulated 1-layer water table because in doing this approximation, some stream elevations end up higher than top_of_aquifer and thus do not operate as drains. The fake_top shouldn't affect model computations if it is set high enough because the model uses convertible (confined or unconfined) layers. Run MODFLOW again using the new layer definitions. The difference from the first run is that the top-of-aquifer elevation is the 1-layer water table rather than land surface, and of course, the number of surficial layers and/or the presence of a bedrock layer is different.
###Code
for i in range(2):
rtd = rtd_ut.RTD_util(sim, 'flow', 'rt')
rtd.get_watertable()
wt = np.ma.masked_invalid(rtd.water_table)
top_layer1 = np.minimum(wt, top)
bedrock_top = top - thick
thk = np.maximum(top_layer1 - bedrock_top, meta.min_thk)
NLAY = meta.num_surf_layers + meta.num_bdrk_layers
lay_extrude = np.ones((meta.num_surf_layers, NROW, NCOL))
surf_thk = lay_extrude * thk / meta.num_surf_layers
surf_elev_array = top_layer1 - np.cumsum(surf_thk, axis=0)
surf_k_array = lay_extrude * surf_k
lay_extrude = np.ones((meta.num_bdrk_layers, NROW, NCOL))
bdrk_thk = lay_extrude * meta.bedrock_thk / meta.num_bdrk_layers
bdrk_elev_array = surf_elev_array[-1, ...] - np.cumsum(bdrk_thk, axis=0)
bdrk_k_array = lay_extrude * bdrk_k
botm_array = np.vstack((surf_elev_array, bdrk_elev_array))
lay_thk = np.vstack((surf_thk, bdrk_thk))
hk_3d = np.vstack((surf_k_array, bdrk_k_array))
lay_extrude = np.ones((NLAY, NROW, NCOL))
stg = model_grid.stage.copy()
stg[model_grid.order == 0] = 1.E+30
tmpdrn = (lay_extrude * stg.values.reshape(NROW, NCOL)).ravel()
tmpbot = botm_array.ravel()
index = np.less(tmpdrn, tmpbot)
tmpbot[index] = tmpdrn[index] - 1.0
botm_array = tmpbot.reshape(NLAY, NROW, NCOL)
mids = botm_array + lay_thk / 2
bedrock_index = mids < bedrock_top
la = model_grid.lake_areas.values.reshape(NROW, NCOL)
# new way to calculate lake K
frac_area = la / delr / delc
hk_3d[0, ...] = hk_3d[0, ...] * (1 - frac_area) + meta.K_lakes * frac_area
# next line is the original way to calculate lake K
# hk_3d[0, la == 1] = K_lakes
hk_3d[bedrock_index] = (lay_extrude * bdrk_k).astype(np.float32)[bedrock_index]
ind = distance_transform_edt(hk_3d==0, return_distances=False, return_indices=True)
hk_3d = hk_3d[tuple(ind)]
strt_3d = (lay_extrude * top_layer1.data * 1.05).astype(np.float32)
ibound_3d = (lay_extrude * ibound).astype(np.int16)
dst = os.path.join('bedrock_flag_array.npz')
np.savez(dst, bedrock_index=bedrock_index)
sim = modflow(metadata['HUC8_name'], metadata['modflow_path'], 'model_ws', nlay=NLAY, top=top_layer1.data, strt=strt_3d, nrow=NROW, ncol=NCOL,
botm=botm_array, ibound=ibound_3d, hk=hk_3d, rech=recharge, stream_dict=drn_dict6, delr=delr,
delc=delc, hnoflo=-9999., hdry=-8888., iphdry=1, vani=meta.k33overk)
###Output
_____no_output_____
###Markdown
Read the new head array and save it to a GeoTiff file.
###Code
rtd = rtd_ut.RTD_util(sim, 'flow', 'rt')
rtd.get_watertable()
water_table = rtd.water_table
water_table[water_table > (2 * model_grid.ned.max())] = np.nan
grid_raster.new_array = water_table
fig, ax = grid_raster.plot_raster(which_raster='new', sk={'figsize': (11, 8.5)})
fig.set_tight_layout(True)
dst = os.path.join('precal-heads.png')
plt.savefig(dst)
i = Image(filename='precal-heads.png')
i
###Output
_____no_output_____
###Markdown
Compute model errors
###Code
dif_wt = 1
hyd_wt = 1
t_crit = (model_grid.obs_type =='topo') & (ibound.ravel() != 0)
topo_cells = t_crit.values.reshape(NROW, NCOL)
h_crit = (model_grid.obs_type =='hydro') & (ibound.ravel() != 0)
hydro_cells = h_crit.values.reshape(NROW, NCOL)
num_topo = np.count_nonzero(topo_cells)
num_hydro = np.count_nonzero(hydro_cells)
topo = (top + meta.err_tol) < water_table
hydro = (top - meta.err_tol) > water_table
topo_error = topo & topo_cells
hydro_error = hydro & hydro_cells
t = np.count_nonzero(topo_error)
h = np.count_nonzero(hydro_error)
topo_rate = t / num_topo
hydro_rate = h / num_hydro
edif = dif_wt * np.abs(topo_rate - hydro_rate)
esum = topo_rate + hyd_wt * hydro_rate
target = -(edif + esum)
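# Numeric illustration of the objective: with dif_wt = hyd_wt = 1, if 5% of topo
# cells are flooded (topo_rate = 0.05) and 20% of hydro cells are dry
# (hydro_rate = 0.20), then edif = |0.05 - 0.20| = 0.15, esum = 0.25, and
# target = -(0.15 + 0.25) = -0.40; smaller, more balanced error rates give a larger target.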
###Output
_____no_output_____
###Markdown
Plot a cross-section to see what the layers look like. Change row_to_plot to see other rows. Columns could be easily added.
###Code
def ma2(data2D):
return np.ma.MaskedArray(data2D, mask=inactive)
def ma3(data3D):
return np.ma.MaskedArray(data3D, mask=(ibound_3d == 0))
def interpolate_travel_times(points, values, xi):
return si.griddata(points, values, xi, method='linear')
def plot_travel_times(ax, x, y, tt, shp):
with np.errstate(invalid='ignore'):
return ax.contourf(x.reshape(shp), y.reshape(shp), tt[:].reshape(shp),
colors=colors, alpha=1.0, levels=levels, antialiased=True)
row_to_plot = np.int32(NROW / 2)
# row_to_plot = 65
xplot = np.linspace(delc / 2, NCOL * delc - delc / 2, NCOL)
mKh = ma3(hk_3d)
mtop = ma2(top)
mbed = ma2(bedrock_top)
mbot = ma3(botm_array)
# lay_colors = ['green', 'red', 'gray']
# make a color map of fixed colors
cmap = plt.cm.coolwarm
bounds = [0, 5, 10]
norm = colors.BoundaryNorm(bounds, cmap.N)
fig = plt.figure(figsize=(11, 8.5))
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
dum = ax1.plot(xplot, mtop[row_to_plot, ],
label='land surface', color='black', lw=0.5)
dum = ax1.plot(xplot, rtd.water_table[row_to_plot, ],
label='water table', color='blue', lw=1.)
dum = ax1.fill_between(xplot, mtop[row_to_plot, ], mbot[0, row_to_plot, :], alpha=0.25,
color='blue', lw=0.75)
for lay in range(NLAY-1):
label = 'layer {}'.format(lay+2)
dum = ax1.fill_between(xplot, mbot[lay, row_to_plot, :], mbot[lay+1, row_to_plot, :],
color=cmap(lay / NLAY), alpha=0.50, lw=0.75)
dum = ax1.plot(xplot, mbed[row_to_plot, :], label='bedrock',
color='red', linestyle='dotted', lw=1.5)
dum = ax1.plot(xplot, mbot[-1, row_to_plot, :], color='black',
linestyle='dashed', lw=0.5, label='model bottom')
# , bbox_to_anchor=(1.0, 0.5))
dum = ax1.legend(loc=0, frameon=False, fontsize=10, ncol=1)
dum = ax1.set_ylabel('Altitude, in meters')
# dum = ax1.set_xticklabels('')
dum = ax1.set_title('Section along row {}'.format(row_to_plot))
# ax2 = plt.subplot2grid((4, 1), (3, 0))
# dum = ax2.fill_between(xplot, 0, mKh[0, row_to_plot, :], alpha=0.25, color='blue',
# label='layer 1', lw=0.75, step='mid')
dum = ax1.set_xlabel('Distance in meters')
# dum = ax2.set_yscale('log')
# dum = ax2.set_ylabel('Hydraulic conductivity\n in layer 1, in meters / day')
line = '{}_xs.png'.format(metadata['HUC8_name'])
fig_name = os.path.join(line)
plt.savefig(fig_name)
i = Image(filename=fig_name)
i
grid = os.path.join(metadata['gis_dir'], 'ibound.tif')
mtg = gmu.SourceProcessing(np.nan)
mtg.read_raster(grid)
fig, ax = plt.subplots(1, 1, figsize=(11, 8.5))
mask = (ibound == 0) | ~topo_cells
mt = np.ma.MaskedArray(topo_cells, mask)
cmap = colors.ListedColormap(['green'])
im = ax.pcolormesh(mtg.x_edge, mtg.y_edge, mt, cmap=cmap, alpha=0.2, edgecolors=None)
mask = (ibound == 0) | ~topo_error
mte = np.ma.MaskedArray(topo_error, mask)
cmap = colors.ListedColormap(['green'])
# dum = ax[0].imshow(mte, cmap=cmap)
im = ax.pcolormesh(mtg.x_edge, mtg.y_edge, mte, cmap=cmap, alpha=0.4, edgecolors=None)
mask = (ibound == 0) | ~hydro_cells
mh = np.ma.MaskedArray(hydro_cells, mask)
cmap = colors.ListedColormap(['blue'])
im = ax.pcolormesh(mtg.x_edge, mtg.y_edge, mh, cmap=cmap, alpha=0.2, edgecolors=None)
mask = (ibound == 0) | ~hydro_error
mhe = np.ma.MaskedArray(hydro_error, mask)
cmap = colors.ListedColormap(['blue'])
im = ax.pcolormesh(mtg.x_edge, mtg.y_edge, mhe, cmap=cmap, alpha=0.6, edgecolors=None)
ax.set_aspect(1)
dum = fig.suptitle('Default model errors\n{} model\nFraction dry drains (blue) {:0.2f}\n \
Fraction flooded cells (green) {:0.2f}'.format( \
metadata['HUC8_name'], hydro_rate, topo_rate))
fig.set_tight_layout(True)
line = '{}_error_map.png'.format(metadata['HUC8_name']) #csc
fig_name = os.path.join(line)
plt.savefig(fig_name)
i = Image(filename=fig_name)
i
###Output
_____no_output_____ |
project02/myanalysis/Pj2_Preprocessing2.ipynb | ###Markdown
Time of day (classify as morning, lunch, dinner, or late night)
###Code
df_del
df = df_del[['DLVR_STORE_LEGALDONG_CODE','PROCESS_DT','DLVR_REQUST_STTUS_VALUE','DLVR_STORE_INDUTY_NM','DLVR_STORE_BRTC_NM','DLVR_STORE_SIGNGU_NM','GOODS_AMOUNT','DLVR_RCEPT_TIME']]
df = df[df['DLVR_REQUST_STTUS_VALUE'] == 1] # keep only completed deliveries
df
df['DLVR_REQUST_STTUS_VALUE'].unique() # re-check that only completed deliveries remain
df.dtypes
df.columns = ['code','date','stat','menu','sido','sigungu','amount','time']
df['time'] = pd.to_datetime(df['time'])
df.dtypes
df['hour'] = df.time.dt.hour
df['hour'].unique()
df
mask = df['hour'].isin([6,7,8,9,10,11])
df.loc[mask, 'hour'] = 'morning'
mask = df['hour'].isin([12,13,14,15,16])
df.loc[mask, 'hour'] = 'lunch'
mask = df['hour'].isin([17,18,19,20,21])
df.loc[mask, 'hour'] = 'dinner'
mask = df['hour'].isin([22,23,0,1,2,3,4,5])
df.loc[mask, 'hour'] = 'night'
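# A more compact equivalent of the masks above, using a lookup table (sketch):
# hour_map = {h: 'morning' for h in range(6, 12)}
# hour_map.update({h: 'lunch' for h in range(12, 17)})
# hour_map.update({h: 'dinner' for h in range(17, 22)})
# hour_map.update({h: 'night' for h in list(range(22, 24)) + list(range(0, 6))})
# df['hour'] = df.time.dt.hour.map(hour_map)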
df
df.to_csv('del_hour.csv')
###Output
_____no_output_____
###Markdown
Add population data
###Code
df_pop = pd.read_csv('./pop/LOCAL_PEOPLE_DONG_201912.csv',)
df_pop2 = pd.read_csv('./pop/LOCAL_PEOPLE_DONG_202001.csv',)
df_pop3 = pd.read_csv('./pop/LOCAL_PEOPLE_DONG_202002.csv',)
df_pop4 = pd.read_csv('./pop/LOCAL_PEOPLE_DONG_202003.csv',)
df_pop5 = pd.read_csv('./pop/LOCAL_PEOPLE_DONG_202004.csv',)
df_pop6 = pd.read_csv('./pop/LOCAL_PEOPLE_DONG_202005.csv',)
df_pop
result = pd.concat([df_pop,df_pop2,df_pop3,df_pop4,df_pop5,df_pop6,])
result
result.columns = ['์๊ฐ๋๊ตฌ๋ถ','ํ์ ๋์ฝ๋','์ด์ํ์ธ๊ตฌ์','๋จ์0์ธ๋ถํฐ9์ธ์ํ์ธ๊ตฌ์','๋จ์10์ธ๋ถํฐ14์ธ์ํ์ธ๊ตฌ์','๋จ์15์ธ๋ถํฐ19์ธ์ํ์ธ๊ตฌ์','๋จ์20์ธ๋ถํฐ24์ธ์ํ์ธ๊ตฌ์','๋จ์25์ธ๋ถํฐ29์ธ์ํ์ธ๊ตฌ์','๋จ์30์ธ๋ถํฐ34์ธ์ํ์ธ๊ตฌ์','๋จ์35์ธ๋ถํฐ39์ธ์ํ์ธ๊ตฌ์','๋จ์40์ธ๋ถํฐ44์ธ์ํ์ธ๊ตฌ์','๋จ์45์ธ๋ถํฐ49์ธ์ํ์ธ๊ตฌ์','๋จ์50์ธ๋ถํฐ54์ธ์ํ์ธ๊ตฌ์','๋จ์55์ธ๋ถํฐ59์ธ์ํ์ธ๊ตฌ์','๋จ์60์ธ๋ถํฐ64์ธ์ํ์ธ๊ตฌ์','๋จ์65์ธ๋ถํฐ69์ธ์ํ์ธ๊ตฌ์','๋จ์70์ธ์ด์์ํ์ธ๊ตฌ์','์ฌ์0์ธ๋ถํฐ9์ธ์ํ์ธ๊ตฌ์','์ฌ์10์ธ๋ถํฐ14์ธ์ํ์ธ๊ตฌ์','์ฌ์15์ธ๋ถํฐ19์ธ์ํ์ธ๊ตฌ์','์ฌ์20์ธ๋ถํฐ24์ธ์ํ์ธ๊ตฌ์','์ฌ์25์ธ๋ถํฐ29์ธ์ํ์ธ๊ตฌ์','์ฌ์30์ธ๋ถํฐ34์ธ์ํ์ธ๊ตฌ์','์ฌ์35์ธ๋ถํฐ39์ธ์ํ์ธ๊ตฌ์','์ฌ์40์ธ๋ถํฐ44์ธ์ํ์ธ๊ตฌ์','์ฌ์45์ธ๋ถํฐ49์ธ์ํ์ธ๊ตฌ์','์ฌ์50์ธ๋ถํฐ54์ธ์ํ์ธ๊ตฌ์','์ฌ์55์ธ๋ถํฐ59์ธ์ํ์ธ๊ตฌ์','์ฌ์60์ธ๋ถํฐ64์ธ์ํ์ธ๊ตฌ์','์ฌ์65์ธ๋ถํฐ69์ธ์ํ์ธ๊ตฌ์','์ฌ์70์ธ์ด์์ํ์ธ๊ตฌ์',0]
result
df.dtypes
result['ํ์ ๋์ฝ๋'] = result['ํ์ ๋์ฝ๋'].astype(int)
result = result.reset_index()
result.rename(columns = {"index": "date"}, inplace = True)
result
result
result02 = result[['date','ํ์ ๋์ฝ๋','์ด์ํ์ธ๊ตฌ์']]
result02
a = df_pop2.iloc[:,4:18].sum(axis=1)
result03['men'] = round(a)
result02['0~9์ธ'] = result02['0~9']
del result02['0~9']
result02
b = df_pop2.iloc[:,18:]
b
b = b.sum(axis=1)
b
result03['women'] = round(b)
result03
a = df_pop2['๋จ์70์ธ์ด์์ํ์ธ๊ตฌ์']
a
b = df_pop2['์ฌ์70์ธ์ด์์ํ์ธ๊ตฌ์']
b
c = a+b
result03['70~'] = c
result03
result02.iloc[: ,5:].sum(axis = 1)
result02.dtypes
result02['date'] = result02['date'].astype(int)
result02.to_csv('df_age.csv')
result02.replace('(', '')
result02.head(600000)
df_pop2.columns = ['date','์๊ฐ๋๊ตฌ๋ถ','ํ์ ๋์ฝ๋','์ด์ํ์ธ๊ตฌ์','๋จ์0์ธ๋ถํฐ9์ธ์ํ์ธ๊ตฌ์','๋จ์10์ธ๋ถํฐ14์ธ์ํ์ธ๊ตฌ์','๋จ์15์ธ๋ถํฐ19์ธ์ํ์ธ๊ตฌ์','๋จ์20์ธ๋ถํฐ24์ธ์ํ์ธ๊ตฌ์','๋จ์25์ธ๋ถํฐ29์ธ์ํ์ธ๊ตฌ์','๋จ์30์ธ๋ถํฐ34์ธ์ํ์ธ๊ตฌ์','๋จ์35์ธ๋ถํฐ39์ธ์ํ์ธ๊ตฌ์','๋จ์40์ธ๋ถํฐ44์ธ์ํ์ธ๊ตฌ์','๋จ์45์ธ๋ถํฐ49์ธ์ํ์ธ๊ตฌ์','๋จ์50์ธ๋ถํฐ54์ธ์ํ์ธ๊ตฌ์','๋จ์55์ธ๋ถํฐ59์ธ์ํ์ธ๊ตฌ์','๋จ์60์ธ๋ถํฐ64์ธ์ํ์ธ๊ตฌ์','๋จ์65์ธ๋ถํฐ69์ธ์ํ์ธ๊ตฌ์','๋จ์70์ธ์ด์์ํ์ธ๊ตฌ์','์ฌ์0์ธ๋ถํฐ9์ธ์ํ์ธ๊ตฌ์','์ฌ์10์ธ๋ถํฐ14์ธ์ํ์ธ๊ตฌ์','์ฌ์15์ธ๋ถํฐ19์ธ์ํ์ธ๊ตฌ์','์ฌ์20์ธ๋ถํฐ24์ธ์ํ์ธ๊ตฌ์','์ฌ์25์ธ๋ถํฐ29์ธ์ํ์ธ๊ตฌ์','์ฌ์30์ธ๋ถํฐ34์ธ์ํ์ธ๊ตฌ์','์ฌ์35์ธ๋ถํฐ39์ธ์ํ์ธ๊ตฌ์','์ฌ์40์ธ๋ถํฐ44์ธ์ํ์ธ๊ตฌ์','์ฌ์45์ธ๋ถํฐ49์ธ์ํ์ธ๊ตฌ์','์ฌ์50์ธ๋ถํฐ54์ธ์ํ์ธ๊ตฌ์','์ฌ์55์ธ๋ถํฐ59์ธ์ํ์ธ๊ตฌ์','์ฌ์60์ธ๋ถํฐ64์ธ์ํ์ธ๊ตฌ์','์ฌ์65์ธ๋ถํฐ69์ธ์ํ์ธ๊ตฌ์','์ฌ์70์ธ์ด์์ํ์ธ๊ตฌ์',0,0,0,0,0,0]
df_pop2
result03 = df_pop2[['date','ํ์ ๋์ฝ๋','์ด์ํ์ธ๊ตฌ์']]
result03
result02
result03
result04 = pd.concat([result02,result03])
a = []
for i in range(315456,630912):
a.append(i)
a
result05 = result04.drop(index=a, axis=0)
result05.iloc[315455:630913]
result05.to_csv('df_age2.csv')
result06 = result05.copy()
result06.dtypes
result06['date'].astype(int)
result06 = result06.sort_values(by=["date"])
df.to_csv('df_hour2.csv')
all_df = pd.merge(left=df, right=result, how='left', on=['GoodsID','GoodsIDSeqNo'], sort=False)
###Output
_____no_output_____ |
pria_lifechem/analysis/RF_loader.ipynb | ###Markdown
joblib error
https://github.com/scikit-learn/scikit-learn/issues/5777
```
conda install -y -c omnia openbabel=2.4.0 > /dev/null
conda install -y -c omnia pdbfixer=1.4 > /dev/null
conda install -y -c rdkit rdkit > /dev/null
conda install -y joblib > /dev/null
conda install -y -c omnia mdtraj > /dev/null
conda install -y scikit-learn > /dev/null
conda install -y setuptools > /dev/null
conda install -y -c conda-forge keras=1.2.2 > /dev/null
conda install -y -c conda-forge protobuf=3.1.0 > /dev/null
conda install -y -c anaconda networkx=1.11 > /dev/null
conda install -y -c bioconda xgboost=0.6a2 > /dev/null
conda install -y -c conda-forge six=1.10.0 > /dev/null
conda install -y -c conda-forge nose=1.3.7 > /dev/null
conda install --yes -c conda-forge tensorflow=1.0.0 > /dev/null
conda install --yes -c jjhelmus tensorflow-gpu=1.0.1 > /dev/null
conda install --yes mkl-service > /dev/null
conda install --yes -c r rpy2 > /dev/null
conda install --yes -c bioconda r-prroc=1.1 > /dev/null
conda install --yes -c auto croc=1.0.63 > /dev/null
```
###Code
import os
import joblib
path_dir = '../../output/random_forest/stage_1/sklearn_rf_390014_97/fold_0/rf_clf_Keck_Pria_AS_Retest.pkl'
os.path.exists(path_dir)
a = joblib.load(path_dir)
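# Quick sanity checks on the loaded object (sketch; assumes the pickle holds a
# fitted scikit-learn RandomForestClassifier, as the file name suggests):
# print(type(a))
# print(getattr(a, 'n_estimators', None))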
###Output
_____no_output_____ |
cleaning/01_cleaning_code_by_state/AZ_cleaning.ipynb | ###Markdown
Read in federal level data
###Code
fiscal = pd.read_sas('../../data/fiscal2018', format = 'sas7bdat', encoding='iso-8859-1')
###Output
_____no_output_____
###Markdown
Generate list of districts in the state in the federal data
###Code
fiscal_AZ = fiscal[(fiscal['STNAME'] == 'Arizona') & (fiscal['GSHI'] == '12')]
len(fiscal_AZ)
fiscal_AZ.head()
###Output
_____no_output_____
###Markdown
Read in state level data
###Code
AZ = pd.read_excel('../../data/state_data_raw/arizona2018.xls', sheet_name='LEA by Subgroup')
AZ.head()
###Output
_____no_output_____
###Markdown
Filter for only the district level, total population samples.
###Code
AZ = AZ[(AZ['Graduation Rate Type'] == '4-Year') &
(AZ['Subgroup'] == 'All') &
(AZ['Percent Graduated in 4 Years'] != '*')]
len(AZ)
###Output
_____no_output_____
###Markdown
Check for non-matches in the two lists
###Code
fiscal_AZ['NAME'] = fiscal_AZ['NAME'].astype(str).str.replace(' Inc.', ', Inc.', regex=False)
A = [name for name in list(AZ['LEA Name']) if name not in list(fiscal_AZ['NAME'])]
A.sort()
A
B = [name for name in fiscal_AZ['NAME'] if name not in list(AZ['LEA Name'])]
B.sort()
B
###Output
_____no_output_____
###Markdown
Replace the names I can find matches for.
###Code
AZ_fiscal_rename = {
'AIBT Non-Profit Charter High School, Inc.' : 'AIBT Non-Profit Charter High School - Phoenix',
#'ARIZONA STATE HOSPITAL',
##'ASU Preparatory Academy 2',
##'ASU Preparatory Academy 3',
##'ASU Preparatory Academy Digital',
##'ASU Preparatory Academy Tempe',
#'AZ Dept of Juvenile Corrections',
#'Academy of Mathematics and Science Inc. 1',
'American Charter Schools Foundation d.b.a. Alta Vista High S' : 'American Charter Schools Foundation d.b.a. Alta Vista High School',
'American Charter Schools Foundation d.b.a. Apache Trail High' : 'American Charter Schools Foundation d.b.a. Apache Trail High School',
'American Charter Schools Foundation d.b.a. Crestview College' : 'American Charter Schools Foundation d.b.a. Crestview College Preparatory High Sc',
'American Charter Schools Foundation d.b.a. Desert Hills High' : 'American Charter Schools Foundation d.b.a. Desert Hills High School',
'American Charter Schools Foundation d.b.a. Estrella High Sch' : 'American Charter Schools Foundation d.b.a. Estrella High School',
'American Charter Schools Foundation d.b.a. Peoria Accelerate' : 'American Charter Schools Foundation d.b.a. Peoria Accelerated High School',
'American Charter Schools Foundation d.b.a. South Pointe High' : 'American Charter Schools Foundation d.b.a. South Pointe High School',
'American Charter Schools Foundation d.b.a. South Ridge High' : 'American Charter Schools Foundation d.b.a. South Ridge High School',
'American Charter Schools Foundation d.b.a. Sun Valley High S' : 'American Charter Schools Foundation d.b.a. Sun Valley High School',
'American Charter Schools Foundation d.b.a. West Phoenix High' : 'American Charter Schools Foundation d.b.a. West Phoenix High School',
#'Apache County Sheriffs Office',
#'Archway Classical Academy Chandler',
#'Archway Classical Academy Scottsdale',
#'Archway Classical Academy Trivium West',
#'Archway Classical Academy Veritas',
##'Arizona Agribusiness & Equine Center Inc. 1',
##'Arizona Agribusiness & Equine Center Inc. 2',
##'Arizona Agribusiness & Equine Center Inc. 3',
##'Arizona Agribusiness & Equine Center Inc. 4',
##'Arizona Agribusiness & Equine Center Inc. 5',
##'Arizona Agribusiness & Equine Center Inc. 6',
##'Arizona Department of Corrections',
#'Arizona State School for the Deaf and Blind',
#'Arizona Supreme Court',
##'BASIS School Inc. 1',
##'BASIS School Inc. 10',
##'BASIS School Inc. 12',
##'BASIS School Inc. 13',
##'BASIS School Inc. 2',
##'BASIS School Inc. 3',
##'BASIS School Inc. 4',
##'BASIS School Inc. 5',
##'BASIS School Inc. 6',
##'BASIS School Inc. 7',
##'BASIS School Inc. 8',
##'BASIS School Inc. 9',
#'Blue Elementary District',
#'Bowie Unified District',
#'CAFA, Inc. dba Learning Foundation Performing Arts School',
'CAFA, Inc. dba Learning Foundation and Performing Arts Gilbe' : 'CAFA, Inc. dba Learning Foundation and Performing Arts Gilbert',
'CPLC Community Schools dba Envision High School' : 'CPLC Community Schools dba Toltecalli High School',
#'Cedar Unified District',
#'Central Arizona Valley Institute of Technology',
#'Cobre Valley Institute of Technology District',
#'Cochise County Juvenile Detention',
#'Cochise Technology District',
#'Coconino Association for Vocation Industry and Technology',
'Compass Points International Inc' : 'Compass Points International, Inc',
'Cornerstone Charter School Inc' : 'Cornerstone Charter School,Inc',
#'Cottonwood-Oak Creek Elementary District',
#'Country Gardens Charter Schools',
'Daisy Education Corporation dba Sonoran Science Academy - Ph' : 'Daisy Education Corporation dba Sonoran Science Academy - Phoenix',
'Daisy Education Corporation dba. Sonoran Science Academy Dav' : 'Daisy Education Corporation dba. Sonoran Science Academy Davis Monthan',
'Daisy Education Corporation dba. Sonoran Science Academy Peo' : 'Daisy Education Corporation dba Sonoran Science Academy',
'Desert Rose Academy Inc.' : 'Desert Rose Academy,Inc.',
#'EAGLE South Mountain Charter, Inc.',
#'East Valley Institute of Technology',
'Edge School, Inc. The' : 'Edge School, Inc., The',
'Edkey, Inc. - Sequoia Ranch School' : 'Edkey, Inc. - Sequoia Charter School',
#'Edkey, Inc. - Sequoia School for the Deaf and Hard of Hearin',
'Eduprize Schools LLC' : 'Eduprize Schools, LLC',
'Espiritu Community Development Corp. 1' : 'Espiritu Community Development Corp.',
#'Espiritu Schools',
#'Excalibur Charter Schools, Inc.',
'GAR LLC dba Student Choice High School' : 'GAR, LLC dba Student Choice High School',
#'Gadsden Elementary District',
#'Gila County Juvenile Detention',
#'Gila County Regional School District',
#'Gila County Sheriffs Office',
#'Gila Institute for Technology',
#'Graham County Juvenile Detention',
#'Graham County School Superintendent',
#'Graham County Special Services',
#'Greenlee County Accommodation District',
#'Greenlee County Sheriffs Office',
#'Highland Prep',
'Horizon Community Learning Center Inc. 1' : 'Horizon Community Learning Center, Inc.',
#'Innovative Humanities Education Corporation',
#'Integrity Education Incorporated',
'Kaizen Education Foundation dba Mission Heights Preparatory' : 'Kaizen Education Foundation dba Mission Heights Preparatory High School',
'Kaizen Education Foundation dba Tempe Accelerated High Schoo' : 'Kaizen Education Foundation dba Tempe Accelerated High School',
#'Kirkland Elementary District',
'LEAD Charter Schools dba Leading Edge Academy Queen Creek' : 'LEAD Charter Schools',
#'Leading Edge Academy Maricopa',
'Maricopa County Community College District dba Gateway Early' : 'Maricopa County Community College District dba Gateway Early College High School',
'Maricopa County Regional Special Services District' : 'Maricopa County Regional District',
#'Maricopa County Sheriffs Office',
'Mary Ellen Halvorson Educational Foundation. dba: Tri-City P' : 'Mary Ellen Halvorson Educational Foundation. dba: Tri-City Prep High School',
#'Mohave County Juvenile Detention',
#'Mountain Institute JTED',
#'Navajo County School Superintendents Office',
#'New World Educational Center',
#'Northeast Arizona Technological Institute of Vocational Educ',
#'Northern Arizona Vocational Institute of Technology',
'Nosotros Inc' : 'Nosotros, Inc',
'Ombudsman Educational Services Ltd. a subsidiary of Educ 1' : 'Ombudsman Educational Services, Ltd.,a subsidiary of Educational Services of Ame',
'PAS Charter, Inc. dba Intelli-School' : 'PAS Charter, Inc., dba Intelli-School',
#'Park View School, Inc.',
'Pathways KM Charter Schools Inc' : 'Pathways In Education-Arizona, Inc.',
'Phoenix Collegiate Academy, Inc.' : 'Phoenix Collegiate Academy High LLC',
#'Pima County JTED',
'Pima Prevention Partnership dba Pima Partnership School The' : 'Pima Prevention Partnership dba Pima Partnership School, The',
#'Pinal County Juvenile Detention',
##'Pinnacle Education-Casa Grande, Inc.',
##'Portable Practical Educational Preparation Inc. (PPEP In 1',
##'Portable Practical Educational Preparation Inc. (PPEP In 2',
#'Prescott Valley Charter School',
#'Presidio School',
#'Ray of Light Academy',
#'Rising Schools, Inc.',
#'San Simon Unified District',
#'Santa Cruz County Sheriffs Office',
#'Shonto Governing Board of Education, Inc.',
'Skyline Gila River Schools LLC' : 'Skyline Gila River Schools, LLC',
#'Sonoran Desert School',
#'Southwest Technical Education District of Yuma (STEDY)',
#'SySTEM Schools',
#'Teleos Preparatory Academy',
#'The Charter Foundation, Inc.',
#'Toltec School District',
'Vail Unified School District' : 'Vail Unified District',
#'Valley Academy for Career and Technology Education',
'Valley of the Sun Waldorf Education Association dba Desert' : 'Valley of the Sun Waldorf Education Association, dba Desert Marigold School',
#'Victory High School, Inc.',
#'Vision Charter School, Inc.',
#'Vista Charter School',
#'West-MEC - Western Maricopa Education Center',
#'Western Arizona Vocational District #50',
#'Yavapai County Juvenile Justice Center',
#'Young Elementary District',
#'Yuma County Juvenile Justice Center',
#'Yuma County Sheriffs Office'
}
fiscal_AZ = fiscal_AZ.replace(AZ_fiscal_rename)
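# Optional sanity check (sketch): names in the state file that still have no
# federal match after the renaming above.
# still_unmatched = set(AZ['LEA Name']) - set(fiscal_AZ['NAME'])
# print(len(still_unmatched))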
###Output
_____no_output_____
###Markdown
Examine data types and missing values.
###Code
AZ.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 290 entries, 0 to 3968
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Cohort Year 290 non-null int64
1 Graduation Rate Type 290 non-null object
2 LEA Entity ID 290 non-null int64
3 LEA Name 290 non-null object
4 County 290 non-null object
5 Subgroup 290 non-null object
6 Number Graduated 290 non-null object
7 Number in Cohort 290 non-null object
8 Percent Graduated in 4 Years 290 non-null object
dtypes: int64(2), object(7)
memory usage: 22.7+ KB
###Markdown
Change column names for consistency across states.
###Code
AZ = AZ[['LEA Name', 'Number in Cohort', 'Percent Graduated in 4 Years']]
AZ = AZ.rename(columns={'LEA Name' : 'District Name', 'Number in Cohort' : 'Total', 'Percent Graduated in 4 Years' : 'Graduation Rate'})
###Output
_____no_output_____
###Markdown
Change data types.
###Code
AZ['Graduation Rate'] = pd.to_numeric(AZ['Graduation Rate']) / 100
AZ['Total'] = pd.to_numeric(AZ['Total'])
AZ = AZ.reset_index(drop=True)
sum(AZ['Total'])
###Output
_____no_output_____
###Markdown
Merge federal and state data, keeping only matches between the two.
###Code
AZ_merged = pd.merge(fiscal[fiscal['STABBR'] == 'AZ'], AZ, how='inner', left_on='NAME', right_on='District Name')
###Output
_____no_output_____
###Markdown
Save cleaned data.
###Code
AZ_merged.to_csv('../../data/state_data_merged/AZ.csv', index=False)
###Output
_____no_output_____ |
L26 Momentum, AdaGrad, RMSProp, Adam/L26_2_Momentum,_Adagrad,_RMSProb_in_Python.ipynb | ###Markdown
Momentum
Standard gradient descent.
###Code
%matplotlib inline
import math
import matplotlib.pyplot as plt
import numpy as np
eta = 0.4
def f_2d(x1, x2):
return 0.1 * x1 ** 2 + 2 * x2 ** 2
def gd_2d(x1, x2, s1, s2):
return (x1 - eta * 0.2 * x1, x2 - eta * 4 * x2, 0, 0)
def train_2d(trainer):
x1, x2, s1, s2 = -5, -2, 0, 0
results = [(x1, x2)]
for i in range(20):
x1, x2, s1, s2 = trainer(x1, x2, s1, s2)
results.append((x1, x2))
print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2))
return results
def show_trace_2d(f, results):
plt.plot(*zip(*results), '-o', color='#ff7f0e')
x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))
plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
plt.xlabel('x1')
plt.ylabel('x2')
show_trace_2d(f_2d, train_2d(gd_2d))
###Output
epoch 20, x1 -0.943467, x2 -0.000073
###Markdown
Momentum
Stochastic gradient descent with momentum remembers the update $\Delta w$ at each iteration, and determines the next update as a linear combination of the gradient and the previous update.
###Code
def momentum_2d(x1, x2, v1, v2):
v1 = gamma * v1 + eta * 0.2 * x1
v2 = gamma * v2 + eta * 4 * x2
return x1 - v1, x2 - v2, v1, v2
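# The update implemented above, in equation form:
#   v_t = gamma * v_{t-1} + eta * grad f(x_{t-1})
#   x_t = x_{t-1} - v_t
# so gamma controls how much of the previous update is remembered.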
eta, gamma = 0.4, 0.5
show_trace_2d(f_2d, train_2d(momentum_2d))
###Output
epoch 20, x1 -0.062843, x2 0.001202
###Markdown
GD with a large learning rate
###Code
eta = 0.6
show_trace_2d(f_2d, train_2d(gd_2d))
###Output
epoch 20, x1 -0.387814, x2 -1673.365109
###Markdown
Same learning rate for momentum.
###Code
show_trace_2d(f_2d, train_2d(momentum_2d))
###Output
epoch 20, x1 0.007188, x2 0.002553
###Markdown
Adagrad
###Code
def adagrad_2d(x1, x2, s1, s2):
# The first two terms are the independent variable gradients
g1, g2, eps = 0.2 * x1, 4 * x2, 1e-6
s1 += g1 ** 2
s2 += g2 ** 2
x1 -= eta / math.sqrt(s1 + eps) * g1
x2 -= eta / math.sqrt(s2 + eps) * g2
return x1, x2, s1, s2
eta = 0.4
show_trace_2d(f_2d, train_2d(adagrad_2d))
###Output
epoch 20, x1 -2.382563, x2 -0.158591
###Markdown
Use a much larger learning rate.
###Code
eta = 2
show_trace_2d(f_2d, train_2d(adagrad_2d))
###Output
epoch 20, x1 -0.002295, x2 -0.000000
###Markdown
RMSProp
###Code
def rmsprop_2d(x1, x2, s1, s2):
g1, g2, eps = 0.2 * x1, 4 * x2, 1e-6
s1 = gamma * s1 + (1 - gamma) * g1 ** 2
s2 = gamma * s2 + (1 - gamma) * g2 ** 2
x1 -= eta / math.sqrt(s1 + eps) * g1
x2 -= eta / math.sqrt(s2 + eps) * g2
return x1, x2, s1, s2
eta, gamma = 0.4, 0.9
show_trace_2d(f_2d, train_2d(rmsprop_2d))
###Output
epoch 20, x1 -0.010599, x2 0.000000
|
notebooks/hello-csharp/numbers.ipynb | ###Markdown
Manipulate integral and floating point numbers in C# *In this tutorial about numeric types, you'll use Jupyter notebooks to learn C# interactively. You're going to write C# code and see the results of compiling and running your code directly in the notebook.* It contains a series of lessons that explore numbers and math operations in C#. These lessons teach you the fundamentals of the C# language. Working with integer math Run the following cell:
###Code
int a = 18;
int b = 6;
int c = a + b;
Console.WriteLine(c);
###Output
_____no_output_____
###Markdown
You've seen one of the fundamental math operations with integers. The `int` type represents an **integer**, a positive or negative whole number. You use the `+` symbol for addition. Other common mathematical operations for integers include:- `-` for subtraction- `*` for multiplication- `/` for division Start by exploring those different operations. Modify the third line to try each of these operations. After each edit, select the **Run** button.- Subtraction: `int c = a - b;`- Multiplication: `int c = a * b;`- Division: `int c = a / b;` You can also experiment by writing multiple mathematics operations in the same line, if you'd like.> As you explore C# (or any programming language), you'll make mistakes when you write code. The **compiler** will find those errors and report them to you. When the output contains error messages, look closely at the example code, and the code in the interactive window to see what to fix. That exercise will help you learn the structure of C# code. Explore order of operations The C# language defines the precedence of different mathematics operations with rules consistent with the rules you learned in mathematics. Multiplication and division take precedence over addition and subtraction. Explore that by running the following cell:
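With the values in the next cell ($a = 5$, $b = 4$, $c = 2$), `a + b * c` evaluates as $5 + (4 \times 2) = 13$, whereas `(a + b) * c` would give $9 \times 2 = 18$.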
###Code
int a = 5;
int b = 4;
int c = 2;
int d = a + b * c;
Console.WriteLine(d);
###Output
_____no_output_____
###Markdown
The output demonstrates that the multiplication is performed before the addition.You can force a different order of operation by adding parentheses around the operation or operations you want performed first. Modify the fourth line in the cell above to force the addition to be performed first: `int d = (a + b) * c;`Explore more by combining many different operations. Replace the fourth line above with something like this. `int d = (a + b) - 6 * c + (12 * 4) / 3 + 12;`You may have noticed an interesting behavior for integers. Integer division always produces an integer result, even when you'd expect the result to include a decimal or fractional portion. Explore integer precision and limitsIf you haven't seen this behavior, try the following cell:
###Code
int a = 7;
int b = 4;
int c = 3;
int d = (a + b) / c;
int e = (a + b) % c;
Console.WriteLine($"quotient: {d}");
Console.WriteLine($"remainder: {e}");
###Output
_____no_output_____
###Markdown
That last sample showed you that integer division truncates the result. It showed how you can get the **remainder** by using the **remainder** operator, the `%` character.The C# integer type differs from mathematical integers in one other way: the `int` type has minimum and maximum limits. Run this cell to see those limits:
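(For reference: a C# `int` is a signed 32-bit integer, so the limits printed by the next cell are $-2^{31} = -2{,}147{,}483{,}648$ and $2^{31} - 1 = 2{,}147{,}483{,}647$.)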
###Code
int max = int.MaxValue;
int min = int.MinValue;
Console.WriteLine($"The range of integers is {min} to {max}");
###Output
_____no_output_____
###Markdown
If a calculation produces a value that exceeds those limits, you have an **underflow** or **overflow** condition. The answer appears to wrap from one limit to the other. Add these two lines to the preceding cell to see an example:```csharpint what = max + 3;Console.WriteLine($"An example of overflow: {what}");```Notice that the answer is very close to the minimum (negative) integer. It's the same as `min + 2`. The addition operation **overflowed** the allowed values for integers. The answer is a very large negative number because an overflow "wraps around" from the largest possible integer value to the smallest.There are other numeric types with different limits and precision that you would use when the `int` type doesn't meet your needs. Let's explore those types of numbers next. Work with the double typeThe `double` numeric type represents a double-precision floating point number. Those terms may be new to you. A **floating point** number is useful to represent non-integral numbers that may be very large or small in magnitude. **Double-precision** is a relative term that describes the number of binary digits used to store the value. **Double precision** numbers have twice the number of binary digits as **single-precision**. On modern computers, it is more common to use double precision than single precision numbers. **Single precision** numbers are declared using the `float` keyword. Let's explore. Try the following cell:
###Code
double a = 5;
double b = 4;
double c = 2;
double d = (a + b) / c;
Console.WriteLine(d);
###Output
_____no_output_____
###Markdown
Notice that the answer includes the decimal portion of the quotient. Try a slightly more complicated expression with doubles:
###Code
double a = 19;
double b = 23;
double c = 8;
double d = (a + b) / c;
Console.WriteLine(d);
###Output
_____no_output_____
###Markdown
The range of a double value is much greater than integer values. Try the following cell:
###Code
double max = double.MaxValue;
double min = double.MinValue;
Console.WriteLine($"The range of double is {min} to {max}");
###Output
_____no_output_____
###Markdown
These values are printed out in scientific notation. The number to the left of the `E` is the significand. The number to the right is the exponent, as a power of 10. Just like decimal numbers in math, doubles in C# can have rounding errors. Try this cell:
###Code
double third = 1.0 / 3.0;
Console.WriteLine(third);
###Output
_____no_output_____
###Markdown
You know that `0.3` is `3/10` and not exactly the same as `1/3`. Similarly, `0.33` is `33/100`. That's closer to `1/3`, but still not exact. ***Challenge***Try other calculations with large numbers, small numbers, multiplication, and division using the `double` type. Try more complicated calculations. Use the cell below for your ideas. Work with decimal typesYou've seen the basic numeric types in C#: integers and doubles. There's one other type to learn: the `decimal` type. The `decimal` type has a smaller range but greater precision than `double`. Let's take a look:
###Code
decimal min = decimal.MinValue;
decimal max = decimal.MaxValue;
Console.WriteLine($"The range of the decimal type is {min} to {max}");
###Output
_____no_output_____
###Markdown
Notice that the range is smaller than the `double` type. You can see the greater precision with the decimal type by trying the following cell:
###Code
double a = 1.0;
double b = 3.0;
Console.WriteLine(a / b);
decimal c = 1.0M;
decimal d = 3.0M;
Console.WriteLine(c / d);
###Output
_____no_output_____ |
SMS_Spam_Classifier_model.ipynb | ###Markdown
Importing the libraries
###Code
import pandas as pd
import nltk
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
###Output
_____no_output_____
###Markdown
Import the dataset
###Code
dataset = pd.read_csv('SMSSpamCollection', delimiter = '\t', names =['labels', 'messages'])
dataset
dataset.isnull().sum()
###Output
_____no_output_____
###Markdown
Text Cleaning
###Code
ps = PorterStemmer()
wnl = WordNetLemmatizer()
corpus = []
for i in range(len(dataset['messages'])):
sentence = re.sub('[^a-zA-Z]', ' ', dataset.iloc[i, 1])
tokens = nltk.word_tokenize(sentence)
tokens = [ps.stem(word) for word in tokens if word not in stopwords.words('english')]
tokens = ' '.join(tokens)
corpus.append(tokens)
###Output
_____no_output_____
###Markdown
Creating the Bag of Words (BoW) model
###Code
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 3000)
X = cv.fit_transform(corpus).toarray()
###Output
_____no_output_____
###Markdown
Declaring the variables
###Code
y = pd.get_dummies(dataset['labels'], drop_first = True)
y = y.values.reshape(-1)
###Output
_____no_output_____
###Markdown
Splitting the dataset into Training set and Test set
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 5)
###Output
_____no_output_____
###Markdown
Using Multinomial Naive Bayes model for Classification
###Code
from sklearn.naive_bayes import MultinomialNB
mnb_classifier = MultinomialNB()
mnb_classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Predicting the Test set results
###Code
y_pred = mnb_classifier.predict(X_test)
# Accuracy Score and Confusion Matrix
from sklearn.metrics import accuracy_score, confusion_matrix
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy_score :', round(accuracy*100, 2))
cm = confusion_matrix(y_test, y_pred)
print('Confusion matrix :\n', cm)
###Output
Accuracy_score : 99.37
Confusion matrix :
[[967 3]
[ 4 141]]
###Markdown
Using the model to compare the Test set predicted values with original values
###Code
X_train_new, X_test_new, y_train_new, y_test_new = train_test_split(dataset['messages'].values, y, test_size = 0.20, random_state = 5)
df_results = pd.DataFrame()
df_results['Messages'] = X_test_new
df_results['Spam_actual'] = y_test
df_results['Spam_predicted'] = y_pred
df_results.to_csv('results.csv')
df_results
###Output
_____no_output_____
###Markdown
Applying k-fold Cross Validation for checking model performance
###Code
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = mnb_classifier, X = X_train, y = y_train, cv = 20)
print('Best accuracy:', round(max(accuracies)*100, 2), '%')
print('Worst accuracy:', round(min(accuracies)*100, 2), '%')
print('Average accuracy:', round(accuracies.mean()*100, 2), '%')
print('Standard Deviation of accuracies:', round(accuracies.std()*100, 2), '%')
###Output
Best accuracy: 99.55 %
Worst accuracy: 96.41 %
Average accuracy: 98.25 %
Standard Deviation of accuracies: 0.72 %
|
fig8.ipynb | ###Markdown
Load and prepare data
###Code
# Helper function to compute recall speed
def recall_speed(X_es, spikes, start_time):
recall_speeds = []
clu_pos = [linalg.norm(clu_end - clu_start) * clu / (clu_num-1) for clu in range(clu_num)]
for i in tqdm(range(len(X_es))):
clu_neurs = get_cluster_neurons(X_es[i])
spikes_dic = spikes[i]
firing_times = []
for clu in range(clu_num):
spikes_clu = get_spikes(clu_neurs[clu], spikes_dic, start_time)
firing_times.append(rates_ftimes(spikes_clu, start_time, len(clu_neurs[clu]))[1])
firing_times = np.array(firing_times).T
spCCs = np.array(sp_corr(firing_times))
recall_speeds_i = []
#breakpoint()
for ftimes in firing_times[spCCs > 0.9]:
recall_speeds_i.append(stats.linregress(ftimes, clu_pos)[0])
if len(recall_speeds_i)>0:
recall_speeds.append(np.mean(recall_speeds_i))
return recall_speeds
# Load data and compute recall speeds
af_time = growth_time + test_time + 2*relax_time + learn_time
vlist = [4, 8, 12, 16, 20]
recall_speeds = []
for i_v, v in tqdm(enumerate(vlist)):
X_es = []
spikes = []
for i in range(40 + i_v*20, 40 + i_v*20 + seeds_num):
if i!=6:
data = load_data("./data/seqlearn_cues_v{}_seed{}.pickle".format(v, i))
X_es.append(data["X_e"])
spikes.append(data["spikes"])
recall_speeds.append(recall_speed(X_es, spikes, af_time))
###Output
_____no_output_____
###Markdown
Figure
###Code
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
ax.plot([0] + vlist + [max(vlist)+5], [0] + vlist + [max(vlist)+5],'k--',
label = r'$v_{spot}$')
recall_speeds_mean = 10**3 * np.array([np.mean(vs) for vs in recall_speeds])
recall_speeds_sem = 10**3 * np.array([stats.sem(vs) for vs in recall_speeds])
ax.errorbar(vlist, recall_speeds_mean, yerr=recall_speeds_sem, fmt='k', capsize=0,
label = r'$\langle v_{rc} \rangle$')
ax.set_xlim(0, max(vlist)+5)
ax.set_ylim(0, 25)
ax.set_ylabel(r'Speed [$\mu$m/ms]')
ax.set_xlabel(r'$v_{spot}$ [$\mu$m/ms]')
ax.legend()
plt.show()
###Output
_____no_output_____ |
09-API_test.ipynb | ###Markdown
Load Libraries
###Code
# General Import
import re
import math
import string
import numpy as np
import pandas as pd
from scipy.sparse import hstack
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics.pairwise import cosine_distances
import gensim.downloader as api
from nltk.tokenize import word_tokenize
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
# Starting point
import os
import sys
from pathlib import Path
PATH_HOME = Path.home()
PATH_PROJ = Path.cwd()
PATH_DATA = PATH_PROJ
sys.path.append(str(PATH_PROJ))
###Output
_____no_output_____
###Markdown
Load Data
###Code
# TRAIN
df_train = pd.read_csv('data2.csv')
df_train.dropna(inplace=True)
print(df_train.shape)
df_train.head(2)
# rename dataframe
df_train = df_train.rename(columns={'Intent': 'intent', 'Questions': 'query'})
df_train = df_train[['intent', 'query']]
df_train.head(2)
# TEST
df_test = pd.read_csv('uat_data_intent.csv')
df_test.dropna(inplace=True)
print(df_test.shape)
df_test.head(2)
df_test['correct_google'] = np.where(df_test['User Clicked intent'] == df_test['Google-intent'], 1, 0)
df_test.head()
google_accuracy = sum(df_test['correct_google']) / len(df_test['correct_google'])
print(" Google NLU accuracy is {:.1%}".format(google_accuracy))
# rename dataframe
df_test = df_test.rename(columns={'User Clicked intent': 'intent', 'Question': 'query'})
df_test = df_test[['intent', 'query']]
df_test.head(2)
###Output
_____no_output_____
###Markdown
Utilities
###Code
def clean_text(text):
""" Basic text cleaning
1. lowercase
2. remove special characters
"""
text = text.lower()
text = re.sub(r'[^a-z0-9\s]', '', text)
return text
def nltk_tokenize(text):
""" tokenize text using NLTK and join back as sentence"""
# import nltk
# nltk.download('punkt')
return ' '.join(word_tokenize(text))
# Function for spacy tokenizer
# Create our list of punctuation marks
punctuations = string.punctuation
# Create our list of stopwords
nlp = spacy.load('en_core_web_lg')
stop_words = spacy.lang.en.stop_words.STOP_WORDS
# Creating our tokenizer function
def spacy_tokenizer(sentence):
# Creating our token object, which is used to create documents with linguistic annotations.
mytokens = nlp(sentence)
# Lemmatizing each token and converting each token into lowercase
mytokens = [ word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_ for word in mytokens ]
# Removing stop words
mytokens = [ word for word in mytokens if word not in stop_words and word not in punctuations ]
# return preprocessed list of tokens
return mytokens
def get_idf_TfidfVectorizer(sentences):
""" Get idf dictionary by using TfidfVectorizer
Args:
sentences (list): list of input sentences (str)
Returns:
idf (dict): idf[word] = inverse document frequency of that word in all training queries
"""
# use customized Spacy tokenizer
vectorizer = TfidfVectorizer(tokenizer=spacy_tokenizer)
vectorizer.fit(sentences)
# TODO: normalize the idf weights
idf = {k:vectorizer.idf_[v] for k,v in vectorizer.vocabulary_.items()}
return idf
def get_sentence_vec(sentence, word2vec, idf=None):
""" Get embedding of sentence by using word2vec embedding of words
If idf is provided, the sentence is the weighted embedding by
SUM( embedding[word] x idf[word] )
Args:
sentence (str): input sentence
word2vec (dict): loaded word2vec model from Gensim
idf (dict, optional): inverse document frequency of words in all queries
Returns:
emb (np.array): 300-dimentions embedding of sentence
"""
words = sentence.split()
words = [word for word in words if word in word2vec.vocab]
# if no word in word2vec vocab, return 0x300 embedding
if len(words)==0:
return np.zeros((300,), dtype='float32')
# use mean if no idf provided
if idf is None:
emb = word2vec[words].mean(axis=0)
else:
# get all idf of words, if new word is not in idf, assign 0.0 weights
idf_series = np.array([idf.get(word, 0.0) for word in words])
# change shape to 1 x num_of_words
idf_series = idf_series.reshape(1, -1)
# use matrix multiplication to get weighted word vector sum for sentence embeddings
emb = np.matmul(idf_series, word2vec[words]).reshape(-1)
return emb
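# In other words: with idf weights w_i for the words of a sentence and word vectors v_i,
# the weighted sentence embedding above is sum_i w_i * v_i (or the plain mean of the v_i
# when no idf dictionary is supplied); get_sentences_centre below then averages these
# per-sentence embeddings to obtain one centre vector per intent cluster.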
def get_sentences_centre(sentences, word2vec, idf=None, num_features=300):
""" Get sentences centre by averaging all embeddings of sentences in a list
Depends on function get_sentence_vec()
Args:
sentence (list): list of input sentences (str)
word2vec (dict): loaded word2vec model from Gensim
idf (dict, optional): inverse document frequency of words in all queries
Returns:
emb (np.array): 300-dimentions embedding of sentence
"""
# convert list of sentences to their vectors
sentences_vec = [get_sentence_vec(sentence, word2vec, idf) for sentence in sentences]
# each row in matrix is 300 dimensions embedding of a sentence
sentences_matrix = np.vstack(sentences_vec)
# print(sentences_matrix.shape)
# average of all rows, take mean at y-axis
sentences_centre = sentences_matrix.mean(axis=0)
# result should be (300,) same as single sentence
# print(sentences_centre.shape)
return sentences_centre
def get_cluster_centre(df, intent_list, word2vec, idf=None):
""" get intent cluster centre based on intent list and word embeddings
Depends on function get_sentences_centre()
Args:
intent_list (list): List of unique intents(str)
word2vec (dict): word embeddings dictionary
Returns:
result (dict): intent cluster centres in dictionary format - {intent1:embedding1, intent2:embedding2,...}
"""
result = {intent:get_sentences_centre(df[df.intent == intent]['query'].values, word2vec, idf) for intent in intent_list}
return result
def get_distance_matrix(df_in, word2vec, leave_one_out=False, idf=False):
""" Get distance for each query to every intent center
Depends on function get_cluster_centre()
Args:
df_in (pd.DataFrame): input dataframe with intent and query
word2vec (dict): word embeddings dictionary
leave_one_out (bool): whether leave the input query out of training
idf (bool): whether use weighted word vectors to get sentence embedding
Returns:
        result (pd.DataFrame): distance matrix for each query, lowest distance intent ideally should match label
"""
df = df_in.copy()
intent_list = df.intent.unique().tolist()
if leave_one_out:
# print("Leave one out")
sentence_distance = []
for ind in df.index:
sentence_distance_tmp = []
query = df.loc[ind, 'query']
df_data = df.drop(ind)
sentence_centre_dic = get_cluster_centre(df_data, intent_list, word2vec, idf)
for intent in intent_list:
sentence_distance_tmp.append(cosine_distances(get_sentence_vec(query, word2vec, idf).reshape(1,-1),
sentence_centre_dic[intent].reshape(1,-1)).item())
sentence_distance.append(sentence_distance_tmp)
df_sentence_distance = pd.DataFrame(sentence_distance, columns=intent_list)
df.reset_index(drop=True, inplace=True)
result = pd.concat([df, df_sentence_distance], axis=1)
else:
sentence_centre_dic = get_cluster_centre(df, intent_list, word2vec, idf)
# build dataframe that contains distance between each query to all intent cluster centre
for intent in intent_list:
# distance = cosine_similarity(sentence embedding, intent cluster centre embedding)
df[intent] = df['query'].apply(lambda x: cosine_distances(get_sentence_vec(x, word2vec, idf).reshape(1,-1),
sentence_centre_dic[intent].reshape(1,-1)).item())
result = df
return result
def evaluate_distance_matrix(df_in):
""" Evaluate distance matrix by compare closest intent center and label """
df = df_in.copy()
df.set_index(['intent', 'query'], inplace=True)
df['cluster'] = df.idxmin(axis=1)
df.reset_index(inplace=True)
df['correct'] = (df.cluster == df.intent)
accuracy = sum(df.correct) / len(df)
# print("Accuracy for distance-based classification is", '{:.2%}'.format(result))
return accuracy
def test_clustering_accuracy(df_in, word2vec):
""" test accuracy based on distance of sentence to each cluster center"""
df_result = get_distance_matrix(df_in, word2vec)
# print(df_result.head())
accuracy = evaluate_distance_matrix(df_result)
return df_result, accuracy
# TEST
def test_idf_acc(df_in, word2vec, idf):
df_result = get_distance_matrix(df_in, word2vec, leave_one_out=False, idf=idf)
# print(df_result.head())
accuracy = evaluate_distance_matrix(df_result)
return df_result, accuracy
###Output
_____no_output_____
###Markdown
Pipeline
###Code
# preprocessing questions
df_train['query'] = df_train['query'].apply(clean_text)
df_train['query'] = df_train['query'].apply(nltk_tokenize)
df_train['query'] = df_train['query'].apply(lambda x:' '.join([token.lemma_ for token in nlp(x) if token.lemma_ not in stop_words]))
df_train['query'] = df_train['query'].str.lower()
# preprocessing test as well
df_test['query'] = df_test['query'].apply(clean_text)
df_test['query'] = df_test['query'].apply(nltk_tokenize)
df_test['query'] = df_test['query'].apply(lambda x:' '.join([token.lemma_ for token in nlp(x) if token.lemma_ not in stop_words]))
df_test['query'] = df_test['query'].str.lower()
df_train.head(2)
df_test.head(2)
intent_list = df_train.intent.unique().tolist()
intent_list[:2]
test_intent_list = df_test.intent.unique().tolist()
set(intent_list) == set(test_intent_list)
for item in test_intent_list:
if item not in intent_list:
print(item)
for item in intent_list:
if item not in test_intent_list:
print(item)
import warnings
warnings.filterwarnings("ignore")
# get idf
idf = get_idf_TfidfVectorizer(df_train['query'].tolist())
# TEST
try:
word2vec
except NameError:
word2vec = api.load("word2vec-google-news-300")
df_result, accuracy = test_idf_acc(df_train, word2vec, idf)
print("Training accuracy for word2vec + IDF is", '{:.2%}'.format(accuracy))
###Output
Training accuracy for word2vec + IDF is 91.89%
###Markdown
Compare: Accuracy without IDF is ~90%
###Code
# get cluster centers from training set
idf = get_idf_TfidfVectorizer(df_train['query'].tolist())
dict_cluster = get_cluster_centre(df_train, intent_list, word2vec, idf)
def get_distance_matrix_idf(df_test, intent_list, dict_cluster, word2vec, idf):
""" Get distance for each query to every intent center
Args:
df_test (pd.DataFrame): input test dataframe with intent and query
intent_list (list): list of intents to loop through
dict_cluster (dict): dictionary of cluster centres
word2vec (dict): word embeddings dictionary
idf (dict): idf of each words
Returns:
        result (pd.DataFrame): distance matrix for each query, lowest distance intent ideally should match label
"""
df = df_test.copy()
for intent in intent_list:
# distance = cosine_similarity(sentence embedding, intent cluster centre embedding)
df[intent] = df['query'].apply(lambda x: cosine_distances(get_sentence_vec(x, word2vec, idf).reshape(1,-1),
dict_cluster[intent].reshape(1,-1)).item())
return df
df_test_cluster = get_distance_matrix_idf(df_test, intent_list, dict_cluster, word2vec, idf)
df_test_cluster.head(2)
cluster_cols = list(df_test_cluster.columns.values)[2:]
# verify
set(intent_list) == set(cluster_cols)
def get_top_3_clusters(data, intent_list):
data = data.copy()
cluster_cols = intent_list.copy()
data['clusters_top3'] = data.apply(lambda x: np.argsort(x[cluster_cols].values)[:3].tolist(), axis=1)
intents = cluster_cols # get all tickers
intent2index = {v: i for (i, v) in enumerate(intents)}
data['target'] = data['intent'].apply(lambda x: intent2index[x])
top_clusters_cols = pd.DataFrame(data['clusters_top3'].values.tolist(),columns = ['clusters_1','clusters_2','clusters_3']).reset_index(drop=True)
data = data.reset_index(drop=True)
data = pd.concat([data,top_clusters_cols], axis=1)
data.drop(columns = 'clusters_top3', inplace=True)
data.drop(columns = cluster_cols, inplace=True)
# print(data.head())
return data, intent2index
df_test_cluster_top_n, _ = get_top_3_clusters(df_test_cluster, cluster_cols)
df_test_cluster_top_n.head()
def get_accuracy(data, top=1):
data = data.copy()
    assert top in (1,2,3), "top must be in (1, 2, 3)"
if top == 1:
# top 1 accuracy
accuracy = (data[(data['clusters_1'] == data['target'])].shape[0] / data.shape[0])
elif top == 2:
# top 2 accuracy
data["exists"] = data.drop(data.columns[[0,1,2,5]], 1).isin(data["target"]).any(1)
accuracy = sum(data['exists'])/ data.shape[0]
elif top == 3:
# top 3 accuracy
data["exists"] = data.drop(data.columns[[0,1,2]], 1).isin(data["target"]).any(1)
accuracy = sum(data['exists'])/ data.shape[0]
else:
        raise ValueError("top must be in (1, 2, 3)")
print('Accuracy for top {} clustering result is {:.1%}'.format(top, accuracy))
return accuracy
get_accuracy(df_test_cluster_top_n, 1)
get_accuracy(df_test_cluster_top_n, 2)
get_accuracy(df_test_cluster_top_n, 3)
###Output
Accuracy for top 1 clustering result is 71.9%
Accuracy for top 2 clustering result is 82.8%
Accuracy for top 3 clustering result is 87.5%
###Markdown
Combine with NLP features
###Code
df_train, intent2index = get_top_3_clusters(df_result, cluster_cols)
df_train.head(2)
def get_keywords(intent_list, stop_words):
""" Get list of keywords from intent """
keywords = []
for intent in list(set(intent_list)):
keywords.extend(intent.strip().split(' '))
keyword_list = list(set(keywords))
keyword_list = [i.lower() for i in keyword_list if i.lower() not in stop_words]
keyword_list.append('nsip')
keyword_list_lemma = []
text = nlp(' '.join([w for w in keyword_list]))
for token in text:
keyword_list_lemma.append(token.lemma_)
return keyword_list_lemma
keyword_list_lemma = get_keywords(intent_list, stop_words=STOP_WORDS)
def get_nlp_features(df, keyword_list_lemma):
""" Get keyword features from dataframe """
data = df.copy()
data['lemma'] = data['query'].apply(lambda x:' '.join([token.lemma_ for token in nlp(x) if token.lemma_ not in stop_words]))
data['keyword'] = data['lemma'].apply(lambda x: list(set([token.lemma_ for token in nlp(x) if token.lemma_ in keyword_list_lemma])))
data['noun'] = data['query'].apply(lambda x: list(set([token.lemma_ for token in nlp(x) if token.pos_ in ['NOUN','PROPN'] and token.lemma_ not in stop_words])))
data['verb'] = data['query'].apply(lambda x: list(set([token.lemma_ for token in nlp(x) if token.pos_ in ['VERB'] and token.lemma_ not in stop_words])))
data['noun'] = data['noun'].apply(lambda x: ' '.join([w for w in x]))
data['verb'] = data['verb'].apply(lambda x: ' '.join([w for w in x]))
data['keyword'] = data['keyword'].apply(lambda x: ' '.join([w for w in x]))
return data
df_train.head(2)
df_train = get_nlp_features(df_train, keyword_list_lemma)
df_train.head(2)
df_test = get_nlp_features(df_test_cluster_top_n, keyword_list_lemma)
df_test.head(2)
# combine model score
countvector_cols = ['lemma', 'keyword', 'noun', 'verb']
top_clusters_cols = ['clusters_1', 'clusters_2', 'clusters_3']
feature_cols = countvector_cols + top_clusters_cols
###Output
_____no_output_____
###Markdown
Random Forest
###Code
def get_train_test(df_train, df_test, feature_cols):
""" split dataset, get X_train, X_test, y_train, y_test """
X_train = df_train[feature_cols]
# print(X_train.head(1))
y_train = df_train['target']
# print(y_train.head(1))
X_test = df_test[feature_cols]
y_test = df_test['target']
# print(X_test.head(1))
# print(y_test.head(1))
return X_train, y_train, X_test, y_test
X_train, y_train, X_test, y_test = get_train_test(df_train, df_test, feature_cols)
def add_nlp_to_x(X_train, X_test):
""" Add NLP features to input X """
v_lemma = TfidfVectorizer()
x_train_lemma = v_lemma.fit_transform(X_train['lemma'])
x_test_lemma = v_lemma.transform(X_test['lemma'])
vocab_lemma = dict(v_lemma.vocabulary_)
v_keyword = TfidfVectorizer()
x_train_keyword = v_keyword.fit_transform(X_train['keyword'])
x_test_keyword = v_keyword.transform(X_test['keyword'])
vocab_keyword = dict(v_keyword.vocabulary_)
v_noun = TfidfVectorizer()
x_train_noun = v_noun.fit_transform(X_train['noun'])
x_test_noun = v_noun.transform(X_test['noun'])
vocab_noun = dict(v_noun.vocabulary_)
v_verb = TfidfVectorizer()
x_train_verb = v_verb.fit_transform(X_train['verb'])
x_test_verb = v_verb.transform(X_test['verb'])
vocab_verb = dict(v_verb.vocabulary_)
# combine all features
x_train_combined = hstack((x_train_lemma,x_train_keyword,x_train_noun,x_train_verb,X_train[top_clusters_cols].values),format='csr')
x_train_combined_columns= v_lemma.get_feature_names()+v_keyword.get_feature_names()+v_noun.get_feature_names()+v_verb.get_feature_names()+top_clusters_cols
x_test_combined = hstack((x_test_lemma, x_test_keyword, x_test_noun, x_test_verb, X_test[top_clusters_cols].values), format='csr')
x_test_combined_columns = v_lemma.get_feature_names()+v_keyword.get_feature_names()+v_noun.get_feature_names()+v_verb.get_feature_names()+top_clusters_cols
x_train_combined = pd.DataFrame(x_train_combined.toarray())
x_train_combined.columns = x_train_combined_columns
x_test_combined = pd.DataFrame(x_test_combined.toarray())
x_test_combined.columns = x_test_combined_columns
return x_train_combined, x_test_combined, v_lemma, v_keyword, v_noun, v_verb
x_train_combined, x_test_combined, v_lemma, v_keyword, v_noun, v_verb = add_nlp_to_x(X_train, X_test)
# build classifier
clf = RandomForestClassifier(max_depth=50, n_estimators=1000)
clf.fit(x_train_combined, y_train)
probs = clf.predict_proba(x_test_combined)
best_3 = pd.DataFrame(np.argsort(probs, axis=1)[:,-3:],columns=['top3','top2','top1'])
best_3['top1'] = clf.classes_[best_3['top1']]
best_3['top2'] = clf.classes_[best_3['top2']]
best_3['top3'] = clf.classes_[best_3['top3']]
result = pd.concat([best_3.reset_index(drop=True),pd.DataFrame(y_test).reset_index(drop=True), X_test[feature_cols].reset_index(drop=True)], axis=1)
score_1 = result[result['top1'] == result['target']].shape[0] / result.shape[0]
score_2 = result[(result['top1'] == result['target']) | (result['top2'] == result['target'])].shape[0] / result.shape[0]
score_3 = result[(result['top1'] == result['target']) | (result['top2'] == result['target'])| (result['top3'] == result['target'])].shape[0] / result.shape[0]
print('Accuracy for top 1 clustering + classifier result is {:.1%}'.format(score_1))
print('Accuracy for top 2 clustering + classifier result is {:.1%}'.format(score_2))
print('Accuracy for top 3 clustering + classifier result is {:.1%}'.format(score_3))
###Output
_____no_output_____
###Markdown
Compare: Google NLU accuracy is 78.1% API Load model and run on one sentence
###Code
import pickle
# save the model to disk
model_filename = 'RFClassifier.pkl'
pickle.dump(clf, open(model_filename, 'wb'))
# save vectorizer
with open('TFIDFVectorizer_lemma.pkl', 'wb') as f:
pickle.dump(v_lemma, f)
with open('TFIDFVectorizer_keyword.pkl', 'wb') as f:
pickle.dump(v_keyword, f)
with open('TFIDFVectorizer_noun.pkl', 'wb') as f:
pickle.dump(v_noun, f)
with open('TFIDFVectorizer_verb.pkl', 'wb') as f:
pickle.dump(v_verb, f)
# save necessary variables
with open('idf.pkl', 'wb') as f:
pickle.dump(idf, f)
with open('intent_list.pkl', 'wb') as f:
pickle.dump(intent_list, f)
with open('dict_cluster.pkl', 'wb') as f:
pickle.dump(dict_cluster, f)
with open('intent2index.pkl', 'wb') as f:
pickle.dump(intent2index, f)
with open('keyword_list_lemma.pkl', 'wb') as f:
pickle.dump(keyword_list_lemma, f)
test_query = "Please show me the current promotions"
df = pd.DataFrame()
df = pd.DataFrame(columns=['query'])
df.loc[0] = [test_query]
df
# preprocessing test as well
df['query'] = df['query'].apply(clean_text)
df['query'] = df['query'].apply(nltk_tokenize)
df['query'] = df['query'].apply(lambda x:' '.join([token.lemma_ for token in nlp(x) if token.lemma_ not in stop_words]))
df['query'] = df['query'].str.lower()
df = get_nlp_features(df, keyword_list_lemma)
df
df_cluster = get_distance_matrix_idf(df, intent_list, dict_cluster, word2vec, idf)
df_cluster
def get_top_3(data, intent_list):
data = data.copy()
cluster_cols = intent_list.copy()
data['clusters_top3'] = data.apply(lambda x: np.argsort(x[cluster_cols].values)[:3].tolist(), axis=1)
top_clusters_cols = pd.DataFrame(data['clusters_top3'].values.tolist(),columns = ['clusters_1','clusters_2','clusters_3']).reset_index(drop=True)
data = data.reset_index(drop=True)
data = pd.concat([data,top_clusters_cols], axis=1)
data.drop(columns = 'clusters_top3', inplace=True)
data.drop(columns = cluster_cols, inplace=True)
# print(data.head())
return data
top_3 = get_top_3(df_cluster, cluster_cols)
top_3
def add_nlp(df, v_lemma, v_keyword, v_noun, v_verb, top_clusters_cols):
""" Add NLP features to input X """
x_test_lemma = v_lemma.transform(df['lemma'])
x_test_keyword = v_keyword.transform(df['keyword'])
x_test_noun = v_noun.transform(df['noun'])
x_test_verb = v_verb.transform(df['verb'])
# combine all features
x_test_combined = hstack((x_test_lemma,
x_test_keyword,
x_test_noun,
x_test_verb,
df[top_clusters_cols].values),format='csr')
x_test_combined_columns = v_lemma.get_feature_names()+\
v_keyword.get_feature_names()+\
v_noun.get_feature_names()+\
v_verb.get_feature_names()+\
top_clusters_cols
x_test_combined = pd.DataFrame(x_test_combined.toarray())
x_test_combined.columns = x_test_combined_columns
return x_test_combined
X_in = add_nlp(top_3, v_lemma, v_keyword, v_noun, v_verb, top_clusters_cols)
probs = clf.predict_proba(X_in)
probs
np.argsort(probs[0])[-3:][::-1]
ind = np.argsort(probs, axis=1)[:,-3:]
proba = probs[0][ind[0]]
proba
best_3 = pd.DataFrame(ind,columns=['top3','top2','top1'])
best_3['top1'] = clf.classes_[best_3['top1']]
best_3['top2'] = clf.classes_[best_3['top2']]
best_3['top3'] = clf.classes_[best_3['top3']]
best_3['top3_prob'] = proba[0]
best_3['top2_prob'] = proba[1]
best_3['top1_prob'] = proba[2]
best_3
index2intent = {y:x for x,y in intent2index.items()}
def get_target_name(index, index2intent=index2intent):
return index2intent[index]
best_3['top1_name'] = best_3['top1'].apply(get_target_name)
best_3['top2_name'] = best_3['top2'].apply(get_target_name)
best_3['top3_name'] = best_3['top3'].apply(get_target_name)
best_3
top1 = best_3.at[0,'top1_name']
top2 = best_3.at[0,'top2_name']
top3 = best_3.at[0,'top3_name']
top1_prob = best_3.at[0,'top1_prob']
top2_prob = best_3.at[0,'top2_prob']
top3_prob = best_3.at[0,'top3_prob']
print(f'For sentence:\n{test_query}\n')
print(f'Top 1 prediction intent is {top1} with probability {100*top1_prob:.2f}%')
print(f'Top 2 prediction intent is {top2} with probability {100*top2_prob:.2f}%')
print(f'Top 3 prediction intent is {top3} with probability {100*top3_prob:.2f}%')
###Output
_____no_output_____
###Markdown
Consolidate
###Code
def get_intent_nlp(query, classifier_intent_nlp):
""" load classification model outside the function
return a dataframe df
columns: pred_seq, intent_class, intent_string, pred_prob
    rows: top 3 prediction, example for first row: 1, 0, Promotions, 0.66
"""
return df
def get_intent_nlp_clustering(query, classifier_intent_nlp_clustering, word2vec):
""" load word2vec dict outside the function
load classification model outside the function
return a dataframe df
columns: pred_seq, intent_class, intent_string, pred_prob
    rows: top 3 prediction, example for first row: 1, 0, Promotions, 0.66
"""
return df
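# Illustrative sketch only (an assumption, not part of the original API above): one way the
# consolidated function could chain the helpers defined earlier in this notebook. It relies on
# globals created above (nlp, stop_words, keyword_list_lemma, idf, dict_cluster, intent_list,
# index2intent, the fitted TF-IDF vectorizers v_* and the trained classifier clf / word2vec).
def get_intent_nlp_clustering_sketch(query, clf, word2vec, idf, dict_cluster,
                                     intent_list, index2intent):
    df = pd.DataFrame({'query': [query]})
    df['query'] = df['query'].apply(clean_text).apply(nltk_tokenize)
    df['query'] = df['query'].apply(lambda x: ' '.join(
        [token.lemma_ for token in nlp(x) if token.lemma_ not in stop_words])).str.lower()
    df = get_nlp_features(df, keyword_list_lemma)
    df_cluster = get_distance_matrix_idf(df, intent_list, dict_cluster, word2vec, idf)
    top_3 = get_top_3(df_cluster, intent_list)
    X_in = add_nlp(top_3, v_lemma, v_keyword, v_noun, v_verb,
                   ['clusters_1', 'clusters_2', 'clusters_3'])
    probs = clf.predict_proba(X_in)[0]
    top_idx = np.argsort(probs)[-3:][::-1]
    rows = [(rank + 1, clf.classes_[i], index2intent[clf.classes_[i]], probs[i])
            for rank, i in enumerate(top_idx)]
    return pd.DataFrame(rows, columns=['pred_seq', 'intent_class', 'intent_string', 'pred_prob'])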
###Output
_____no_output_____ |
Week 2/CNN with Tensorflow/Week_2_Cats_v_Dogs_Augmentation.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Let's start with a model that's very effective at learning Cats v Dogs. It's similar to the previous models that you have used, but I have updated the layers definition. Note that there are now 4 convolutional layers with 32, 64, 128 and 128 convolutions respectively. Also, this will train for 150 epochs, because I want to plot the graph of loss and accuracy.
###Code
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O /tmp/cats_and_dogs_filtered.zip
import os
import zipfile
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
local_zip = '/tmp/cats_and_dogs_filtered.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()
base_dir = '/tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=1e-4),
metrics=['accuracy'])
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest'
)
test_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
history = model.fit(
train_generator,
steps_per_epoch=100, # 2000 images = batch_size * steps
epochs=150,
validation_data=validation_generator,
validation_steps=50, # 1000 images = batch_size * steps
verbose=2)
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
The Training Accuracy is close to 100%, and the validation accuracy is in the 70%-80% range. This is a great example of overfitting -- which in short means that it can do very well with images it has seen before, but not so well with images it hasn't. Let's see if we can do better to avoid overfitting -- and one simple method is to augment the images a bit. If you think about it, most pictures of a cat are very similar -- the ears are at the top, then the eyes, then the mouth etc. Things like the distance between the eyes and ears will always be quite similar too. What if we tweak the images to change this up a bit -- rotate the image, squash it, etc. That's what image augmentation is all about. And there's an API that makes it easy...Now take a look at the ImageDataGenerator. There are properties on it that you can use to augment the image. ``` Updated to do image augmentationtrain_datagen = ImageDataGenerator( rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')```These are just a few of the options available (for more, see the Keras documentation). Let's quickly go over what we just wrote:* rotation_range is a value in degrees (0–180), a range within which to randomly rotate pictures.* width_shift and height_shift are ranges (as a fraction of total width or height) within which to randomly translate pictures vertically or horizontally.* shear_range is for randomly applying shearing transformations.* zoom_range is for randomly zooming inside pictures.* horizontal_flip is for randomly flipping half of the images horizontally. This is relevant when there are no assumptions of horizontal asymmetry (e.g. real-world pictures).* fill_mode is the strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift.Here's some code where we've added Image Augmentation. Run it to see the impact.
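As an optional aside (a sketch, not part of the original lesson, assuming the `train_cats_dir` directory defined in the first cell), you can preview what these transforms do by plotting a few augmented copies of a single training image before launching the full run below:
```python
import os
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array

# Same augmentation settings as above, applied repeatedly to one cat image
aug = ImageDataGenerator(rotation_range=40, width_shift_range=0.2, height_shift_range=0.2,
                         shear_range=0.2, zoom_range=0.2, horizontal_flip=True,
                         fill_mode='nearest')
img = img_to_array(load_img(os.path.join(train_cats_dir, os.listdir(train_cats_dir)[0]),
                            target_size=(150, 150)))
img = img.reshape((1,) + img.shape)  # the generator expects a batch dimension
fig, axes = plt.subplots(1, 5, figsize=(15, 3))
for ax, batch in zip(axes, aug.flow(img, batch_size=1)):
    ax.imshow(batch[0].astype('uint8'))  # no rescaling here, so pixel values stay in 0-255
    ax.axis('off')
plt.show()
```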
###Code
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O /tmp/cats_and_dogs_filtered.zip
import os
import zipfile
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
local_zip = '/tmp/cats_and_dogs_filtered.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()
base_dir = '/tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=1e-4),
metrics=['accuracy'])
# This code has changed. Now instead of the ImageGenerator just rescaling
# the image, we also rotate and do other operations
# Updated to do image augmentation
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
history = model.fit(
train_generator,
steps_per_epoch=100, # 2000 images = batch_size * steps
epochs=100,
validation_data=validation_generator,
validation_steps=50, # 1000 images = batch_size * steps
verbose=2)
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O /tmp/cats_and_dogs_filtered.zip
import os
import zipfile
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
local_zip = '/tmp/cats_and_dogs_filtered.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()
base_dir = '/tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=1e-4),
metrics=['accuracy'])
# This code has changed. Now instead of the ImageGenerator just rescaling
# the image, we also rotate and do other operations
# Updated to do image augmentation
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
history = model.fit(
train_generator,
steps_per_epoch=100, # 2000 images = batch_size * steps
epochs=100,
validation_data=validation_generator,
validation_steps=50, # 1000 images = batch_size * steps
verbose=2)
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____ |
predict_regulators.ipynb | ###Markdown
Methods We defined a set of known facilitators based on literature and used these to train an ensemble of classifiers based on features from the high content microscopy screen (Table S2). This ensemble consisted of an L2 penalized logistic regression and a random forest classifier. The hyperparameter of the L2 regularization of the logistic regression was set using three-fold cross-validation. The random forest model was trained with 1000 trees and a maximum of 10 features considered per split. The final probability was calculated as the mean of the predicted probabilities of the two classifiers. The performance was assessed by the ROC AUC (0.757) and the Precision-Recall AUC (0.604) based on leave-one-out cross-validation.
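A minimal sketch of that ensemble (illustrative variable names; the actual fitted models and the leave-one-out evaluation are in the cells below):
```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegressionCV

def ensemble_proba(X_train, y_train, X_test):
    # L2-penalized logistic regression, regularization strength chosen by 3-fold CV
    lr = LogisticRegressionCV(cv=3)
    # Random forest with 1000 trees, at most 10 features considered per split
    rf = RandomForestClassifier(n_estimators=1000, max_features=10,
                                n_jobs=-1, random_state=42)
    lr.fit(X_train, y_train)
    rf.fit(X_train, y_train)
    # Final score: mean of the two classifiers' positive-class probabilities
    return np.mean([lr.predict_proba(X_test)[:, 1],
                    rf.predict_proba(X_test)[:, 1]], axis=0)
```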
###Code
# Here Table_S2.tsv is Table S2 in tab-separated flat text format
df = pd.read_table("Table_S2.tsv")
df.head()
facilitator = [
"c-Myc",
"Chd1",
"Ezh2",
"Jarid2",
"Jmjd1c",
"Kdm2b",
"Kdm3a",
"Kdm5b",
"Kdm6a",
"Ncoa3",
"Parp1",
"Pou5f1",
"Snai1",
"Tet1",
"Wdr5",
]
barrier = [
"Chaf1b",
"Dnmt1",
"Ehmt2",
"Hdac2",
"Kdm6b",
"Mbd3",
"Setdb1",
"Snai2",
"Suv39h1",
"Suv39h2",
"Tet2",
"Trp53",
]
column_map = {
'Replicate ' : "Replicate",
'Plate' : "Plate",
'PLATE' : "Plate",
'GENE NAME' : "GENE NAME",
'PLATE GENE NAME' : "GENE NAME",
'WellName' : "Well",
'SAL4pos - Number of Objects' : "SAL4_no",
    'SAL4pos Area [µm²] - Mean per Well' : "SAL4_area",
'SAL4pos Roundness - Mean per Well' : "SAL4_roundness",
' - Number of Objects' : "no",
    ' Area [µm²] - Mean per Well' : "area_mean",
    ' Area [µm²] - StdDev per Well' : "area_sd",
    ' Roundness - Mean per Well' : "roundness_mean",
    ' Roundness - StdDev per Well' : "roundness_sd",
    ' Width [µm] - Mean per Well' : "width_mean",
    ' Width [µm] - StdDev per Well' : "width_sd",
    ' Length [µm] - Mean per Well' : "length_mean",
    ' Length [µm] - StdDev per Well' : "length_sd",
' Ratio Width to Length - Mean per Well' : "ratio_width_length_mean",
' Ratio Width to Length - StdDev per Well' : "ratio_width_length_sd",
' - Intensity Dapi Image Region Exp1Cam1 Mean - Mean per Well' : "Intensity_Dapi_mean",
' - Intensity Dapi Image Region Exp1Cam1 Mean - StdDev per Well' : "Intensity_Dapi_sd",
' - Intensity Sall4 Image Region Exp1Cam2 Mean - Mean per Well' : "Intensity_Sall4_mean",
' - Intensity Sall4 Image Region Exp1Cam2 Mean - StdDev per Well' : "Intensity_Sall4_sd",
' - Intensity E-Cadherin Image Region Exp2Cam2 Mean - Mean per Well' : "Intensity_E-Cadherin_mean",
' - Intensity E-Cadherin Image Region Exp2Cam2 Mean - StdDev per Well' : "Intensity_E-Cadherin_sd",
' - Intensity Image Region_Ring around region Sal4 Mean - Mean per Well' : "Intensity_ring_Sall4_mean",
' - Intensity Image Region_Ring around region Sal4 Mean - StdDev per Well' : "Intensity_ring_Sall4_sd",
' - Intensity Image Region_Ring around region E cadherin Mean - Mean per Well' : "Intensity_ring_E-Cadherin_mean",
' - Intensity Image Region_Ring around region E cadherin Mean - StdDev per Well' : "Intensity_ring_E-Cadherin_sd",
' - SAL4 ratio - Mean per Well' : "SAL4_ratio",
' - E-CAD RATIO - Mean per Well' : "E-cad_ratio",
' - SAL4pos - Mean per Well' : "SAL4pos",
' STAR Symmetry 02 - Mean per Well' : "STAR_Symm_02",
' STAR Symmetry 03 - Mean per Well' : "STAR_Symm_03",
' STAR Symmetry 04 - Mean per Well' : "STAR_Symm_04",
' STAR Symmetry 05 - Mean per Well' : "STAR_Symm_05",
' STAR Symmetry 12 - Mean per Well' : "STAR_Symm_12",
' STAR Symmetry 13 - Mean per Well' : "STAR_Symm_13",
' STAR Symmetry 14 - Mean per Well' : "STAR_Symm_14",
' STAR Symmetry 15 - Mean per Well' : "STAR_Symm_15",
' STAR Threshold Compactness 30% - Mean per Well' : "STAR_compact_30%",
' STAR Threshold Compactness 40% - Mean per Well' : "STAR_compact_40%",
' STAR Threshold Compactness 50% - Mean per Well' : "STAR_compact_50%",
' STAR Threshold Compactness 60% - Mean per Well' : "STAR_compact_60%",
' STAR Axial Small Length - Mean per Well' : "STAR_Axial_Small_Length",
' STAR Axial Length Ratio - Mean per Well' : "STAR_Length_Ratio",
' STAR Radial Mean - Mean per Well' : "STAR_Radial_Mean",
' STAR Radial Relative Deviation - Mean per Well' : "START_Radial_Dev",
' STAR Profile 4/5 - Mean per Well' : "STAR_Profile_4",
' STAR Profile 5/5 - Mean per Well' : "STAR_Profile_5",
' SEr Dapi SER Spot 0 px - Mean per Well' : "SER Spot",
' SEr Dapi SER Hole 0 px - Mean per Well' : "SER Hole",
' SEr Dapi SER Edge 0 px - Mean per Well' : "SER Edge",
' SEr Dapi SER Ridge 0 px - Mean per Well' : "SER Ridge",
' SEr Dapi SER Valley 0 px - Mean per Well' : "SER Valley",
' SEr Dapi SER Saddle 0 px - Mean per Well' : "SER Saddle",
' SEr Dapi SER Bright 0 px - Mean per Well' : "SER Bright",
' SEr Dapi SER Dark 0 px - Mean per Well' : "SER Dark",
' Haralick DAPI Haralick Correlation 1 px - Mean per Well' : "Haralick Correlation",
' Haralick DAPI Haralick Contrast 1 px - Mean per Well' : "Haralick Contrast",
' Haralick DAPI Haralick Sum Variance 1 px - Mean per Well' : "Haralick Sum Variance",
' Haralick DAPI Haralick Homogeneity 1 px - Mean per Well' : "Haralick Homogeneity",
' GABOR E Cadherin Gabor Min 2 px w2 - Mean per Well' : "Gabor Min 2",
' GABOR E Cadherin Gabor Max 2 px w2 - Mean per Well' : "Gabor Max 2",
}
new_columns = [column_map.get(x,x) for x in df.columns]
df.columns = new_columns
df = df[~df["Plate"].isnull()]
df["Plate"] = df["Plate"].astype(str)
df.head()
def get_median_x_y(df, kind="combine"):
sum_df = df.groupby("GENE NAME").median().iloc[:, 1:]
control = sum_df[sum_df.index.str.startswith("nt")]
pos = sum_df[sum_df.index.isin(facilitator)]
neg = sum_df[sum_df.index.isin(barrier)]
effect = sum_df[sum_df.index.isin(facilitator + barrier)]
if kind == "barrier":
effect = sum_df[sum_df.index.isin(barrier)]
elif kind == "facilitator":
effect = sum_df[sum_df.index.isin(facilitator)]
X = pd.concat((effect, control)).fillna(0)
y = np.hstack((np.ones(effect.shape[0]), np.zeros(control.shape[0])))
    print(X.shape[0], "training samples")
    print((sum(y) / y.shape)[0], "labeled as positive")
return X, y
def get_all_x_y(df):
pos = df[df["GENE NAME"].isin(facilitator)].iloc[:,4:]
neg = df[df["GENE NAME"].isin(barrier)].iloc[:,4:]
control = df[df["GENE NAME"].str.startswith("non").fillna(False)].iloc[:,4:]
X = pd.concat((neg, control)).fillna(0)
y = np.hstack((np.ones(neg.shape[0]), np.zeros(control.shape[0])))
    print(X.shape)
    print(y.shape)
    print(sum(y) / y.shape)
return X, y
def loo_cv(X, y, model, img=None):
loo = LeaveOneOut()
y_preds = []
y_probs = []
y_true = []
X_labels = []
for i, (train_index, test_index) in enumerate(loo.split(X)):
if i % 10 == 0:
            print(int(float(i) / X.shape[0] * 100), end=" ")
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_prob = model.predict_proba(X_test)[:,1]
y_preds = np.hstack((y_preds, y_pred))
y_probs = np.hstack((y_probs, y_prob))
y_true = np.hstack((y_true, y_test))
X_labels += list(X.index[test_index])
    print()
    print()
    print("total")
    print("---> predicted")
    print("\t0\t1")
    for i,row in enumerate(confusion_matrix(y_true, y_preds)):
        print("{}\t{}\t{}".format(i, row[0], row[1]))
for x in np.array(X_labels)[y_preds.astype(bool)]:
if not x.startswith("non"):
            print(x)
return y_true, y_probs
X, y = get_median_x_y(df, kind="facilitator")
l = LogisticRegressionCV()
y_true_lr, y_pred_lr = loo_cv(X,y,l)
l.fit(X,y)
pd.DataFrame({"coef":l.coef_[0]}, index=X.columns).sort_values("coef").tail(10)
X, y = get_median_x_y(df, kind="facilitator")
rf = RandomForestClassifier(n_estimators=1000, n_jobs=-1, random_state=42, max_features=10)
y_true_rf, y_pred_rf = loo_cv(X,y,rf)
rf.fit(X,y)
pd.DataFrame({"importances":rf.feature_importances_}, index=X.columns).sort_values("importances").tail(10)
y_pred_mean = np.vstack((y_pred_lr, y_pred_rf)).mean(0)
fpr_lr, tpr_lr, _ = roc_curve(y_true_lr, y_pred_lr)
fpr_rf, tpr_rf, _ = roc_curve(y_true_rf, y_pred_rf)
fpr_mean, tpr_mean, _ = roc_curve(y_true_lr, y_pred_mean)
sns.set_style('white')
plt.plot(fpr_mean, tpr_mean);
plt.xlabel("FPR")
plt.ylabel("TPR")
roc_auc = roc_auc_score(y_true_lr, y_pred_mean)
plt.title("ROC (AUC: {:0.3f})".format(roc_auc))
plt.axes().set_aspect('equal')
plt.savefig("20180821_roc_curve.pdf")
precision_lr, recall_lr, _ = precision_recall_curve(y_true_lr, y_pred_lr)
precision_rf, recall_rf, _ = precision_recall_curve(y_true_rf, y_pred_rf)
precision_mean, recall_mean, _ = precision_recall_curve(y_true_rf, y_pred_mean)
sns.set_style('white')
plt.plot(recall_mean, precision_mean);
plt.xlabel("recall")
plt.ylabel("precision")
pr_auc = average_precision_score(y_true_rf, y_pred_mean)
plt.title("Precision-Recall (AUC: {:0.3f})".format(pr_auc))
plt.ylim(0,1.1)
plt.axes().set_aspect('equal')
plt.savefig("20180821_pr_curve.pdf")
###Output
_____no_output_____ |
Guia/Notebook01-IntroducaoPython.ipynb | ###Markdown
Guide to Digital Image Processing in the Python programming language. Case study: automatic license plate recognition. Description: this guide is made up of several notebooks whose main goal is to present the development of algorithms in Python using the OpenCV computer vision library, taking a case study in automatic license plate recognition as the running example. The images used come from the [SSIG-ALPR Database](http://www.smartsenselab.dcc.ufmg.br/ssig-alpr-database). Notebook number 1: this notebook introduces a bit of the Python language, in particular the NumPy library, which simplifies vector and matrix computations and also provides a wide range of statistical functions. The first step is therefore to import the libraries.
###Code
# Use # to add single-line comments
import numpy as np
###Output
_____no_output_____
###Markdown
Creating Vectors and Matrices
###Code
# Creating a row vector
vector_row = np.array([1,2,3])
# Creating a column vector
vector_column = np.array([[1],[2],[3]])
# Displaying the vectors
print ('vector_row = ',vector_row)
print ('vector_column = \n',vector_column)
# Creating a matrix
matrix = np.array([[1,2,3],[4,5,6]])
print('matrix = \n',matrix)
# Creating matrices of ones and zeros while specifying the data type
matrix_1s = np.ones([2,3],'float')
matrix_0s = np.zeros([1,5],'uint8')
print('matrix_1s = \n',matrix_1s)
print('matrix_0s = ',matrix_0s)
# Creating a vector from a start value, end value and step
vector_arange = np.arange(0,1,0.1,'float')
print('vector_arange = ',vector_arange)
# Creating a vector from a start value, end value and number of elements
vector_linspace = np.linspace(0,1,11,dtype='float')
print('vector_linspace = ',vector_linspace)
###Output
vector_row = [1 2 3]
vector_column =
[[1]
[2]
[3]]
matrix =
[[1 2 3]
[4 5 6]]
matrix_1s =
[[1. 1. 1.]
[1. 1. 1.]]
matrix_0s = [[0 0 0 0 0]]
vector_arange = [0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9]
vector_linspace = [0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1. ]
###Markdown
Selecting elements
###Code
# Creating a row vector
vector_row = np.array([ 1,2,3,4,5,6 ])
print(vector_row,'\n')
# Creating a matrix
matrix = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(matrix,'\n')
# Selecting the third element of the vector
print(vector_row[2],'\n')
# Selecting the element in the second row, second column of the matrix
print(matrix[1,1],'\n')
# Selecting all elements of the vector
print(vector_row[:],'\n')
# Selecting everything up to and including the third element of the vector
print(vector_row[:3],'\n')
# Selecting everything after the third element of the vector
print(vector_row[3:],'\n')
# Selecting the last element of the vector
print(vector_row[-1],'\n')
# Selecting the first two rows and all columns of the matrix
print(matrix[:2,:],'\n')
# Selecting all rows and the second column of the matrix
print(matrix[:,1:2])
###Output
[1 2 3 4 5 6]
[[1 2 3]
[4 5 6]
[7 8 9]]
3
5
[1 2 3 4 5 6]
[1 2 3]
[4 5 6]
6
[[1 2 3]
[4 5 6]]
[[2]
[5]
[8]]
###Markdown
Describing a matrix
###Code
# Creating a matrix
matrix = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
print(matrix,'\n')
# Viewing the number of rows and columns of the matrix
print(matrix.shape)
print('Rows: ',matrix.shape[0])
print('Columns: ',matrix.shape[1],'\n')
# Viewing the number of elements (rows*columns)
print(matrix.size,'\n')
# Viewing the number of dimensions (2 in this case)
print(matrix.ndim)
###Output
[[ 1 2 3]
[ 4 5 6]
[ 7 8 9]
[10 11 12]]
(4, 3)
Rows:  4
Columns:  3
12
2
###Markdown
Extracting information
###Code
print(matrix,'\n')
# Return the max element
print('Maximum = ',np.max(matrix))
# Return the min element
print('Minimum = ',np.min(matrix))
# Find the max element in each column
print('Maximum per column = ',np.max(matrix,axis=0))
# Find the max element in each row
print('Maximum per row = ',np.max(matrix,axis=1))
print(matrix,'\n')
# Mean
print('Mean = ',np.mean(matrix))
# Standard deviation
print('Standard deviation = ',np.std(matrix))
# Variance
print('Variance = ',np.var(matrix))
###Output
[[ 1 2 3]
[ 4 5 6]
[ 7 8 9]
[10 11 12]]
Mean =  6.5
Standard deviation =  3.452052529534663
Variance =  11.916666666666666
###Markdown
Reshaping
###Code
# Creating a matrix
matrix = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(matrix,'\n')
# Reshape
print(matrix.reshape(9,1),'\n')
# Here -1 means "as many columns as needed" with 1 row
print(matrix.reshape(1,-1),'\n')
# If only one value is given, a 1-d vector with that length is returned
print(matrix.reshape(9),'\n')
# The flatten method can also be used to convert the matrix into a 1-d vector
print(matrix.flatten(),'\n')
###Output
[[1 2 3]
[4 5 6]
[7 8 9]]
[[1]
[2]
[3]
[4]
[5]
[6]
[7]
[8]
[9]]
[[1 2 3 4 5 6 7 8 9]]
[1 2 3 4 5 6 7 8 9]
[1 2 3 4 5 6 7 8 9]
###Markdown
Matrix Operations
###Code
# Creating a matrix
matrix = np.array([[-1,1,1],[2,-2,2],[3,3,-3]])
print(matrix,'\n')
# Transpose
print('Transposta = \n', matrix.T)
# Inverse
print('Inversa = \n', np.linalg.inv(matrix))
# Determinant
print('Determinante = ', np.linalg.det(matrix))
# Main diagonal
print('Diagonal principal = ', matrix.diagonal())
# Off-diagonals (offset=1 is one above the main diagonal, offset=-1 one below)
print('Diagonal secundaria uma acima = ', matrix.diagonal(offset=1))
print('Diagonal secundaria uma abaixo = ', matrix.diagonal(offset=-1))
# Creating Matrix-1
matrix_1 = np.array([[1,2,3],[4,5,6],[7,8,9]])
# Creating Matrix-2
matrix_2 = np.array([[7,8,9],[4,5,6],[1,2,3]])
# Addition
print(np.add(matrix_1,matrix_2))
# Subtraction
print(np.subtract(matrix_1,matrix_2))
# Elementwise multiplication
print(matrix_1 * matrix_2)
# Matrix multiplication
print(matrix_1 @ matrix_2)
# Creating Matrix-3
matrix_3 = 2*np.ones([3,3],'uint8')
# Elementwise exponentiation
print(matrix_1 ** matrix_3)
###Output
[[ 8 10 12]
[ 8 10 12]
[ 8 10 12]]
[[-6 -6 -6]
[ 0 0 0]
[ 6 6 6]]
[[ 7 16 27]
[16 25 36]
[ 7 16 27]]
[[ 18 24 30]
[ 54 69 84]
[ 90 114 138]]
[[ 1 4 9]
[16 25 36]
[49 64 81]]
###Markdown
Generating random numbers
###Code
# Setting the random seed
np.random.seed(1)
# Generating 3 random integers between 0 and 10
print(np.random.randint(0,11,3))
# Generating 3 numbers from a normal distribution with mean 1.0 and standard deviation 2.0
print(np.random.normal(1.0,2.0,3))
###Output
[5 8 9]
[-0.60434568 0.10224438 -1.21187015]
|
FoodInspection4/DS_Sprint_Challenge_7.ipynb | ###Markdown
_Lambda School Data Science, Unit 2_ Applied Modeling Sprint Challenge: Predict Chicago food inspections ๐ For this Sprint Challenge, you'll use a dataset with information from inspections of restaurants and other food establishments in Chicago from January 2010 to March 2019. [See this PDF](https://data.cityofchicago.org/api/assets/BAD5301B-681A-4202-9D25-51B2CAE672FF) for descriptions of the data elements included in this dataset.According to [Chicago Department of Public Health โ Food Protection Services](https://www.chicago.gov/city/en/depts/cdph/provdrs/healthy_restaurants/svcs/food-protection-services.html), "Chicago is home to 16,000 food establishments like restaurants, grocery stores, bakeries, wholesalers, lunchrooms, mobile food vendors and more. Our business is food safety and sanitation with one goal, to prevent the spread of food-borne disease. We do this by inspecting food businesses, responding to complaints and food recalls." Your challenge: Predict whether inspections failedThe target is the `Fail` column.- When the food establishment failed the inspection, the target is `1`.- When the establishment passed, the target is `0`. Run this cell to install packages in Colab:
###Code
# <editor-fold desc="five pip category_encoders, eli5, pandas-profiling, pdpbox, shap">
import sys
from sklearn.compose import ColumnTransformer
if 'google.colab' in sys.modules:
# Install packages in Colab
!pip install category_encoders==2.*
!pip install eli5
!pip install pandas-profiling==2.*
!pip install pdpbox
!pip install shap
# </editor-fold>
###Output
_____no_output_____
###Markdown
Run this cell to load the data:
###Code
import numpy as np
# <editor-fold desc="df are train, test">
import pandas as pd
train_url = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5'
test_url = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a'
train = pd.read_csv(train_url)
test = pd.read_csv(test_url)
assert train.shape == (51916, 17)
assert test.shape == (17306, 17)
# </editor-fold>
###Output
_____no_output_____
###Markdown
Part 1: PreprocessingYou may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding._To earn a score of 3 for this part, find and explain leakage. The dataset has a feature that will give you an ROC AUC score > 0.90 if you process and use the feature. Find the leakage and explain why the feature shouldn't be used in a real-world model to predict the results of future inspections._ Part 2: Modeling**Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**_To earn a score of 3 for this part, get an ROC AUC test score >= 0.70 (without using the feature with leakage)._ Part 3: VisualizationMake visualizations for model interpretation. (You may use any libraries.) Choose two of these types:- Permutation Importances- Partial Dependence Plot, 1 feature isolation- Partial Dependence Plot, 2 features interaction- Shapley Values_To earn a score of 3 for this part, make all four of these visualization types._ Part 1: Preprocessing> You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.
###Code
from sklearn.model_selection import train_test_split
X_train, X_validate, y_train, y_validate = train_test_split(train, train, test_size=.4, random_state=42)
target = 'Fail'
def wrangle(X):
X=X.copy()
Columns_drop = ['Fail',
'Inspection ID',
'DBA Name',
'AKA Name',
'License #',
'Address',
'City',
'State',
'Zip',
'Inspection Date',
'Location',
'Violations']
X=X.drop(columns=Columns_drop)
return X
train_df = wrangle(train)
X_train = wrangle(X_train)
y_train = y_train[target]
X_validate = wrangle(X_validate)
y_validate = y_validate[target]
X_Test = wrangle(test)
y_test = test[target]
X_train = X_train.replace({0 : np.nan, '0' : np.nan})
X_validate = X_validate.replace({0 : np.nan, '0' : np.nan})
X_Test = X_Test.replace({0 : np.nan, '0' : np.nan})
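# Hedged leakage-hunting sketch (added; not part of the original notebook):
# score each kept categorical column on its own with one-hot encoding plus a
# logistic regression, and look for a suspiciously high single-feature ROC AUC.
import category_encoders as ce
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
for col in ['Facility Type', 'Risk', 'Inspection Type']:
    enc = ce.OneHotEncoder(use_cat_names=True)
    single = enc.fit_transform(train[[col]].astype(str))
    probs = LogisticRegression(max_iter=1000).fit(single, train['Fail']).predict_proba(single)[:, 1]
    print(col, round(roc_auc_score(train['Fail'], probs), 3))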
# <editor-fold desc="just a bunch of imports">
import category_encoders as ce
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
#from sklearn_pandas import CategoricalImputer
from sklearn.compose import ColumnTransformer
import xgboost
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import cross_val_predict
# </editor-fold>
print('74% pass (0) and 26% fail (1):',pd.value_counts(train.Fail, normalize=True))
###Output
74% pass (0) and 26% fail (1): 0 0.74139
1 0.25861
Name: Fail, dtype: float64
###Markdown
Part 2: Modeling> **Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.>> Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**
###Code
# <editor-fold desc="validation score">
#ct = ColumnTransformer(transformers=transformers)
#X_train = ct.fit_transform(X_train)
pipeline = make_pipeline(
ce.OneHotEncoder(),
SimpleImputer(missing_values = np.nan, strategy='median'),
#CategoricalImputer(missing_values="NaN"),
XGBClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
scores = cross_val_score(pipeline, X_train, y_train,cv=5,scoring='roc_auc', n_jobs=-1)
# features actually used: Facility Type, Risk, Inspection Type, Latitude, Longitude (the date, violations and ID columns were dropped in wrangle)
print('Validation score score with XGBclassifier', scores)
# </editor-fold>
#predict_proba = pipeline.predict(X_validate)
y_pred_proba = cross_val_predict(pipeline, X_train, y_train,cv=5, n_jobs=-1, method='predict_proba')
#fpr,tpr,thresholds = roc_curve(y_validate, y_pred_proba)
#plt.plot(fpr,tpr)
#plt.title('ROC curve')
#plt.xlabel('False Positive Rate')
#plt.ylabel('True pos curve')
print('Area under the receiver operating characteristic curve:',
      roc_auc_score(y_train, y_pred_proba[:, 1]))
processor = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median')
)
X_train_processed = processor.fit_transform(X_train)
X_val_processed = processor.transform(X_validate)
eval_set = [(X_train_processed, y_train),
(X_val_processed, y_validate)]
model = XGBClassifier(n_estimators=1000, n_jobs=-1)
model.fit(X_train_processed, y_train, eval_set=eval_set, eval_metric='auc',
early_stopping_rounds=10)
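# Added aside: where early stopping settled (these attributes are populated
# because early_stopping_rounds was passed to fit above).
print('best iteration:', model.best_iteration, '| best validation AUC:', model.best_score)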
from sklearn.metrics import roc_auc_score
X_test_processed = processor.transform(X_Test)
y_pred_proba = model.predict_proba(X_test_processed)[:, 1]
print('Test ROC AUC:', roc_auc_score(y_test, y_pred_proba))
###Output
_____no_output_____
###Markdown
Part 3: Visualization> Make visualizations for model interpretation. (You may use any libraries.) Choose two of these types:>> - Permutation Importances> - Partial Dependence Plot, 1 feature isolation> - Partial Dependence Plot, 2 features interaction> - Shapley Values
###Code
## XGBoost, LightGBM, CatBoost
import shap
row = X_Test.iloc[[3094]]
# model = XGBClassifier() was already fitted above with early stopping
explainer = shap.TreeExplainer(model)
# use the fitted processor to turn the 1-row DataFrame into the encoded numpy array
row_processed = processor.transform(row)
# compute the SHAP values for this single processed row to drive the visualization
shap_values=explainer.shap_values(row_processed)
shap.initjs()
shap.force_plot(
base_value=explainer.expected_value,
shap_values=shap_values,
features=row
)
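# Hedged sketch (added): permutation importances, one of the other visualization
# types listed in Part 3. It reuses the already-fitted XGB `model` and the
# ordinal-encoded validation split; eli5 was installed at the top of the notebook.
from eli5.sklearn import PermutationImportance
perm = PermutationImportance(model, scoring='roc_auc', random_state=42).fit(
    X_val_processed, y_validate)
for name, imp in sorted(zip(X_validate.columns, perm.feature_importances_),
                        key=lambda t: -t[1]):
    print(round(imp, 4), name)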
from pdpbox.pdp import pdp_interact, pdp_interact_plot
features = ['Facility Type', 'Risk', 'Inspection Type', 'Latitude', 'Longitude']
interact = pdp_interact(
model = pipeline,
dataset=X_validate,
model_features = X_validate.columns,
features=features
)
X_train.columns
pdp_interact_plot(interact, plot_type='grid', feature_names=features);  # note: pdp_interact above needs a fitted estimator and exactly two features to run
###Output
_____no_output_____
###Markdown
asdf seperator
###Code
import category_encoders as ce
import seaborn as sns
from xgboost import XGBClassifier
#use ordinal
encoder = ce.OrdinalEncoder()
X_encoded = encoder.fit_transform(X_train)
model = XGBClassifier(n_estimators=100, random_state=42, n_jobs =-1)
model.fit(X_encoded,y_train)
X_encoded.columns
#use pdpbox
%matplotlib inline
import matplotlib.pyplot as plt
from pdpbox import pdp
features_five = ['Facility Type', 'Risk', 'Inspection Type', 'Latitude', 'Longitude']
featureRisk = 'Risk'
pdp_dist = pdp.pdp_isolate(model=model, dataset=X_encoded, model_features=features_five, feature=featureRisk)
pdp.pdp_plot(pdp_dist,featureRisk);
#look at the encoder's mappings
encoder.mapping
pdp.pdp_plot(pdp_dist, featureRisk)
#manuall change xtick
#plt.xticks([1,2], ['','']);
featureRisk = 'Risk'
for item in encoder.mapping:
if item['col']==featureRisk:
feature_mapping = item['mapping']
feature_mapping = feature_mapping[feature_mapping.index.dropna()]
category_names = feature_mapping.index.tolist()
        category_codes = feature_mapping.values.tolist()
features = ['Risk', 'Inspection Type']
interaction=pdp.pdp_interact(
model=model,
dataset=X_encoded,
model_features=X_encoded.columns,
features=features
)
pdp_interact_plot(interaction,plot_type='grid',feature_names=features);
pdp=interaction.pdp.pivot_table(
values = 'preds',
columns = features[0], #x axis 1st feature
index = features[1], # y axis next feautre
)[::-1]
pdp = pdp.rename(columns=dict(zip(category_codes, category_names)))
plt.figure(figsize=(10,8))
sns.heatmap(pdp, annot=True, fmt='.2f', cmap='viridis')
plt.title('Partial dependence of inspection failure on Risk & Inspection Type')
###Output
_____no_output_____ |
scikit-examples/random-forest.ipynb | ###Markdown
Random forest. Reference: https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn import tree, datasets
%matplotlib inline
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
print(dir(tree.DecisionTreeClassifier))
tree.DecisionTreeClassifier()
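# Added sketch: the notebook is titled "random forest" but only shows a decision
# tree, so here is a minimal RandomForestClassifier fit on the same iris features.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=0)
rf.fit(X, y)
print('training accuracy:', rf.score(X, y))  # training data only, as a quick sanity check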
###Output
_____no_output_____ |
Salem_Witchcraft/DA_SalemWitchcraft.ipynb | ###Markdown
Pandas Data Analysis in Salem Witchcraft
###Code
# Library
import pandas as pd
# Read CSV file
accused = pd.read_csv('Accused-Witches-Data-Set.csv')
anti = pd.read_csv('Anti-Parris-Social-Data-Set.csv')
committee = pd.read_csv('Committee-List-Data-Set.csv')
committee_yearly = pd.read_csv('Committee-Yearly-Data-Set.csv')
pro = pd.read_csv('Pro-Parris-Social-Data-Set.csv')
salem = pd.read_csv('Salem-Village-Data-Set.csv')
tax = pd.read_csv('Tax-Comparison-Data-Set.csv')
towns = pd.read_csv('Towns-Data-Set.csv')
###Output
_____no_output_____
###Markdown
Accused Witches Data Set
###Code
# Read first 5 rows
accused.head()
# Read last 5 rows
accused.tail()
# Rows, Columns
accused.shape
# Summary Statistics
accused.describe()
# Names of the columns in dataset
accused.columns
print('Number of accused witches by their residence:')
accused[' Residence '].value_counts()
# Reset Index to count total towns
villages = accused[' Residence '].value_counts()
villages = villages.to_frame("Places Accused").reset_index()
villages
# Total towns
# Count total towns
villages.index
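# Added (hedged) visual aid: bar chart of the ten residences with the most accused,
# assuming matplotlib is available in this environment.
import matplotlib.pyplot as plt
villages.head(10).plot(x='index', y='Places Accused', kind='bar', legend=False)
plt.ylabel('Number of accused')
plt.show()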
###Output
_____no_output_____
###Markdown
The town of Andover has the most accusations, while the last seven residences have the fewest. Salem Town and Salem Village have the next-highest counts.
###Code
print('Number of accused by month of execution:')
accused['Month of Execution'].value_counts()
print('Number of accused by month:')
accused['Month of Accusation'].value_counts()
# This is the order of Months of Accusation.
# -1 indicates that the actual month of accusation is not known
accused.groupby(["Month of Accusation"])["Month of Accusation"].count()
# This is the order of Months of Execution.
accused.groupby(["Month of Execution"])["Month of Execution"].count()
###Output
_____no_output_____
###Markdown
Month 9 has the most executions and month 6 the fewest. Month 5 has the most accusations, while month 10 has the fewest. Anti-Parris Social Data Set
###Code
# Read the first 5 rows
anti.head()
# Read the last 5 rows
anti.tail()
# Rows, Columns
anti.shape
# Counts for each identification category in total
anti['Identification'].value_counts()
# Counts for each sex in total
anti['Sex'].value_counts()
# Count of males in each identification category
print('Number of identifications for male:')
anti['Identification'][anti['Sex']=='M'].value_counts()
# Count of females in each identification category
print('Number of identifications for female:')
anti['Identification'][anti['Sex']=='F'].value_counts()
###Output
Number of identifications for female:
###Markdown
There are more males than females among the Anti-Parris signers; many of them are young men, around 16 years old in this dataset, and all of the males are Householders. Most Householders are Anti-Parris, while Free-Holders are the least represented, and most of the females are Non-Members or Church Members. Committee List Data Set
###Code
# First 5 rows
committee.head()
# Last 5 rows
committee.tail()
# Rows, Columns
committee.shape
# Counts each total petition
committee['Petition'].value_counts()
# Counts each total Social
committee['Social'].value_counts()
# Number by each years that are petition
Years = ['1685','1686','1687','1688','1690','1691','1692','1693','1694','1695','1696','1697','1698']
for y in Years:
print(committee[y].groupby(committee['Petition']).count())
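# Added (hedged) alternative: the same information as the loop above, condensed
# into one table with years as columns and petition categories as rows.
print(pd.concat({y: committee.groupby('Petition')[y].count() for y in Years}, axis=1))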
###Output
Petition
Anti-P 1
NoS 1
Pro-P 3
Name: 1685, dtype: int64
Petition
Anti-P 1
NoS 1
Pro-P 3
Name: 1686, dtype: int64
Petition
Anti-P 2
NoS 0
Pro-P 3
Name: 1687, dtype: int64
Petition
Anti-P 2
NoS 0
Pro-P 2
Name: 1688, dtype: int64
Petition
Anti-P 2
NoS 0
Pro-P 3
Name: 1690, dtype: int64
Petition
Anti-P 5
NoS 0
Pro-P 0
Name: 1691, dtype: int64
Petition
Anti-P 4
NoS 0
Pro-P 1
Name: 1692, dtype: int64
Petition
Anti-P 4
NoS 1
Pro-P 0
Name: 1693, dtype: int64
Petition
Anti-P 0
NoS 0
Pro-P 5
Name: 1694, dtype: int64
Petition
Anti-P 0
NoS 1
Pro-P 4
Name: 1695, dtype: int64
Petition
Anti-P 0
NoS 0
Pro-P 5
Name: 1696, dtype: int64
Petition
Anti-P 2
NoS 0
Pro-P 3
Name: 1697, dtype: int64
Petition
Anti-P 2
NoS 0
Pro-P 3
Name: 1698, dtype: int64
###Markdown
Per the counts above, in 1691 all five committee members petitioned Anti-P, while in 1694 and 1696 all five petitioned Pro-P. Committee Yearly Data Set
###Code
# First 5 Rows
committee_yearly.head()
# Last 5 rows
committee_yearly.tail()
# Rows, Columns
committee_yearly.shape
# List of columns
committee_yearly.columns
# Counts for each columns for each rows
for c in committee_yearly.columns:
print(committee_yearly[c].value_counts())
###Output
Sibley, William 1
Buxton, John 1
Putnam, Lt. John [Senr] 1
Putnam, Thomas Junr 1
Walcott, Jonathan 1
Name: Committee 1685, dtype: int64
Pro-P 3
Anti-P 1
NoS 1
Name: Petition, dtype: int64
Householder 3
Church 2
Name: Social, dtype: int64
Tarbill, John 1
Flint, Thomas 1
Sibley, William 1
Putnam, Thomas Junr 1
Putnam, Lt. John 1
Name: Committee 1686, dtype: int64
Pro-P 3
NoS 1
Anti-P 1
Name: Petition.1, dtype: int64
Church 4
Householder 1
Name: Social.1, dtype: int64
Porter, Lt. Isarell 1
Flint, Ensigne Thomas 1
Tarbill, John 1
Putnam, Capt. John 1
Putnam, Thomas 1
Name: Committee 1687, dtype: int64
Pro-P 3
Anti-P 2
Name: Petition.2, dtype: int64
Church 4
Freeholder 1
Name: Social.2, dtype: int64
Andrew, Danill 1
Flintt, Ensine Thomas 1
Putnam, Capt. John 1
Hutchinson, Joseph 1
Name: Committee 1688, dtype: int64
Pro-P 2
Anti-P 2
Name: Petition.3, dtype: int64
Householder 3
Church 1
Name: Social.3, dtype: int64
Preston, Thomas 1
Putnam, Capt. John 1
Flintt, Ensign Thomas 1
Putnam, Edward 1
Rea, Joshua Senr 1
Name: Committee 1689, dtype: int64
Pro-P 3
Anti-P 2
Name: Petition.4, dtype: int64
Church 3
Householder 2
Name: Social.4, dtype: int64
Fuller, Thomas Junr 1
Tarbell, John 1
Putnam, Jonathan 1
Putnam, Lett. Natheniell 1
Holton, Joseph Jur 1
Name: Committee 1690, dtype: int64
Pro-P 3
Anti-P 2
Name: Petition.5, dtype: int64
Church 4
Householder 1
Name: Social.5, dtype: int64
Putnam, Joseph 1
Porter, Joseph 1
Nurse, Francis 1
Hutchinson, Joseph 1
Andrew, Daniell 1
Name: Committee 1691, dtype: int64
Anti-P 5
Name: Petition.6, dtype: int64
Householder 5
Name: Social.6, dtype: int64
Willikins, Thomas 1
Porter, Joseph 1
Godell, Zacheriah 1
Hutchinson, Joseph 1
Putnam, Joseph 1
Name: Committee 1692, dtype: int64
Anti-P 4
Pro-P 1
Name: Petition.7, dtype: int64
Householder 3
Church 2
Name: Social.7, dtype: int64
Holton, Joseph Jun 1
Preston, Thomas 1
Pope, Joseph 1
Tarbell, John 1
Smith, James 1
Name: Committee 1693, dtype: int64
Anti-P 4
NoS 1
Name: Petition.8, dtype: int64
Householder 4
Church 1
Name: Social.8, dtype: int64
Flint, Ensigne Thomas 1
Putnam, Liuet. Nathanill 1
Fuller, Corporall Thomas [Jr] 1
Willknes, Henry 1
Putnam, Thomas 1
Name: Committee 1694, dtype: int64
Pro-P 5
Name: Petition.9, dtype: int64
Church 4
Householder 1
Name: Social.9, dtype: int64
Putnam, Liuet Nathaniell 1
Fuller, Jacob 1
Wilknes, Henry 1
Flint, ensign Thomas 1
Putnam, Thomas 1
Name: Committee 1695, dtype: int64
Pro-P 4
NoS 1
Name: Petition.10, dtype: int64
Church 3
Householder 2
Name: Social.10, dtype: int64
Walcott, John 1
Dale, John 1
Putnam, Capt. John 1
Wilknes, Benjamin 1
Putnam, Thomas 1
Name: Committee 1696, dtype: int64
Pro-P 5
Name: Petition.11, dtype: int64
Church 3
Householder 2
Name: Social.11, dtype: int64
Putnam, Nathaniel Liut 1
Nurs, Samuell 1
Putnam, Jonathan 1
Buxton, John 1
Putnam, Thomas 1
Name: Committee 1697, dtype: int64
Pro-P 3
Anti-P 2
Name: Petition.12, dtype: int64
Church 4
Householder 1
Name: Social.12, dtype: int64
Wilkins, Henry 1
Rayment, Thomas 1
Andrew, Daniell 1
Goodell, Zach Senr 1
Putnam, Thomas 1
Name: Committee 1698, dtype: int64
Pro-P 3
Anti-P 2
Name: Petition.13, dtype: int64
Church 3
Householder 2
Name: Social.13, dtype: int64
###Markdown
In 1691, all five members petitioned Anti-P and all were Householders. In 1694, all five petitioned Pro-P, with four Church members and one Householder. In 1696, all five again petitioned Pro-P, with three Church members and two Householders. People who petitioned Pro-P were more likely to be Church members. Pro Parris Social Data Set
###Code
# First 5 Rows
pro.head()
# Last 5 rows
pro.tail()
# Count total of member in identification
print('Number of signers by identification category:')
pro['Identification'].value_counts()
# # Count total of sex
print('Number of signers by sex:')
pro['Sex'].value_counts()
# Count total of male in each group
print('Number of male in identification:')
pro['Identification'][pro['Sex']=='M'].value_counts()
# Count total of female in each group
print('Number of female in identification:')
pro['Identification'][pro['Sex']=='F'].value_counts()
###Output
Number of female in identification:
###Markdown
There are more Householders than Church Members and more males among the Pro-Parris signers; the males are mostly Householders, while the females are mostly Church Members. Salem Village DataSet
###Code
# First 5 Rows
salem.head()
# Last 5 rows
salem.tail()
# Counts total of people in each petition
print('Number of people by Petition:')
salem['Petition'].value_counts()
# Count how many people were church members (up to 1696)
print('Number of people by church membership (to 1696):')
salem['Church to 1696'].value_counts()
###Output
Number of people by church membership (to 1696):
###Markdown
Tax Comparison Data Set
###Code
# First 5 Rows
tax.head()
# Last 5 rows
tax.tail()
# The Number of total Petition
tax['Petition'].value_counts()
# Value Counts for multi columns
tax[['1681','1690','1694','1695','1697','1700']].apply(pd.Series.value_counts)
# Too many NaN, so fill it up with 0
result = tax[['1681','1690','1694','1695','1697','1700']].apply(pd.value_counts).fillna(0)
result
# Each person in what petition
print('List of Name in certain petition:')
pd.crosstab(tax['Name'], tax['Petition'])
# tax[['1681','1690','1694','1695','1697','1700']].sum(axis=1)
# This shows when each person paid their taxes and how much total taxes they paid
tax['Total'] = tax.iloc[:,2:8].sum(axis=1)
tax
# Sum of total each rows in Years
tax[['1681','1690','1694','1695','1697','1700']].sum(axis=1)
# Total of tax by each person
print('List of Total Tax Paid:')
tax.sort_values('Total', ascending=False)
# List of the top 5 person paid the most taxes in order
print('The top 5 tax payers:')
print(tax.sort_values('Total', ascending=False)[0:5])
# Average people paid in tax
tax['Total'].mean()
# Summary Statistics for Tax in Total
tax['Total'].describe()
###Output
_____no_output_____
###Markdown
Towns Data Set
###Code
# First 5 Rows
towns.head()
# Last 5 rows
towns.tail()
# Number of months in certain month
towns['Bin'].value_counts()
# List of the columns' name
towns.columns
# Number of months of accusation in each towns
for t in towns.columns:
print(towns[t].value_counts())
# Total accusation in each towns
for t in towns.columns:
print(t, towns[t].count())
# Towns ordered by their total number of accusations (descending)
for count, town in sorted(((towns[t].count(), t) for t in set(towns.columns)), reverse=True):
    print('%s (%s)' % (count, town))
###Output
45 (Andover)
23 (Salem Town)
16 (Salem Village)
13 (Bin)
9 (Gloucester)
7 (Reading)
6 (Topsfield)
6 (Haverhill)
5 (Rowley)
5 (Lynn)
4 (Ipswich)
4 (Beverly)
3 (Woburn)
3 (Boxford)
3 (Billerica)
2 (Piscataqua, Maine)
2 (Charlestown)
2 (Boston)
1 (Wells, Maine)
1 (Salisbury)
1 (Marblehead)
1 (Manchester)
1 (Malden)
1 (Chelmsford)
1 ( Amesbury )
|
Code/PredScreen/WSJ_predictive_screening.ipynb | ###Markdown
1. Preparation
###Code
# load the packages
import pandas as pd
import numpy as np
from scipy import spatial
import datetime
import beautifultools as bt
import qgrid
from pandas.core.common import flatten
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import scipy.stats
import spacy
from collections import Counter
import random
random.seed(3)
from sklearn.preprocessing import normalize
from RandomWalk import random_walk
import re
import string
import nltk
nltk.data.path.append('/home/ec2-user/SageMaker/nltk_data/')
# import the dataset
wsj = pd.read_csv('wsj_full1.csv') # wsj dataset
sp100 = pd.read_csv('..//data/LogReturnData.csv')
# select the relevant topics
tp_li = [0, 2, 3, 8, 9, 14, 16, 17, 19, 20, 21, 24]
wsj_selected = wsj[wsj['Topic_Num'].isin(tp_li)]
# only the log returns of S&P100 is selected
oex = sp100[['Date', '^OEX']]
# label the return direction: 1 means a positive log return, -1 a negative one
oex['direction'] = 1
oex.loc[oex[oex['^OEX'] < 0].index, 'direction'] = -1
# drop NaN value
oex = oex.dropna()
wsj1 = wsj_selected.copy() # make a copy of wsj_selected
# select relevant columns, polarity calculated with Mcdonald dict for future comparison
wsj1 = wsj1[['Title', 'Text', 'Date']]
# convert the date to datetime
wsj1['Date'] = wsj1['Date'].apply(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d").date())
oex['Date'] = oex['Date'].apply(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d").date())
###Output
_____no_output_____
###Markdown
2. Text Preparation
###Code
# load stopping words
sp = spacy.load('en_core_web_sm')
all_stopwords = sp.Defaults.stop_words # list of stop words
# remove 'up', 'down' from stop words
all_stopwords.remove('up')
all_stopwords.remove('down')
## Replace numbers with a NUM placeholder, keeping their signs
txt_new = []
reg = re.compile(r"([\\+\\-])?[0-9]+[0-9\\.]*")
for lines in wsj1["Text"].values:
txt_new.append(reg.sub(" \\1NUM", lines))
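# Added sanity check: what the substitution above does to a made-up example
# sentence (the sentence itself is invented purely for illustration).
print(reg.sub(" \\1NUM", "Shares rose 3.2% to 145.60, gaining +4 points"))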
## Define punctuation to replace (Exclude +, -, and %)
new_punct = string.punctuation + "“”’"
for symb in ["%", "+", "-", "&"]:
new_punct = new_punct.replace(symb, "")
## String list
txt_corp = []
for doc in txt_new:
## Change everything to lowercase and exclude string that are only punctuations and stop words
aux = [elem.lower() for elem in doc.split() if elem not in set(new_punct)]
nstop = [wo for wo in aux if wo not in all_stopwords]
txt_corp.append(nstop)
## Remove strings that only have punctuation signs
exclude = [""]
txt_end = []
for doc in txt_corp:
new_list = [elem.translate(str.maketrans('', '', new_punct)) for elem in doc]
txt_end.append([elem for elem in new_list if elem not in exclude])
wsj1['corpus'] = txt_end
wsj1.head()
# label article with direction of sp100
wsj1['logDate'] = wsj1['Date'].apply(lambda x: x + datetime.timedelta(days=1))
wsj1.to_csv('cleaned_corpus.csv') # save the cleaned corpus to csv file
df1 = wsj1.set_index('logDate').join(oex.set_index('Date')) # with lag
df2 = wsj1.set_index('Date').join(oex.set_index('Date')) # without lag
# remove NaN value
df1 = df1.dropna()
df2 = df2.dropna()
# reset the index
df1 = df1.reset_index()
df2 = df2.reset_index()
df1 = df1.drop('Date', 1) # drop the date column
df2 = df2.drop('logDate', 1) # drop the date column
# rename the column
df1.columns = ['date', 'Title', 'Text', 'corpus', '^OEX', 'direction']
df2.columns = ['date', 'Title', 'Text', 'corpus', '^OEX', 'direction']
df1.groupby('date')['Title'].count().describe() # number of articles everyday, index column refers to date
###Output
_____no_output_____
###Markdown
3. Predictive Screening to get the seed words 3.1 seed words with lag = 1
###Code
# split the data into training & testing dataset to avoid data learkage
train_lag = df1.groupby('date').apply(lambda x: x.sample(frac=0.1))
train_ind = [index[1] for index in train_lag.index.tolist()]
df1['data'] = 'test'
df1.loc[train_ind, 'data'] = 'train'
# create a datadframe that contains the positive/negative words
def create_df(i, train, df):
words = df[(df['direction'] == i) & (df['data'] == train)].corpus.tolist()
words = sum(words, []) # flattern list of lists
word_dict = dict(Counter(words)) # word count
count_df = pd.DataFrame.from_dict(word_dict, orient = 'index') # convert dict to df
count_df = count_df.reset_index()
count_df.columns = ['word', 'freq']
return count_df
# for training dataset
pos_word = create_df(1, 'train', df1)
neg_word = create_df(-1, 'train', df1)
neg_word.columns = ['word', 'neg_freq']
# pos_word.columns = ['word', 'neg_freq']
word = pos_word.set_index('word').join(neg_word.set_index('word')) # join pos_word, neg_word dataframe
def filter_df(df, num):
# replace NaN with 0
df = df.fillna(0)
# reset index
df = df.reset_index()
# select only the word with frequency higher than 50
df['total_freq'] = df['freq'] + df['neg_freq']
df = df[df['total_freq'] >= num]
df['pos_prob'] = df['freq']/(df['freq'] + df['neg_freq']) # prob that specific word appear in a positive article
df['neg_prob'] = 1 - df['pos_prob']
return df
df_prob = filter_df(word, 50).sort_values(by = ['pos_prob'], ascending=False)
df_prob.head()
###Output
_____no_output_____
###Markdown
Determine the threshold with binomial Confidence interval
###Code
################# to be confirmed #################
import statsmodels.stats.proportion as smp
thres = 0.56
pos = df_prob[df_prob['pos_prob'] >= thres]
count = len(pos)
num = len(df_prob)
print('confidence interval of positive seed words: ', smp.proportion_confint (count, num, alpha=0.05, method='wilson'))
print('confidence interval of negative seed words: ', smp.proportion_confint (num - count, num, alpha=0.05, method='wilson'))
################## to be confirmed ###############
df_prob['polar'] = 'positive'
df_prob.loc[df_prob[df_prob['pos_prob'] < 0.56].index, 'polar'] = 'negative'
df_prob.to_csv('seed_words_lag.csv')
df_prob.head()
###Output
_____no_output_____
###Markdown
3.2 seed words without lag
###Code
train = df2.groupby('date').apply(lambda x: x.sample(frac=0.1))
train_ind = [index[1] for index in train.index.tolist()]
df2['data'] = 'test'
df2.loc[train_ind, 'data'] = 'train'
# for training dataset
pos_word = create_df(1, 'train', df2)
neg_word = create_df(-1, 'train', df2)
neg_word.columns = ['word', 'neg_freq']
# pos_word.columns = ['word', 'neg_freq']
word = pos_word.set_index('word').join(neg_word.set_index('word')) # join pos_word, neg_word dataframe
# word
df_wolag = filter_df(word, 50).sort_values(by = ['pos_prob'], ascending=False)
df_wolag.head()
########### to be confirmed #############
import statsmodels.stats.proportion as smp
thres = 0.555
pos = df_wolag[df_wolag['pos_prob'] >= thres]
count = len(pos)
num = len(df_prob)
print('confidence interval of positive seed words: ', smp.proportion_confint (count, num, alpha=0.05, method='wilson'))
print('confidence interval of negative seed words: ', smp.proportion_confint (num - count, num, alpha=0.05, method='wilson'))
########### to be confirmed #############
df_wolag['polar'] = 'positive'
df_wolag.loc[df_wolag[df_wolag['pos_prob'] < 0.555].index, 'polar'] = 'negative'
df_wolag.to_csv('wsj_seed_word.csv')
###Output
_____no_output_____
###Markdown
4. Embedding. Two possible ways to reduce the dimension of the embeddings before sentprop: 1. PCA https://towardsdatascience.com/dimension-reduction-techniques-with-python-f36ca7009e5c 2. t-SNE https://arxiv.org/abs/1708.03629; https://github.com/vyraun/Half-Size
###Code
# import the packages
import gensim.downloader as api
import tempfile
from gensim import corpora
from gensim.test.utils import datapath
from gensim import utils
from gensim.models import Word2Vec
import string
import json
from nltk.stem import WordNetLemmatizer
# text preparation
cleaned_cors = pd.read_csv('cleaned_corpus.csv') # import the cleaned dataframe
## Replace numbers with a NUM placeholder, keeping their signs
txt_new = []
reg = re.compile(r"([\\+\\-])?[0-9]+[0-9\\.]*")
for lines in cleaned_cors["Text"].values:
txt_new.append(reg.sub(" \\1NUM", lines))
## Define punctuation to replace (Exclude +, -, and %)
new_punct = string.punctuation + "“”’"
for symb in ["%", "+", "-", "&"]:
new_punct = new_punct.replace(symb, "")
## String list
txt_corp = []
for doc in txt_new:
## Change everything to lowercase and exclude string that are only punctuations
aux = [elem.lower() for elem in doc.split() if elem not in set(new_punct)]
txt_corp.append(aux)
## Remove strings that only have punctuation signs
exclude = [""]
txt_end = []
for doc in txt_corp:
new_list = [elem.translate(str.maketrans('', '', new_punct)) for elem in doc]
txt_end.append([elem for elem in new_list if elem not in exclude])
dicts = corpora.Dictionary(txt_end)
## Define function to get embeddings to memory
def get_wv(model, dicts):
""" Get word embeddings in memory"""
w2v_embed = {}
missing = []
for val in dicts.values():
try:
it = model.wv[val]
except:
missing.append(val)
it = None
w2v_embed[val] = it
return w2v_embed, missing
print('number of unique words: ', len(dicts))
dicts.filter_extremes(no_below=20, no_above=0.8, keep_n=None, keep_tokens=None)
print('number of unique words after filtering: ', len(dicts))
###Output
number of unique words: 116106
number of unique words after filtering: 17429
###Markdown
4.1 pre-trained word embedding
###Code
path = 'GoogleNews-vectors-negative300.bin'
model = Word2Vec(txt_corp, size = 300, min_count = 25)
model.intersect_word2vec_format(path,
lockf=1.0,
binary=True)
model.train(txt_corp, total_examples=model.corpus_count, epochs=25)
w2v_embed, mis = get_wv(model, dicts)
embeds_1df = pd.DataFrame(w2v_embed)
embeds_1df.to_csv('pre_embedding.csv')
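# Hedged sketch (added): option 1 from the note above, using PCA to shrink the
# 300-d pretrained embeddings before SentProp. The choice of 50 components is an
# assumption, not something fixed by the original pipeline.
from sklearn.decomposition import PCA
emb = embeds_1df.dropna(axis=1)  # drop words that received no vector
pca = PCA(n_components=50, random_state=3)
embeds_1df_reduced = pd.DataFrame(pca.fit_transform(emb.T.values), index=emb.columns)
print('variance retained:', pca.explained_variance_ratio_.sum())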
###Output
_____no_output_____
###Markdown
4.2 Self-trained embedding
###Code
model_t = Word2Vec(txt_corp, window=5, min_count=25, workers=4, size = 50)
model_t.train(txt_corp, epochs=50, total_words = model_t.corpus_total_words,
total_examples = model_t.corpus_count)
embeds_2 = get_wv(model_t, dicts)
a, b = embeds_2
embeds_2df = pd.DataFrame(a)
# save the embedding to csv
embeds_2df.to_csv('self_embedding.csv')
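# Added sanity check (hedged): nearest neighbours of a common finance word in the
# self-trained space; 'market' is assumed to survive the min_count=25 cutoff.
print(model_t.wv.most_similar('market', topn=5))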
###Output
_____no_output_____ |
d2l/chapter_preliminaries/ndarray.ipynb | ###Markdown
Data Manipulation :label:`sec_ndarray` In order to get anything done, we need some way to store and manipulate data. Generally, there are two important things to do with data: (1) acquire it; and (2) process it once it is inside the computer. There is no point in acquiring data without some way to store it, so let us start by playing with synthetic data. We first introduce the $n$-dimensional array, also called the *tensor*. Readers who have used NumPy, the most widely used scientific computing package in Python, will find this section familiar. Whichever deep learning framework you use, its *tensor class* (`ndarray` in MXNet, `Tensor` in PyTorch and TensorFlow) is similar to NumPy's `ndarray`, with a few crucial extras: first, GPUs are well supported for accelerating computation, whereas NumPy only supports CPU computation; second, the tensor class supports automatic differentiation. These properties make the tensor class better suited to deep learning. Unless otherwise stated, "tensor" in this book refers to an instance of the tensor class. Getting Started: the goal of this section is to get you up and running with the basic numerical computing tools you will use while reading this book. Do not worry if some of the mathematical concepts or library functions are hard to follow; later chapters revisit this material through practical examples. If you already have some background and want to go deeper into the mathematics, feel free to skip this section. First, we import `torch`. Note that although it is called PyTorch, we should import `torch` rather than `pytorch`.
###Code
import torch
###Output
_____no_output_____
###Markdown
A tensor represents an array of numerical values that may have multiple dimensions. A tensor with one axis corresponds to the mathematical notion of a *vector*; with two axes, to a *matrix*; tensors with more than two axes have no special mathematical name. To start, we can use `arange` to create a row vector `x` containing the first 12 integers starting from 0. Each value in a tensor is called an *element* of the tensor; for example, the tensor `x` has 12 elements. Unless otherwise specified, a new tensor is stored in main memory and uses CPU-based computation.
###Code
x = torch.arange(12)
x
###Output
_____no_output_____
###Markdown
We can access a tensor's *shape* (its length along each axis) through its `shape` property.
###Code
x.shape
###Output
_____no_output_____
###Markdown
If we just want to know the total number of elements in a tensor, i.e. the product of all the shape entries, we can check its size. Because we are dealing with a vector here, its `shape` and its `size` give the same single number.
###Code
x.numel()
###Output
_____no_output_____
###Markdown
To change the shape of a tensor without changing the number of elements or their values, we can call the `reshape` function. For example, we can turn the tensor `x` from a row vector of shape (12,) into a matrix of shape (3, 4). The new tensor contains exactly the same values, but viewed as 3 rows and 4 columns. To emphasize: although the shape changes, the elements do not, and the size is unchanged by reshaping.
###Code
X = x.reshape(3, 4)
X
###Output
_____no_output_____
###Markdown
We do not need to specify every dimension by hand when reshaping. If the target shape is (height, width), then once the width is known the height is implied, and we need not do the division ourselves. In the example above we asked for 3 rows and 4 columns explicitly, but a tensor can also infer one dimension automatically: we place `-1` in the position to be inferred, so `x.reshape(-1, 4)` or `x.reshape(3, -1)` could replace `x.reshape(3, 4)`. Sometimes we want to initialize a matrix with all zeros, all ones, some other constant, or numbers sampled from a specific distribution. We can create a tensor of shape (2, 3, 4) with all elements set to 0 as follows:
###Code
torch.zeros((2, 3, 4))
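# Added aside: one dimension can be left as -1 and PyTorch infers it, so
# x.reshape(-1, 4) and x.reshape(3, -1) give the same 3x4 matrix as reshape(3, 4).
x.reshape(-1, 4)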
###Output
_____no_output_____
###Markdown
Similarly, we can create a tensor of shape `(2,3,4)` with all elements set to 1:
###Code
torch.ones((2, 3, 4))
###Output
_____no_output_____
###Markdown
Sometimes we want to sample the value of each element in a tensor from a specific probability distribution. For example, when constructing arrays to serve as neural network parameters, we usually initialize their values randomly. The following creates a tensor of shape (3, 4) whose elements are drawn from a standard Gaussian (normal) distribution with mean 0 and standard deviation 1.
###Code
torch.randn(3, 4)
###Output
_____no_output_____
###Markdown
We can also assign an exact value to every element by supplying a Python list (or nested lists) of numbers. Here, the outermost list corresponds to axis 0 and the inner lists to axis 1.
###Code
torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
###Output
_____no_output_____
###Markdown
Operations. This book is not about software engineering, and our interest is not limited to simply reading data from and writing data to arrays: we want to perform mathematical operations on those arrays. Some of the simplest and most useful are the *elementwise* operations, which apply a standard scalar operation to each element of an array. For functions that take two arrays as input, an elementwise operation applies a standard binary operator to each pair of corresponding elements. We can build an elementwise function from any function that maps a scalar to a scalar. In mathematical notation, a *unary* scalar operator (taking one input) is written $f: \mathbb{R} \rightarrow \mathbb{R}$, meaning it maps any real number to another real number; likewise a *binary* scalar operator is written $f: \mathbb{R}, \mathbb{R} \rightarrow \mathbb{R}$, taking two inputs and producing one output. Given any two vectors $\mathbf{u}$ and $\mathbf{v}$ of the same shape and a binary operator $f$, we can produce a vector $\mathbf{c} = F(\mathbf{u},\mathbf{v})$ by setting $c_i \gets f(u_i, v_i)$ for every $i$, where $c_i$, $u_i$ and $v_i$ are the elements of $\mathbf{c}$, $\mathbf{u}$ and $\mathbf{v}$. In this way we lift a scalar function to an elementwise vector function $F: \mathbb{R}^d, \mathbb{R}^d \rightarrow \mathbb{R}^d$. For any two tensors of the same shape, the common standard arithmetic operators (`+`, `-`, `*`, `/` and `**`) have all been lifted to elementwise operations. In the example below, we use commas to form a 5-element tuple, where each element is the result of an elementwise operation.
###Code
x = torch.tensor([1.0, 2, 4, 8])
y = torch.tensor([2, 2, 2, 2])
x + y, x - y, x * y, x / y, x ** y # **่ฟ็ฎ็ฌฆๆฏๆฑๅน่ฟ็ฎ
###Output
_____no_output_____
###Markdown
Many more operations can be applied elementwise, including unary operators such as exponentiation.
###Code
torch.exp(x)
###Output
_____no_output_____
###Markdown
Besides elementwise computation, we can also perform linear algebra operations such as vector dot products and matrix multiplication; the key pieces of linear algebra are explained (with no prior knowledge assumed) in :numref:`sec_linear-algebra`. We can also *concatenate* multiple tensors, stacking them end to end to form a larger tensor: we simply provide a list of tensors and say along which axis to concatenate. The example below shows what happens when we concatenate two matrices along the rows (axis 0, the first element of the shape) versus along the columns (axis 1, the second element of the shape). The first output tensor's axis-0 length ($6$) is the sum of the two inputs' axis-0 lengths ($3 + 3$), while the second output tensor's axis-1 length ($8$) is the sum of the two inputs' axis-1 lengths ($4 + 4$).
###Code
X = torch.arange(12, dtype=torch.float32).reshape((3,4))
Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
torch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)
###Output
_____no_output_____
###Markdown
Sometimes we want to construct a binary tensor via a *logical operator*. Take `X == Y` as an example: for each position, if `X` and `Y` are equal there, the corresponding entry of the new tensor is 1, meaning the statement `X == Y` is true at that position; otherwise it is 0.
###Code
X == Y
###Output
_____no_output_____
###Markdown
Summing all the elements of a tensor yields a tensor with a single element.
###Code
X.sum()
###Output
_____no_output_____
###Markdown
Broadcasting Mechanism :label:`subsec_broadcasting` In the section above we saw how to perform elementwise operations on two tensors of the same shape. Under certain conditions, even when the shapes differ, we can still perform elementwise operations by invoking the *broadcasting mechanism*. It works as follows: first, expand one or both arrays by copying elements appropriately so that, after the transformation, the two tensors have the same shape; second, perform the elementwise operation on the resulting arrays. In most cases we broadcast along an axis where an array initially has length 1, as in the following example:
###Code
a = torch.arange(3).reshape((3, 1))
b = torch.arange(2).reshape((1, 2))
a, b
###Output
_____no_output_____
###Markdown
Since `a` and `b` are $3\times1$ and $1\times2$ matrices respectively, their shapes do not match if we try to add them. We *broadcast* both into a larger $3\times2$ matrix: matrix `a` replicates its columns and matrix `b` replicates its rows, and then the two are added elementwise.
###Code
a + b
###Output
_____no_output_____
###Markdown
Indexing and Slicing. Just as in any other Python array, elements in a tensor can be accessed by index. As in any Python array, the first element has index 0, and ranges include the first element but stop before the last. As with standard Python lists, we can access elements relative to the end of the list using negative indices. Thus we can use `[-1]` to select the last element and `[1:3]` to select the second and third elements, as follows:
###Code
X[-1], X[1:3]
###Output
_____no_output_____
###Markdown
Besides reading, we can also write elements of a matrix by specifying indices.
###Code
X[1, 2] = 9
X
###Output
_____no_output_____
###Markdown
If we want to assign the same value to multiple elements, we simply index all of them and assign the value. For instance, `[0:2, :]` accesses the first and second rows, where ":" takes all elements along axis 1 (the columns). Although we discussed indexing for matrices, this also works for vectors and for tensors with more than two dimensions.
###Code
X[0:2, :] = 12
X
###Output
_____no_output_____
###Markdown
Saving Memory. Running operations can cause new memory to be allocated for results. For example, if we write `Y = X + Y`, we dereference the tensor that `Y` used to point to and instead point `Y` at the newly allocated memory. In the example below we demonstrate this with Python's `id()` function, which gives the exact address of the referenced object in memory. After running `Y = Y + X`, we find that `id(Y)` points to a different location: Python first evaluates `Y + X`, allocating new memory for the result, and then makes `Y` point to this new location.
###Code
before = id(Y)
Y = Y + X
id(Y) == before
###Output
_____no_output_____
###Markdown
This can be undesirable for two reasons. First, we do not want to allocate memory unnecessarily all the time: in machine learning we may have hundreds of megabytes of parameters and update all of them many times per second, so we usually want to perform these updates in place. Second, we may point to the same parameters from multiple variables; if we do not update in place, other references will still point to the old memory location, and parts of our code could inadvertently reference stale parameters. Fortunately, performing in-place operations is easy: we can assign the result of an operation to a previously allocated array with slice notation, e.g. `Y[:] = <expression>`. To illustrate, we first create a new matrix `Z` with the same shape as `Y`, using `zeros_like` to allocate a block of all-zero entries.
###Code
Z = torch.zeros_like(Y)
print('id(Z):', id(Z))
Z[:] = X + Y
print('id(Z):', id(Z))
###Output
id(Z): 139674315734208
id(Z): 139674315734208
###Markdown
If the value of `X` is not reused in subsequent computations, we can also use `X[:] = X + Y` or `X += Y` to reduce the memory overhead of the operation.
###Code
before = id(X)
X += Y
id(X) == before
###Output
_____no_output_____
###Markdown
Conversion to Other Python Objects. Converting to a NumPy tensor, or vice versa, is easy. The converted result does not share memory. This minor inconvenience is actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation while waiting to see whether Python's NumPy package might want to do something else with the same chunk of memory.
###Code
A = X.numpy()
B = torch.tensor(A)
type(A), type(B)
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can call the `item` function or Python's built-in conversion functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____ |
6 Dimensionality Reduction/Kernel PCA Logistic Regression(Who likely to buy SUV).ipynb | ###Markdown
Importing the Libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Importing the Datasets
###Code
dataset=pd.read_csv("E:\Edu\Data Science and ML\Machinelearningaz\Datasets\Part 9 - Dimensionality Reduction\Section 45 - Kernel PCA\\Social_Network_Ads.csv")
print(dataset)
dataset.shape
dataset.head()
dataset.describe()
X=dataset.iloc[:,[2,3]].values #Take the two numeric feature columns (columns 2 and 3) as the feature matrix
y=dataset.iloc[:,4].values #Take the last column (the purchase label) as the target vector
print(X)
print(y)
###Output
[[1.90e+01 1.90e+04]
[3.50e+01 2.00e+04]
[2.60e+01 4.30e+04]
[2.70e+01 5.70e+04]
[1.90e+01 7.60e+04]
[2.70e+01 5.80e+04]
[2.70e+01 8.40e+04]
[3.20e+01 1.50e+05]
[2.50e+01 3.30e+04]
[3.50e+01 6.50e+04]
[2.60e+01 8.00e+04]
[2.60e+01 5.20e+04]
[2.00e+01 8.60e+04]
[3.20e+01 1.80e+04]
[1.80e+01 8.20e+04]
[2.90e+01 8.00e+04]
[4.70e+01 2.50e+04]
[4.50e+01 2.60e+04]
[4.60e+01 2.80e+04]
[4.80e+01 2.90e+04]
[4.50e+01 2.20e+04]
[4.70e+01 4.90e+04]
[4.80e+01 4.10e+04]
[4.50e+01 2.20e+04]
[4.60e+01 2.30e+04]
[4.70e+01 2.00e+04]
[4.90e+01 2.80e+04]
[4.70e+01 3.00e+04]
[2.90e+01 4.30e+04]
[3.10e+01 1.80e+04]
[3.10e+01 7.40e+04]
[2.70e+01 1.37e+05]
[2.10e+01 1.60e+04]
[2.80e+01 4.40e+04]
[2.70e+01 9.00e+04]
[3.50e+01 2.70e+04]
[3.30e+01 2.80e+04]
[3.00e+01 4.90e+04]
[2.60e+01 7.20e+04]
[2.70e+01 3.10e+04]
[2.70e+01 1.70e+04]
[3.30e+01 5.10e+04]
[3.50e+01 1.08e+05]
[3.00e+01 1.50e+04]
[2.80e+01 8.40e+04]
[2.30e+01 2.00e+04]
[2.50e+01 7.90e+04]
[2.70e+01 5.40e+04]
[3.00e+01 1.35e+05]
[3.10e+01 8.90e+04]
[2.40e+01 3.20e+04]
[1.80e+01 4.40e+04]
[2.90e+01 8.30e+04]
[3.50e+01 2.30e+04]
[2.70e+01 5.80e+04]
[2.40e+01 5.50e+04]
[2.30e+01 4.80e+04]
[2.80e+01 7.90e+04]
[2.20e+01 1.80e+04]
[3.20e+01 1.17e+05]
[2.70e+01 2.00e+04]
[2.50e+01 8.70e+04]
[2.30e+01 6.60e+04]
[3.20e+01 1.20e+05]
[5.90e+01 8.30e+04]
[2.40e+01 5.80e+04]
[2.40e+01 1.90e+04]
[2.30e+01 8.20e+04]
[2.20e+01 6.30e+04]
[3.10e+01 6.80e+04]
[2.50e+01 8.00e+04]
[2.40e+01 2.70e+04]
[2.00e+01 2.30e+04]
[3.30e+01 1.13e+05]
[3.20e+01 1.80e+04]
[3.40e+01 1.12e+05]
[1.80e+01 5.20e+04]
[2.20e+01 2.70e+04]
[2.80e+01 8.70e+04]
[2.60e+01 1.70e+04]
[3.00e+01 8.00e+04]
[3.90e+01 4.20e+04]
[2.00e+01 4.90e+04]
[3.50e+01 8.80e+04]
[3.00e+01 6.20e+04]
[3.10e+01 1.18e+05]
[2.40e+01 5.50e+04]
[2.80e+01 8.50e+04]
[2.60e+01 8.10e+04]
[3.50e+01 5.00e+04]
[2.20e+01 8.10e+04]
[3.00e+01 1.16e+05]
[2.60e+01 1.50e+04]
[2.90e+01 2.80e+04]
[2.90e+01 8.30e+04]
[3.50e+01 4.40e+04]
[3.50e+01 2.50e+04]
[2.80e+01 1.23e+05]
[3.50e+01 7.30e+04]
[2.80e+01 3.70e+04]
[2.70e+01 8.80e+04]
[2.80e+01 5.90e+04]
[3.20e+01 8.60e+04]
[3.30e+01 1.49e+05]
[1.90e+01 2.10e+04]
[2.10e+01 7.20e+04]
[2.60e+01 3.50e+04]
[2.70e+01 8.90e+04]
[2.60e+01 8.60e+04]
[3.80e+01 8.00e+04]
[3.90e+01 7.10e+04]
[3.70e+01 7.10e+04]
[3.80e+01 6.10e+04]
[3.70e+01 5.50e+04]
[4.20e+01 8.00e+04]
[4.00e+01 5.70e+04]
[3.50e+01 7.50e+04]
[3.60e+01 5.20e+04]
[4.00e+01 5.90e+04]
[4.10e+01 5.90e+04]
[3.60e+01 7.50e+04]
[3.70e+01 7.20e+04]
[4.00e+01 7.50e+04]
[3.50e+01 5.30e+04]
[4.10e+01 5.10e+04]
[3.90e+01 6.10e+04]
[4.20e+01 6.50e+04]
[2.60e+01 3.20e+04]
[3.00e+01 1.70e+04]
[2.60e+01 8.40e+04]
[3.10e+01 5.80e+04]
[3.30e+01 3.10e+04]
[3.00e+01 8.70e+04]
[2.10e+01 6.80e+04]
[2.80e+01 5.50e+04]
[2.30e+01 6.30e+04]
[2.00e+01 8.20e+04]
[3.00e+01 1.07e+05]
[2.80e+01 5.90e+04]
[1.90e+01 2.50e+04]
[1.90e+01 8.50e+04]
[1.80e+01 6.80e+04]
[3.50e+01 5.90e+04]
[3.00e+01 8.90e+04]
[3.40e+01 2.50e+04]
[2.40e+01 8.90e+04]
[2.70e+01 9.60e+04]
[4.10e+01 3.00e+04]
[2.90e+01 6.10e+04]
[2.00e+01 7.40e+04]
[2.60e+01 1.50e+04]
[4.10e+01 4.50e+04]
[3.10e+01 7.60e+04]
[3.60e+01 5.00e+04]
[4.00e+01 4.70e+04]
[3.10e+01 1.50e+04]
[4.60e+01 5.90e+04]
[2.90e+01 7.50e+04]
[2.60e+01 3.00e+04]
[3.20e+01 1.35e+05]
[3.20e+01 1.00e+05]
[2.50e+01 9.00e+04]
[3.70e+01 3.30e+04]
[3.50e+01 3.80e+04]
[3.30e+01 6.90e+04]
[1.80e+01 8.60e+04]
[2.20e+01 5.50e+04]
[3.50e+01 7.10e+04]
[2.90e+01 1.48e+05]
[2.90e+01 4.70e+04]
[2.10e+01 8.80e+04]
[3.40e+01 1.15e+05]
[2.60e+01 1.18e+05]
[3.40e+01 4.30e+04]
[3.40e+01 7.20e+04]
[2.30e+01 2.80e+04]
[3.50e+01 4.70e+04]
[2.50e+01 2.20e+04]
[2.40e+01 2.30e+04]
[3.10e+01 3.40e+04]
[2.60e+01 1.60e+04]
[3.10e+01 7.10e+04]
[3.20e+01 1.17e+05]
[3.30e+01 4.30e+04]
[3.30e+01 6.00e+04]
[3.10e+01 6.60e+04]
[2.00e+01 8.20e+04]
[3.30e+01 4.10e+04]
[3.50e+01 7.20e+04]
[2.80e+01 3.20e+04]
[2.40e+01 8.40e+04]
 [printed output truncated: the remaining rows of the feature matrix X (Age, EstimatedSalary) and the binary label vector y (Purchased, 0/1)]
###Markdown
Splitting Dataset into TrainingSet and TestSet
###Code
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=0) # test set: 25% of the data
print(X_train)
print(X_test)
print(y_train)
print(y_test)
# goal: predict which users are going to buy the SUV
###Output
_____no_output_____
###Markdown
Feature Scaling
###Code
from sklearn.preprocessing import StandardScaler
sc_X=StandardScaler()
X_train=sc_X.fit_transform(X_train)
X_test=sc_X.transform(X_test)
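# Note: the scaler is fit on the training set only and then reused to transform the
# test set, so no information from the test data leaks into the preprocessing step.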
X_train # y_train is not scaled because it is categorical (0/1) data
###Output
_____no_output_____
###Markdown
Applying Kernel PCA
###Code
from sklearn.decomposition import KernelPCA
kpca = KernelPCA(n_components = 2,kernel='rbf')
X_train = kpca.fit_transform(X_train)
X_test = kpca.transform(X_test)
X_train
###Output
_____no_output_____
###Markdown
Fitting Logistic Regression to Training Set
###Code
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression(random_state=0)
classifier.fit(X_train,y_train)
###Output
C:\Users\Jakkani\Anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
###Markdown
Predicting The Test set Results
###Code
y_pred=classifier.predict(X_test)
print(y_pred)
###Output
[0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0
0 0 1 0 0 0 0 1 0 0 1 0 1 1 0 0 1 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0
0 0 0 0 1 1 1 1 0 0 1 0 0 1 1 0 0 1 0 0 0 0 0 1 1 1]
###Markdown
Making Confusion Matrix
###Code
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
cm
#Above 65 and 24 are the correct predictions and 6 and 4 are incorrect predictions
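# For reference: sklearn's confusion_matrix puts the true class on the rows and the
# predicted class on the columns, so for this binary problem cm = [[TN, FP], [FN, TP]]
# and the diagonal entries are the correct predictions.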
###Output
_____no_output_____
###Markdown
Visualising the Training Set results
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# red   = users who didn't buy the SUV
# green = users who bought the SUV
###Output
_____no_output_____
###Markdown
Visualising the Test set results
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
_____no_output_____ |
tl_detector_trainer.ipynb | ###Markdown
Imports
###Code
from __future__ import print_function
%matplotlib inline
import math
import os
import cv2
import numpy as np
import keras
from keras.models import Sequential, load_model, Model
from keras.layers import Cropping2D, Lambda, Input, BatchNormalization, Concatenate, concatenate
from keras.layers.core import Activation, Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.optimizers import Adam
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpim
###Output
_____no_output_____
###Markdown
Utilities
###Code
def get_samples_list(sampledir, exclude=[], d_range=[]):
samples = []
labelcount = []
for fname in os.listdir(sampledir):
if fname[-4:]=='.png' and not fname[0] in exclude:
d = int(fname.split('__')[-1].split('_')[0])
if len(d_range) > 0 and (d < d_range[0] or d > d_range[1]):
continue
label = int(fname[0])
samples.append((os.path.join(sampledir, fname), label))
while len(labelcount) < label+1:
labelcount += [0]
labelcount[label] += 1
print('label counts:')
for i in range(len(labelcount)):
print('{}: {}'.format(i, labelcount[i]))
print('total: {}'.format(len(samples)))
return samples
def augment_samples_list(samples, mult=[1, 1, 1], tx=[0, 1], ty=[0, 1]):
augs = []
for sample in samples:
x = mult[sample[1]]-1
for i in range(x):
shiftx = np.random.randint(tx[0], tx[1]+1)
shifty = np.random.randint(ty[0], ty[1]+1)
sample_aug = [sample[0], sample[1], shiftx, shifty]
augs.append(sample_aug)
return samples + augs
def filter_samples_by_distance(samples, drange=[0, 100]):
filtered_samples = []
for sample in samples:
fname = sample[0].split('/')[-1]
dee = int(fname.split('_')[2])
if drange[0] < dee < drange[1]:
filtered_samples.append(sample)
return filtered_samples
def datagen(samples, batch_size=32, n_class=4, grey=False):
n_samples = len(samples)
while True:
samples = shuffle(samples)
for offset in range(0, n_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
labels = []
for batch_sample in batch_samples:
im = mpim.imread(batch_sample[0])
if grey:
im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
# im = cv2.cvtColor(im, cv2.COLOR_RGB2LAB)
# im = im[:, :, 2]
if len(batch_sample) > 2:
tx = batch_sample[2]
ty = batch_sample[3]
nrow, ncol = im.shape[:2]
T = np.float32([[1, 0, tx], [0, 1, ty]])
im = cv2.warpAffine(im, T, (ncol, nrow))
images.append(im)
labels.append(batch_sample[1])
images = np.array(images)
if grey:
newshape = [x for x in images.shape] + [1]
images = np.reshape(images, newshape)
labels = keras.utils.to_categorical(np.array(labels), num_classes=n_class)
yield shuffle(images, labels)
def count_sample_distro(samples):
label_counts = []
for sample in samples:
label = sample[1]
while len(label_counts) < label+1:
label_counts += [0]
label_counts[label] += 1
total_count = sum(label_counts)
max_count = max(label_counts)
print('label counts, proportion, mult')
for i, label in enumerate(label_counts):
print('{} {:5.3f} {:6.2f}'.format(label, 1.0*label/total_count, 1.0*max_count/label))
print('total', total_count)
return label_counts
def get_samples_list_recursive(sample_root, exclude=[]):
fnames = os.listdir(sample_root)
samples = []
for fname in fnames:
if not (fname[0] in exclude) and fname[-4:] == '.png':
samples.append([os.path.join(sample_root, fname), int(fname[0])])
elif os.path.isdir(os.path.join(sample_root, fname)):
new_root = os.path.join(sample_root, fname)
add_samples = get_samples_list_recursive(new_root, exclude)
samples += add_samples
return samples
def test_exhaustive(samples, model='model.h5', batch_size=32, grey=False):
model = load_model(model)
confusion = np.zeros((3, 3))
N_bat = len(samples)//batch_size + 1
for offset in range(0, len(samples), batch_size):
batch_number = offset//batch_size+1
if batch_number % 10 == 0:
print('batch', batch_number, 'of', N_bat)
labels = []
imgs = []
for sample in samples[offset:offset+batch_size]:
img = mpim.imread(sample[0])
if grey:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
labels.append(sample[1])
if len(sample) > 2:
tx = sample[2]
ty = sample[3]
T = np.float32([[1, 0, tx], [0, 1, ty]])
nrow, ncol = img.shape[:2]
img = cv2.warpAffine(img, T, (ncol, nrow))
imgs.append(img)
imgs = np.array(imgs)
if grey:
newshape = [x for x in imgs.shape] + [1]
imgs = np.reshape(imgs, newshape)
preds = model.predict(imgs)
pred_labels = [int(np.argmax(pred)) for pred in preds]
for label, pred_label in zip(labels, pred_labels):
confusion[label][pred_label] += 1
return confusion
###Output
_____no_output_____
###Markdown
Sample generation test
###Code
samples = get_samples_list_recursive('samples', exclude=['3'])
# samples = augment_samples_list(samples, mult=[1, 22, 4], tx=[-50, 51])
_ = count_sample_distro(samples)
samples = filter_samples_by_distance(samples, [0, 10])
samples = augment_samples_list(samples, mult=[1, 18, 2], tx=[-50, 50], ty=[0, 1])
_ = count_sample_distro(samples)
samplegen = datagen(samples, batch_size=10, n_class=3, grey=True)
imgs, labels = next(samplegen)
grey = False
cropx = 50
cropy = 0
print(imgs[0].shape)
if imgs[0].shape[-1] < 3:
grey = True
ncols = 5
for i, img in enumerate(imgs):
if i % ncols == 0:
plt.figure(figsize=(12, 12))
plt.subplot(1, ncols, (i%ncols)+1)
img = img[cropy:-cropy-1, cropx:-cropx-1]
if grey:
plt.imshow(np.squeeze(img), cmap='gray')
else:
plt.imshow(img)
# plt.gca().set_xlim([0+50, 800-50])
# plt.gca().set_ylim([0+100, 600-100])
plt.gca().set_title(labels[i])
###Output
_____no_output_____
###Markdown
Net definitions
###Code
def net_multiscale():
input_tensor = Input(shape=(None, None, 3), name="input_tensor") # three channels, any size
# monoscale layers
conv_com1 = Conv2D(16, (3, 3), activation='relu')(input_tensor)
batnorm1 = BatchNormalization()(conv_com1)
maxpool1 = MaxPooling2D()(batnorm1)
conv_com2 = Conv2D(32, (3, 3), activation='relu')(maxpool1)
batnorm2 = BatchNormalization()(conv_com2)
conv_com3 = Conv2D(64, (3, 3), activation='relu')(batnorm2)
batnorm3 = BatchNormalization()(conv_com3)
conv_com4 = Conv2D(128, (3, 3), activation='relu')(batnorm3)
batnorm4 = BatchNormalization()(conv_com4)
maxpool2 = MaxPooling2D()(batnorm4)
dropout1 = Dropout(0.2)(maxpool2)
# multiscale layers
conv_sc3_1 = Conv2D(128, (3, 3), activation='relu')(dropout1)
conv_sc3_2 = Conv2D(256, (3, 3), activation='relu')(conv_sc3_1)
gap_sc3 = GlobalAveragePooling2D()(conv_sc3_2)
conv_sc5_1 = Conv2D(128, (5, 5), activation='relu')(dropout1)
conv_sc5_2 = Conv2D(256, (5, 5), activation='relu')(conv_sc5_1)
gap_sc5 = GlobalAveragePooling2D()(conv_sc5_2)
conv_sc7_1 = Conv2D(128, (7, 7), activation='relu')(dropout1)
conv_sc7_2 = Conv2D(256, (7, 7), activation='relu')(conv_sc7_1)
gap_sc7 = GlobalAveragePooling2D()(conv_sc7_2)
concat = concatenate([gap_sc3, gap_sc5, gap_sc7])
# dense layers
dense1 = Dense(512, activation='relu')(concat)
dropout2 = Dropout(0.2)(dense1)
    dense2 = Dense(256, activation='relu')(dropout2)  # route through the dropout layer defined above
logit = Dense(3, activation='softmax')(dense2)
model = Model(inputs=input_tensor, outputs=logit)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
netms = net_multiscale()
netms.summary()
def net_nvidia(n_class=4):
model = Sequential()
model.add(Cropping2D(((0, 0), (250, 250)), input_shape=(600, 800, 3)))
model.add(Conv2D(24, kernel_size=(5, 5), strides=(2, 2)))
model.add(Activation('relu')) # the paper doesn't mention activation function, but isn't that needed?
model.add(Conv2D(36, kernel_size=(5, 5), strides=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(48, kernel_size=(5, 5), strides=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1)))
model.add(Activation('relu'))
model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1)))
model.add(Dropout(0.30))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-6), metrics=['accuracy'])
return model
def net_simple(n_class=4):
model = Sequential()
model.add(Cropping2D((100, 250), input_shape=(600, 800, 3)))
model.add(Conv2D(8, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(16, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(32, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=1e-6), loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def net2(n_class=3):
model = Sequential()
model.add(Cropping2D((100, 250), input_shape=(600, 800, 3)))
model.add(Conv2D(16, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(256, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def netgrey(n_class=3):
model = Sequential()
model.add(Cropping2D((100, 50), input_shape=(600, 800, 1)))
model.add(MaxPooling2D(pool_size=(2, 2))) # scale by half
model.add(Lambda(lambda x: 2.0*x-1.0))
model.add(Conv2D(8, kernel_size=(3, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(16, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dropout(0.2))
# model.add(Dense(256))
# model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def netgrey2(n_class=3):
model = Sequential()
model.add(Cropping2D((0, 50), input_shape=(600, 800, 1)))
model.add(MaxPooling2D(pool_size=(2, 2))) # scale by half
model.add(Lambda(lambda x: 2.0*x-1.0))
model.add(Conv2D(8, kernel_size=(3, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(16, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dropout(0.2))
# model.add(Dense(256))
# model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def netungrey(n_class=3):
model = Sequential()
model.add(Cropping2D((0, 50), input_shape=(600, 800, 3)))
model.add(MaxPooling2D(pool_size=(2, 2))) # scale by half
model.add(Lambda(lambda x: 2.0*x-1.0))
model.add(Conv2D(8, kernel_size=(3, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(16, kernel_size=(5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dropout(0.2))
# model.add(Dense(256))
# model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def train(model, samples, batch_size=32, epochs=1, grey=False):
train_samples, valid_samples = train_test_split(samples, test_size=0.25)
train_gen = datagen(train_samples, batch_size=batch_size, n_class=3, grey=grey)
valid_gen = datagen(valid_samples, batch_size=batch_size, n_class=3, grey=grey)
train_step = math.ceil(len(train_samples)/batch_size)
valid_step = math.ceil(len(valid_samples)/batch_size)
history = model.fit_generator(train_gen, steps_per_epoch=train_step,
validation_data=valid_gen, validation_steps=valid_step,
epochs=epochs, verbose=1)
model.save('model.h5')
return history
###Output
_____no_output_____
###Markdown
Model training
###Code
samples = get_samples_list_recursive('samples', exclude=['3'])
samples = augment_samples_list(samples, mult=[1, 21, 3], tx=[-50, 50], ty=[0, 1])
_ = count_sample_distro(samples)
model = net_multiscale()
h = train(model, samples, batch_size=32, epochs=1, grey=False)
samples = get_samples_list_recursive('samples', exclude=['3'])
samples = augment_samples_list(samples, mult=[1, 21, 3], tx=[-50, 50], ty=[0, 1])
_ = count_sample_distro(samples)
model = load_model('model.h5')
# model.summary()
h = train(model, samples, batch_size=32, epochs=4, grey=True)
# Further training on only close-range data
samples = get_samples_list_recursive('samples', exclude=['3'])
samples = augment_samples_list(samples, mult=[1, 21, 3], tx=[-50, 50], ty=[0, 1])
xsamples = filter_samples_by_distance(samples, drange=[0, 15])
_ = count_sample_distro(xsamples)
model = load_model('model.h5')
h = train(model, xsamples, batch_size=32, epochs=4, grey=True)
###Output
_____no_output_____
###Markdown
Testing result
###Code
samples = get_samples_list_recursive('samples', exclude=['3'])
samples = augment_samples_list(samples, mult=[1, 10, 2], tx=[-50, 51])
_ = count_sample_distro(samples)
conf = test_exhaustive(samples, model='model.h5', batch_size=64, grey=True)
plt.imshow(conf, cmap='gray')
print(conf)
accuracy = sum([conf[i, i] for i in range(len(conf))])/sum(sum(conf))
print('accuracy', accuracy)
for i in range(len(conf)):
print('class', i)
precision = conf[i, i] / (sum(conf[:, i])) # true +ve / (true +ve + false +ve)
recall = conf[i, i] / (sum(conf[i, :])) # true +ve / (true+ve + false -ve)
print('precision', precision)
print('recall', recall)
print()
samples = get_samples_list('samples', exclude=['3'])
samples = augment_samples_list(samples, mult=[1, 150, 5])
samples_gen = datagen(samples, batch_size=32, n_class=3)
model = load_model('model_simple92.h5')
imgs, labels = next(samples_gen)
preds = model.predict(imgs, len(imgs), verbose=0)
# print(preds.shape)
# for pred, label in zip(preds, labels):
# print(np.argmax(label), np.argmax(pred), pred)
good = 0
for label, pred in zip(labels, preds):
if np.argmax(label) == np.argmax(pred):
good += 1
print('accuracy', good/len(labels))
imx = mpim.imread('samples/0__21_1579319374_884872913.png')
imx = np.expand_dims(imx, axis=0)
print(imx.shape)
p = model.predict(imx)
print(np.argmax(p[0]), p[0])
for i in range(0, len(imgs), 3):
plt.figure(figsize=(16, 16))
for j in range(3):
if i + j < len(imgs):
plt.subplot(1, 3, j+1)
plt.imshow(imgs[i+j])
real_class = np.argmax(labels[i+j])
detect_class = np.argmax(preds[i+j])
confidence = np.max(preds[i+j])
plt.gca().set_title('rel {} det {} con {:5.3f}'\
.format(real_class, detect_class, confidence))
###Output
_____no_output_____ |
_notebooks/2020-09-25-basic-object-detector.ipynb.ipynb | ###Markdown
"Object detection"> "Basic image processing and simple color based object detector"- toc: false- branch: master- badges: true- comments: true- categories: [image processing, computer vision, object detection]- image: images/some_folder/your_image.png- hide: false- search_exclude: true- metadata_key1: metadata_value1- metadata_key2: metadata_value2 In object detection, one seeks to develop algorithm that identifies a specific object in an image. Here, we'll see how to build a very simple object detector (based on color) using opencv. More sophisticated object detection algorithms are capable of identifying multiple objects in a single image. For example, one can train an object detection model to identify various types of fruits, etc. Later, we'll also see that our object detection model is not exactly perfect. Nevertheless, aim of this notebook is not to build a world-class object detector but to introduce the reader to basic computer vision and image processing. Let's start by loading some useful libraries
###Code
# A popular python library useful for working with arrays
import numpy as np
# opencv library
import cv2
# For image visualization
import matplotlib.pyplot as plt
#Plots are displayed below the code cell
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's load and inspect the dimensions of our image. Images are basically a matrix of size height\*width\*color channels.
###Code
fruits = cv2.imread('apple_banana.png') # cv2.method loads an image
fruits.shape
###Output
_____no_output_____
###Markdown
So, we can see that our image is 1216 by 752 pixels and it has 3 color channels. Next, we'll convert our image into the RGB color space. RGB is an additive color model where we can obtain other colors by linear combinations of red, green, and blue. Each of the red, green and blue light levels is encoded as a number in the range from 0 to 255, with 0 denoting zero light and 255 denoting maximum light. To obtain a matrix with values ranging from 0 to 1, we'll divide by 255.
###Code
fruits = cv2.cvtColor(fruits, cv2.COLOR_BGR2RGB) # cvtColor method to convert an image from one color space to another.
fruits = fruits / 255.0
###Output
_____no_output_____
###Markdown
Finally, let's plot our image.
###Code
plt.imshow(fruits)
###Output
_____no_output_____
###Markdown
We can see that our image contains one **red** apple and one **yellow** banana. Next, we will build a very basic object detector which can pinpoint the apple and the banana in our image based on their colors. There are more sophisticated algorithms out there for this task, but that's for some other time. We start by creating two new images of the same dimensions as our original image and fill the first one with red - to detect the apple - and the second one with yellow - to detect the banana.
###Code
apple_red = np.zeros(np.shape(fruits))
banana_yellow = np.zeros(np.shape(fruits))
apple_red[:,:,0] = 1 # set red channel to 1 - index 0 corresponds to red channel
banana_yellow[:,:,0:2] = 1 # yellow = red + green, so set the first two channels (indices 0 and 1) to 1
fig, (ax1, ax2) = plt.subplots(1,2)
ax1.imshow(apple_red)
ax2.imshow(banana_yellow)
###Output
_____no_output_____
###Markdown
Now, we will compare the pixels of our colored images with those of the fruits image. One way is to calculate the per-pixel Euclidean distance as follows:$$d_{x,y} = \sqrt{\sum_{z = 1}^{3}(R_{xyz} - F_{xyz})^2} $$where $d_{x,y}$ is the Euclidean distance between the pixel values over all 3 color channels of the two compared images $R$ and $F$. To implement this, we will first subtract the two matrices from each other, and then take the norm along the channel axis. This can be easily achieved with numpy's `linalg.norm` method (don't forget to set the axis to 2).
###Code
# Subtract matrices
diff_red = fruits - apple_red
diff_yellow = fruits - banana_yellow
# Take norm of both vectors
dist_red = np.linalg.norm(diff_red, axis=2)
dist_yellow = np.linalg.norm(diff_yellow, axis=2)
# Let's plot our matrix with values, the imshow function color-maps them.
# For apple(red) detector
plt.imshow(dist_red)
plt.colorbar()
###Output
_____no_output_____
###Markdown
One can see in the plot above that the pixels with the lowest values in the matrix are the pixels that make up the apple (see the colorbar for reference). This makes sense, as those pixels correspond to the red-most pixels in the fruits image. Let's also plot the matrix for the banana (yellow) detector.
###Code
# For banana (yellow) detector
plt.imshow(dist_yellow)
plt.colorbar()
###Output
_____no_output_____
###Markdown
Again we see that the pixels with the lowest values in the matrix are the pixels that make up the banana. Now, in order to pinpoint the apple and the banana in our fruits image, we need to find the index of the matrix element with the lowest value.
###Code
ind_red = np.argmin(dist_red)
print ("red most pixel index= ", ind_red)
ind_yellow = np.argmin(dist_yellow)
print ("yellow most pixel index = ", ind_yellow)
###Output
red most pixel index= 544887
yellow most pixel index = 225109
###Markdown
In order to mark the location of this index on our fruits image, i.e. to pinpoint our object, we need the x, y coordinates of the index. This can be done using the np.unravel_index method.
###Code
# We will get the height and width of our fruits image
image = np.shape(fruits)[0:2]
(y_red, x_red) = np.unravel_index(ind_red, image)
(y_yellow, x_yellow) = np.unravel_index(ind_yellow, image)
###Output
_____no_output_____
###Markdown
Finally, it's time to pinpoint our objects! Let's pinpoint the apple and the banana.
###Code
fig, (ax1, ax2) = plt.subplots(1,2)
# Apple
ax1.scatter(x_red, y_red, c='black', s = 100, marker = 'X')
ax1.imshow(fruits)
# Banana
ax2.scatter(x_yellow, y_yellow, c='black', s = 100, marker = 'X')
ax2.imshow(fruits)
###Output
_____no_output_____ |
1D advection diffusion - finite element Galerkin & SUPG/Code.ipynb | ###Markdown
Functions
###Code
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt

def f(x):
    return 0
def special_f(x):
return np.piecewise(x, [x >= 0 , x >= 1.5, x >=2],
[lambda x: 1 - x,lambda x: -0.5+(x-1.5),0])
### Basisfunction times source function
def Bf(x,x1,x2,f,upwards = True):
if upwards:
return (x-x1)/(x2-x1)*f(x)
else:
return -(x-x2)/(x2-x1)*f(x)
### Discretization selector: FEM == False -> central finite differences; FEM == True -> Galerkin finite elements; SUPG == True -> streamline-upwind Petrov-Galerkin (SUPG) stabilization
def solution(X,f,BC1 = 0, BC2 = 1, e = 1, u = 1, e_bar = 0, FEM = False, Bf = None, SUPG = False):
A = np.zeros((len(X)-2,len(X)-2))
f_vec = np.zeros((len(X)-2))
h = (X[-1]-X[0])/(len(X)-1)
##################
if not SUPG:
if not FEM:
e = e+ e_bar
for i in range(A.shape[0]):
A[i,i] = 2*e/h**2
if not i == 0:
A[i,i-1] = (-u/(2*h)-e/h**2)
else:
f_vec[i] += -(-u/(2*h)-e/h**2)*BC1
if not i == A.shape[0]-1:
A[i,i+1] = (u/(2*h) - e/h**2)
else:
f_vec[i] += -(u/(2*h)-e/h**2)*BC2
f_vec[i] += f(X[i+1])
else:
e = e+ e_bar
for i in range(A.shape[0]):
A[i,i] = 2*e/h
if not i == 0:
A[i,i-1] = (-u/(2)-e/h)
else:
f_vec[i] += -(-u/(2)-e/h)*BC1
if not i == A.shape[0]-1:
A[i,i+1] = (u/(2) - e/h)
else:
f_vec[i] += -(u/(2)-e/h)*BC2
f_vec[i] += integrate.quadrature(Bf,X[i],X[i+1],(X[i],X[i+1],f,True))[0]
f_vec[i] += integrate.quadrature(Bf,X[i+1],X[i+2],(X[i+1],X[i+2],f,True))[0]
else:
tau = e_bar/u**2
e= e+(u**2)*tau
for i in range(A.shape[0]):
A[i,i] = 2*e/h
if not i == 0:
A[i,i-1] = (-u/(2)-e/h)
else:
f_vec[i] += -(-u/(2)-e/h)*BC1
if not i == A.shape[0]-1:
A[i,i+1] = (u/(2) - e/h)
else:
f_vec[i] += -(u/(2)-e/h)*BC2
f_vec[i] += integrate.quadrature(Bf,X[i],X[i+1],(X[i],X[i+1],f,True))[0] + tau*u*(1/h)*integrate.quadrature(f,X[i],X[i+1])[0]
f_vec[i] += integrate.quadrature(Bf,X[i+1],X[i+2],(X[i+1],X[i+2],f,True))[0] -tau*u*(1/h)*integrate.quadrature(f,X[i+1],X[i+2])[0]
##############################################
phi = np.linalg.solve(A,f_vec)
cache = np.zeros(len(X))
cache[0] = BC1
cache[-1] = BC2
cache[1:-1] = phi
return cache
def analytic(x,e,BC1,BC2):
pe = 1/e
res = BC1 + (BC2-BC1)*(np.exp((x-1)*pe) - np.exp(-pe))/(1 - np.exp(-pe))
return res
###Output
_____no_output_____
###Markdown
Test $f(x)$
###Code
x = np.linspace(0,4,100)
y = special_f(x)
plt.plot(x,y)
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.show()
###Output
_____no_output_____
###Markdown
Numerical simulations
###Code
a = 0
b =1
e = 1
u = 1
es = [1,0.1,0.01]
n = 21
X = np.linspace(0,4,n)
phiFEM = []
phiMM = []
phiEx = []
X_ex = np.linspace(0,4,1001)
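# e_bar below is the classical "optimal" artificial (streamline) diffusion,
# e_bar = (|u| h / 2) * (coth(Pe) - 1/Pe), with mesh Peclet number Pe = |u| h / (2*eps)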
def e_bar(X,u,e):
h = X[1]- X[0]
Pe = np.abs(u)*h/(2*e)
return np.abs(u)*h/2 *(np.cosh(Pe)/np.sinh(Pe)-1/Pe)
#def e_bar(X,u,e):
# h = X[1]- X[0]
# return -e+(h*u/2)*(1+np.exp(u*h/e))/(-1+np.exp(u*h/e))
################# f = 0 ###################
for e in es:
phi = solution(X,f,BC1 = a,BC2 = b,e = e,u = u, FEM =True, Bf = Bf)
phiFEM.append(phi)
phi = solution(X,f,BC1 = a,BC2 = b,e = e,u = u, FEM =False, Bf = Bf,e_bar = e_bar(X,u,e))
phiMM.append(phi)
phi = solution(X_ex,f,BC1 = a,BC2 = b,e = e,u = u, FEM =True, Bf = Bf)
phiEx.append(phi)
counter = 0
for i,j,k,e in zip(phiFEM,phiMM,phiEx,es):
plt.plot(X,i,label = 'BG',marker = 'x',linewidth = 1)
plt.plot(X,j,label = 'SUPG',marker = '.' ,linewidth = 1)
plt.plot(X_ex,k,label = '"Exact"', linestyle = '--',linewidth = 1)
counter += 1
plt.xlabel('$x$')
plt.ylabel('$\phi$')
plt.legend(fontsize = 9)
plt.title('$\phi(0) = ' + str(a) + '$, $ \phi(4) = ' + str(b) + '$, $u=' + str(u) + ' $, $n = '+str(n-1)+'$, $\epsilon = ' + str(e) + '$')
plt.show()
a = 0
b =1
e = 1
u = 1
es = [1,0.1,0.01]
n = 21
X = np.linspace(0,4,n)
phiFEM = []
phiMM = []
phiEx = []
X_ex = np.linspace(0,4,1001)
################ special f ###################
for e in es:
phi = solution(X,special_f,BC1 = a,BC2 = b,e = e,u = u, FEM =True, Bf = Bf)
phiFEM.append(phi)
phi = solution(X,special_f,BC1 = a,BC2 = b,e = e,u = u, FEM =False, Bf = Bf,e_bar = e_bar(X,u,e),SUPG = True)
phiMM.append(phi)
phi = solution(X_ex,special_f,BC1 = a,BC2 = b,e = e,u = u, FEM =True, Bf = Bf)
phiEx.append(phi)
counter = 0
for i,j,k,e in zip(phiFEM,phiMM,phiEx,es):
plt.plot(X,i,label = 'BG',marker = 'x',linewidth = 1)
plt.plot(X,j,label = 'SUPG',marker = '.' ,linewidth = 1)
plt.plot(X_ex,k,label = '"Exact"', linestyle = '--',linewidth = 1)
counter += 1
plt.xlabel('$x$')
plt.ylabel('$\phi$')
plt.legend(fontsize = 9)
plt.title('$\phi(0) = ' + str(a) + '$, $ \phi(4) = ' + str(b) + '$, $u=' + str(u) + ' $, $n = '+str(n-1)+'$, $\epsilon = ' + str(e) + '$')
plt.show()
###Output
/home/toby/anaconda3/lib/python3.7/site-packages/scipy/integrate/_quadrature.py:247: AccuracyWarning: maxiter (50) exceeded. Latest difference = 4.970318e-06
AccuracyWarning)
/home/toby/anaconda3/lib/python3.7/site-packages/scipy/integrate/_quadrature.py:247: AccuracyWarning: maxiter (50) exceeded. Latest difference = 9.940635e-06
AccuracyWarning)
|
UnivariateLinearRegression/MyFirstUnivariateLinearRegression/UnivariateLinearRegression.ipynb | ###Markdown
My First Univariate Linear Regression. Dataset from: https://www.kaggle.com/andonians/random-linear-regression. Notebook by: [iArunava](https://github.com/iArunava). Introduction: This project is my first univariate linear regression model and, in general, my first project :) The dataset was chosen because it is well suited to a univariate linear regression model. That is all; it's quite a simple project. So, let's get started!! Libraries required: 1) _Numpy_: NumPy is the fundamental package for scientific computing with Python. 2) _Pandas_: Pandas provides easy-to-use data structures and data analysis tools for the Python programming language. 3) _Matplotlib_: Matplotlib is a Python 2D plotting library which produces publication-quality figures in a variety of hardcopy formats and interactive environments across platforms. Goal of the project: Create a univariate linear regression model **without the use of any readily available libraries**, to get a better understanding of how the algorithm works under the hood. Step 1: Get the dataset. It is important to note that the dataset is really small, and it was confirmed that there are no missing values, so no data cleaning is done and the data is considered good for the goal of this project. **Note**: The division of the data into training and test sets has already been done by the creator of the dataset.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Getting the training and test data
df_test = pd.read_csv('./DataSet/test.csv')
df_train = pd.read_csv('./DataSet/train.csv')
# Separating the training and test datasets into 'X' and 'Y' (i.e the features and output)
xtrain = df_train.iloc[:, 0]
ytrain = df_train.iloc[:, 1]
xtest = df_test.iloc[:, 0]
ytest = df_test.iloc[:, 1]
# Scaling the features by their range (note: the minimum is not subtracted, so this is range scaling rather than full min-max normalization)
xtrain /= (xtrain.max() - xtrain.min())
ytrain /= (ytrain.max() - ytrain.min())
# Adding the bias unit to the xtrain and xtest
xtrain = pd.concat([pd.Series(np.ones(len(xtrain))), xtrain], axis=1)
xtest = pd.concat([pd.Series(np.ones(len(xtest))), xtest], axis=1)
# Getting just the features without the bias unit
xtr = xtrain.iloc[:, 1]
xte = xtest.iloc[:, 1]
###Output
_____no_output_____
###Markdown
Step 2: So, now we have features 'X' and response 'Y' for the training data and the test data. Next, let's randomly choose a hypothesis and train it! **Exciting!!**
###Code
hypo = np.random.randn(2, 1)
###Output
_____no_output_____
###Markdown
Before starting to train the model, let's see what the initial cost is for the randomly chosen hypothesis. **Note:** We are going to calculate the SSE (Sum of Squared Errors) cost for the randomly chosen hypothesis, and report an accuracy percentage later on the test data. Let's have a look at the formula used for calculating the cost function.
###Code
%%latex
\begin{align}
J(\Theta) = \frac{1}{2 \times m} \times \sum_{i=1}^m ( h_{\Theta}(x^{(i)}) - y^{(i)})^{2}
\end{align}
# Cost Function
def get_hypo(x, hypo):
return x.dot(hypo).iloc[:, 0]
def cost_function(x, y, hypo):
cost = (((get_hypo(x, hypo) - y)**2).sum()) * (1/(2*len(x)))
return cost
def plot_fit_line():
f = lambda a : a.dot(hypo)
plt.scatter(xtr, ytrain)
plt.plot(xtr, f(xtrain), c='orange')
plt.show()
print ('Initial Cost: ' + str(cost_function(xtrain, ytrain, hypo)))
plot_fit_line()
###Output
Initial Cost: 0.04218860270292831
###Markdown
So, that is how it looks initially. Now, since this is practice work, I will try different learning rates to see how things work out. All righty then, let's get started by declaring all the essential functions that will be required. For optimizing the model we will be using gradient descent. Let's have a look at the formula used for gradient descent.
###Code
%%latex
\begin{align}
\Theta_{j} = \Theta_{j} - \alpha \frac{1}{m}\sum_{i=1}^m ( h_{\Theta}(x^{(i)}) - y^{(i)} ) x_{j}^{(i)}
\end{align}
###Output
_____no_output_____
###Markdown
This update is to be performed simultaneously for all `j`, where `j` indexes a parameter. So, let's code it up!
###Code
# Derivative of the cost function
def f_cost_func(x, y, hypo, j, ALPHA):
return (((get_hypo(x, hypo) - y) * x.iloc[:, j]).sum()) * (1.0/len(x)) * ALPHA
# Gradient Descent
def gradient_descent(iter_num, alpha, reset_hypo=False):
'''
Passing just `iter_num` rest is read from the global variables
'''
    if reset_hypo:  # If `reset_hypo` is True, the hypothesis is re-initialized to a random vector
hypo = np.random.randn(2, 1)
ALPHA = alpha # Learning Rate
for j in range(iter_num):
        t_hypo = hypo.copy()  # snapshot so that all parameters are updated from the same (old) values
        for i in range(len(hypo)):
            hypo[i] -= f_cost_func(xtrain, ytrain, t_hypo, i, ALPHA)
plt.plot(j, cost_function(xtrain, ytrain, hypo), 'bo')
plt.show()
return hypo
###Output
_____no_output_____
###Markdown
Now, as I said, I will be testing with different learning rates. So, below I do four tests.**Note:** I have a `reset_hypo` parameter in `gradient_descent()` which, if true, resets the model. You can use that and play around with different learning rates.
###Code
# Testing with Learning Rate = 0.01
hypo = gradient_descent(500, 0.01, True)
plot_fit_line()
print ('Initial Cost: ' + str(cost_function(xtrain, ytrain, hypo)))
# Testing with Learning Rate = 0.1
hypo = gradient_descent(500, 0.1, True)
plot_fit_line()
print ('Initial Cost: ' + str(cost_function(xtrain, ytrain, hypo)))
# Testing with Learning Rate = 0.5
hypo = gradient_descent(500, 0.5, True)
plot_fit_line()
print ('Initial Cost: ' + str(cost_function(xtrain, ytrain, hypo)))
# Testing with Learning Rate = 4
hypo = gradient_descent(500, 4, True)
plot_fit_line()
print ('Initial Cost: ' + str(cost_function(xtrain, ytrain, hypo)))
###Output
_____no_output_____
###Markdown
There you go!!! Model trained!!! **Things to Notice:** _**1)**_ A larger learning rate causes faster convergence. _**2)**_ A rate that is too large causes one to overshoot the minimum; as can be seen from the example above that uses a learning rate of `4`, the cost increases and decreases alternately. You can try much larger rates and see that the cost increases every iteration. _**3)**_ Smaller learning rates, on the other hand, converge fine if a suitable number of iterations is used, but they take time. _**4)**_ With too small a learning rate, one might never reach the global optimum (well, one would for sure, with many more iterations). Finally, let's train the model with learning rate = `0.5`
###Code
hypo = gradient_descent(500, 0.5, True)
print ('\nHypothesis: \n' + str(hypo))
###Output
_____no_output_____
###Markdown
Step 3: With this, let's see the accuracy of our model on the test data.
###Code
# Accuarcy %
print ('Accuracy : ' + str(100 - (((ytest - xtest.dot(hypo).iloc[:, 0]) / ytest) *100).mean()) + '%')
###Output
Accuracy : 89.22284928662384%
|
joins_sql.ipynb | ###Markdown
Joins using SQL
###Code
%load_ext sql
%%sql sqlite://
CREATE TABLE department
(
DepartmentID INT Primary key,
DepartmentName VARCHAR(20)
);
CREATE TABLE employee
(
LastName VARCHAR(20),
DepartmentID INT references department(DepartmentID)
);
INSERT INTO department VALUES(31, 'Sales');
INSERT INTO department VALUES(33, 'Engineering');
INSERT INTO department VALUES(34, 'Clerical');
INSERT INTO department VALUES(35, 'Marketing');
INSERT INTO employee VALUES('Rafferty', 31);
INSERT INTO employee VALUES('Jones', 33);
INSERT INTO employee VALUES('Heisenberg', 33);
INSERT INTO employee VALUES('Robinson', 34);
INSERT INTO employee VALUES('Smith', 34);
INSERT INTO employee VALUES('Williams', NULL);
%%sql
select *
from department
%%sql
select *
from employee
###Output
* sqlite://
Done.
###Markdown
Left outer join
###Code
%%sql
SELECT *
FROM employee
LEFT OUTER JOIN department ON employee.DepartmentID = department.DepartmentID;
###Output
* sqlite://
Done.
|
Supervised_Learning/Classification/Classifier Performance Measures/Recall & Sensitivity Score.ipynb | ###Markdown
Recall / Sensitivity Recall is simply defined as the number of true positives divided by the number of true positives plus the number of false negatives. Recall can be thought of as a model's ability to find all the data points of interest in a dataset. Implementation of Recall
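As a quick sanity check, here is a minimal sketch (with made-up counts, not taken from the model below) showing how recall follows directly from the true-positive and false-negative counts:

```python
# Hypothetical counts, purely for illustration
tp = 90   # true positives: actual positives the model identified
fn = 10   # false negatives: actual positives the model missed
recall_manual = tp / (tp + fn)
print(recall_manual)  # 0.9 -> the model finds 90% of the actual positives
```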
###Code
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
#get the data
data = load_breast_cancer()
x = data.data
y = data.target
#split the data
x_train, x_test, y_train, y_test = train_test_split(x, y)
model = LogisticRegression(max_iter=2000)
model.fit(x_train, y_train)
prediction = model.predict(x_test)
print(prediction)
#importing recall
from sklearn.metrics import recall_score
#getting our recall
recall = recall_score(y_test, prediction)  # recall_score expects (y_true, y_pred)
print("The Model's recall is:")
print(recall)
###Output
The Model's recall is:
0.9545454545454546
|
lab1/lab1_davide.ipynb | ###Markdown
Parts of this assignment will be **automatically graded**. Please take note of the following:- Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).- You can add additional cells, but it is not recommended to (re)move cells. Cells required for autograding cannot be moved and cells containing tests cannot be edited.- You are allowed to use a service such as [Google Colaboratory](https://colab.research.google.com/) to work together. However, you **cannot** hand in the notebook that was hosted on Google Colaboratory, but you need to copy your answers into the original notebook and verify that it runs succesfully offline. This is because Google Colaboratory destroys the metadata required for grading.- Name your notebook **exactly** `{TA_name}_{student1_id}_{student2_id}_lab{i}.ipynb`, for example `wouter_12345_67890_lab1.ipynb` (or elise or stephan, depending on your TA), **otherwise your submission will be skipped by our regex and you will get 0 points** (but no penalty as we cannot parse your student ids ;)).Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your names below:
###Code
NAMES = "Davide Belli, Gabriele Cesa"
###Output
_____no_output_____
###Markdown
---
###Code
import numpy as np
import matplotlib.pyplot as plt
import sys
from tqdm import tqdm as _tqdm
def tqdm(*args, **kwargs):
return _tqdm(*args, **kwargs, mininterval=1) # Safety, do not overflow buffer
%matplotlib inline
assert sys.version_info[:3] >= (3, 6, 0), "Make sure you have Python 3.6 installed!"
###Output
_____no_output_____
###Markdown
--- 1. Policy Evaluation (1 point) In this exercise we will evaluate a policy, i.e. find the value function for a policy. The problem we consider is the gridworld from Example 4.1 in the book. The environment is implemented as `GridworldEnv`, which is a subclass of the `Env` class from [OpenAI Gym](https://github.com/openai/gym). This means that we can interact with the environment, and we can look at the documentation to see how.
###Code
from gridworld import GridworldEnv
env = GridworldEnv()
# Lets see what this is
?env
# To have a quick look into the code
??env
###Output
_____no_output_____
###Markdown
Now we want to evaluate a policy by using Dynamic Programming. For more information, see the [Intro to RL](https://drive.google.com/open?id=1opPSz5AZ_kVa1uWOdOiveNiBFiEOHjkG) book, section 4.1. This algorithm requires knowledge of the problem dynamics in the form of the transition probabilities $p(s',r|s,a)$. In general these are not available, but for our gridworld we know the dynamics and these can be accessed as `env.P`.
###Code
# Take a moment to figure out what P represents.
# Note that this is a deterministic environment.
# What would a stochastic environment look like?
env.P
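# For reference: env.P[s][a] is a list of (prob, next_state, reward, done) tuples.
# In this deterministic gridworld each list holds a single tuple with prob == 1.0;
# in a stochastic environment one (s, a) pair would map to several tuples whose
# probabilities sum to 1, e.g. (illustrative values only):
#   P[s][a] = [(0.8, s_intended, -1.0, False), (0.1, s_left, -1.0, False), (0.1, s_right, -1.0, False)]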
def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):
"""
Evaluate a policy given an environment and a full description of the environment's dynamics.
Args:
policy: [S, A] shaped matrix representing the policy.
env: OpenAI env. env.P represents the transition probabilities of the environment.
env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).
env.nS is a number of states in the environment.
env.nA is a number of actions in the environment.
theta: We stop evaluation once our value function change is less than theta for all states.
discount_factor: Gamma discount factor.
Returns:
Vector of length env.nS representing the value function.
"""
# Start with a random (all 0) value function
V = np.zeros(env.nS)
thetas = np.ones(env.nS) * theta
diff_V = thetas * 2
while sum(diff_V > thetas):
V_new = np.zeros(env.nS)
for s in range(env.nS):
for a in range(env.nA):
value_s_a = 0
for (p, s_prime, r, _) in env.P[s][a]:
value_s_a += p * (r + discount_factor * V[s_prime])
V_new[s] += policy[s][a] * value_s_a
diff_V = abs(V_new - V)
V = V_new
return np.array(V)
# Let's run your code, does it make sense?
random_policy = np.ones([env.nS, env.nA]) / env.nA
V = policy_eval(random_policy, env)
V
def plot_gridworld_value(V):
plt.figure()
c = plt.pcolormesh(V, cmap='gray')
plt.colorbar(c)
plt.gca().invert_yaxis() # In the array, first row = 0 is on top
# Making a plot always helps
plot_gridworld_value(V.reshape(env.shape))
# Test: When you hand in the nodebook we will check that the value function is (approximately) what we expected
# but we need to make sure it is at least of the correct shape
v = policy_eval(random_policy, env)
assert v.shape == (env.nS)
###Output
_____no_output_____
###Markdown
--- 2. Policy Iteration (2 points)Using the policy evaluation algorithm we can implement policy iteration to find a good policy for this problem. Note that we do not need to use a discount_factor for episodic tasks but make sure your implementation can handle this correctly!
###Code
def policy_improvement(env, discount_factor=1.0):
"""
Policy Improvement Algorithm. Iteratively evaluates and improves a policy
until an optimal policy is found.
Args:
env: The OpenAI envrionment.
policy_eval: Policy Evaluation function that takes 3 arguments:
policy, env, discount_factor.
discount_factor: gamma discount factor.
Returns:
A tuple (policy, V).
policy is the optimal policy, a matrix of shape [S, A] where each state s
contains a valid probability distribution over actions.
V is the value function for the optimal policy.
"""
# Start with a random policy
policy = np.ones([env.nS, env.nA]) / env.nA
while True:
V = policy_eval(policy, env, discount_factor)
policy_stable = True
for s in range(env.nS):
aargmax, amax = -1, float('-inf')
for a in range(env.nA):
val = 0
                for p, t, r, _ in env.P[s][a]:
                    val += p * (r + discount_factor * V[t])
if val > amax:
amax = val
aargmax = a
p = np.zeros(env.nA)
p[aargmax] = 1
policy_stable &= np.allclose(p, policy[s, :].reshape(-1))
policy[s, :] = p
if policy_stable:
break
return policy, V
# Let's see what it does
policy, v = policy_improvement(env)
print("Policy Probability Distribution:")
print(policy)
print("")
def print_grid_policy(policy, symbols=["^", ">", "v", "<"]):
symbols = np.array(symbols)
for row in policy:
print("".join(symbols[row]))
print("Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):")
print(np.reshape(np.argmax(policy, axis=1), env.shape))
print_grid_policy(np.reshape(np.argmax(policy, axis=1), env.shape))
print("")
print("Value Function:")
print(v)
print("")
print("Reshaped Grid Value Function:")
print(v.reshape(env.shape))
print("")
plot_gridworld_value(v.reshape(env.shape))
# This is not an empty cell. It is needed for grading.
###Output
_____no_output_____
###Markdown
--- 3. Value Iteration (3 points)Now implement the value iteration algorithm.
###Code
def value_iteration(env, theta=0.0001, discount_factor=1.0):
"""
Value Iteration Algorithm.
Args:
env: OpenAI env. env.P represents the transition probabilities of the environment.
env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).
env.nS is a number of states in the environment.
env.nA is a number of actions in the environment.
theta: We stop evaluation once our value function change is less than theta for all states.
discount_factor: Gamma discount factor.
Returns:
A tuple (policy, V) of the optimal policy and the optimal value function.
"""
V = np.zeros(env.nS)
V_actions = np.zeros(env.nS)
policy = np.zeros([env.nS, env.nA])
thetas = np.ones(env.nS) * theta
while True:
V_old = np.copy(V)
for s in range(env.nS):
a_argmax, a_max = -1, float('-inf')
for a in range(env.nA):
val = 0
for p, s_prime, r, _ in env.P[s][a]:
val += p * (r + discount_factor * V[s_prime])
if val > a_max:
a_max = val
a_argmax = a
policy[s] = np.zeros(env.nA)
V[s], policy[s, a_argmax] = a_max, 1
if not sum(abs(V - V_old) > thetas):
break
return policy, V
# Oh let's test again
# Let's see what it does
policy, v = value_iteration(env)
print("Policy Probability Distribution:")
print(policy)
print("")
print("Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):")
print(np.reshape(np.argmax(policy, axis=1), env.shape))
print_grid_policy(np.reshape(np.argmax(policy, axis=1), env.shape))
print("")
print("Value Function:")
print(v)
print("")
print("Reshaped Grid Value Function:")
print(v.reshape(env.shape))
print("")
###Output
Policy Probability Distribution:
[[1. 0. 0. 0.]
[0. 0. 0. 1.]
[0. 0. 0. 1.]
[0. 0. 1. 0.]
[1. 0. 0. 0.]
[1. 0. 0. 0.]
[1. 0. 0. 0.]
[0. 0. 1. 0.]
[1. 0. 0. 0.]
[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 0. 1. 0.]
[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 1. 0. 0.]
[1. 0. 0. 0.]]
Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):
[[0 3 3 2]
[0 0 0 2]
[0 0 1 2]
[0 1 1 0]]
^<<v
^^^v
^^>v
^>>^
Value Function:
[ 0. -1. -2. -3. -1. -2. -3. -2. -2. -3. -2. -1. -3. -2. -1. 0.]
Reshaped Grid Value Function:
[[ 0. -1. -2. -3.]
[-1. -2. -3. -2.]
[-2. -3. -2. -1.]
[-3. -2. -1. 0.]]
###Markdown
What is the difference between value iteration and policy iteration? Which algorithm is most efficient (e.g. needs to perform the least *backup* operations)? Please answer *concisely* in the cell below. The policy iteration algorithm needs to evaluate the policy at every iteration. By noticing that the evaluation steps do not always result in changes in the greedy policy, the value iteration algorithm rewrites the update rule, combining the policy improvement and truncated policy evaluation steps. In particular, instead of computing the expected value for every state according to the current policy and then updating the policy using the new value estimates (policy iteration), value iteration directly computes the value estimates as the best return obtained among all the possible action choices. As a result, value iteration is more efficient in terms of memory usage and number of updates executed. However, in this case the policy is only extracted once the value function has converged. In the case of policy iteration, on the other hand, we check at every iteration whether the policy has converged, and as a result it is oftentimes faster than value iteration. 4. Monte Carlo Prediction (7 points) What is the difference between Dynamic Programming and Monte Carlo? When would you use the one or the other algorithm? Monte-Carlo methods do not need complete knowledge of the environment but learn from a sample of interactions with the environment (experience). Indeed, while Dynamic Programming approaches try to model a complete distribution over all possible transitions, Monte-Carlo methods only aim to sample from this distribution. For this reason, it is reasonable to use Dynamic-Programming approaches when this knowledge is available and to prefer a Monte-Carlo approach when it is not. Moreover, explicitly storing the full distribution is often not computationally feasible. In these cases, using Monte-Carlo approaches is a valid alternative. For the Monte Carlo Prediction we will look at the Blackjack game (Example 5.1 from the book), for which the `BlackjackEnv` is implemented in `blackjack.py`. Note that compared to the gridworld, the state is no longer a single integer, which is why we use a dictionary to represent the value function instead of a numpy array. By using `defaultdict`, each state gets a default value of 0.
###Code
from blackjack import BlackjackEnv
env = BlackjackEnv()
###Output
_____no_output_____
###Markdown
For the Monte Carlo algorithm, we need to *interact* with the environment. This means that we start an episode by using `env.reset` and send the environment actions via `env.step` to observe the reward and next observation (state).
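For example, a minimal interaction sketch with the usual gym API (`reset`, `step`, and `action_space.sample` are assumed here; random actions are used purely for illustration) looks like this:

```python
# Roll out a single episode with random actions (illustrative sketch only)
observation = env.reset()
done = False
while not done:
    action = env.action_space.sample()               # pick an action uniformly at random
    observation, reward, done, info = env.step(action)
    print(observation, reward, done)
```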
###Code
# So let's have a look at what we can do in general with an environment...
import gym
?gym.Env
# We can also look at the documentation/implementation of a method
?env.step
??BlackjackEnv
###Output
_____no_output_____
###Markdown
A very simple policy for Blackjack is to *stick* if we have 20 or 21 points and *hit* otherwise. We want to know how good this policy is. This policy is *deterministic* and therefore a function that maps an observation to a single action. Technically, we can implement this as a dictionary or as a function, where we use the latter. To get started, let's implement this simple policy for BlackJack.
###Code
def simple_policy(observation):
"""
A policy that sticks if the player score is >= 20 and hits otherwise.
"""
return int(observation[0] < 20)
s = env.reset()
print(s)
a = simple_policy(s)
print(env.step(a))
###Output
(20, 4, False)
((20, 4, False), 1, True, {})
###Markdown
Now implement the MC prediction algorithm (either first-visit or every-visit). Hint: you can use `for i in tqdm(range(num_episodes))` to show a progress bar.
###Code
from collections import defaultdict
def mc_prediction(policy, env, num_episodes, discount_factor=1.0):
"""
Monte Carlo prediction algorithm. Calculates the value function
for a given policy using sampling.
Args:
policy: A function that maps an observation to action probabilities.
env: OpenAI gym environment.
num_episodes: Number of episodes to sample.
discount_factor: Gamma discount factor.
Returns:
A dictionary that maps from state -> value.
The state is a tuple and the value is a float.
"""
# Keeps track of sum and count of returns for each state
# to calculate an average. We could use an array to save all
# returns (like in the book) but that's memory inefficient.
returns_sum = defaultdict(float)
returns_count = defaultdict(float)
# The final value function
V = defaultdict(float)
for e in tqdm(range(num_episodes)):
observation = env.reset()
done = False
game = []
        while not done:
            a = policy(observation)
            next_observation, reward, done, info = env.step(a)
            # record the state the action was taken in, not the successor state
            game.append((observation, reward))
            observation = next_observation
G = 0
rewards_history = []
for (observation, reward) in reversed(game):
G = reward + discount_factor * G
rewards_history.append((observation, G))
already_set = set()
for (observation, total_reward) in reversed(rewards_history):
if observation not in already_set:
already_set.add(observation)
returns_sum[observation] += total_reward
returns_count[observation] += 1
for observation, sums in returns_sum.items():
V[observation] = sums / returns_count[observation]
return V
V = mc_prediction(simple_policy, env, num_episodes=1000)
print(V)
###Output
100%|โโโโโโโโโโ| 1000/1000 [00:00<00:00, 13570.29it/s]
###Markdown
Now make *4 plots* like Figure 5.1 in the book. You can either make 3D plots or heatmaps. Make sure that your results look similar to the results in the book. Give your plots appropriate titles, axis labels, etc.
###Code
%%time
# Let's run your code one time
V_10k = mc_prediction(simple_policy, env, num_episodes=10000)
V_500k = mc_prediction(simple_policy, env, num_episodes=500000)
def build_heatmap(V, usable_ace):
h = np.zeros((10, 10))
for observation, v in V.items():
ps, d, ua = observation
if ua == usable_ace and 12 <= ps <= 21:
h[d-1, ps- 12] = v
return h
cols = ['10000 episodes', '500000 episodes']
rows = ['Usable Ace', 'No-Usable Ace']
fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=(12, 8))
plt.setp(axes.flat, xlabel='Player sum', ylabel='Dealer Showing')
pad = 5 # in points
for ax, col in zip(axes[0], cols):
ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='center', va='baseline')
for ax, row in zip(axes[:,0], rows):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size='large', ha='right', va='center')
p = axes[0, 0].imshow(build_heatmap(V_10k, 1), cmap='hot', vmin=-1, vmax=1)
fig.colorbar(p, ax=axes[0,0])
p = axes[0, 1].imshow(build_heatmap(V_500k, 1), cmap='hot', vmin=-1, vmax=1)
fig.colorbar(p, ax=axes[0,1])
p = axes[1, 0].imshow(build_heatmap(V_10k, 0), cmap='hot', vmin=-1, vmax=1)
fig.colorbar(p, ax=axes[1,0])
p = axes[1, 1].imshow(build_heatmap(V_500k, 0), cmap='hot', vmin=-1, vmax=1)
fig.colorbar(p, ax=axes[1,1])
fig.tight_layout()
# tight_layout doesn't take these labels into account. We'll need
# to make some room. These numbers are are manually tweaked.
# You could automatically calculate them, but it's a pain.
fig.subplots_adjust(left=0.15, top=0.95)
plt.show()
###Output
_____no_output_____
###Markdown
5. Monte Carlo control with $\epsilon$-greedy policy (5 points) Now we have a method to evaluate state-values given a policy. Take a moment to think about whether we can use this value function to find a better policy. Assuming we do not know the dynamics of the environment, why is this not possible? (Greedy improvement over state-values requires a one-step lookahead through the transition dynamics, which we do not have.) We want a policy that selects _actions_ with maximum value, i.e. is _greedy_ with respect to the _action-value_ (or Q-value) function $Q(s,a)$. We need to keep exploring, so with probability $\epsilon$ we will take a random action. First, let's implement a function `make_epsilon_greedy_policy` that takes the Q-value function and returns an $\epsilon$-greedy policy.
###Code
def make_epsilon_greedy_policy(Q, epsilon, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
Args:
Q: A dictionary that maps from state -> action-values.
Each value is a numpy array of length nA (see below)
epsilon: The probability of selecting a random action. Float between 0 and 1.
nA: Number of actions in the environment.
Returns:
A function that takes an observation as an argument and returns
an action chosen epsilon-greedily (a uniformly random action with probability epsilon, otherwise the greedy action).
"""
def policy_fn(observation):
a = Q[observation]
if np.random.rand() < epsilon:
return np.random.randint(0, nA)
else:
return np.argmax(a)
return policy_fn
def mc_control_epsilon_greedy(env, num_episodes, discount_factor=1.0, epsilon=0.1):
"""
Monte Carlo Control using Epsilon-Greedy policies.
Finds an optimal epsilon-greedy policy.
Args:
env: OpenAI gym environment.
num_episodes: Number of episodes to sample.
discount_factor: Gamma discount factor.
epsilon: Probability of sampling a random action. Float between 0 and 1.
Returns:
A tuple (Q, policy).
Q is a dictionary mapping state -> action values.
policy is a function that takes an observation as an argument and returns
action probabilities
"""
# Again, keep track of counts for efficiency
# returns_sum, returns_count and Q are
# nested dictionaries that map state -> (action -> action-value).
# We could also use tuples (s, a) as keys in a 1d dictionary, but this
# way Q is in the format that works with make_epsilon_greedy_policy
returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))
returns_count = defaultdict(lambda: np.zeros(env.action_space.n, dtype=int))
# The final action-value function.
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# The policy we're following
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for e in tqdm(range(num_episodes)):
observation = env.reset()
done = False
game = []
while not done:
a = policy(observation)
new_observation, reward, done, info = env.step(a)
game.append((observation, a, reward))
observation = new_observation
G = 0
rewards_history = []
for (observation, a, reward) in reversed(game):
G = reward + discount_factor * G
rewards_history.append((observation, a, G))
already_set = set()
for (observation, a, total_reward) in reversed(rewards_history):
if (observation, a) not in already_set:
already_set.add((observation, a))
returns_sum[observation][a] += total_reward
returns_count[observation][a] += 1
for observation, sums in returns_sum.items():
Q[observation] = sums / returns_count[observation]
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
return Q, policy
# Test it quickly
Q, policy = mc_control_epsilon_greedy(env, num_episodes=10000, epsilon=0.1)
%%time
Q, policy = mc_control_epsilon_greedy(env, num_episodes=500000, epsilon=0.1)
###Output
0%| | 0/500000 [00:00<?, ?it/s]/home/davide/miniconda3/envs/rl2018/lib/python3.6/site-packages/ipykernel_launcher.py:65: RuntimeWarning: invalid value encountered in true_divide
100%|██████████| 500000/500000 [01:52<00:00, 4451.54it/s]
###Markdown
How can you obtain the (V-)value function from the Q-value function? Plot the (V-)value function that is the result of 500K iterations. Additionally, visualize the greedy policy similar to Figure 5.2 in the book. Use a white square for hitting, black for sticking.
###Code
V = defaultdict(float)
for s, v in Q.items():
V[s] = v.max()
P = defaultdict(int)
for s, v in Q.items():
P[s] = v.argmax()
cols = ['Policy', 'Value-Function']
rows = ['Usable Ace', 'No Usable Ace']
fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=(12, 8))
plt.setp(axes.flat, xlabel='Player sum (-12)', ylabel='Dealer Showing')
pad = 5 # in points
for ax, col in zip(axes[0], cols):
ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='center', va='baseline')
for ax, row in zip(axes[:,0], rows):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size='large', ha='right', va='center')
axes[0, 0].imshow(build_heatmap(P, 1), cmap='gray', vmin=0, vmax=1)
p = axes[0, 1].imshow(build_heatmap(V, 1), cmap='hot', vmin=-1, vmax=1)
fig.colorbar(p, ax=axes[0,1])
axes[1, 0].imshow(build_heatmap(P, 0), cmap='gray', vmin=0, vmax=1)
p = axes[1, 1].imshow(build_heatmap(V, 0), cmap='hot', vmin=-1, vmax=1)
fig.colorbar(p, ax=axes[1,1])
fig.tight_layout()
# tight_layout doesn't take these labels into account. We'll need
# to make some room. These numbers are manually tweaked.
# You could automatically calculate them, but it's a pain.
fig.subplots_adjust(left=0.15, top=0.95)
plt.show()
###Output
_____no_output_____
###Markdown
6. Temporal Difference (TD) learning (8 points)
Mention one advantage and one disadvantage of Monte Carlo methods. Mention an example where you would prefer to use TD learning.
One advantage is that MC allows for prediction/control without an explicit model of the environment (e.g. transition probabilities); instead, the agent interacts directly with the environment and learns from real episodes. A drawback of MC is that it needs a complete rollout of the episode (until the agent reaches a terminal state, or until a maximum depth is reached). As a consequence, the policy/value function is only learned/updated at the end of every episode and not at every step.
Let's consider a task modeled as an infinite (or very long) MDP. For example, we want to teach a robot how to walk. If we approach this task with MC control, our agent would only update its policy/value function at the end of the episode. Since the MDP is infinite, the only way to learn is to constrain the simulation to terminate after n steps. On the other hand, approaching this task with TD, our agent updates its policy at every timestep of the simulation, allowing for (much) faster learning of the optimal policy (thanks to more frequent updates of the value function) and without the need to manually terminate an episode at some point, possibly running just one infinite episode.
For the TD algorithms, we will skip the prediction algorithm and go straight for the control setting where we optimize the policy that we are using. In other words: implement SARSA. To keep it dynamic, we will use the windy gridworld environment (Example 6.5).
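The on-policy update implemented below is the standard SARSA rule, $Q(S_t,A_t) \leftarrow Q(S_t,A_t) + \alpha \big[ R_{t+1} + \gamma\, Q(S_{t+1},A_{t+1}) - Q(S_t,A_t) \big]$, applied after every step of an episode.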
###Code
from windy_gridworld import WindyGridworldEnv
env = WindyGridworldEnv()
def sarsa(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1, Q=None):
"""
SARSA algorithm: On-policy TD control. Finds the optimal epsilon-greedy policy.
Args:
env: OpenAI environment.
num_episodes: Number of episodes to run for.
discount_factor: Gamma discount factor.
alpha: TD learning rate.
epsilon: Probability to sample a random action. Float between 0 and 1.
Q: hot-start the algorithm with a Q value function (optional)
Returns:
A tuple (Q, stats).
Q is the optimal action-value function, a dictionary mapping state -> action values.
stats is a list of tuples giving the episode lengths and rewards.
"""
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
if Q is None:
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# Keeps track of useful statistics
stats = []
# The policy we're following
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for i_episode in tqdm(range(num_episodes)):
# print(i_episode)
i = 0
R = 0
observation = env.reset()
a = policy(observation)
done = False
while True:
# print(observation, a)
new_observation, reward, done, info = env.step(a)
if done:
break
new_a = policy(new_observation)
Q[observation][a] += alpha * (reward + discount_factor *
Q[new_observation][new_a] - Q[observation][a])
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
i += 1
R += reward
observation = new_observation
a = new_a
stats.append((i, R))
episode_lengths, episode_returns = zip(*stats)
return Q, (episode_lengths, episode_returns)
Q_sarsa, (episode_lengths_sarsa, episode_returns_sarsa) = sarsa(env, 1000)
# We will help you with plotting this time
plt.plot(episode_lengths_sarsa)
plt.title('Episode lengths SARSA')
plt.show()
plt.plot(episode_returns_sarsa)
plt.title('Episode returns SARSA')
plt.show()
###Output
100%|██████████| 1000/1000 [00:00<00:00, 3585.73it/s]
###Markdown
Since we might not be interested in falling off the cliff all the time, we can find another person to do this 'exploration' for us (in the name of science). Still, we would like to learn ourselves from this person's policy, which is where we arrive at _off-policy_ learning. In the simplest variant, we learn our own value by bootstrapping based on the action value corresponding to the best action we could take, while the exploration policy actually follows the $\epsilon$-greedy strategy. This is known as Q-learning.
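Concretely, the off-policy update implemented below is $Q(S_t,A_t) \leftarrow Q(S_t,A_t) + \alpha \big[ R_{t+1} + \gamma \max_{a} Q(S_{t+1},a) - Q(S_t,A_t) \big]$, while the behaviour policy still selects actions $\epsilon$-greedily.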
###Code
def q_learning(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1, Q=None):
"""
Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy
while following an epsilon-greedy policy
Args:
env: OpenAI environment.
num_episodes: Number of episodes to run for.
discount_factor: Gamma discount factor.
alpha: TD learning rate.
epsilon: Probability to sample a random action. Float between 0 and 1.
Q: hot-start the algorithm with a Q value function (optional)
Returns:
A tuple (Q, stats).
Q is the optimal action-value function, a dictionary mapping state -> action values.
stats is a list of tuples giving the episode lengths and rewards.
"""
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
if Q is None:
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# Keeps track of useful statistics
stats = []
# The policy we're following
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for i_episode in tqdm(range(num_episodes)):
i = 0
R = 0
observation = env.reset()
a = policy(observation)
done = False
while True:
new_observation, reward, done, info = env.step(a)
if done:
break
Q[observation][a] += alpha * (reward + discount_factor *
max(Q[new_observation]) - Q[observation][a])
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
i += 1
R += reward
observation = new_observation
a = policy(observation)
stats.append((i, R))
episode_lengths, episode_returns = zip(*stats)
return Q, (episode_lengths, episode_returns)
Q_q_learning, (episode_lengths_q_learning, episode_returns_q_learning) = q_learning(env, 1000)
# We will help you with plotting this time
plt.plot(episode_lengths_q_learning)
plt.title('Episode lengths Q-learning')
plt.show()
plt.plot(episode_returns_q_learning)
plt.title('Episode returns Q-learning')
plt.show()
###Output
100%|██████████| 1000/1000 [00:00<00:00, 3604.74it/s]
###Markdown
Now compare the episode returns while learning for Q-learning and Sarsa (maybe run some more iterations?), by plotting the returns for both algorithms in a single plot, like in the book, Example 6.6. In order to be able to compare them, you may want to zoom in on the y-axis and smooth the returns (e.g. plotting the $n$ episode average instead). Which algorithm achieves higher return during learning? How does this compare to Example 6.6 from the book? Try to explain your observations.
###Code
Q_sarsa, (episode_lengths_sarsa, episode_returns_sarsa) = sarsa(env, 10000)
Q_q_learning, (episode_lengths_q_learning, episode_returns_q_learning) = q_learning(env, 10000)
smoothed_returns_sarsa = []
smoothed_returns_q_learning = []
avg_k = 100
for i in range(int(10000 / avg_k)):
smoothed_returns_sarsa.append(sum(episode_returns_sarsa[avg_k*i:avg_k*i+avg_k])/avg_k)
smoothed_returns_q_learning.append(sum(episode_returns_q_learning[avg_k*i:avg_k*i+avg_k])/avg_k)
plt.plot(smoothed_returns_sarsa[1:], label="Sarsa")
plt.plot(smoothed_returns_q_learning[1:], label="Q-learning")
plt.title('Episode returns')
plt.legend()
plt.show()
###Output
100%|██████████| 100000/100000 [00:20<00:00, 4824.03it/s]
100%|██████████| 100000/100000 [00:21<00:00, 4757.64it/s]
###Markdown
In agreement with the results in Example 6.6 from the book, Q-learning outperforms Sarsa in terms of returns during learning. This behavior is expected and can be explained by the fact that Q-learning, with its off-policy approach, updates the estimates of the action-value pairs using the future reward $\max_a Q(S', a)$, which is the greedy policy choice for the next state $S'$. After we have learned the policy, we do not care about exploration any more and we may switch to a deterministic (greedy) policy instead. If we evaluate this for both Sarsa and Q-learning (actually, for Q-learning the learned policy is already deterministic), which policy would you expect to perform better? Why? TODO: We could take the argmax over the Sarsa policy, and it should have converged to the optimal one found by Q-learning, I believe. Please run the experiments to test your hypothesis (print or plot your results). How many runs do you need to evaluate the policy? Note: without learning, the order of the episodes is not relevant so a normal `plt.plot` may not be the most appropriate choice.
###Code
# YOUR CODE HERE
raise NotImplementedError()
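# Below is a minimal sketch (not an official solution) of one way to run this experiment.
# It assumes env, Q_sarsa and Q_q_learning from the cells above, and acts greedily
# (argmax over Q) with no exploration; a step cap guards against policies that loop.
def evaluate_greedy_policy(env, Q, n_runs=100, max_steps=1000):
    returns = []
    for _ in range(n_runs):
        observation = env.reset()
        done, R, steps = False, 0, 0
        while not done and steps < max_steps:
            a = np.argmax(Q[observation])  # deterministic greedy action
            observation, reward, done, info = env.step(a)
            R += reward
            steps += 1
        returns.append(R)
    return np.mean(returns)
# print('greedy Sarsa return:', evaluate_greedy_policy(env, Q_sarsa))
# print('greedy Q-learning return:', evaluate_greedy_policy(env, Q_q_learning))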
###Output
_____no_output_____ |
NINEDA.ipynb | ###Markdown
Getting The Geolocation Of NIN Locations In Nigeria
###Code
# Importing the necessary modules
import folium
import numpy as np
import pandas as pd
import geopandas as gpd
from geopy.geocoders import Nominatim
from folium.plugins import HeatMap, MarkerCluster
# Creating the geo-locator object
geolocator = Nominatim(user_agent="nin-eda")
# The path to the dataset
dataPath = "cleanData.xls";
# Loading the data into memory
df = pd.read_excel(dataPath);
# Removing the unnamed column
df.pop(df.columns[0]);
# Creating lists to hold the latitude, longitude, coordinate and location values
latitude = list()
longitude = list()
CoOrdinates = list()
location = list()
# Removing the first row
df = df.drop(df.index[[0, 0]])
# Loop through the CoOrdinates column and clean the data
for ordinates in df["CoOrdinates"].values:
latValue = str(ordinates.replace(' ', '').replace(')', '').replace('(', '').split(",")[0][:5])
longValue = str(ordinates.replace(' ', '').replace(')', '').replace('(', '').split(",")[1][:5])
# Append the cleaned values to the latitude and longitude lists
latitude.append(str(latValue))
longitude.append(str(longValue))
# Concat the values for latValue and longValue
ordinatesC = latValue + "," + longValue;
CoOrdinates.append(ordinatesC)
# Loop through the Location column and remove commas
for ActualLocation in df["Location"].values:
ActualLocation = str(ActualLocation.replace(',', ''))
# Append
location.append(ActualLocation)
# Creating a new column with the respective names
df["latitude"] = pd.DataFrame(latitude)
df["longitude"] = pd.DataFrame(longitude)
df["CoOrdinates"] = pd.DataFrame(CoOrdinates)
df["Location"] = pd.DataFrame(location)
# Trying to get the co-ordinates again for the correct values
location_array = df["Location"].values
latarr = []
longarr = []
# Geocode each location name to obtain its latitude and longitude
for locations in location_array:
co_ordinates = geolocator.geocode(locations)
lat = co_ordinates.latitude;
long = co_ordinates.longitude;
# Collect the geocoded coordinates
latarr.append(lat)
longarr.append(long)
df["latitude"] = pd.DataFrame(latarr)
df["longitude"] = pd.DataFrame(longarr)
# Checking for null values, and if any drop the row with NaN values
df.isnull().any()
df = df.dropna()
# Drop duplicates values found in the latitude and longitude columns
df = df.drop_duplicates(subset=['latitude', 'longitude'])
# Viewing the head of the dataframe
df.head()
# Describing the dataframe after cleaning
df.describe()
# Showing a brief description of the latitude column
df["latitude"].describe()
# Displaying a brief description of the longitude column
df["longitude"].describe()
# Getting the columns for the loaded dataframe
columns = df.columns;
# Displaying the shape of the loaded dataframe
print(f"Data shape: {df.shape}")
print(columns)
# Displaying the description of the head office
df["Head Office"].describe()
# Brief description for the location address
df["Location"].describe()
###Output
_____no_output_____
###Markdown
Geospatial Analysis
###Code
# Getting the country location
location = "Nigeria"
# Extract the co-ordinates for nigeria
co_ordinates = geolocator.geocode(location)
# Getting the longitude and latitude for Nigeria
lat = co_ordinates.latitude;
long = co_ordinates.longitude;
# Displaying the co-ordinates
co_ordinates.point
# mainMap = folium.Map([9.0643, 7.4892974], titles='Stamen Toner', zoom_start=5)
# folium.Marker([lat, long],
# radius=6,
# ).add_to(mainMap)
# mainMap
# Creating a map for locating NIN locations
mainMap = folium.Map([lat, long], titles='Stamen Toner', zoom_start=5)
# Add a marker for every NIN enrolment centre in the dataframe
for index, row in df.iterrows():
folium.Marker([row['latitude'], row['longitude']],
radius=6,
popup=row['Head Office'],
fill_color="red",
).add_to(mainMap)
# Showing the locations
mainMap
df.to_csv("ORIGINAL.csv")
df.to_excel("ORIGINAL.xls")
###Output
_____no_output_____ |
05-problem-begin.ipynb | ###Markdown
Temperature converterComplete this notebook to display temperature converted to Fahrenheitusing the formula $ F = C \times 1.8 + 32 $Use format string (f-string) to keep two decimal points, for example`Temperature in Fahrenheit is 66.65`
###Code
temp_c = input()
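# One possible completion (a sketch of the intended solution):
# convert the input, apply F = C * 1.8 + 32, and print with two decimals via an f-string.
temp_f = float(temp_c) * 1.8 + 32
print(f"Temperature in Fahrenheit is {temp_f:.2f}")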
###Output
_____no_output_____ |
03_tuplas.ipynb | ###Markdown
###Code
arreglo.count(1)
###Output
_____no_output_____ |
ru/notebooks/ch-algorithms/quantum-walk-search-algorithm.ipynb | ###Markdown
Quantum Walk Search Algorithm

Quantum walks are the quantum equivalent of classical Markov chains and have become key ingredients of many quantum algorithms. In this section we implement the quantum walk search algorithm, which finds marked elements in a graph. The algorithm has a quadratic speedup compared to its classical counterpart.

1. Classical Markov chains

A Markov chain is a stochastic process that is often used to model real-world processes. It consists of states and associated transition probabilities, which describe the probability of moving between states at each time step. In discrete-time Markov chains, which we work with here, the time steps are discrete. Markov chains satisfy the Markov property, meaning that the next step of the process depends only on the current step and not on any of the previous ones. A Markov chain has an associated transition matrix $P$ that describes the probability of transitioning between each pair of states. Below we show an example of a Markov chain and its associated transition matrix $P$.

\begin{equation} P = \begin{pmatrix} 0.1 & 0.3 & 0.3 \\ 0.1 & 0.1 & 0.2 \\ 0.8 & 0.6 & 0.5 \end{pmatrix} \end{equation}

Given the transition matrix $P$, we can obtain the probability distribution after $t$ time steps from $P^t$.

2. Quantum walks

Quantum walks are the quantum equivalent of classical Markov chains. Because of superposition, a quantum walk traverses all possible paths simultaneously until we measure the circuit, and because of quantum interference some states cancel out. This makes quantum walk algorithms faster than classical ones, since we can design them so that wrong answers are quickly eliminated. There are two common models of quantum walks, coined quantum walks and Szegedy quantum walks, which are equivalent under certain circumstances. A coined quantum walk is a walk on the vertices of a graph, while a Szegedy quantum walk takes place on the edges. Before we show how we can implement a quantum walk, we introduce both models.
$\newcommand{\ket}[1]{\left|{#1}\right\rangle}$ $\newcommand{\bra}[1]{\left\langle {#1}\right|}$

2.A Coined quantum walks

A simple example of a coined quantum walk is the walk on the infinite integer line. In this case we represent the position of the walker by an integer $\{\ket{j} : j \in \mathbb{Z}\}$, since the walker can visit every integer in $\mathbb{Z}$. A coin decides how the walker should move. On the integer line the walker can go either left or right. Thus the computational basis of the coin is $\{\ket{0}, \ket{1}\}$, and we move the walker in one direction if the coin is $\ket{0}$ and in the other direction if the coin is $\ket{1}$.

A coined quantum walk is a traversal of the nodes of a graph, and we call the nodes states. The walker can move between states that are connected by an edge. In the coin model we have two quantum states and two operators. The first state is the position state, which represents the position of the walker. For the walk above this is an integer, since the walker can be anywhere on the integer line. The other state is the coin state. The coin state determines how the walker should move in the next step. We can represent both the coin state and the position state as vectors in a Hilbert space. If we express the coin state by a vector in $\mathcal{H}_C$ and the position state by a vector in $\mathcal{H}_P$, we can express the quantum space of the whole walker as $\mathcal{H} = \mathcal{H}_C \otimes \mathcal{H}_P$.

As we already mentioned, the model also has two operators: the coin operator $C$ and the shift operator $S$. The coin operator acts on $\mathcal{H}_C$ at each time step and puts the walker into a superposition, so that it traverses all possible paths simultaneously. For the walk on the integer line this means that it moves both left and right at each time step. There are different coin operators, but the most common are the Hadamard coin and the Grover coin. The Hadamard coin is the Hadamard gate, which puts the walker into an equal superposition:

\begin{equation} H = \frac{1}{\sqrt{2}} \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} \end{equation}

The Grover coin is the Grover diffusion operator from Grover's algorithm. We define it as

\begin{equation} G = \begin{bmatrix} \frac{2}{n} - 1 & \frac{2}{n} & \ldots & \frac{2}{n} \\ \frac{2}{n} & \frac{2}{n} - 1 & \ldots & \frac{2}{n} \\ \vdots & \vdots & \ddots & \vdots \\ \frac{2}{n} & \frac{2}{n} & \ldots & \frac{2}{n} - 1 \end{bmatrix} \end{equation}

Like the Hadamard coin, the Grover coin puts the walker into a superposition, but it behaves a little differently. If we apply the Grover coin to a walker in the position $\ket{000}$, we obtain the state-vector probabilities shown in the figure below. As we can see, the coin does not put the walker into an equal superposition the way the Hadamard coin does. Instead, $\ket{000}$ has a much larger probability than the other states.

The other operator in the model, the shift operator, acts on $\mathcal{H}_P$ and moves the walker to its next position. For the walk on the integer line, the shift operator shifts the position up by one when the coin is $\ket{0}$ and down by one when the coin is $\ket{1}$:

\begin{equation} S \ket{0}\ket{j} = \ket{0}\ket{j+1} \end{equation}
\begin{equation} S \ket{1}\ket{j} = \ket{1}\ket{j-1} \end{equation}

With the shift operator defined above, we can represent one step of the coined quantum walk by a unitary operator $U$ given by
\begin{equation} U = SC, \end{equation}
where $C$ is the coin operator. For the quantum walk on the integer line we use the Hadamard coin, but $C$ can be the Hadamard coin, the Grover coin or any other coin operator.

We can also look several steps ahead. We can express the quantum state $\ket{\psi}$ after $t$ time steps as
\begin{equation} \ket{\psi (t)} = U^t \ket{\psi(0)}, \end{equation}
where $\ket{\psi(0)}$ is the initial state and $U$ is the operator of one step of the walk [1].

Coined quantum walks are best suited for regular graphs, i.e. graphs in which all nodes have the same number of neighbors [2]. An alternative quantum walk model, which is more convenient for irregular graphs, is the Szegedy model, which we consider next.

2.B Szegedy quantum walk

While the coined walk is a walk on the nodes of a graph, the Szegedy walk is a walk on the edges of the bipartite double cover of the original graph. A double-cover graph is a graph with twice as many vertices as the original graph. Two vertices in the bipartite double-cover graph are connected by an edge if and only if the corresponding vertices are connected in the original graph. To build this model we start from the transition probability matrix $P$ of a classical walk. As described in section 1, we represent a classical discrete-time random walk by a transition matrix $P$. For any $N$-vertex graph with an $N \times N$ transition matrix $P$, we can define the corresponding discrete-time quantum walk as a unitary operation in the Hilbert space $\mathcal{H}^N \otimes \mathcal{H}^N$. Let $P_{jk}$ denote the probability of making a transition from state $j$ to $k$. Before we define the walk, we define the normalized states

\begin{equation} \ket{\psi_j} := \sum_{k=1}^N \sqrt{P_{kj}} \ket{j,k}, \; j=1,...,N \end{equation}

and the projection onto $\{\ket{\psi_j}: j=1,...,N\}$

\begin{equation} \Pi := \sum_{j=1}^N \ket{\psi_j} \bra{\psi_j} \end{equation}

We also introduce the shift operator $S$:

\begin{equation} S := \sum_{j,k=1}^N \ket{j,k} \bra{k,j} \end{equation}

With $S$ and $\Pi$ defined above, we can introduce one step of the discrete-time quantum walk:

\begin{equation} U := S(2 \Pi - 1), \end{equation}

where $(2\Pi - 1)$ is a reflection operator. We also define $t$ steps of the walk as $U^t$ [2].

2.C Equivalence of coined and Szegedy quantum walks

It is known that the coined walk with the Grover coin is equivalent to the Szegedy quantum walk. For more details we refer to this paper [3] by Thomas G. Wong, where he also shows the equivalence between the operators of the two models.

3. Example: implementation of a quantum walk on a hypercube

A hypercube is the $n$-dimensional analogue of the three-dimensional cube. All nodes have degree $n$, and in total the hypercube has $N=2^n$ nodes. We can represent the nodes of a hypercube graph by $n$-tuples of binary digits. The binary representations of a node's neighbors differ in exactly one binary digit. For example, in the four-dimensional hypercube the neighbors of $0000$ are $0001$, $0010$, $0100$ and $1000$. Thus a node is connected to all nodes at Hamming distance 1 from it. The edges are labelled as well: two neighboring nodes that differ in bit $a$ are connected by the edge labelled $a$.

The Hilbert space representing the coined quantum walk on the hypercube is $\mathcal{H} = \mathcal{H}^n \otimes \mathcal{H}^{2^n}$, where $\mathcal{H}^n$ denotes the coin space and $\mathcal{H}^{2^n}$ the position of the walker. The computational basis is

\begin{equation} \big\{ \ket{a,\vec{v}},\; 0 \leq a \leq n-1,\; \vec{v} \in \{(00...00), (00...01), ....., (11...11)\} \big\}. \end{equation}

The computational basis value of the coin $a$, associated with the edge $a$, determines where the walker should move. If $a=0$, the walker moves to the node whose first binary value differs from the current node. If $a=1$, the walker moves to the node whose second value differs from the current one, and so on. Let $\vec{e}_a$ be the $n$-tuple in which all binary values except the value at index $a$ are $0$. Then the shift operator $S$ takes the walker from the state $\ket{a} \ket{\vec{v}}$ to $\ket{a} \ket{\vec{v} \oplus \vec{e}_a}$:

\begin{equation} S \ket{a} \ket{\vec{v}} = \ket{a} \ket{\vec{v} \oplus \vec{e}_a}. \end{equation}

For this walk we use the Grover coin $G$. The evolution operator is therefore

\begin{equation} U = SG. \end{equation}

Now we will show how we can implement a quantum walk on the 4-dimensional hypercube. We need to implement the coin operator and the shift operator. We start by importing all the necessary libraries from Qiskit.
###Code
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, execute, Aer, IBMQ, QuantumRegister, ClassicalRegister
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from qiskit.circuit.library import QFT
from numpy import pi
from qiskit.quantum_info import Statevector
from matplotlib import pyplot as plt
import numpy as np
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
###Output
_____no_output_____
###Markdown
The circuit will have 6 qubits, 4 of which represent the position and 2 the coin. As we mentioned earlier, the coin is the Grover coin, which is the diffuser from Grover's algorithm. We start by implementing it.
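As a quick sanity check (a side note using standard gate identities): $(Z\otimes Z)\,CZ = 2\ket{00}\bra{00} - I$, so the coin circuit below (Hadamards, then $Z$ on both coin qubits, then $CZ$, then Hadamards) implements $H^{\otimes 2}(Z\otimes Z)\,CZ\,H^{\otimes 2} = 2\ket{s}\bra{s} - I$, with $\ket{s}$ the uniform superposition of the two coin qubits. Written out, this is
\begin{equation} G = \frac{1}{2}\begin{bmatrix} -1 & 1 & 1 & 1 \\ 1 & -1 & 1 & 1 \\ 1 & 1 & -1 & 1 \\ 1 & 1 & 1 & -1 \end{bmatrix}, \end{equation}
which matches the general definition of the Grover coin from section 2.A with $n=4$.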
###Code
one_step_circuit = QuantumCircuit(6, name=' ONE STEP')
# Coin operator
one_step_circuit.h([4,5])
one_step_circuit.z([4,5])
one_step_circuit.cz(4,5)
one_step_circuit.h([4,5])
one_step_circuit.draw()
###Output
_____no_output_____
###Markdown
Now let us implement the shift operator. We know that the walker can only move to neighboring nodes, and all neighboring nodes differ in just one bit. We want to move the walker according to the coin, and we move it by applying a NOT gate to one of the node qubits. If the coin is in the state $\ket{11}$, we take the walker to the state in which the first node qubit differs. If the coin is $\ket{10}$ or $\ket{01}$, the walker moves to the state in which the second or the third qubit, respectively, differs. Finally, if the Grover coin is $\ket{00}$, we flip the fourth qubit. We implement this with CCNOT and NOT gates after the Grover coin. Together they represent one step of the quantum walk on the 4-dimensional hypercube.
###Code
# Shift operator function for 4d-hypercube
def shift_operator(circuit):
for i in range(0,4):
circuit.x(4)
if i%2==0:
circuit.x(5)
circuit.ccx(4,5,i)
shift_operator(one_step_circuit)
one_step_gate = one_step_circuit.to_instruction()
one_step_circuit.draw()
###Output
_____no_output_____
###Markdown
4. Quantum walk search algorithm

Now we implement the quantum walk search algorithm, which finds marked vertices in a graph. First we describe the algorithm, and then we walk through its implementation. The quantum walk search algorithm solves the problem of finding marked vertices in a graph using a quantum walk. That is, we mark some set of vertices $M$, start from an arbitrary node of the graph and walk until we find the marked nodes. The basis states in the quantum walk search algorithm have two registers, one corresponding to the current node and the other to the previous node; that is, the basis states correspond to the edges of the graph. We denote the quantum walk based on a classical Markov chain with transition matrix $P$ by the unitary operation $W(P)$ on $\mathcal{H}$. We also define $\ket{p_x} = \sum_y \sqrt{P_{xy}}\ket{y}$ as the uniform superposition over the neighbors of node $x$. Let $\ket{x}\ket{y}$ be a basis state. We call the basis state $\ket{x}\ket{y}$ "good" if $x$ is a marked node, and "bad" otherwise. We now introduce the "good" and "bad" states

\begin{equation} \ket{G} = \frac{1}{\sqrt{|M|}} \sum_{x \in M} \ket{x} \ket{p_x}, \; \ket{B} = \frac{1}{\sqrt{N-|M|}} \sum_{x \notin M} \ket{x} \ket{p_x}, \end{equation}

which are superpositions over the good and bad basis states. We also define $\epsilon = |M|/N$ and $\theta = \arcsin(\sqrt{\epsilon})$.

In short, the algorithm consists of three steps:
1. Prepare the initial state $\ket{U} = \frac{1}{\sqrt{N}} \sum_{x} \ket{x} \ket{p_x} = \sin{\theta} \ket{G} + \cos{\theta} \ket{B}$, the uniform superposition over all edges
2. Repeat $O(1/\sqrt{\epsilon})$ times: (a) reflection through $\ket{B}$; (b) reflection through $\ket{U}$
3. Measure in the computational basis

We can easily implement step 1 with Hadamard gates, and the reflection through $\ket{B}$ with a phase oracle that shifts the phase when the node $x$ in the first register is marked and leaves the circuit unchanged otherwise.

Step 2(b) is equivalent to finding a unitary $R(P)$ that performs the following mapping:
\begin{align} \ket{U} &\mapsto \ket{U}, \\ \ket{\psi} &\mapsto -\ket{\psi}, \quad \forall \ket{\psi} \text{ in the span of the eigenvectors of } W(P) \text{ orthogonal to } \ket{U} \end{align}

To find this operator we apply phase estimation to $W(P)$. Above we defined $W(P)$ as the evolution operator of the walk. As we saw in section 2.A, it is a unitary operator, so the eigenvalues of $W(P)$ have norm $1$. Thanks to this we can write the eigenvalues of $W(P)$ in the form $e^{\pm 2i\theta_j}$. The unitary $W(P)$ has one eigenvector with corresponding eigenvalue $1$, namely $\ket{U}$; it corresponds to $\theta_1=0$. $R(P)$ finds this vector $\ket{U}$ by adding a register of ancilla qubits and performing phase estimation with precision $O(1/\sqrt{\delta})$, where $\delta$ is the spectral gap of $P$. For this we need to apply $W(P)$ $O(1/\sqrt{\delta})$ times. Let $\ket{w}$ be an eigenvector of $W(P)$ with eigenvalue $e^{\pm 2i\theta_j}$, and suppose $\tilde{\theta_j}$ is the best approximation of $\theta_j$ obtained by phase estimation. The operation $R(P)$ that performs the mapping above for $\ket{w}$ in step 2(b) is given by [4]

\begin{equation} \ket{w} \ket{0} \mapsto \ket{w} \ket{\tilde{\theta_j}} \mapsto (-1)^{|\tilde{\theta_j} \neq 0|} \ket{w} \ket{\tilde{\theta_j}} \mapsto (-1)^{|\tilde{\theta_j} \neq 0|} \ket{w} \ket{0} \end{equation}

5. Example: quantum walk search on the 4-dimensional hypercube

The quantum walk search algorithm finds a marked set of nodes in $O(1/\sqrt{\epsilon})$ steps, with $\epsilon = |M|/N$, where $|M|$ is the number of marked nodes and $N$ is the total number of nodes. This algorithm was originally used with Szegedy quantum walks, where two node registers represent the quantum state. However, the coined walk with the Grover coin is equivalent to the Szegedy quantum walk, and since implementations of coined walks are in general less complex, we decided to implement the algorithm with a coined walk. We will use the 4-dimensional hypercube that we implemented in section 3.

In short, we implement the algorithm as follows. We achieve step 1, the uniform superposition over all edges, by applying Hadamard gates to the node qubits as well as to the coin qubits. For step 2(a) we implement a phase oracle. Step 2(b) is implemented by phase estimation on one step of the quantum walk on the hypercube, followed by marking all quantum states where $\theta \neq 0$. We do this by rotating an auxiliary qubit. In the last part of this step we reverse the phase estimation. The number of theta qubits depends on the desired precision of $\theta$.

Below we implement the quantum walk search algorithm on the 4-dimensional hypercube. For this algorithm we will need the inverse of the one-step gate implemented earlier. We obtain it with the circuit's built-in inverse() function.
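(As a side note on precision: the implementation below uses a register of 4 theta qubits for the phase estimation, so $\theta$ is estimated with 4 bits of precision.)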
###Code
one_step_circuit.inverse().draw()
###Output
_____no_output_____
###Markdown
The inverse one-step gate will be used later to reverse the phase estimation. We need to make controlled gates both from the one-step gate that we implemented in section 3 and from its inverse. Later we will use them depending on the value of the control qubit.
###Code
# Make controlled gates
inv_cont_one_step = one_step_circuit.inverse().control()
inv_cont_one_step_gate = inv_cont_one_step.to_instruction()
cont_one_step = one_step_circuit.control()
cont_one_step_gate = cont_one_step.to_instruction()
###Output
_____no_output_____
###Markdown
Both the controlled one-step gate and the controlled inverse one-step gate will be used in the phase estimation. One more thing we will use in the phase estimation is the quantum Fourier transform. Qiskit has a QFT function that implements the quantum Fourier transform. Phase estimation uses the inverse quantum Fourier transform, but we will also need the regular QFT to reverse the phase estimation.
###Code
inv_qft_gate = QFT(4, inverse=True).to_instruction()
qft_gate = QFT(4, inverse=False).to_instruction()
QFT(4, inverse=True).decompose().draw("mpl")
###Output
_____no_output_____
###Markdown
Before we implement the phase estimation, we implement the phase oracle, which marks the states 1011 and 1111, and then build its circuit. This is step 2(a) of the algorithm.
###Code
phase_circuit = QuantumCircuit(6, name=' phase oracle ')
# Mark 1011
phase_circuit.x(2)
phase_circuit.h(3)
phase_circuit.mct([0,1,2], 3)
phase_circuit.h(3)
phase_circuit.x(2)
# Mark 1111
phase_circuit.h(3)
phase_circuit.mct([0,1,2],3)
phase_circuit.h(3)
phase_oracle_gate = phase_circuit.to_instruction()
# Phase oracle circuit
phase_oracle_circuit = QuantumCircuit(11, name=' PHASE ORACLE CIRCUIT ')
phase_oracle_circuit.append(phase_oracle_gate, [4,5,6,7,8,9])
phase_circuit.draw()
###Output
_____no_output_____
###Markdown
Now we implement a gate that rotates an auxiliary qubit if the other qubits are non-zero. We will use this gate in the phase estimation, where it rotates the auxiliary qubit if $\theta \neq 0$.
###Code
# Mark q_4 if the other qubits are non-zero
mark_auxiliary_circuit = QuantumCircuit(5, name=' mark auxiliary ')
mark_auxiliary_circuit.x([0,1,2,3,4])
mark_auxiliary_circuit.mct([0,1,2,3], 4)
mark_auxiliary_circuit.z(4)
mark_auxiliary_circuit.mct([0,1,2,3], 4)
mark_auxiliary_circuit.x([0,1,2,3,4])
mark_auxiliary_gate = mark_auxiliary_circuit.to_instruction()
mark_auxiliary_circuit.draw()
###Output
_____no_output_____
###Markdown
Now we implement step 2(b) of the algorithm. This step consists of phase estimation of one step of the quantum walk, followed by the auxiliary qubit, which we rotate if $\theta \neq 0$. For this we use the mark_auxiliary_gate we just created. After that we reverse the phase estimation.
###Code
# Phase estimation
phase_estimation_circuit = QuantumCircuit(11, name=' phase estimation ')
phase_estimation_circuit.h([0,1,2,3])
for i in range(0,4):
stop = 2**i
for j in range(0,stop):
phase_estimation_circuit.append(cont_one_step, [i,4,5,6,7,8,9])
# Inverse fourier transform
phase_estimation_circuit.append(inv_qft_gate, [0,1,2,3])
# Mark all angles theta that are not 0 with an auxiliary qubit
phase_estimation_circuit.append(mark_auxiliary_gate, [0,1,2,3,10])
# Reverse phase estimation
phase_estimation_circuit.append(qft_gate, [0,1,2,3])
for i in range(3,-1,-1):
stop = 2**i
for j in range(0,stop):
phase_estimation_circuit.append(inv_cont_one_step, [i,4,5,6,7,8,9])
phase_estimation_circuit.barrier(range(0,10))
phase_estimation_circuit.h([0,1,2,3])
# Make phase estimation gate
phase_estimation_gate = phase_estimation_circuit.to_instruction()
phase_estimation_circuit.draw()
###Output
_____no_output_____
###Markdown
Now we implement the whole quantum walk search algorithm using the gates we made earlier. We start by applying Hadamard gates to the node and coin qubits, which is step 1 of the algorithm. After that we iteratively apply the phase oracle gate and the phase estimation gate (steps 2(a) and 2(b)). We need $O(1/\sqrt{\epsilon})$ iterations, as stated in the description of the algorithm in section 4. Finally, we measure the node qubits.
###Code
# Implementation of the full quantum walk search algorithm
theta_q = QuantumRegister(4, 'theta')
node_q = QuantumRegister(4, 'node')
coin_q = QuantumRegister(2, 'coin')
auxiliary_q = QuantumRegister(1, 'auxiliary')
creg_c2 = ClassicalRegister(4, 'c')
circuit = QuantumCircuit(theta_q, node_q, coin_q, auxiliary_q, creg_c2)
# Apply Hadamard gates to the qubits that represent the nodes and the coin
circuit.h([4,5,6,7,8,9])
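# Note: with the two marked nodes (1011 and 1111), epsilon = |M|/N = 2/16, so
# O(1/sqrt(epsilon)) corresponds to roughly 2-3 repetitions of steps 2(a)-2(b).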
iterations = 2
for i in range(0,iterations):
circuit.append(phase_oracle_gate, [4,5,6,7,8,9])
circuit.append(phase_estimation_gate, [0,1,2,3,4,5,6,7,8,9,10])
circuit.measure(node_q[0], creg_c2[0])
circuit.measure(node_q[1], creg_c2[1])
circuit.measure(node_q[2], creg_c2[2])
circuit.measure(node_q[3], creg_c2[3])
circuit.draw()
###Output
_____no_output_____
###Markdown
Finally, we run the implementation on the qasm simulator. We see that most of the time the circuit collapses to the marked states.
###Code
backend = Aer.get_backend('qasm_simulator')
job = execute( circuit, backend, shots=1024 )
hist = job.result().get_counts()
plot_histogram( hist )
###Output
_____no_output_____
###Markdown
6. References
1. Renato Portugal. Quantum Walks and Search Algorithms. New York, NY: Springer New York, 2013.
2. Markus G. Hun. Some introductory notes on quantum computing. April 2000.
3. Thomas G. Wong. "Equivalence of Szegedy's and coined quantum walks". In: Quantum Information Processing 16.9 (July 2017). ISSN: 1573-1332. DOI: 10.1007/s11128-017-1667-y. URL: http://dx.doi.org/10.1007/s11128-017-1667-y.
4. Ronald de Wolf. Quantum Computing: Lecture Notes. 2021. arXiv: 1907.09415 [quant-ph]
###Code
import qiskit.tools.jupyter
%qiskit_version_table
###Output
/usr/local/anaconda3/envs/terra-unstable/lib/python3.9/site-packages/qiskit/aqua/__init__.py:86: DeprecationWarning: The package qiskit.aqua is deprecated. It was moved/refactored to qiskit-terra For more information see <https://github.com/Qiskit/qiskit-aqua/blob/main/README.md#migration-guide>
warn_package('aqua', 'qiskit-terra')
|
20201023/.ipynb_checkpoints/simpleRenting-checkpoint.ipynb | ###Markdown
The value of renting
Assuming we obtain the value $\tilde{V}_{t+1}(x_{t+1})$, where $x_{t+1} = [w_{t+1}, n_{t+1}, M_{t+1}, e_{t+1}, \hat{S}_{t+1}, z_{t+1}, (H)]$, from interpolation. We know $H$ and $M_t$ from the action taken, so we can calculate the mortgage payment $m$, and $rh$ (for now treated as constant) is observed from the market.
* Housing choice is limited: $H_{\text{choice}} = \{750, 1000, 1500, 2000\}$
* Mortgage choice is also limited to discrete values: $M_{t} = [0.2H, 0.5H, 0.8H]$
* State: continue to rent: $x = [w, n, e, s, z]$; switch to owning a house: $x = [w,n,M,e,s,z]$
* Action: continue to rent: $a = (c, b, k, h)$; switch to owning a house: $a = (c, b, k, M, H)$
* House-buying activities can only happen between the age of 20 and the age of 45.
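For reference, the per-period budget constraints encoded in the solvers below are
\begin{equation} \text{renting: } y_{at} + w = c + b + k + p_r h, \qquad \text{owning: } y_{at} + w = c + b + k + (H p_t - M) + c_h, \end{equation}
where $y_{at}$ is the income term computed by yAT(t,x), $p_r$ is the rental price per unit of housing, $p_t$ is the housing price per unit, and $c_h$ is the extra cost charged when buying.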
###Code
#Define the utility function
def u(c):
# shift utility function to the left, so it only takes positive value
return (np.float_power(c, 1-gamma) - 1)/(1 - gamma)
#Define the bequeath function, which is a function of wealth
def uB(tb):
return B*u(tb)
#Calculate TB_rent
def calTB_rent(x):
# change input x as numpy array
# w, n, e, s, z = x
TB = x[:,0] + x[:,1]
return TB
#Calculate TB_own
def calTB_own(x):
# change input x as numpy array
# transiton from (w, n, e, s, z) -> (w, n, M, e, s, z, H)
TB = x[:,0] + x[:,1] + x[:,6]*pt - x[:,2]
return TB
#Reward function for renting
def u_rent(a):
'''
Input:
action a: c, b, k, h = a
Output:
reward value: the length of return should be equal to the length of a
'''
c = a[:,0]
h = a[:,3]
C = np.float_power(c, alpha) * np.float_power(h, 1-alpha)
return u(C)
#Reward function for owning
def u_own(a):
'''
Input:
action a: c, b, k, M, H = a
Output:
reward value: the length of return should be equal to the length of a
'''
c = a[:,0]
H = a[:,4]
C = np.float_power(c, alpha) * np.float_power((1+kappa)*H, 1-alpha)
return u(C)
def transition_to_rent(x,a,t):
'''
input: a is a np array containing all possible actions
output: from x = [w, n, e, s, z] to x = [w, n, e, s, z]
'''
w, n, e, s, z = x
s = int(s)
e = int(e)
nX = len(x)
aSize = len(a)
# actions
b = a[:,1]
k = a[:,2]
h = a[:,3]
# transition of z
z_next = np.ones(aSize)
if z == 0:
z_next[k==0] = 0
# transition before T_R and after T_R
if t >= T_R:
future_states = np.zeros((aSize*nS,nX))
n_next = gn(t, n, x, r_k)
future_states[:,0] = np.repeat(b*(1+r_b[s]), nS) + np.repeat(k, nS)*(1+np.tile(r_k, aSize))
future_states[:,1] = np.tile(n_next,aSize)
future_states[:,2] = 0
future_states[:,3] = np.tile(range(nS),aSize)
future_states[:,4] = np.repeat(z_next,nS)
future_probs = np.tile(Ps[s],aSize)
else:
future_states = np.zeros((2*aSize*nS,nX))
n_next = gn(t, n, x, r_k)
future_states[:,0] = np.repeat(b*(1+r_b[s]), 2*nS) + np.repeat(k, 2*nS)*(1+np.tile(r_k, 2*aSize))
future_states[:,1] = np.tile(n_next,2*aSize)
future_states[:,2] = np.tile(np.repeat([0,1],nS), aSize)
future_states[:,3] = np.tile(range(nS),2*aSize)
future_states[:,4] = np.repeat(z_next,2*nS)
# employed right now:
if e == 1:
future_probs = np.tile(np.append(Ps[s]*Pe[s,e], Ps[s]*(1-Pe[s,e])),aSize)
else:
future_probs = np.tile(np.append(Ps[s]*(1-Pe[s,e]), Ps[s]*Pe[s,e]),aSize)
return future_states, future_probs
def transition_to_own(x,a,t):
'''
input: a is a np array containing all possible actions
from x = [w, n, e, s, z] to x = [w, n, M, e, s, z, H]
'''
w, n, e, s, z = x
s = int(s)
e = int(e)
nX = len(x)
aSize = len(a)
# actions
b = a[:,1]
k = a[:,2]
M = a[:,3]
M_next = M*(1+rh)
H = a[:,4]
# transition of z
z_next = np.ones(aSize)
if z == 0:
z_next[k==0] = 0
# transition before T_R and after T_R
if t >= T_R:
future_states = np.zeros((aSize*nS,nX))
n_next = gn(t, n, x, r_k)
future_states[:,0] = np.repeat(b*(1+r_b[s]), nS) + np.repeat(k, nS)*(1+np.tile(r_k, aSize))
future_states[:,1] = np.tile(n_next,aSize)
future_states[:,2] = np.repeat(M_next,nS)
future_states[:,3] = 0
future_states[:,4] = np.tile(range(nS),aSize)
future_states[:,5] = np.repeat(z_next,nS)
future_states[:,6] = np.repeat(H,nS)
future_probs = np.tile(Ps[s],aSize)
else:
future_states = np.zeros((2*aSize*nS,nX))
n_next = gn(t, n, x, r_k)
future_states[:,0] = np.repeat(b*(1+r_b[s]), 2*nS) + np.repeat(k, 2*nS)*(1+np.tile(r_k, 2*aSize))
future_states[:,1] = np.tile(n_next,2*aSize)
future_states[:,2] = np.repeat(M_next,2*nS)
future_states[:,3] = np.tile(np.repeat([0,1],nS), aSize)
future_states[:,4] = np.tile(range(nS),2*aSize)
future_states[:,5] = np.repeat(z_next,2*nS)
future_states[:,6] = np.repeat(H,2*nS)
# employed right now:
if e == 1:
future_probs = np.tile(np.append(Ps[s]*Pe[s,e], Ps[s]*(1-Pe[s,e])),aSize)
else:
future_probs = np.tile(np.append(Ps[s]*(1-Pe[s,e]), Ps[s]*Pe[s,e]),aSize)
return future_states, future_probs
class Approxy(object):
def __init__(self, pointsRent, Vrent, Vown, t):
self.Vrent = Vrent
self.Vown = Vown
self.Prent = pointsRent
self.t = t
def predict(self, xx):
if xx.shape[1] == 5:
# x = [w, n, e, s, z]
pvalues = np.zeros(xx.shape[0])
for e in [0,1]:
for s in range(nS):
for z in [0,1]:
index = (xx[:,2] == e) & (xx[:,3] == s) & (xx[:,4] == z)
pvalues[index]=interpn(self.Prent, self.Vrent[:,:,e,s,z], xx[index][:,:2],
bounds_error = False, fill_value = None)
return pvalues
else:
# x = w, n, M, e, s, z, H
pvalues = np.zeros(xx.shape[0])
for i in range(len(H_options)):
H = H_options[i]
# Mortgage amount, * 0.25 is the housing price per unit
Ms = np.array([0.01*H,0.05*H,0.1*H,0.2*H,0.3*H,0.4*H,0.5*H,0.8*H]) * pt
points = (ws,ns,Ms)
for e in [0,1]:
for s in range(nS):
for z in [0,1]:
index = (xx[:,3] == e) & (xx[:,4] == s) & (xx[:,5] == z) & (xx[:,6] == H)
pvalues[index]=interpn(points, self.Vown[i][:,:,:,e,s,z,self.t], xx[index][:,:3],
method = "nearest",bounds_error = False, fill_value = None)
return pvalues
# used to calculate dot product
def dotProduct(p_next, uBTB, t):
if t >= T_R:
return (p_next*uBTB).reshape((len(p_next)//(nS),(nS))).sum(axis = 1)
else:
return (p_next*uBTB).reshape((len(p_next)//(2*nS),(2*nS))).sum(axis = 1)
# Value function is a function of state and time, according to the restriction transfer from renting to ownning can only happen
# between the age: 0 - 25
def V(x, t, NN):
w, n, e, s, z = x
yat = yAT(t,x)
# first define the objective function solver and then the objective function
def obj_solver_rent(obj_rent):
# a = [c, b, k, h]
# Constrain: yat + w = c + b + k + pr*h
actions = []
for hp in np.linspace(0.001,0.999,20):
budget1 = yat + w
h = budget1 * hp/pr
budget2 = budget1 * (1-hp)
for cp in np.linspace(0.001,0.999,11):
c = budget2*cp
budget3 = budget2 * (1-cp)
#.....................stock participation cost...............
for kp in np.linspace(0,1,11):
                    # If z == 1, pay the maintenance cost Km = 0.5
if z == 1:
# kk is stock allocation
kk = budget3 * kp
if kk > Km:
k = kk - Km
b = budget3 * (1-kp)
else:
k = 0
b = budget3
                    # If z == 0 and k > 0, pay the participation fee Kc = 5
else:
kk = budget3 * kp
if kk > Kc:
k = kk - Kc
b = budget3 * (1-kp)
else:
k = 0
b = budget3
#..............................................................
actions.append([c,b,k,h])
actions = np.array(actions)
values = obj_rent(actions)
fun = np.max(values)
ma = actions[np.argmax(values)]
return fun, ma
def obj_solver_own(obj_own):
# a = [c, b, k, M, H]
        # possible values of H = {750, 1000, 1500, 2000}; possible values of M = [0.2H, 0.5H, 0.8H]*pt
# (M, t, rh) --> m
        # Constraint: yat + w = c + b + k + (H*pt - M) + c_h
actions = []
for H in H_options:
for mp in M_options:
M = mp*H*pt
m = M/D[T_max - t]
# 5 is the welfare income which is also the minimum income
if (H*pt - M) + c_h <= yat + w and m < pr*H + 5:
budget1 = yat + w - (H*pt - M) - c_h
for cp in np.linspace(0.001,0.999,11):
c = budget1*cp
budget2 = budget1 * (1-cp)
#.....................stock participation cost...............
for kp in np.linspace(0,1,11):
                            # If z == 1, pay the maintenance cost Km = 0.5
if z == 1:
# kk is stock allocation
kk = budget2 * kp
if kk > Km:
k = kk - Km
b = budget2 * (1-kp)
else:
k = 0
b = budget2
                            # If z == 0 and k > 0, pay the participation fee Kc = 5
else:
kk = budget2 * kp
if kk > Kc:
k = kk - Kc
b = budget2 * (1-kp)
else:
k = 0
b = budget2
#..............................................................
actions.append([c,b,k,M,H])
if len(actions) == 0:
return -np.inf, [0,0,0,0,0]
else:
actions = np.array(actions)
values = obj_own(actions)
fun = np.max(values)
ma = actions[np.argmax(values)]
return fun, ma
if t == T_max-1:
# The objective function of renting
def obj_rent(actions):
# a = [c, b, k, h]
x_next, p_next = transition_to_rent(x, actions, t)
uBTB = uB(calTB_rent(x_next))
return u_rent(actions) + beta * dotProduct(uBTB, p_next, t)
fun, action = obj_solver_rent(obj_rent)
return np.array([fun, action])
    # If the agent is older than 25 or unemployed, then keep renting
elif t > 25 or e == 0:
# The objective function of renting
def obj_rent(actions):
# a = [c, b, k, h]
x_next, p_next = transition_to_rent(x, actions, t)
V_tilda = NN.predict(x_next) # V_rent_{t+1} used to approximate, shape of x is [w,n,e,s]
uBTB = uB(calTB_rent(x_next))
return u_rent(actions) + beta * (Pa[t] * dotProduct(V_tilda, p_next, t) + (1 - Pa[t]) * dotProduct(uBTB, p_next, t))
fun, action = obj_solver_rent(obj_rent)
return np.array([fun, action])
    # If the agent is 25 or younger and employed, compare renting with owning.
else:
# The objective function of renting
def obj_rent(actions):
# a = [c, b, k, h]
x_next, p_next = transition_to_rent(x, actions, t)
V_tilda = NN.predict(x_next) # V_rent_{t+1} used to approximate, shape of x is [w,n,e,s]
uBTB = uB(calTB_rent(x_next))
return u_rent(actions) + beta * (Pa[t] * dotProduct(V_tilda, p_next, t) + (1 - Pa[t]) * dotProduct(uBTB, p_next, t))
# The objective function of owning
def obj_own(actions):
# a = [c, b, k, M, H]
x_next, p_next = transition_to_own(x, actions, t)
V_tilda = NN.predict(x_next) # V_own_{t+1} used to approximate, shape of x is [w, n, M, e, s, H]
uBTB = uB(calTB_own(x_next))
return u_own(actions) + beta * (Pa[t] * dotProduct(V_tilda, p_next, t) + (1 - Pa[t]) * dotProduct(uBTB, p_next, t))
fun1, action1 = obj_solver_rent(obj_rent)
fun2, action2 = obj_solver_own(obj_own)
if fun1 > fun2:
return np.array([fun1, action1])
else:
return np.array([fun2, action2])
# wealth discretization
ws = np.array([10,25,50,75,100,125,150,175,200,250,500,750,1000,1500,3000])
w_grid_size = len(ws)
# 401k amount discretization
ns = np.array([1, 5, 10, 15, 25, 50, 100, 150, 400, 1000])
n_grid_size = len(ns)
pointsRent = (ws, ns)
# dimensions of the state
dim = (w_grid_size, n_grid_size, 2, nS, 2)
dimSize = len(dim)
xgrid = np.array([[w, n, e, s, z]
for w in ws
for n in ns
for e in [0,1]
for s in range(nS)
for z in [0,1]
]).reshape(dim + (dimSize,))
xs = xgrid.reshape((np.prod(dim),dimSize))
Vgrid = np.zeros(dim + (T_max,))
cgrid = np.zeros(dim + (T_max,))
bgrid = np.zeros(dim + (T_max,))
kgrid = np.zeros(dim + (T_max,))
hgrid = np.zeros(dim + (T_max,))
# Policy function of buying a house
Mgrid = np.zeros(dim + (T_max,))
Hgrid = np.zeros(dim + (T_max,))
# # Define housing choice part: Housing unit options and Mortgage amount options
V1000 = np.load("Vgrid1000.npy")
V1500 = np.load("Vgrid1500.npy")
V2000 = np.load("Vgrid2000.npy")
V750 = np.load("Vgrid750.npy")
H_options = [750, 1000, 1500, 2000]
M_options = [0.2, 0.5, 0.8]
Vown = [V750, V1000, V1500, V2000]
%%time
# value iteration part
pool = Pool()
for t in range(T_max-1,T_min, -1):
print(t)
if t == T_max - 1:
f = partial(V, t = t, NN = None)
results = np.array(pool.map(f, xs))
else:
approx = Approxy(pointsRent,Vgrid[:,:,:,:,:,t+1], Vown, t+1)
f = partial(V, t = t, NN = approx)
results = np.array(pool.map(f, xs))
Vgrid[:,:,:,:,:,t] = results[:,0].reshape(dim)
cgrid[:,:,:,:,:,t] = np.array([r[0] for r in results[:,1]]).reshape(dim)
bgrid[:,:,:,:,:,t] = np.array([r[1] for r in results[:,1]]).reshape(dim)
kgrid[:,:,:,:,:,t] = np.array([r[2] for r in results[:,1]]).reshape(dim)
# if a = [c, b, k, h]
hgrid[:,:,:,:,:,t] = np.array([r[3] if len(r) == 4 else r[4] for r in results[:,1]]).reshape(dim)
# if a = [c, b, k, M, H]
Mgrid[:,:,:,:,:,t] = np.array([r[3] if len(r) == 5 else 0 for r in results[:,1]]).reshape(dim)
Hgrid[:,:,:,:,:,t] = np.array([r[4] if len(r) == 5 else 0 for r in results[:,1]]).reshape(dim)
pool.close()
np.save("Vgrid_renting",Vgrid)
np.save("cgrid_renting",cgrid)
np.save("bgrid_renting",bgrid)
np.save("kgrid_renting",kgrid)
np.save("hgrid_renting",hgrid)
np.save("Mgrid_renting",Mgrid)
np.save("Hgrid_renting",Hgrid)
for tt in range(1,25):
print(Hgrid[:,1,1,1,1,tt])
for tt in range(1,25):
print(Hgrid[:,1,0,1,1,tt])
for tt in range(1,25):
print(Hgrid[:,1,1,0,1,tt])
for tt in range(1,25):
print(Hgrid[:,1,0,0,1,tt])
###Output
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
|
05-Uber_demand_forecasting/code/02 - GRU_Uber.ipynb | ###Markdown
02 - Forecasting using Gated Recurrent Unit (GRU) The batch generator and the loss function were implemented based on Hvass Lab's Tensorflow tutorial: https://github.com/Hvass-Labs/TensorFlow-Tutorials This notebook goes over the entire workflow for one model development, starting from transforming the data into time series and defining the loss function & batch generator, then building, training, and evaluating the GRU model.
###Code
import os
import numpy as np
import pandas as pd
import random
import warnings
from datetime import datetime
import matplotlib.pyplot as plt
from math import sqrt
# keras with tensorflow backend
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Input, Dense, GRU, Embedding
from tensorflow.python.keras.optimizers import RMSprop
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau
from sklearn.metrics import mean_squared_error
%matplotlib inline
# Load in rides & weather data pickled from the previous notebook
rides_weather = pd.read_pickle("rides_weather.pkl")
rides_weather.head()
print("tensorflow version", tf.__version__)
#print("keras version", tf.keras.__version__)
print("pandas version", pd.__version__)
# potentially add day and hour later
# df['Various', 'Day'] = df.index.dayofyear
# df['Various', 'Hour'] = df.index.hour
# rides_weather.index.weekend
target_names = rides_weather.columns[:-2]
###Output
_____no_output_____
###Markdown
We are going to predict 24 hours into the future.
###Code
shift_days = 1
shift_steps = shift_days * 24 # Number of hours to shift.
# shifts the time so that we predict 24 hours into the future
df_targets = rides_weather[target_names].shift(-shift_steps)
x_data = rides_weather[target_names].values[0:-shift_steps]
y_data = df_targets.values[:-shift_steps]
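# Illustration of the alignment: pd.Series([10, 20, 30, 40]).shift(-2) -> [30, 40, NaN, NaN],
# so row t of df_targets holds the value observed shift_steps hours after row t of the inputs,
# and dropping the last shift_steps rows removes the NaN targets.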
num_data = len(x_data)
print("X_data Shape:", x_data.shape)
print("y_data Shape:", y_data.shape)
###Output
X_data Shape: (4319, 140)
y_data Shape: (4319, 140)
###Markdown
We will split the data so that our test set will be the last week of June. The data will start being collected from the week prior to the test week.
###Code
# Split test data, which will be last week of June, but will be trained on the 2nd to last week of June
# 24hrs * 2 weeks
num_test = 24*7*2
num_train_set = num_data - num_test
x_train_set = x_data[0:num_train_set]
x_test = x_data[num_train_set:]
y_train_set = y_data[0:num_train_set]
y_test = y_data[num_train_set:]
print("length x_train: {}, y_train: {}".format(len(x_train_set),len(y_train_set)))
print("length x_test: {}, y_test: {}".format(len(x_test),len(y_test)))
# We split the training set further into training and cross validation set
x_train, x_val, y_train, y_val = train_test_split(x_train_set, y_train_set, test_size=0.15, shuffle=False)
num_train = len(x_train)
print("length x_train: {}, y_train: {}".format(len(x_train),len(y_train)))
print("length x_test: {}, y_test: {}".format(len(x_val),len(y_val)))
num_x_signals = x_data.shape[1]
num_y_signals = y_data.shape[1]
print("num x features: {}, num y features: {}".format(num_x_signals,num_y_signals))
print("for x_train: Min: {}, Max: {}".format(np.min(x_train),np.max(x_train)))
print("for x_val: Min: {}, Max: {}".format(np.min(x_val),np.max(x_val)))
print("for y_test: Min: {}, Max: {}".format(np.min(x_test),np.max(x_test)))
print("The max output for test set will be whatever the max value is for x_train")
# Apply a min-max scaler so all values fall between 0 and 1 (fit on the training set only)
x_scaler = MinMaxScaler()
x_train_scaled = x_scaler.fit_transform(x_train)
x_val_scaled = x_scaler.transform(x_val)
x_test_scaled = x_scaler.transform(x_test)
y_scaler = MinMaxScaler()
y_train_scaled = y_scaler.fit_transform(y_train)
y_val_scaled = y_scaler.transform(y_val)
y_test_scaled = y_scaler.transform(y_test)
print(x_train_scaled.shape)
print(y_train_scaled.shape)
def batch_generator(batch_size, sequence_length):
"""
Generator function for creating random batches of training-data.
"""
while True:
# Allocate a new array for the batch of input-signals.
x_shape = (batch_size, sequence_length, num_x_signals)
x_batch = np.zeros(shape=x_shape, dtype=np.float16)
# Allocate a new array for the batch of output-signals.
y_shape = (batch_size, sequence_length, num_y_signals)
y_batch = np.zeros(shape=y_shape, dtype=np.float16)
# Fill the batch with random sequences of data.
for i in range(batch_size):
# Get a random start-index.
# This points somewhere into the training-data.
idx = np.random.randint(num_train - sequence_length)
# Copy the sequences of data starting at this index.
x_batch[i] = x_train_scaled[idx:idx+sequence_length]
y_batch[i] = y_train_scaled[idx:idx+sequence_length]
yield (x_batch, y_batch)
# Sequence length will be a week's worth of data
batch_size = 256
sequence_length = 24 * 7
generator = batch_generator(batch_size=batch_size,
sequence_length=sequence_length)
x_batch, y_batch = next(generator)
print(x_batch.shape)
print(y_batch.shape)
batch = 0 # First sequence in the batch.
signal = 0 # First signal from the 140 input-signals.
seq = x_batch[batch, :, signal]
plt.plot(seq)
seq = y_batch[batch, :, signal]
plt.plot(seq)
validation_data = (np.expand_dims(x_val_scaled, axis=0),
np.expand_dims(y_val_scaled, axis=0))
###Output
_____no_output_____
###Markdown
Build the model
###Code
# Define loss function
# warmup steps is the timeframe which won't be penalized/considered,
# allowing the model to have sufficient information to make its prediction
warmup_steps=24
def loss_mse_warmup(y_true, y_pred):
"""
Calculate the Mean Squared Error between y_true and y_pred,
but ignore the beginning "warmup" part of the sequences, where
the prediction will be poor.
y_true is the desired output.
y_pred is the model's output.
"""
# The shape of both input tensors are:
# [batch_size, sequence_length, num_y_signals].
# Ignore the "warmup" parts of the sequences
# by taking slices of the tensors.
y_true_slice = y_true[:, warmup_steps:, :]
y_pred_slice = y_pred[:, warmup_steps:, :]
# These sliced tensors both have this shape:
# [batch_size, sequence_length - warmup_steps, num_y_signals]
# Calculate the MSE loss for each value in these tensors.
# This outputs a 3-rank tensor of the same shape.
loss = tf.losses.mean_squared_error(labels=y_true_slice,
predictions=y_pred_slice)
# Keras may reduce this across the first axis (the batch)
# but the semantics are unclear, so to be sure we use
# the loss across the entire tensor, we reduce it to a
# single scalar with the mean function.
loss_mean = tf.reduce_mean(loss)
return loss_mean
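# Illustration: with warmup_steps = 24 and a one-week sequence (168 hourly steps),
# only the last 144 steps of each sequence contribute to the loss; the first day
# serves as free context for the recurrent state to warm up.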
def build_model_RCU(batch_size = 256, sequence_length = 24*7):
'''
Builds a shallow 1 layer GRU network.
Sequence length can be specified if other than 1 week
'''
# generate batches with specified batch size & sequence length
# default is at 1 week
generator = batch_generator(batch_size=batch_size,
sequence_length=sequence_length)
#Model architecture
model = Sequential()
    # Add a GRU layer with 256 units
model.add(GRU(units=256,return_sequences=True,input_shape=(None,num_x_signals)))
# Add a dense layer for output
model.add(Dense(num_y_signals, activation='sigmoid'))
optimizer = RMSprop(lr=1e-3)
model.compile(loss=loss_mse_warmup, optimizer=optimizer)
print(model.summary())
# Define callbacks
path_checkpoint = '23_checkpoint.keras'
callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
monitor='val_loss',
verbose=1,
save_weights_only=True,
save_best_only=True)
# stop the model if the cross validation has not improved after
# 5 successive epochs
callback_early_stopping = EarlyStopping(monitor='val_loss',
patience=5, verbose=1)
    # log training progress to TensorBoard
callback_tensorboard = TensorBoard(log_dir='./23_logs/',
histogram_freq=0,
write_graph=False)
    # if the validation loss does not improve from the previous epoch, then
    # immediately lower the learning rate by a factor of 10
callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
min_lr=1e-4,
patience=0,
verbose=1)
callbacks = [callback_early_stopping,
callback_checkpoint,
callback_tensorboard,
callback_reduce_lr]
return generator, model, callbacks
def fit_model(batch_size = 256, sequence_length = 24*7):
generator, model, callbacks = build_model_RCU(batch_size, sequence_length)
model.fit_generator(generator=generator,
epochs=20,
steps_per_epoch=100,
validation_data=validation_data,
callbacks=callbacks)
return model
model2 = fit_model(sequence_length = 24*14)
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
gru_3 (GRU) (None, None, 256) 304896
_________________________________________________________________
dense_3 (Dense) (None, None, 140) 35980
=================================================================
Total params: 340,876
Trainable params: 340,876
Non-trainable params: 0
_________________________________________________________________
None
CPU times: user 0 ns, sys: 0 ns, total: 0 ns
Wall time: 7.39 µs
Epoch 1/20
99/100 [============================>.] - ETA: 0s - loss: 0.0205
Epoch 00001: val_loss improved from inf to 0.02359, saving model to 23_checkpoint.keras
100/100 [==============================] - 71s 706ms/step - loss: 0.0205 - val_loss: 0.0236
Epoch 2/20
99/100 [============================>.] - ETA: 0s - loss: 0.0100
Epoch 00002: val_loss improved from 0.02359 to 0.01669, saving model to 23_checkpoint.keras
100/100 [==============================] - 69s 693ms/step - loss: 0.0100 - val_loss: 0.0167
Epoch 3/20
99/100 [============================>.] - ETA: 0s - loss: 0.0087
Epoch 00003: val_loss improved from 0.01669 to 0.01603, saving model to 23_checkpoint.keras
100/100 [==============================] - 69s 691ms/step - loss: 0.0086 - val_loss: 0.0160
Epoch 4/20
99/100 [============================>.] - ETA: 0s - loss: 0.0079
Epoch 00004: val_loss did not improve
Epoch 00004: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.
100/100 [==============================] - 69s 690ms/step - loss: 0.0079 - val_loss: 0.0163
Epoch 5/20
99/100 [============================>.] - ETA: 0s - loss: 0.0069
Epoch 00005: val_loss improved from 0.01603 to 0.01455, saving model to 23_checkpoint.keras
100/100 [==============================] - 69s 695ms/step - loss: 0.0069 - val_loss: 0.0146
Epoch 6/20
99/100 [============================>.] - ETA: 0s - loss: 0.0067
Epoch 00006: val_loss improved from 0.01455 to 0.01430, saving model to 23_checkpoint.keras
100/100 [==============================] - 70s 696ms/step - loss: 0.0067 - val_loss: 0.0143
Epoch 7/20
99/100 [============================>.] - ETA: 0s - loss: 0.0065
Epoch 00007: val_loss did not improve
Epoch 00007: ReduceLROnPlateau reducing learning rate to 0.0001.
100/100 [==============================] - 69s 689ms/step - loss: 0.0065 - val_loss: 0.0144
Epoch 8/20
99/100 [============================>.] - ETA: 0s - loss: 0.0064
Epoch 00008: val_loss did not improve
100/100 [==============================] - 69s 688ms/step - loss: 0.0064 - val_loss: 0.0144
Epoch 9/20
99/100 [============================>.] - ETA: 0s - loss: 0.0063
Epoch 00009: val_loss did not improve
100/100 [==============================] - 69s 695ms/step - loss: 0.0063 - val_loss: 0.0144
Epoch 10/20
99/100 [============================>.] - ETA: 0s - loss: 0.0063
Epoch 00010: val_loss did not improve
100/100 [==============================] - 70s 696ms/step - loss: 0.0063 - val_loss: 0.0144
Epoch 11/20
99/100 [============================>.] - ETA: 0s - loss: 0.0062
Epoch 00011: val_loss did not improve
100/100 [==============================] - 69s 694ms/step - loss: 0.0062 - val_loss: 0.0145
Epoch 00011: early stopping
###Markdown
Train the model
###Code
%%time
# train the baseline model with the default one-week training sequences
model = fit_model(sequence_length = 24*7)
def predict_future(model_name, x_scaled):
'''
Returns the prediction given scaled x test
'''
x = np.expand_dims(x_scaled, axis=0)
y_pred = model_name.predict(x)
y_pred_rescaled = y_scaler.inverse_transform(y_pred[0])
return y_pred_rescaled
# Define metric for comparing models
def calc_rmse_total(prediction_length, y_true, y_pred):
"""
Calculate the Mean Squared Error between y_true and y_pred
across all the locations, then take the square root to get rmse.
Ignore the training week.
prediction length: the length of time that you want to predict
y_true[time, location] is the desired output.
y_pred[time, location] is the model's output.
"""
# Ignore the "the training week"
y_true_slice = y_true[-prediction_length:, :]
y_pred_slice = y_pred[-prediction_length:, :]
    # Calculate the MSE for each location (column) over the prediction window.
mse_list = []
for i in range(len(y_true_slice.T)):
mse_list.append(mean_squared_error(y_true_slice.T[i], y_pred_slice.T[i]))
rmse_total = sqrt(sum(mse_list))
return rmse_total
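# Note: with 140 pickup locations this is sqrt(sum of the per-location MSEs over the
# prediction window), i.e. one combined error across all locations rather than an
# average per location.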
# Save model
# model.save("model1.model")
try:
model.load_weights(path_checkpoint)
except Exception as error:
print("Error trying to load checkpoint.")
print(error)
result = model.evaluate(x=np.expand_dims(x_test_scaled, axis=0),
y=np.expand_dims(y_test_scaled, axis=0))
print("loss (test-set):", result)
# calculate rmse across all locations for the last week of June
y_pred_rescaled = predict_future(model,x_test_scaled)
y_pred_rescaled2 = predict_future(model2,x_test_scaled)
print("model (1week sequence) RMSE:", calc_rmse_total(24*7, y_test, y_pred_rescaled))
print("model2 (2weeks sequence) RMSE:", calc_rmse_total(24*7, y_test, y_pred_rescaled2))
print("RMSE improves when we have a longer sequence length for batch training (2weeks)")
###Output
model RMSE: 185.73497789219232
model2 RMSE: 171.56668638399555
RMSE improves when we have a longer sequence length for batch training (2weeks)
###Markdown
total RMSE for the last week of June came out to be 185.7 rides
###Code
plt.figure(figsize=(15,5))
plt.plot(y_pred_rescaled[:,1],label="pred");
plt.plot(y_test[:,1],label="actual");
plt.legend()
###Output
_____no_output_____
###Markdown
Let's make sure that the prediction is the same with varying sequence length as long as my starting index is kept the same
###Code
y_pred_rescaled1 = predict_future(model, x_test_scaled[:24*8])
y_pred_rescaled2 = predict_future(model, x_test_scaled[:24*9])
y_pred_rescaled3 = predict_future(model, x_test_scaled[:24*10])
print("The shaded area indicates training phase. White area is the prediction phase.")
print("The plots are to demonstrate that if I were to predict ")
f = plt.figure(figsize=(10,10));
ax1 = f.add_subplot(311)
ax2 = f.add_subplot(312)
ax3 = f.add_subplot(313)
ax1.plot(y_pred_rescaled1[:,1],label="pred");
#ax1.plot(x_test[:24*8,1],label="x");
ax2.plot(y_pred_rescaled2[:,1],label="pred2");
ax3.plot(y_pred_rescaled3[:,1],label="pred3");
ax3.plot(y_test[:,1],label="actual");
ax1.set_title("Predict 1 day")
ax2.set_title("Predict 2 days")
ax3.set_title("Predict 3 days")
ax1.axvspan(0,24*7,facecolor="black",alpha=0.15)
ax2.axvspan(0,24*7,facecolor="black",alpha=0.15)
ax3.axvspan(0,24*7,facecolor="black",alpha=0.15)
ax1.set_xlim([0, 250])
ax2.set_xlim([0, 250])
ax3.set_xlim([0, 250])
plt.legend();
import random
def plot_last_week(prediction_length=24*7, train=False):
"""
Plot the prediction for last week of June.
prediction_length = last week of june (24*7hrs)
train: if true, will look at last week of training data
"""
if train:
# Use training-data.
x = x_train_scaled
y_true = y_train
else:
# Use test-data.
x = x_test_scaled
y_true = y_test
    y_pred_rescaled = predict_future(model, x)
# plot 3 random locations
for i in range(3):
# Get the output-signal predicted by the model.
signal = random.randint(0,139)
signal_pred = y_pred_rescaled[-prediction_length:, signal]
# Get the true output-signal from the data-set.
signal_true = y_true[-prediction_length:, signal]
# Make the plotting-canvas bigger.
plt.figure(figsize=(13,3))
# Plot and compare the two signals.
plt.plot(signal_true, label='true')
plt.plot(signal_pred, label='pred')
# Plot labels etc.
plt.ylabel(target_names[signal])
plt.legend()
plt.show()
#plot_last_week(train=True)
plot_last_week()
###Output
_____no_output_____ |
scripts/webscrapping/surface_tension/surface_tension_des_webscrapping.ipynb | ###Markdown
URL for the data: https://ilthermo.boulder.nist.gov/ILT2/ilsearch?cmp=&ncmp=0&year=&auth=&keyw=deep%20eutectic%20solvent&prp=MHVN
###Code
# imports used by this scraping notebook
import json
import requests
import numpy as np
import pandas as pd
from time import sleep

paper_url = "https://ilthermo.boulder.nist.gov/ILT2/ilsearch?cmp=&ncmp=0&year=&auth=&keyw=Deep%20eutectic%20solvent&prp=MHVN"
r = requests.get(paper_url)
header = r.json()['header']
papers = r.json()['res']
i = 1
data_url = 'http://ilthermo.boulder.nist.gov/ILT2/ilset?set={paper_id}'
for paper in papers[:]:
r = requests.get(data_url.format(paper_id=paper[0]))
data = r.json()['data']
with open("/Users/jaime/Desktop/nist_data/surface_tension/%s.json" % i, "w") as outfile: #set destination path for files
json.dump(r.json(), outfile)
    # then do whatever you want with the data, e.g. write it to a file
    sleep(0.5) # important step to avoid getting banned by the server
i += 1
outer_old = pd.DataFrame()
outer_new = pd.DataFrame()
for i in range(10): #edit the number based on how many files were scraped
with open("/Users/jaime/Desktop/nist_data/surface_tension/%s.json" % str(i+1)) as json_file:
#grab data, data headers (names), the salt name
json_full = json.load(json_file)
json_data = pd.DataFrame(json_full['data'])
json_datanames = np.array(json_full['dhead']) # make names into array to add as columns headers for df
json_data.columns = json_datanames
json_saltname = pd.DataFrame(json_full['components'])#components section contains names of DES components
#print(json_saltname['name']) #grabbing the HBD and HBA
inner_old = pd.DataFrame()
inner_new = pd.DataFrame()
#loop through the columns of the data, note that some of the
#json files are missing pressure data.
for indexer in range(len(json_data.columns)):
grab=json_data.columns[indexer]
list = json_data[grab]
my_list = [l[0] for l in list]
dfmy_list = pd.DataFrame(my_list)
dfmy_list.columns = [json_datanames[indexer][0]]
inner_new = pd.concat([dfmy_list, inner_old], axis=1)
inner_old = inner_new
#print(inner_old.columns)
#add the MW for HBD and HBA
for i in range(len(json_saltname['name'])):
if 'chloride' in json_saltname['name'][i] or 'bromide' in json_saltname['name'][i]:
inner_old['HBA_MW']=json_saltname['mw'][i]
else:
inner_old['HBD_MW']=json_saltname['mw'][i]
#add the DES components, i.e. HBA and HBD
# they are not always listed in the same order on nist data, i.e., HBA always first. Will figure out later.
for i in range(len(json_saltname['name'])):
if 'chloride' in json_saltname['name'][i] or 'bromide' in json_saltname['name'][i]:
inner_old['HBA']=json_saltname['name'][i]
else:
inner_old['HBD']=json_saltname['name'][i]
#loop through the column names of the dataframe
for j in range(len(inner_old.columns)):
#if the words Mole fraction and a halogen are contained, values are correct and no value editing
#necessary and column is simply renamed to HBA mole fraction.
if 'Mole fraction' in inner_old.columns[j] and 'chloride' in inner_old.columns[j] or 'Mole fraction' in inner_old.columns[j] and 'bromide' in inner_old.columns[j]:
inner_old = inner_old.rename(columns={inner_old.columns[j]:'HBA Mole Fraction'})
#if the words Mole Ratio and a halogen are contained, dataset was mislabeled but values are correct.
#only need to rename column to HBA mole fraction.
elif 'Mole ratio' in inner_old.columns[j] and 'chloride' in inner_old.columns[j] or 'Mole ratio' in inner_old.columns[j] and 'bromide' in inner_old.columns[j]:
inner_old = inner_old.rename(columns={inner_old.columns[j]:'HBA Mole Fraction'})
#if the words mole ratio are present, but no halogens, the ratio of the HBD is displayed and needs
#to be changed to HBA mole fraction. First relabel the colum as HBA mole fraction.
elif 'Mole ratio' in inner_old.columns[j] and not 'chloride' in inner_old.columns[j] or 'Mole ratio' in inner_old.columns[j] and not 'bromide' in inner_old.columns[j]:
inner_old = inner_old.rename(columns={inner_old.columns[j]:'HBA Mole Fraction'})
#apparently the numbers are strings so change to integer. May need to do this for every other column
inner_old['HBA Mole Fraction'] = inner_old['HBA Mole Fraction'].astype(int)
#next make an empty list that will hold all the new HBA mole fractions
mole_fractions_list = []
#loop through every HBD ratio in the column
for k in range(len(inner_old['HBA Mole Fraction'])):
#Calculate the HBA mole fraction from every HBD ratio and append to the list
mole_fractions_list.append(1/(1+inner_old['HBA Mole Fraction'][k]))
#finally make the list the new mole fraction column in the dataframe
inner_old['HBA Mole Fraction'] = mole_fractions_list
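                # Worked example for this branch: a reported HBD ratio of 2 (1 HBA : 2 HBD)
                # gives an HBA mole fraction of 1/(1+2) = 1/3.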
#in the last case, if the word mole fraction is present but not a halogen, HBD mole fraction is displayed.
#Follow simialr process as before
elif 'Mole fraction' in inner_old.columns[j] and not 'chloride' in inner_old.columns[j] or 'Mole fraction' in inner_old.columns[j] and not 'bromide' in inner_old.columns[j]:
inner_old = inner_old.rename(columns={inner_old.columns[j]:'HBA Mole Fraction'})
#convert to float instead since it is a decimal
inner_old['HBA Mole Fraction'] = inner_old['HBA Mole Fraction'].astype(float)
#empty list
mole_fractions_list = []
#loop through column
for k in range(len(inner_old['HBA Mole Fraction'])):
#subtract 1 from HBD mole fraction to get HBA mole fraction and append to list
mole_fractions_list.append(1 - inner_old['HBA Mole Fraction'][k])
#replace column
inner_old['HBA Mole Fraction'] = mole_fractions_list
#add to the growing dataframe
outer_new = pd.concat([inner_old, outer_old], axis = 0, ignore_index = True)
outer_old = outer_new
outer_old.head(50)
outer_old.dropna(inplace = True)
pd.DataFrame.to_csv(outer_old, path_or_buf='surf_tension.csv', index=False)
pd.read_csv('surf_tension.csv')
###Output
_____no_output_____ |
Model Development/Cosine-sim dev Pt.1.ipynb | ###Markdown
Here, I will just quickly prove that the idea of vectorizing MNIST and then just classifying with cosine similarity does not work.
###Code
# imports
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn import datasets, model_selection
mnist = datasets.fetch_mldata('MNIST original')
data, target = mnist.data, mnist.target
###Output
_____no_output_____
###Markdown
Test how reshape works
###Code
resh = np.array([[4, 4, 1, 4],
[4, 12, 4, 1]])
resh.reshape(1, 8)
###Output
_____no_output_____
###Markdown
Testing cosine similarity, note how it takes one argument and compares the vectors within the matrix you pass in.
###Code
# cosine similarity function
v1 = np.array([3, 5, 1, 0, 9, 9])
v2 = np.array([45, 1, 3, 2, 9, 9])
V = []
V.append(v1)
V.append(v2)
V = np.array(V)
V
cosine_similarity(V)[0][1]
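# Note: cosine_similarity also accepts two arguments, so the stacking step is optional:
# cosine_similarity(v1.reshape(1, -1), v2.reshape(1, -1))[0][0] gives the same value.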
###Output
_____no_output_____
###Markdown
Messing with MNIST
###Code
data.shape
np.max(target)
np.min(target)
target.shape
target[18623]
target[0]
target[1]
target[50000]
# see number of unique labels
unique, counts = np.unique(target, return_counts=True)
dict(zip(unique, counts))
###Output
_____no_output_____
###Markdown
Now do cosine similarity analysis
###Code
target[55000]
# some rnadom indices
indices = [0, 5923, 12665, 18623, 24754, 30596, 36017, 41935, 48200, 54051, 10000, 18000, 22000, 25000, 35000, 38000, 43000, 49000, 55000, 10, 15, 16, 2, 3, 4, 5, 1000]
for i in indices:
print(target[i])
# store distance values
distance = []
for i in indices:
ls = []
ls.append(data[0])
ls.append(data[i])
ls = np.array(ls)
# cosine similarity
cosim = cosine_similarity(ls)[0][1]
distance.append(cosim)
distance
###Output
_____no_output_____
###Markdown
Holy shit. I never expected that to actually work lol.
###Code
data.shape
data.T.shape
###Output
_____no_output_____
###Markdown
Well that is pretty funny that it sorta works. I will just try to build a basic version that takes the top most similar values then does weighted voting or something simple like that to determine what the actual number is.Below is an algorithm to return the top n biggest values of an array.
###Code
import heapq
distance = np.array(distance)
heapq.nlargest(5, range(len(distance)), distance.take)[1]
vsdf = np.arange(0, 20000)
heapq.nlargest(5, range(len(vsdf)), vsdf.take)
###Output
_____no_output_____
###Markdown
So it is good that those algorithms are really fast
###Code
# take top n
def vote(arr, n):
    """arr: array whose values will be checked
    n: number of top values you want to return
    return the determined number"""
    # simple (unweighted) majority vote over the labels of the n most similar images
    top_idx = heapq.nlargest(n, range(len(arr)), arr.take)
    labels, counts = np.unique(target[top_idx], return_counts=True)
    return labels[np.argmax(counts)]
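# e.g. vote(distance, 10) returns the majority label among the 10 training images most
# cosine-similar to data[12665], which is labelled 2.0 in the cells above.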
###Output
_____no_output_____
###Markdown
Just some basic tests to see if it generally works
###Code
target[54051]
%%time
distance = []
for i in range(0, len(target)):
ls = []
ls.append(data[12665])
ls.append(data[i])
ls = np.array(ls)
# cosine similarity
cosim = cosine_similarity(ls)[0][1]
distance.append(cosim)
distance = np.array(distance)
distance.shape
top = heapq.nlargest(10, range(len(distance)), distance.take)
top[1]
for i in top:
print(target[i])
indices = [0, 5923, 12665, 18623, 24754, 30596, 36017, 41935, 48200, 54051, 10000, 18000, 22000, 25000, 35000, 38000, 43000, 49000, 55000]
for i in indices:
print(target[i])
###Output
0.0
1.0
2.0
3.0
4.0
5.0
6.0
7.0
8.0
9.0
1.0
2.0
3.0
4.0
5.0
6.0
7.0
8.0
9.0
|
A_08_Hate_Speech_Classification/NLP_A8.ipynb | ###Markdown
Assignment-8 Author: Tarang Ranpara (201011057)
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Dropout, Embedding, LSTM, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# downloading the dataset
!gdown https://drive.google.com/uc?id=18l6IwSqavnqtLQpVnrRqOugZf9XkhEAN
# unzipping
!unzip /content/jigsaw-toxic-comment-classification-challenge.zip
!unzip /content/train.csv.zip
# train data
df = pd.read_csv("/content/train.csv")
df.head()
labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
# adding 'none' label for a scenario where record is classified in neither of the given classes
df['none'] = 1 - df[labels].max(axis=1)
df.head()
df['none'] = 1 - df[labels].max(axis=1)
# filling null with <unknown> token
df['comment_text'].fillna('<unknown>',inplace=True)
# updated labels
labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate', 'none']
# maximum sequence length of input (i.e. every sequence will be padded or truncated to this length)
max_sequence_length = 128
# tokenizer - to split text sequence into tokens
tokenizer = Tokenizer()
tokenizer.fit_on_texts(df['comment_text'])
sequences = tokenizer.texts_to_sequences(df['comment_text'])
# to pad the sequences upto max length
pad_sequences = pad_sequences(sequences, maxlen=max_sequence_length)
def build_model(vocab_size, max_sequence_length, n_class):
input = Input(shape=(max_sequence_length,))
# step-1: calculating embedding of input sequenes
# transforms: (batch_size x max_seq_len) -> (batch size x max_seq_len x embedding_dim)
# step-2: applying bidirectional lstm
# transforms: (batch_size x 2*16)
    embedded = Embedding(input_dim=vocab_size, output_dim=64, input_length=max_sequence_length, embeddings_initializer='random_normal')(input)
    x = Bidirectional(LSTM(16))(embedded)
x = Dropout(0.5)(x)
# step-3: dense layer: 32 -> 16
x = Dense(16, activation='relu')(x)
# step-4: dense layer: 16 -> 7
output = Dense(n_class, activation='sigmoid')(x)
model = tf.keras.Model(inputs=input, outputs=output)
# loss: binary cross entropy
# optimizer: Adam
model.compile (optimizer='adam',
loss="binary_crossentropy",
metrics=['accuracy', 'Precision', 'Recall'])
return model
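# Note on the design: each of the 7 labels gets its own sigmoid unit and the loss is
# binary cross-entropy, so the task is treated as multi-label classification; a single
# comment can activate several labels at once, e.g. toxic + obscene + insult.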
vocab_size = len(tokenizer.word_index) + 1
n_class = len(labels)
model = build_model(vocab_size, max_sequence_length, n_class)
model.summary()
batch_size = 1024
epochs = 20
# fitting the model
history = model.fit(pad_sequences,
y=df[labels],
batch_size=batch_size,
epochs=epochs,
validation_split=0.1,
shuffle = True
)
# plotting training and validation accuracy
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Metric Value')
plt.legend()
plt.show()
# plotting training and validation precision
plt.plot(history.history['precision'], label='Training Precision')
plt.plot(history.history['val_precision'], label='Validation Precision')
plt.title('Training and Validation Precision')
plt.xlabel('Epoch')
plt.ylabel('Metric Value')
plt.legend()
plt.show()
# plotting training and validation recall
plt.plot(history.history['recall'], label='Training Recall')
plt.plot(history.history['val_recall'], label='Validation Recall')
plt.title('Training and Validation Recall')
plt.xlabel('Epoch')
plt.ylabel('Metric Value')
plt.legend()
plt.show()
# defining f1 score
def f1(prec, recall):
return (2 * prec * recall) / (prec + recall)
# final scores
pd.DataFrame(dict({
'precision': [history.history["precision"][-1], history.history["val_precision"][-1]],
'recall': [history.history["recall"][-1], history.history["val_recall"][-1]],
'f1': [f1(history.history["precision"][-1],history.history["recall"][-1]), f1(history.history["val_precision"][-1],history.history["val_recall"][-1])],
}), index = ['train', 'validation'])
###Output
_____no_output_____ |
apps/jupyter-notebooks/pune-flood-sensors.ipynb | ###Markdown
Pune Flood Sensors Import necessary packages
###Code
from iudx.entity.Entity import Entity
import pandas as pd
import numpy as np
import json
from datetime import date, datetime, timedelta
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import folium
from folium import plugins
from scipy.interpolate import griddata
import geojsoncontour
import ipywidgets as widgets
from ipywidgets import Layout
import warnings
###Output
_____no_output_____
###Markdown
Defining variables and widgets
###Code
# ids of each resource group
group_id="datakaveri.org/04a15c9960ffda227e9546f3f46e629e1fe4132b/rs.iudx.org.in/pune-env-flood"
# widgets for interaction
prompt1=widgets.HTML(value="")
prompt2=widgets.HTML(value="")
gif_address = 'https://www.uttf.com.ua/assets/images/loader2.gif'
select_ndays=widgets.IntSlider(
value=1,
min=1,
max=30,
step=1,
description='Days: ',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
select_col=widgets.Dropdown(
options=['currentLevel','measuredDistance','referenceLevel'],
value='currentLevel',
description='Property:',
disabled=False,
)
mywidgets=[select_ndays,select_col]
ui=widgets.VBox([select_ndays,prompt1,select_col,prompt2])
###Output
_____no_output_____
###Markdown
Functions to fetch, prepare and visualize data *Fetch data*
###Code
# fetch latest data in the past n days for a city and add/modify required columns
def get_data(ndays):
for widget in mywidgets:
widget.disabled=True
prompt1.value=f'<img src="{gif_address}" height=150 width=150> Fetching data'
global entity,measures,latest_measures,start_time,end_time,city
city='Pune'
entity=Entity(entity_id=group_id)
latest_measures=entity.latest().reset_index(drop=True)
end_time = latest_measures['observationDateTime'].sort_values(ascending=False).reset_index(drop=True)[0]
start_time = (end_time - timedelta(days=ndays,hours=6))
measures = entity.during_search(
start_time=start_time.strftime("%Y-%m-%dT%H:%M:%SZ"),
end_time=end_time.strftime("%Y-%m-%dT%H:%M:%SZ"),
)
measures['observationDateTime']=measures['observationDateTime'].apply(lambda x:x.tz_localize(None))
latest_measures['observationDateTime']=latest_measures['observationDateTime'].apply(lambda x:x.tz_localize(None))
rs_coordinates={}
rs_label={}
for res in entity.resources:
rs_coordinates[res['id']]=res['location']['geometry']['coordinates']
rs_label[res['id']]=res['name']
latest_measures['x_co']=latest_measures['id'].apply(lambda id:rs_coordinates[id][0])
latest_measures['y_co']=latest_measures['id'].apply(lambda id:rs_coordinates[id][1])
measures['x_co']=measures['id'].apply(lambda id:rs_coordinates[id][0])
measures['y_co']=measures['id'].apply(lambda id:rs_coordinates[id][1])
measures['label']=measures['id'].apply(lambda id:rs_label[id])
latest_measures['label']=measures['id'].apply(lambda id:rs_label[id])
for widget in mywidgets:
widget.disabled=False
prompt1.value=f'Fetched {measures.shape[0]} records from {len(entity.resources)} resources'
###Output
_____no_output_____
###Markdown
*Temporal Visualization*
###Code
# plot the measures of a proprty over ndays for the resource with the latest recording
def timeSeriesVis1(column_name, ndays):
global units
prop_desc=entity._data_descriptor[column_name]
units=prop_desc["unitText"]
prompt2.value=f'{prop_desc["description"]}<br> Unit: {units}'
sensor_id = measures.sort_values(by='observationDateTime',ascending=False).reset_index(drop=True)['id'][0]
single_resource_data = measures.query(f"id == '{sensor_id}'")
sensor_coordinates=[]
for res in entity.resources:
if res['id']==sensor_id:
sensor_coordinates=res['location']['geometry']['coordinates']
fig = px.line(
single_resource_data,
x="observationDateTime",
y=column_name
)
display(widgets.HTML(f'<center style="font-size:14px">Temporal sensor reading for \n {column_name.upper()} from {start_time.date()} to {end_time.date()} for resource at {sensor_coordinates}<center>'))
fig.update_layout(
xaxis_title="Observed Timestamp",
yaxis_title="Sensor reading for "+column_name.upper()+" ("+units+")",
font=dict(
size=12
)
)
fig.update_xaxes(rangeslider_visible=True)
fig.show()
# plot the measures of a proprty over ndays for all resources
def timeSeriesVis2(col, ndays):
column_name=col
fig = px.line(
measures,
x="observationDateTime",
y=column_name,
color='label'
)
display(widgets.HTML(f'<center style="font-size:14px">Temporal sensor reading for {col.upper()} from {start_time.date()} to {end_time.date()} of all sensors<center>'))
fig.update_layout(
xaxis_title="Observed Timestamp",
yaxis_title="Sensor reading for "+col.upper()+" ("+units+")",
font=dict(
size=12
)
)
fig.update_xaxes(rangeslider_visible=True)
fig.show()
def timeSeriesVis3(ndays):
sensor_id = measures.sort_values(by='observationDateTime',ascending=False).reset_index(drop=True)['id'][0]
single_resource_data = measures.query(f"id == '{sensor_id}'")
sensor_coordinates=[]
for res in entity.resources:
if res['id']==sensor_id:
sensor_coordinates=res['location']['geometry']['coordinates']
fig=go.Figure()
fig.add_trace(go.Scatter(x=single_resource_data['observationDateTime'],
y=single_resource_data['measuredDistance'],
name='Measured Distance',
line=dict(color='firebrick')))
fig.add_trace(go.Scatter(x=single_resource_data['observationDateTime'],
y=single_resource_data['referenceLevel'],
name='Reference Level',
line=dict(color='royalblue',dash='dot')))
fig.update_layout(title='Measured distance and Reference level over time',
xaxis_title='Timestamp',
yaxis_title='Distance (meters)')
fig.update_xaxes(rangeslider_visible=True)
fig.show()
###Output
_____no_output_____
###Markdown
*Basic Visualization*
###Code
# plot a bar chart for the latest measures of a property at all active resources
def simpleVis1(col):
column_name=col
display(widgets.HTML(f'<center style="font-size:14px">Latest temporal sensor reading for {col.upper()} of all sensors<center>'))
fig = px.bar(latest_measures, x='label', y=column_name)
fig.update_layout(
xaxis_title="Sensor Id",
yaxis_title="Sensor reading for "+col.upper()+" ("+units+")",
font=dict(
size=12
)
)
fig.show()
def simpleVis2(ndays):
fig=go.Figure()
fig.add_trace(go.Scatter(x=latest_measures['referenceLevel'],
y=latest_measures['label'],
marker=dict(color='royalblue'),
mode='markers',
name='Reference Level'))
fig.add_trace(go.Scatter(x=latest_measures['measuredDistance'],
y=latest_measures['label'],
marker=dict(color='firebrick'),
mode='markers',
name='Measured Distance'))
fig.update_layout(title='Measured distance and Reference level at different locations',
yaxis_title='Device Name',
xaxis_title='Distance (meters)')
fig.show()
###Output
_____no_output_____
###Markdown
*Spatial Visualization*
###Code
def spatialVis1(column_name):
maxval=max(list(filter(None,latest_measures[column_name])))
minval=min(list(filter(None,latest_measures[column_name])))
geomap2 = folium.Map([latest_measures['y_co'].mean(), latest_measures['x_co'].mean()], zoom_start=12, tiles="cartodbpositron")
for res in entity.resources:
entity_id = res["id"]
try:
            val=latest_measures[latest_measures['id']==entity_id][column_name].values[0]
if val is not None and val>0:
folium.Circle(
[res["location"]["geometry"]["coordinates"][1], res["location"]["geometry"]["coordinates"][0]],
radius=2000*(val-minval)/(maxval-minval),
popup = f'{column_name}: {str(val)}',
color='b',
fill_color=('red' if ((val-minval)/(maxval-minval))>0.6 else 'blue'),
fill=True,
fill_opacity=0.4
).add_to(geomap2)
except:
pass
display(geomap2)
###Output
_____no_output_____
###Markdown
Interactive Outputs
###Code
ui
widgets.interactive_output(get_data,{'ndays':select_ndays})
widgets.interactive_output(spatialVis1,{'column_name':select_col})
widgets.interactive_output(timeSeriesVis1,{'column_name':select_col, 'ndays':select_ndays})
widgets.interactive_output(timeSeriesVis2,{'col':select_col, 'ndays':select_ndays})
widgets.interactive_output(simpleVis1,{'col':select_col})
widgets.interactive_output(timeSeriesVis3,{'ndays':select_ndays})
widgets.interactive_output(simpleVis2,{'ndays':select_ndays})
###Output
_____no_output_____ |
slides/plot_lle_digits.ipynb | ###Markdown
%matplotlib inline Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...An illustration of various embeddings on the digits dataset.The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is nottechnically a manifold embedding method, as it learn a high-dimensionalrepresentation on which we apply a dimensionality reduction method.However, it is often useful to cast a dataset into a representation inwhich the classes are linearly-separable.t-SNE will be initialized with the embedding that is generated by PCA inthis example, which is not the default setting. It ensures global stabilityof the embedding, i.e., the embedding does not depend on randominitialization.Linear Discriminant Analysis, from the :mod:`sklearn.discriminant_analysis`module, and Neighborhood Components Analysis, from the :mod:`sklearn.neighbors`module, are supervised dimensionality reduction method, i.e. they make use ofthe provided labels, contrary to other methods.
###Code
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection, neighbors)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
digits.images.shape
digits.images[0]
n_samples
n_features
X.shape
X[:5]
y[:18]
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
X[0]
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2
).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
# Isomap projection of the digits dataset
print("Computing Isomap projection")
t0 = time()
X_iso = manifold.Isomap(n_neighbors=n_neighbors, n_components=2
).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
X_reduced.shape
X_reduced[:5]
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
X_se.shape
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
# NCA projection of the digits dataset
print("Computing NCA projection")
nca = neighbors.NeighborhoodComponentsAnalysis(init='random',
n_components=2, random_state=0)
t0 = time()
X_nca = nca.fit_transform(X, y)
plot_embedding(X_nca,
"NCA embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
from sklearn.datasets import make_swiss_roll
X, t = make_swiss_roll(n_samples=1000, noise=0.2, random_state=42)
X.shape
# Scale and visualize the embedding vectors
def plot_embedding(X, y, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
# shown_images = np.array([[1., 1.]]) # just something big
# for i in range(X.shape[0]):
# dist = np.sum((X[i] - shown_images) ** 2, 1)
# if np.min(dist) < 4e-3:
# # don't show points that are too close
# continue
# shown_images = np.r_[shown_images, [X[i]]]
# imagebox = offsetbox.AnnotationBbox(
# offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
# X[i])
# ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne, t,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,t,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
###Output
Computing PCA projection
|
Notebooks/char_rnn_classification_tutorial.ipynb | ###Markdown
Classifying Names with a Character-Level RNN Adapted from the [pytorch tutorial](https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html) **Author**: Sean Robertson We will be building and training a basic character-level RNN to classify words. A character-level RNN reads words as a series of characters - outputting a prediction and "hidden state" at each step, feeding its previous hidden state into each next step. We take the final prediction to be the output, i.e. which class the word belongs to. Specifically, we'll train on a few thousand surnames from 18 languages of origin, and predict which language a name is from based on the spelling. **Recommended Reading:** To know more about RNNs and how they work: - [The Unreasonable Effectiveness of Recurrent Neural Networks](http://karpathy.github.io/2015/05/21/rnn-effectiveness) shows a bunch of real life examples - [Understanding LSTM Networks](http://colah.github.io/posts/2015-08-Understanding-LSTMs) is about LSTMs specifically but also informative about RNNs in general Preparing the Data Download the data from [here](https://download.pytorch.org/tutorial/data.zip) and extract it to the data directory. Included in the ``data/names`` directory are 18 text files named as "[Language].txt". Each file contains a bunch of names, one name per line, mostly romanized (but we still need to convert from Unicode to ASCII). We'll end up with a dictionary of lists of names per language, ``{language: [names ...]}``. The generic variables "category" and "line" (for language and name in our case) are used for later extensibility.
###Code
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
#this line needs to be modified!
all_files = '/home/lelarge/data/names/*.txt'
def findFiles(path): return glob.glob(path)
print(findFiles(all_files))
import unicodedata
import string
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
print(unicodeToAscii('Ślusàrski'))
# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
for filename in findFiles(all_files):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
###Output
_____no_output_____
###Markdown
Now we have ``category_lines``, a dictionary mapping each category (language) to a list of lines (names). We also kept track of ``all_categories`` (just a list of languages) and ``n_categories`` for later reference.
###Code
print(category_lines['Italian'][:5])
###Output
_____no_output_____
###Markdown
Turning Names into Tensors -------------------------- Now that we have all the names organized, we need to turn them into Tensors to make any use of them. To represent a single letter, we use a "one-hot vector" of size ``<1 x n_letters>``. A one-hot vector is filled with 0s except for a 1 at the index of the current letter, e.g. ``"b" = <0 1 0 0 0 ...>``. To make a word we join a bunch of those into a 2D matrix ``<line_length x 1 x n_letters>``. That extra 1 dimension is because PyTorch assumes everything is in batches - we're just using a batch size of 1 here.
###Code
import torch
# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
return all_letters.find(letter)
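# e.g. letterToIndex('a') == 0 and letterToIndex('A') == 26, since all_letters starts
# with the lowercase ASCII letters followed by the uppercase ones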
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
tensor = torch.zeros(1, n_letters)
tensor[0][letterToIndex(letter)] = 1
return tensor
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
tensor = torch.zeros(len(line), 1, n_letters)
for li, letter in enumerate(line):
tensor[li][0][letterToIndex(letter)] = 1
return tensor
print(letterToTensor('J'))
print(lineToTensor('Jones').size())
###Output
_____no_output_____
###Markdown
Creating the Network

Before autograd, creating a recurrent neural network in Torch involved cloning the parameters of a layer over several timesteps. The layers held hidden state and gradients which are now entirely handled by the graph itself. This means you can implement an RNN in a very "pure" way, as regular feed-forward layers. This RNN module (mostly copied from the PyTorch for Torch users tutorial) is just 2 linear layers which operate on an input and hidden state, with a LogSoftmax layer after the output.
###Code
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
combined = torch.cat((input, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
###Output
_____no_output_____
###Markdown
To run a step of this network we need to pass an input (in our case, the Tensor for the current letter) and a previous hidden state (which we initialize as zeros at first). We'll get back the output (probability of each language) and a next hidden state (which we keep for the next step).
###Code
input = letterToTensor('A')
hidden = torch.zeros(1, n_hidden)
output, next_hidden = rnn(input, hidden)
print(output)
print(next_hidden)
###Output
_____no_output_____
###Markdown
For the sake of efficiency we don't want to be creating a new Tensor for every step, so we will use ``lineToTensor`` instead of ``letterToTensor`` and use slices. This could be further optimized by pre-computing batches of Tensors.
###Code
input = lineToTensor('Albert')
hidden = torch.zeros(1, n_hidden)
output, next_hidden = rnn(input[0], hidden)
print(output)
###Output
_____no_output_____
###Markdown
As you can see the output is a ``<1 x n_categories>`` Tensor, where every item is the likelihood of that category (higher is more likely).

Training

Preparing for Training

Before going into training we should make a few helper functions. The first is to interpret the output of the network, which we know to be a likelihood of each category. We can use ``Tensor.topk`` to get the index of the greatest value:
###Code
def categoryFromOutput(output):
top_n, top_i = output.topk(1)
category_i = top_i[0].item()
return all_categories[category_i], category_i
print(categoryFromOutput(output))
###Output
_____no_output_____
###Markdown
We will also want a quick way to get a training example (a name and its language):
###Code
import random
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
def randomTrainingExample():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)
line_tensor = lineToTensor(line)
return category, line, category_tensor, line_tensor
for i in range(10):
category, line, category_tensor, line_tensor = randomTrainingExample()
print('category =', category, '/ line =', line)
###Output
_____no_output_____
###Markdown
Training the Network

Now all it takes to train this network is to show it a bunch of examples, have it make guesses, and tell it if it's wrong. For the loss function ``nn.NLLLoss`` is appropriate, since the last layer of the RNN is ``nn.LogSoftmax``.
###Code
criterion = nn.NLLLoss()
###Output
_____no_output_____
###Markdown
Each loop of training will:
- Create input and target tensors
- Create a zeroed initial hidden state
- Read each letter in and
  - Keep hidden state for next letter
- Compare final output to target
- Back-propagate
- Return the output and loss
###Code
learning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn
def train(category_tensor, line_tensor):
hidden = rnn.initHidden()
rnn.zero_grad()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
loss = criterion(output, category_tensor)
loss.backward()
# Add parameters' gradients to their values, multiplied by learning rate
for p in rnn.parameters():
p.data.add_(-learning_rate, p.grad.data)
return output, loss.item()
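# Note: the explicit parameter loop above is plain (non-momentum) SGD. A hedged
# alternative sketch would be to build `optimizer = torch.optim.SGD(rnn.parameters(), lr=learning_rate)`
# once and call `optimizer.step()` after `loss.backward()` instead of updating p.data by hand.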
###Output
_____no_output_____
###Markdown
Now we just have to run that with a bunch of examples. Since the ``train`` function returns both the output and loss we can print its guesses and also keep track of loss for plotting. Since there are 1000s of examples we print only every ``print_every`` examples, and take an average of the loss.
###Code
import time
import math
n_iters = 100000
print_every = 5000
plot_every = 1000
# Keep track of losses for plotting
current_loss = 0
all_losses = []
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
start = time.time()
for iter in range(1, n_iters + 1):
category, line, category_tensor, line_tensor = randomTrainingExample()
output, loss = train(category_tensor, line_tensor)
current_loss += loss
# Print iter number, loss, name and guess
if iter % print_every == 0:
guess, guess_i = categoryFromOutput(output)
        correct = '✓' if guess == category else '✗ (%s)' % category
print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))
# Add current loss avg to list of losses
if iter % plot_every == 0:
all_losses.append(current_loss / plot_every)
current_loss = 0
###Output
_____no_output_____
###Markdown
Plotting the Results

Plotting the historical loss from ``all_losses`` shows the network learning:
###Code
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.figure()
plt.plot(all_losses)
###Output
_____no_output_____
###Markdown
Evaluating the Results

To see how well the network performs on different categories, we will create a confusion matrix, indicating for every actual language (rows) which language the network guesses (columns). To calculate the confusion matrix a bunch of samples are run through the network with ``evaluate()``, which is the same as ``train()`` minus the backprop.
###Code
# Keep track of correct guesses in a confusion matrix
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000
# Just return an output given a line
def evaluate(line_tensor):
hidden = rnn.initHidden()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
return output
# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
category, line, category_tensor, line_tensor = randomTrainingExample()
output = evaluate(line_tensor)
guess, guess_i = categoryFromOutput(output)
category_i = all_categories.index(category)
confusion[category_i][guess_i] += 1
# Normalize by dividing every row by its sum
for i in range(n_categories):
confusion[i] = confusion[i] / confusion[i].sum()
# Set up plot
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)
# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# sphinx_gallery_thumbnail_number = 2
plt.show()
###Output
_____no_output_____
###Markdown
You can pick out bright spots off the main axis that show which languages it guesses incorrectly, e.g. Chinese for Korean, and Spanish for Italian. It seems to do very well with Greek, and very poorly with English (perhaps because of overlap with other languages).

Running on User Input
###Code
def predict(input_line, n_predictions=3):
print('\n> %s' % input_line)
with torch.no_grad():
output = evaluate(lineToTensor(input_line))
# Get top N categories
topv, topi = output.topk(n_predictions, 1, True)
predictions = []
for i in range(n_predictions):
value = topv[0][i].item()
category_index = topi[0][i].item()
print('(%.2f) %s' % (value, all_categories[category_index]))
predictions.append([value, all_categories[category_index]])
predict('Dovesky')
predict('Jackson')
predict('Satoshi')
###Output
_____no_output_____ |
Notebooks/Time_use_shares_with_vaes_TestActivations.ipynb | ###Markdown
###Code
import tensorflow.keras as keras
keras.__version__
from tensorflow.keras import backend as K # Use tensorflow.keras
K.clear_session()
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow import set_random_seed
from numpy.random import seed
import numpy as np
epochs = 100
batch_size = 32 # Batch size is 32 instead of 16.
callback_list = [
keras.callbacks.ReduceLROnPlateau(
monitor = 'val_loss',
factor = 0.5,
patience = 10,
verbose =1 #true
)
]
###Output
_____no_output_____
###Markdown
Use PReLU instead of ReLU.
###Code
def make_vae(
img_shape = (389+1, ),
latent_dim = 1,
dense_width = 600,
l2_penalty=0.00001,
l1_penalty=0.0,
encoder_dropout_rate=0.5,
decoder_dropout_rate=0.0,
entanglement_penalty = 1,
hidden_n = 1):
input_img = keras.Input(shape=img_shape)
    # The last input indicates to the network whether this is validation
is_validation = input_img[:,-1]
input_data = input_img[:,:-1]
# Test the PReLU
# x = layers.Dense(dense_width, activation='relu',
# kernel_regularizer=regularizers.l1_l2(
# l1=l1_penalty,l2=l2_penalty))(input_data)
x = layers.Dense(dense_width, activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)), \
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(input_data)
x = layers.Dropout(encoder_dropout_rate)(x)
for i in range(hidden_n):
x = layers.Dense(dense_width, activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)),
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(x)
x = layers.Dropout(encoder_dropout_rate)(x)
z_mean = layers.Dense(latent_dim)(x)
z_log_var = layers.Dense(latent_dim)(x)
# Reduce sampling variance to near zero on validation (idea credit: Shahaf Grofit)
is_validation_change = is_validation*100
z_log_var = keras.layers.Subtract()([z_log_var, is_validation_change])
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
mean=0., stddev=1.)
return z_mean + K.exp(z_log_var) * epsilon
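    # Reparameterization trick: z = z_mean + exp(z_log_var) * eps with eps ~ N(0, 1),
    # so the sampling step stays differentiable w.r.t. the encoder outputs
    # (note that exp(z_log_var) is used directly as the scale here).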
class CustomVariationalLayer(keras.layers.Layer):
def vae_loss(self, x, z_decoded):
is_validation = x[:,-1]
input_data = x[:,:-1]
x = K.flatten(input_data)
z_decoded = K.flatten(z_decoded)
xent_loss = keras.metrics.binary_crossentropy(x, z_decoded)
kl_loss = -5e-4 * K.mean(
1 + z_log_var - K.square(z_mean)
- entanglement_penalty*K.exp(z_log_var), axis=-1)
# Penalize for variance, but only in training
return K.mean(xent_loss + (1-is_validation)*kl_loss)
def call(self, inputs):
x = inputs[0]
z_decoded = inputs[1]
loss = self.vae_loss(x, z_decoded)
self.add_loss(loss, inputs=inputs)
# We don't use this output.
return x
z = layers.Lambda(sampling)([z_mean, z_log_var])
encoder = Model(input_img,z_mean) # Maybe better if Model(input_data,z_mean)
# This is the input where we will feed `z`.
decoder_input = layers.Input(K.int_shape(z)[1:])
print(decoder_input.shape)
x = layers.Dense(dense_width,
activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)),
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(decoder_input)
x = layers.Dropout(decoder_dropout_rate)(x)
for i in range(hidden_n):
x = layers.Dense(dense_width,
activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)),
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(x)
x = layers.Dropout(decoder_dropout_rate)(x)
x = layers.Dense(img_shape[0]-1,
activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)),
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(x)
# This is our decoder model.
decoder = Model(decoder_input, x)
# We then apply it to `z` to recover the decoded `z`.
z_decoded = decoder(z)
# We call our custom layer on the input and the decoded output,
# to obtain the score. Note that the objective is computed by
# this special final layer.
y = CustomVariationalLayer()([input_img, z_decoded])
vae = Model(input_img, y)
vae.compile(optimizer='adam', loss=None)
return (vae, encoder, decoder)
import pandas as pd
df=pd.read_csv("https://github.com/yaniv256/VAEs-in-Economics/blob/master/Data/Timeuse/time_shares_only_2013.csv?raw=true")
df
from sklearn.preprocessing import QuantileTransformer
qt_trans = QuantileTransformer(n_quantiles=1000, random_state=0)
qt = pd.DataFrame(qt_trans.fit_transform(df))
qt.columns = df.columns
qt
from sklearn.model_selection import train_test_split
x_train, x_test = train_test_split(qt, test_size=0.33, random_state=42)
train_examples = x_train.shape[0]
flag_0 = np.zeros((train_examples,1),dtype=x_train.values.dtype)
x_train = np.concatenate((x_train.values,flag_0),axis=-1)
test_examples = x_test.shape[0]
flag_1 = np.ones((test_examples,1),dtype=x_test.values.dtype)
x_test = np.concatenate((x_test.values,flag_1),axis=-1)
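# The appended last column is the validation flag read by make_vae via input_img[:, -1]
# (0 marks training rows, 1 marks validation rows).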
seed(100)
set_random_seed(100)
(vae, encoder, decoder) = make_vae(encoder_dropout_rate=0.2) # encoder_dropout_rate 0.2
vae.summary()
fitted = vae.fit(
x=x_train,
y=None,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, None),
callbacks = callback_list
)
epochs_grid = range(1, epochs+1)
val_loss1 = fitted.history['val_loss']
#val_loss2 = fitted2.history['val_loss']
import matplotlib.pyplot as plt
# b+ is for "blue cross"
plt.plot(epochs_grid, val_loss1, 'b+', label='Original model')
# "bo" is for "blue dot"
#plt.plot(epochs_grid, val_loss2, 'bo', label='Alternative model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
import seaborn as sns
def plot_types(decoder, data, n_type = 40, each_hight = 20, approx_width=400,
n_activity = 30, lowest_percentile= 0.1,
highest_percentile = 99.9, figsize=(10, 10),
cmap='viridis', n_xlabels=9, spacing = -0.02, standard_scaler=True):
# definitions for the axes
left, width = 0.05, 0.40
bottom, height = 0.025, 0.65
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.3]
rect_colorbar = [left+width+0.1, bottom + height + spacing +0.05, width, 0.03]
# start with a rectangular Figure
plt.figure(figsize=figsize)
ax_scatter = plt.axes(rect_scatter)
ax_scatter.tick_params(direction='in', top=True, right=True)
ax_histx = plt.axes(rect_histx)
ax_histx.tick_params(direction='in', labelbottom=False)
ax_colorbar = plt.axes(rect_colorbar)
ax_colorbar.tick_params(direction='in', labelbottom=False, labelleft=False)
each_width = np.int(np.ceil(approx_width/n_type))
figure = np.zeros((each_hight*n_activity,n_type*each_width))
# Linearly spaced coordinates on the unit square were transformed
# through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z,
# since the prior of the latent space is Gaussian
# We need to add a column of ones to indicate validation
test_examples = data.shape[0]
flag_1 = np.ones((test_examples,1),dtype=data.values.dtype)
data = np.concatenate((data.values,flag_1),axis=-1)
encoded_data=encoder.predict(data)
lowest=np.percentile(encoded_data, lowest_percentile)
highest=np.percentile(encoded_data, highest_percentile)
#print(lowest,highest)
grid_x = np.linspace(lowest, highest, n_type)
for i, xi in enumerate(grid_x):
z_sample = np.array([[xi]])
x_decoded = decoder.predict(z_sample)
figure[0:n_activity*each_hight,i*each_width : (i + 1)*each_width] = \
np.repeat(x_decoded[0,0:n_activity],each_hight).reshape(n_activity*each_hight,1)
if standard_scaler:
figure=np.transpose(figure)
scaler = StandardScaler()
figure=scaler.fit_transform(figure)
figure=np.transpose(figure)
im = ax_scatter.imshow(figure, cmap=cmap)
plt.colorbar(im, ax= ax_colorbar, orientation='horizontal', fraction=1)
prec = pd.DataFrame(np.percentile(df,[50, 75, 95, 99],axis=0))
ax_scatter.text(1.02*n_type*each_width,
0.8*each_hight -each_hight, '50% 75% 95% 99%', fontsize=14)
for i in range(n_activity):
ax_scatter.text(1.02*n_type*each_width,
0.8*each_hight+i*each_hight,
'{:5.1f} {:5.1f} {:5.1f} {:5.1f} '.format(prec.iloc[0,i]/60,
prec.iloc[1,i]/60,
prec.iloc[2,i]/60,
prec.iloc[3,i]/60)
+ df.columns[i].replace("_", " ")
, fontsize=14)
bins=np.append(grid_x-(grid_x[1]-grid_x[0])/2,
grid_x[n_type-1]+(grid_x[1]-grid_x[0])/2)
ax_scatter.set_xticks( np.linspace(0,n_type*each_width,n_xlabels))
ax_scatter.set_xticklabels(np.round(np.linspace(bins[0], bins[n_type], n_xlabels),
decimals=2))
ax_scatter.set_yticks([])
sns.set()
sns.set_style("darkgrid")
ax_histx.set_xticks( np.linspace(bins[0], bins[n_type], n_xlabels))
ax_histx.set_xticklabels(np.round(np.linspace(bins[0], bins[n_type], n_xlabels),
decimals=2))
sns.distplot(encoded_data,ax=ax_histx,bins=bins,kde=False,
rug=False).set_xlim(bins[0],bins[n_type])
plt.savefig('type_plot.png')
plt.show()
plot_types(decoder,qt, standard_scaler = False);
flag_1 = np.ones((qt.shape[0],1),dtype=qt.values.dtype)
data = np.concatenate((qt.values,flag_1),axis=-1)
encoded_data=encoder.predict(data)
pd.DataFrame(encoded_data)
filtered=pd.DataFrame((decoder.predict(encoded_data)))
filtered.columns = df.columns
filtered
filtered-qt
import time
from google.colab import files
files.download('type_plot.png')
pd.DataFrame(encoded_data).to_csv("encoded_data.csv", header=False, index=False)
files.download('encoded_data.csv')
encoder.save_weights('encoder')
files.download('encoder.index')
files.download('encoder.data-00000-of-00002')
files.download('encoder.data-00001-of-00002')
decoder.save_weights('decoder')
files.download('decoder.index')
files.download('decoder.data-00000-of-00002')
files.download('decoder.data-00001-of-00002')
files.download('checkpoint')
###Output
_____no_output_____ |
1 - Categories Analysis.ipynb | ###Markdown
Distribution of pages on categories
###Code
# imports used throughout this notebook
import numpy as np
import pandas as pd
import matplotlib.ticker
from matplotlib import pyplot as plt

dstop_cat = pd.read_csv('data/eswiki_topcategories.csv',
names=['cat_id', 'cat_title','cat_pages','cat_subcats','cat_files'])
print(dstop_cat.shape)
dstop_cat_tp = pd.read_csv('data/eswiki_topcategories_talkpages.csv',
names=['cat_title','cat_pages'])
print(dstop_cat_tp.shape)
def format_title(dscol):
title=dscol.str.replace('Wikipedia:', '')
title=title.str.replace('_', ' ')
    title=title.str.replace('Artículos con ', '')
    title=title.str.replace('Páginas con ', '')
return title
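# Example (with a placeholder name): 'Wikipedia:Artículos_con_X' -> 'X'
# (prefix dropped, underscores turned into spaces, the 'Artículos con '/'Páginas con ' lead-ins removed)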
cat_titles = format_title(dstop_cat.cat_title)
cat_titles_tp = format_title(dstop_cat_tp.cat_title)
print(cat_titles)
cat_titles = ['C'+str(i) for i in range(len(cat_titles))]
cat_titles
width = 0.35
N=len(dstop_cat)
ind = np.arange(N)
fig, ax = plt.subplots()
rects1=ax.bar(ind, dstop_cat.cat_pages,width, color='g',alpha=0.5)
rects2=ax.bar(ind+width, dstop_cat_tp.cat_pages,width, color='b', alpha=0.5)
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(cat_titles)
ax.legend((rects1[0], rects2[0]), ('Articles', 'Discussions'))
xfmt = matplotlib.ticker.ScalarFormatter(useMathText=True)
xfmt.set_powerlimits((-3,3))
ax.yaxis.set_major_formatter(xfmt)
plt.tight_layout()
plt.savefig('output/top_categories.eps', format='eps')
###Output
_____no_output_____
###Markdown
Correlation between the number articles and discussions
###Code
np.corrcoef(dstop_cat.cat_pages, dstop_cat_tp.cat_pages)
###Output
_____no_output_____ |
notebooks/Climate_Velocities/oa_k10_regional_preliminary.ipynb | ###Markdown
Omega Aragonite Escape Velocity Regional Comparison
###Code
import xgcm
import xarray as xr
import pandas as pd
import numpy as np
import scipy
import matplotlib as mpl
from matplotlib import cm
import matplotlib.colors as mcolors
from matplotlib.patches import Patch
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib import pyplot as plt
from matplotlib import gridspec
from cartopy import crs as ccrs
import cartopy.feature as cfeature
from xhistogram.xarray import histogram
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from chazbpei2020.preprocessing import *
###Output
_____no_output_____
###Markdown
--- Surface k10 RCP85 Ensemble Average
###Code
# k10 Omega Arag for ensemble average (preprocessed)
directory = '~/chazbpei2020/data/processed/Omega_Arag/RCP85/'
filename = 'omega_arag_k10_ensAvg_1950_2100.nc'
oa_path = directory+filename
ds = xr.open_dataset(oa_path).rename({'XT_OCEAN': 'xt_ocean',
'YT_OCEAN': 'yt_ocean',
'TIME': 'time',
'OMEGA_ARAG': 'omega_arag'})
###Output
_____no_output_____
###Markdown
--- Decadal Mean Omega Arag
###Code
# Calculate the time-mean Omega Arag for 15 decades of simulation
# 1959s through 2090s
da_oa_annual = ds.omega_arag.groupby('time.year').mean(dim='time', skipna=True)
da_oa_mean = []
decade = 1950
for i in range(15):
dec_mean = decadal_mean(da_oa_annual, decade)
da_oa_mean.append(dec_mean.squeeze())
decade += 10
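# da_oa_mean[i] now holds the 10-year mean omega_arag field for the decade starting in 1950 + 10*i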
###Output
/home/aos/chazb/miniconda3/envs/chazbpei2020/lib/python3.8/site-packages/xarray/core/nanops.py:142: RuntimeWarning: Mean of empty slice
return np.nanmean(a, axis=axis, dtype=dtype)
###Markdown
Calculate Escape Vectors
###Code
# Definte projection transformations and coordiantes
crs = ccrs.Robinson(central_longitude=180)
src=ccrs.PlateCarree()
lon = ds.xt_ocean.data
lat = ds.yt_ocean.data
# colors = cm.get_cmap('plasma', 10)
colors = ['hotpink','magenta','darkviolet','darkblue','blue',
'dodgerblue','turquoise','limegreen','lime','gold',
'darkorange','orangered','red','brown','maroon']
# Create levels array to isolate undersaturation threshold
clevs=[1]
# Plot Velocities at undersaturation border for all decades
fig, ax = plt.subplots(figsize=[16,10],
subplot_kw={'projection':crs})
num_decades=15
decade=1950
legend_elements = []
for i in range(num_decades):
element = Patch(facecolor=colors[i], label=str(decade)+'s')
legend_elements.append(element)
decade+=10
# Extract points from contour line segments for each decade
list_xpoints = [] # list contianing lists of x points for each decade
list_ypoints = [] # list contianing lists of y points for each decade
for i in range(num_decades):
cs = ax.contour(lon,lat,da_oa_mean[i],levels=clevs,
colors=colors[i],transform=src)
segments = cs.allsegs[0]
num_segs = len(segments)
xpoints = [] # to track multiple paths within same decade
ypoints = []
for j in range(num_segs):
x = segments[j][:,0].tolist() # convert to list to be easily concatenated
y = segments[j][:,1].tolist()
for p in x:
xpoints.append(p)
for p in y:
ypoints.append(p)
list_xpoints.append(xpoints) # add list of x points for each decade
list_ypoints.append(ypoints) # add list of y points for each decade
ax.legend(handles=legend_elements, loc='center')
ax.set_global()
ax.set_title('RCP85 Ensemble Avg, 1950s-2090s $\Omega$Arag Undersaturation Thresholds',fontsize=22)
ax.add_feature(cfeature.LAND,zorder=10,facecolor='darkgray')
fig.savefig('./oa_escape_vel_figs/oa_k10_contours_15')
# Round all values to nearest 0.5 (to be easily indexed)
# Create adjusted list to use later for indexing
list_xpoints_idx = []
list_ypoints_idx = []
for i in range(num_decades): # list of lists
xpoints = list_xpoints[i] # individual list of xpoints
ypoints = list_ypoints[i] # individual list of ypoints
num_points = len(xpoints)
for p in range(num_points):
xpoints[p] = round_half(xpoints[p])
ypoints[p] = round_half(ypoints[p])
xpoints = (np.array(xpoints)-0.5).tolist()
ypoints = (np.array(ypoints)+89.5).tolist()
list_xpoints_idx.append(xpoints)
list_ypoints_idx.append(ypoints)
# For each contour, for 1950s-2090s, compute the minimum distance to
# the contour of the next decade. i.e. for each x,y on the OA=1@2000
# contour, find the closest OA=1@2010 contour.
# Create parallel arrays of list to hold lists of directions and vectors for each decade
list_vector_dx = [] # change in x for nearest points
list_vector_dy = [] # change in y for nearest points
list_vector_magnitude = [] # distance to nearest points
for i in range(num_decades-1):
vector_dx = [] # change in x for decade
vector_dy = [] # change in y for decade
vector_magnitude = [] # vector magnitude for decade
xpoints = list_xpoints[i] # x coords for decade
ypoints = list_ypoints[i] # y coords for decade
num_points = len(xpoints)
# For each point, find min dist and closest point on contour
# of next decade
for p in range(num_points):
xp = xpoints[p] # x value along contour
yp = ypoints[p] # y value along contour
x,y,dx,dy,mindist = min_dist(xp,yp,
list_xpoints[i+1],
list_ypoints[i+1],
da_oa_mean[i].data)
# maintain lists of x and y vectors
vector_dx.append(dx/1000)
vector_dy.append(dy/1000)
vector_magnitude.append(mindist/1000) # dist magnitude
list_vector_dx.append(vector_dx)
list_vector_dy.append(vector_dy)
list_vector_magnitude.append(vector_magnitude)
# Reformat data to be Mappable
nx = len(lon)
ny = len(lat)
da_escape_dist = [] # escape distances for each decade
da_escape_dx = [] # escape dx for each decade
da_escape_dy = [] # escape dy for each decade
# For each decade up to 2090s
for i in range(num_decades-1):
# Create empty arrays and initialize all values to np.nan
da_dx = np.zeros(shape=(nx,ny))
da_dx[:,:] = np.nan
da_dy = np.zeros(shape=(nx,ny))
da_dy[:,:] = np.nan
da_dist = np.zeros(shape=(nx,ny))
da_dist[:,:] = np.nan
# Iterate through points in array of contour point indices
x_idx = list_xpoints_idx[i]
y_idx = list_ypoints_idx[i]
dx_vals = list_vector_dx[i]
dy_vals = list_vector_dy[i]
dist_vals = list_vector_magnitude[i]
# For each contour point in the decade, save the escape vector
# magnitude and direction in parallel DataArrays
num_points = len(x_idx)
for p in range(num_points):
xi = int(x_idx[p])
yi = int(y_idx[p])
da_dx[xi,yi] = dx_vals[p]
da_dy[xi,yi] = dy_vals[p]
da_dist[xi,yi] = dist_vals[p]
# Save out the vector (directionality and magnitude) fields as maps
# for each decade
da_dx = xr.DataArray(da_dx, dims=['xt_ocean','yt_ocean'], coords=[lon,lat]).T
da_dx = da_dx.where(da_dx < np.inf)
da_escape_dx.append(da_dx)
da_dy = xr.DataArray(da_dy, dims=['xt_ocean','yt_ocean'], coords=[lon,lat]).T
da_dy = da_dy.where(da_dy < np.inf)
da_escape_dy.append(da_dy)
da_dist = xr.DataArray(da_dist, dims=['xt_ocean','yt_ocean'], coords=[lon,lat]).T
da_dist = da_dist.where(da_dist < np.inf)
da_escape_dist.append(da_dist)
%reload_ext autoreload
%autoreload 2
from chazbpei2020.preprocessing import *
###Output
_____no_output_____
###Markdown
--- Calculate Escape Velocity
###Code
# Calculate escape velocity and create DataArray
nx = len(lon)
ny = len(lat)
dec=1950
da_escape_vel = []
for i in range(num_decades-1):
da_vel = da_escape_dist[i].copy().rename('Escape Velocity - '+str(dec)+'s')
da_escape_vel.append(da_vel)
dec+=10
# # comparison test
# da_escape_vel[7][140]
###Output
_____no_output_____
###Markdown
___ Differentiate Regions
###Code
da_escvel_arctic = [] # Arctic ocean
da_escvel_equatorial = [] # Equatorial region
da_escvel_southern = [] # Southern ocean
for i in range(num_decades-1):
da_escvel_arctic.append(da_escape_vel[i].loc[35:90,:].copy())
da_escvel_equatorial.append(da_escape_vel[i].loc[-40:35,:].copy())
da_escvel_southern.append(da_escape_vel[i].loc[-90:-40,:].copy())
# Define bin range and interval size
xlim = 1501
step = 50
levels = np.arange(0, xlim, step)
bins = np.array(levels)
# Create Histograms for escape velocities
nrows=2
ncols=7
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[16,6],
sharex=True,sharey=True)
decade = 1950
for row in range(nrows):
for col in range(ncols):
ax = axs[row,col]
i = row*ncols + col
h = histogram(da_escape_vel[i], bins=[bins])
h.plot(ax=ax, color=colors[i])
ax.set_title('Escape Vel - '+str(decade)+'s')
ax.set_xlabel('Esc Vel (km/decade)',fontsize=14)
ax.set_xlim(0,xlim)
ax.set_xticks(np.arange(0, xlim, 500))
ax.set_ylabel('Frequency',fontsize=14)
ax.set_ylim(0,350)
ax.label_outer()
# ax.hist(da_escape_vel[i].data,bins=bins)
decade+=10
fig.suptitle('RCP85 Ensemble Avg, $\Omega$ Arag k10 Escape Velocities - 21st Century',
fontsize=25)
%reload_ext autoreload
%autoreload 2
from chazbpei2020.preprocessing import *
# Calculate average Escape Velocities for different time periods
levels = np.arange(0, xlim, step)
bins = np.array(levels)
# da_escvel_equatorial
arctic_historic = hist_bins(da_escvel_arctic, levels, 0, 7)
arctic_future = hist_bins(da_escvel_arctic, levels, 7, 14)
# da_escvel_equatorial
equatorial_historic = hist_bins(da_escvel_equatorial, levels, 0, 7)
equatorial_future = hist_bins(da_escvel_equatorial, levels, 7, 14)
# da_escvel_southern
southern_historic = hist_bins(da_escvel_equatorial, levels, 0, 7)
southern_future = hist_bins(da_escvel_southern, levels, 7, 14)
# da_escape_vel
global_historic = hist_bins(da_escape_vel, levels, 0, 7)
global_future = hist_bins(da_escape_vel, levels, 7, 14)
# Average frequency per decade
# arctic_historic_mean = arctic_historic / 7
# arctic_future_mean = arctic_future / 7
# equatorial_historic_mean = equatorial_historic / 7
# equatorial_future_mean = equatorial_future / 7
# southern_historic_mean = southern_historic / 7
# southern_future_mean = southern_future / 7
# global_historic_mean = global_historic / 7
# global_future_mean = global_future / 7
# Percentage of Calculated Climate Velocities
arctic_historic_mean = arctic_historic / arctic_historic.sum()
arctic_future_mean = arctic_future / arctic_future.sum()
equatorial_historic_mean = equatorial_historic / equatorial_historic.sum()
equatorial_future_mean = equatorial_future / equatorial_future.sum()
southern_historic_mean = southern_historic / southern_historic.sum()
southern_future_mean = southern_future / southern_future.sum()
global_historic_mean = global_historic / global_historic.sum()
global_future_mean = global_future / global_future.sum()
# Create DataArrays for entire earth and individual regions
arctic_historic_mean = xr.DataArray(arctic_historic_mean, dims=['bin_edges'],
coords=[np.delete(bins,len(bins)-1)]).rename('arctic_hist')
arctic_future_mean = xr.DataArray(arctic_future_mean, dims=['bin_edges'],
coords=[np.delete(bins,len(bins)-1)]).rename('arctic_future')
equatorial_historic_mean = xr.DataArray(equatorial_historic_mean, dims=['bin_edges'],
coords=[np.delete(bins,len(bins)-1)]).rename('equatorial_hist')
equatorial_future_mean = xr.DataArray(equatorial_future_mean, dims=['bin_edges'],
coords=[np.delete(bins,len(bins)-1)]).rename('equatorial_future')
southern_historic_mean = xr.DataArray(southern_historic_mean, dims=['bin_edges'],
coords=[np.delete(bins,len(bins)-1)]).rename('southern_hist')
southern_future_mean = xr.DataArray(southern_future_mean, dims=['bin_edges'],
coords=[np.delete(bins,len(bins)-1)]).rename('southern_future')
global_historic_mean = xr.DataArray(southern_historic_mean, dims=['bin_edges'],
coords=[np.delete(bins,len(bins)-1)]).rename('global_hist')
global_future_mean = xr.DataArray(global_future_mean, dims=['bin_edges'],
coords=[np.delete(bins,len(bins)-1)]).rename('global_future')
# Define Legend and colormap
colorcmp = ['lightpink','plum','moccasin','coral','lawngreen','limegreen']
regions = ['Arctic (Historical)','Arctic (Future)',
'Equatorial (Historical)','Equatorial (Future)',
'Southern (Historical)','Southern (Future)']
legend_elements = []
num_colors = len(colorcmp)
for i in range(num_colors):
element = Patch(facecolor=colorcmp[i], label=regions[i])
legend_elements.append(element)
# Create Histograms for Different Regions' Escape Velocities
nrows=1
ncols=3
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[16,6],
sharex=True,sharey=True)
num_bins = len(levels)-1
d = scipy.zeros(num_bins)
xs = np.arange(0,xlim-1,step)
# Arctic
ax = axs[0]
ys = arctic_historic_mean
ys.plot(ax=axs[0],color=colorcmp[0])
ax.fill_between(xs, ys, where=ys>=d, interpolate=True, color=colorcmp[0])
ys = arctic_future_mean
ys.plot(ax=ax,color=colorcmp[1])
ax.fill_between(xs, ys, where=ys>=d, interpolate=True, color=colorcmp[1])
ax.set_title('Arctic Escape Vel Frequency',fontsize=16)
# Equatorial
ax = axs[1]
ys = equatorial_historic_mean
ys.plot(ax=ax,color=colorcmp[2])
ax.fill_between(xs, ys, where=ys>=d, interpolate=True, color=colorcmp[2])
ys = equatorial_future_mean
ys.plot(ax=ax,color=colorcmp[3])
ax.fill_between(xs, ys, where=ys>=d, interpolate=True, color=colorcmp[3])
ax.set_title('Equatorial Escape Vel Frequency',fontsize=16)
# Southern
ax = axs[2]
ys = southern_historic_mean
ys.plot(ax=ax,color=colorcmp[4])
ax.fill_between(xs, ys, where=ys>=d, interpolate=True, color=colorcmp[4])
ys = southern_future_mean
ys.plot(ax=ax,color=colorcmp[5])
ax.fill_between(xs, ys, where=ys>=d, interpolate=True, color=colorcmp[5])
ax.set_title('Southern Escape Vel Frequency',fontsize=16)
for ax in axs:
ax.set_xlabel('Escape Velocity (km/decade)',fontsize=14)
ax.set_ylabel('Proportion',fontsize=14)
ax.set_ylim(0,0.48)
ax.label_outer()
fig.suptitle('$\Omega$ Arag Regional Escape Velocities - Historical vs. Future',
fontsize=25)
# Create Histograms for Different Regions' Escape Velocities
nrows=1
ncols=3
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[16,6],
sharex=True,sharey=True)
# Arctic
ax = axs[0]
arctic_historic_mean.plot(ax=axs[0],color=colorcmp[0])
arctic_future_mean.plot(ax=ax,color=colorcmp[1])
ax.set_title('Arctic Escape Velocities',fontsize=15)
# Equatorial
ax = axs[1]
equatorial_historic_mean.plot(ax=ax,color=colorcmp[2])
equatorial_future_mean.plot(ax=ax,color=colorcmp[3])
ax.set_title('Equatorial Escape Velocities',fontsize=15)
# Southern
ax = axs[2]
southern_historic_mean.plot(ax=ax,color=colorcmp[4])
southern_future_mean.plot(ax=ax,color=colorcmp[5])
ax.set_title('Southern Escape Velocities',fontsize=15)
for ax in axs:
ax.set_xlabel('Escape Velocity (km/decade)',fontsize=16)
ax.set_ylabel('Proportion', fontsize=14)
ax.set_ylim(0,0.48)
ax.label_outer()
fig.suptitle('$\Omega$ Arag Regional Escape Velocities - Historical vs. Future',
fontsize=25)
# Create Histograms for Different Regions' Escape Velocities
nrows=3
ncols=1
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[12,10],
sharex=True,sharey=True)
# Arctic
ax = axs[0]
arctic_historic_mean.plot(ax=axs[0],color=colorcmp[0])
arctic_future_mean.plot(ax=ax,color=colorcmp[1])
ax.set_title('Arctic Escape Velocities',fontsize=16)
# Equatorial
ax = axs[1]
equatorial_historic_mean.plot(ax=ax,color=colorcmp[2])
equatorial_future_mean.plot(ax=ax,color=colorcmp[3])
ax.set_title('Equatorial Escape Velocities',fontsize=16)
# Southern
ax = axs[2]
southern_historic_mean.plot(ax=ax,color=colorcmp[4])
southern_future_mean.plot(ax=ax,color=colorcmp[5])
ax.set_title('Southern Escape Velocities',fontsize=16)
i=0
for ax in axs:
ax.set_xlabel('Escape Velocity (km/decade)',fontsize=16)
ax.set_ylabel('Proportion',fontsize=14)
ax.set_ylim(0,0.48)
ax.label_outer()
ax.legend(handles=legend_elements[i:i+2], loc='upper right')
i+=2
fig.suptitle('$\Omega$ Arag Regional Escape Velocities - Historical vs. Future',
fontsize=25)
# Create Histogram for Different Regions' Escape Velocities (single plot)
fig, ax = plt.subplots(figsize=[10,6],
sharex=True,sharey=True)
arctic_historic_mean.plot(ax=ax,color=colorcmp[0])
arctic_future_mean.plot(ax=ax,color=colorcmp[1])
equatorial_historic_mean.plot(ax=ax,color=colorcmp[2])
equatorial_future_mean.plot(ax=ax,color=colorcmp[3])
southern_historic_mean.plot(ax=ax,color=colorcmp[4])
southern_future_mean.plot(ax=ax,color=colorcmp[5])
ax.set_title('Escape Velocities - Historic vs. Future',fontsize=20)
ax.set_xlabel('Escape Velocity (km/decade)',fontsize=14)
ax.set_ylim(0,0.48)
ax.set_ylabel('Proportion',fontsize=14)
ax.label_outer()
ax.legend(handles=legend_elements, loc='upper right')
# Create Histogram for Different Regions' Escape Velocities (single plot)
fig, ax = plt.subplots(figsize=[10,6],
sharex=True,sharey=True)
global_historic_mean.plot(ax=ax,color='b')
global_future_mean.plot(ax=ax,color='r')
ax.set_title('Escape Velocities - Historic vs. Future',fontsize=20)
ax.set_xlabel('Escape Velocity (km/decade)',fontsize=14)
ax.set_ylim(0,0.6)
ax.set_ylabel('Proportion',fontsize=14)
ax.label_outer()
###Output
_____no_output_____ |
docs/auto_examples/plot_image_transmission.ipynb | ###Markdown
Coding - Decoding simulation of an image========================================This example shows a simulation of the transmission of an image as abinary message through a gaussian white noise channel with an LDPC coding anddecoding system.
###Code
# Author: Hicham Janati ([email protected])
#
# License: BSD (3-clause)
import numpy as np
from pyldpc import make_ldpc, ldpc_images
from pyldpc.utils_img import gray2bin, rgb2bin
from matplotlib import pyplot as plt
from PIL import Image
from time import time
###Output
_____no_output_____
###Markdown
Let's see the image we are going to be working with
###Code
eye = Image.open("data/eye.png")
# convert it to grayscale and keep one channel
eye = np.asarray(eye.convert('LA'))[:, :, 0]
# Convert it to a binary matrix
eye_bin = gray2bin(eye)
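# gray2bin expands each 8-bit grayscale pixel into its binary representation,
# which adds a bit-plane axis (see the 3-dimensional binary shape printed below)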
print("Eye shape: (%s, %s)" % eye.shape)
print("Binary Eye shape: (%s, %s, %s)" % eye_bin.shape)
n = 200
d_v = 3
d_c = 4
seed = 42
###Output
_____no_output_____
###Markdown
First we create a small LDPC code, i.e. a pair of decoding and coding matrices H and G. H is a regular parity-check matrix with d_v ones per row and d_c ones per column.
###Code
H, G = make_ldpc(n, d_v, d_c, seed=seed, systematic=True, sparse=True)
###Output
_____no_output_____
###Markdown
Now we simulate the transmission with Gaussian white noise and recover the original image via belief propagation.
###Code
snr = 8
eye_coded, eye_noisy = ldpc_images.encode_img(G, eye_bin, snr)
print("Coded eye shape", eye_coded.shape)
t = time()
eye_decoded = ldpc_images.decode_img(G, H, eye_coded, snr, eye_bin.shape)
t = time() - t
print("Eye | Decoding time: ", t)
error_decoded_eye = abs(eye - eye_decoded).mean()
error_noisy_eye = abs(eye_noisy - eye).mean()
###Output
_____no_output_____
###Markdown
With RGB images, we proceed similarly
###Code
print("\n\n")
tiger = np.asarray(Image.open("data/tiger.jpg"))
# Convert it to a binary matrix
tiger_bin = rgb2bin(tiger)
print("Tiger shape: (%s, %s, %s)" % tiger.shape)
print("Tiger Binary shape: (%s, %s, %s)" % tiger_bin.shape)
tiger_coded, tiger_noisy = ldpc_images.encode_img(G, tiger_bin, snr)
print("Coded Tiger shape", tiger_coded.shape)
t = time()
tiger_decoded = ldpc_images.decode_img(G, H, tiger_coded, snr, tiger_bin.shape)
t = time() - t
print("Tiger | Decoding time: ", t)
error_decoded_tiger = abs(tiger - tiger_decoded).mean()
error_noisy_tiger = abs(tiger_noisy - tiger).mean()
titles_eye = ["Original", "Noisy | Err = %.2f %%" % error_noisy_eye,
"Decoded | Err = %.2f %%" % error_decoded_eye]
titles_tiger = ["Original", "Noisy | Err = %.2f %%" % error_noisy_tiger,
"Decoded | Err = %.2f %%" % error_decoded_tiger]
all_imgs = [[eye, eye_noisy, eye_decoded], [tiger, tiger_noisy, tiger_decoded]]
f, axes = plt.subplots(2, 3, figsize=(18, 12))
for ax_row, titles, img_list, cmap in zip(axes, [titles_eye, titles_tiger],
all_imgs, ["gray", None]):
for ax, data, title in zip(ax_row, img_list, titles):
ax.imshow(data, cmap=cmap)
ax.set_title(title)
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
src/archive/sentiment_emotion_scores.ipynb | ###Markdown
Emotion Analysis using the NRC Emotion Lexicon
- Rather than using just the simple TextBlob or Vader packages for sentiment analysis, I thought it would be interesting to explore emotional tone using an emotion lexicon.
- Let's see what we can uncover using the popular open-sourced emotion lexicon published by the NRC (National Research Council Canada).
- In addition to 'positive' and 'negative', we have word associations for 8 overarching emotion categories.
- For simplicity, let's remove words without scores as well as those that are associated with 8 or more of the 10 categories.
###Code
import pandas as pd

# read in raw emotion lexicon
filepath = "../NRC-Sentiment-Emotion-Lexicons/NRC-Emotion-Lexicon-v0.92/NRC-Emotion-Lexicon-Wordlevel-v0.92.txt"
emolex_df = pd.read_csv(filepath, names=["word", "emotion", "association"], skiprows=1, sep='\t')
# pivot df so we have one row per word, one column per emotion
emolex_df = emolex_df.pivot(index='word', columns='emotion', values='association').reset_index()
# rename df column
emolex_df.columns.name = 'index'
# filter out words without scores and with more than 7 scores
emolex_df = emolex_df[emolex_df.sum(axis=1)>0].reset_index(drop=True)
emolex_df = emolex_df[emolex_df.sum(axis=1)<7].reset_index(drop=True)
emolex_df
###Output
_____no_output_____
###Markdown
Using this lexicon, we can now easily look up all words from a single paragraph of the corpus:
###Code
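# briefings_df is assumed to already exist at this point: one row per briefing
# paragraph, with the raw text in a 'text' column.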
paragraph_words = briefings_df.text[500].split()
emolex_df[pd.DataFrame(emolex_df.word.tolist()).isin(paragraph_words).any(1)]
###Output
_____no_output_____
###Markdown
Let's calculate and store aggregate emotion scores for each paragraph in the corpus:
###Code
# create empty df to store aggregated emotion calcs
data = pd.DataFrame([])
for text in briefings_df['text']:
paragraph_words = text.split()
paragraph_emos = emolex_df[pd.DataFrame(emolex_df.word.tolist()).isin(paragraph_words).any(1)].mean()
data = data.append(paragraph_emos, ignore_index=True)
# combine aggregated emotion scores with transcript df
briefings_df = briefings_df.join(data)
# drop empty 'word' column, fill NaNs with zero
briefings_df = briefings_df.drop(columns=['word'])
briefings_df = briefings_df.fillna(0)
briefings_df
# save scores df to csv
briefings_df.to_csv("../data/scored_briefings.csv",index=False)
briefings_df[briefings_df.sum(axis=1) > 5]
###Output
_____no_output_____ |
files/sampling/sampling.ipynb | ###Markdown
Remember, each FASTQ record is exactly four lines long.

Sample 10% of reads
###Code
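# A single record therefore looks like this (hypothetical read):
#   @read_1        <- header
#   ACGTACGTACGT   <- sequence
#   +              <- separator
#   IIIIIIIIIIII   <- per-base quality scores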
record_number = 0
with open("test.fastq") as input:
with open("sample.fastq", "w") as output:
for line1 in input:
line2 = input.readline()
line3 = input.readline()
line4 = input.readline()
if record_number % 10 == 0:
output.write(line1)
output.write(line2)
output.write(line3)
output.write(line4)
record_number += 1
###Output
_____no_output_____
###Markdown
Randomly sample 10% of reads (more or less)
###Code
import random
percentage = 10
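# Each record is kept when randrange(0, percentage) == 0, i.e. with probability
# 1/percentage, so percentage = 10 keeps roughly 10% of the reads.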
with open("test.fastq") as input:
with open("sample.fastq", "w") as output:
for line1 in input:
line2 = input.readline()
line3 = input.readline()
line4 = input.readline()
if random.randrange(0,percentage) == 0:
output.write(line1)
output.write(line2)
output.write(line3)
output.write(line4)
###Output
_____no_output_____
###Markdown
Sample a given number of reads
###Code
import random
records_to_sample = 100
with open("test.fastq") as input:
num_lines = sum([1 for line in input])
total_records = int(num_lines / 4)
print("sampling {} out of {} records".format(records_to_sample, total_records))
percentage = (records_to_sample / total_records) * 100
print("sampling {p} % of records".format(p=percentage))
records_to_keep = random.sample(range(total_records), records_to_sample)
with open("test.fastq") as input:
with open("sample.fastq", "w") as output:
record_number = 0
for line1 in input:
line2 = input.readline()
line3 = input.readline()
line4 = input.readline()
if record_number in records_to_keep:
output.writelines([line1, line2, line3, line4])
record_number += 1
###Output
_____no_output_____
###Markdown
Create multiple samples of records from a single file
###Code
import random
input_dataset = "test.fastq"
records_to_sample = 100
number_of_replicates = 10
with open(input_dataset) as input:
num_lines = sum([1 for line in input])
total_records = int(num_lines / 4)
print("sampling {} out of {} records, replicated {} times".format(records_to_sample, total_records, number_of_replicates))
outputs = []
for i in range(number_of_replicates):
outputs.append([open("sample.{}.fastq".format(i), "w"),
random.sample(range(total_records), records_to_sample)]
)
record_number = 0
with open(input_dataset) as input:
for line1 in input:
line2 = input.readline()
line3 = input.readline()
line4 = input.readline()
for output, keep in outputs:
if record_number in keep:
output.writelines([line1, line2, line3, line4])
record_number += 1
for output, keep in outputs:
output.close()
###Output
_____no_output_____
###Markdown
Put it all together with a minimal user interface
###Code
import argparse
import random
import sys
def make_parser():
parser = argparse.ArgumentParser(description='Randomly sampling a FASTQ file')
parser.add_argument("input", help="input FASTQ filename")
parser.add_argument("output", help="output FASTQ filename")
parser.add_argument("-n", "--number", type=int, help="number of reads to sample")
parser.add_argument("-p", "--percentage", type=int, help="percentage of reads to sample")
parser.add_argument("-r", "--replicates", type=int, help="number of output files to write", default=1)
return parser
def count_records(filename, record_length=4):
print("counting records....")
with open(filename) as input:
num_lines = sum([1 for line in input])
total_records = int(num_lines / record_length)
return total_records
def main():
parser = make_parser()
args = parser.parse_args()
if args.percentage and args.number:
sys.exit("give either a percentage or a number of reads to sample, not both")
if not args.percentage and not args.number:
sys.exit("you must give either a percentage or a number of reads to sample")
total_records = count_records(args.input)
records_to_sample = args.number if args.number else (total_records * args.percentage) // 100
number_of_replicates = args.replicates
input_filename = args.input
output_filename = args.output
print("sampling {} out of {} records, replicated {} times".format(records_to_sample, total_records, number_of_replicates))
outputs = []
for i in range(number_of_replicates):
outputs.append([open("{}_{}".format(i, output_filename), "w"),
random.sample(range(total_records), records_to_sample)]
)
record_number = 0
with open(input_filename) as input:
for line1 in input:
line2 = input.readline()
line3 = input.readline()
line4 = input.readline()
for output, keep in outputs:
if record_number in keep:
output.writelines([line1, line2, line3, line4])
record_number += 1
if record_number % ((total_records * 10) / 100) == 0:
print("{} % done".format((record_number / total_records) * 100))
for output, keep in outputs:
output.close()
print("All done!")
if __name__ == '__main__':
# execute only if run as a script
main()
###Output
_____no_output_____ |
4 - Exploratory Data Analysis - Terrorism.ipynb | ###Markdown
ANSHUMAAN KUMAR PRASAD

**DATA SCIENCE & BUSINESS INTERN AT THE SPARKS FOUNDATION**

**TASK 4**: Exploratory Data Analysis - Terrorism

**PROBLEM**: Perform "Exploratory Data Analysis" on the dataset "Global Terrorism" (https://bit.ly/2TK5Xn5). As a security/defense analyst, try to find out the hot zones of terrorism and what security issues and insights can be derived through EDA.

**SOLUTION**:
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
data = pd.read_csv("C:/Users/Anshumaan/Documents/csv_dataset/globalterrorismdb_0718dist.csv",encoding='latin1')
data.head()
data.columns.values
data.rename(columns={'iyear':'Year','imonth':'Month','iday':"day",'gname':'Group','country_txt':'Country','region_txt':'Region','provstate':'State','city':'City','latitude':'latitude',
'longitude':'longitude','summary':'summary','attacktype1_txt':'Attacktype','targtype1_txt':'Targettype','weaptype1_txt':'Weapon','nkill':'kill',
'nwound':'Wound'},inplace=True)
data = data[['Year','Month','day','Country','State','Region','City','latitude','longitude',"Attacktype",'kill',
'Wound','target1','summary','Group','Targettype','Weapon','motive']]
data.head()
data.shape
data.isnull().sum()
data['Wound'] = data['Wound'].fillna(0)
data['kill'] = data['kill'].fillna(0)
data['Casualities'] = data['kill'] + data['Wound']
data.info()
data.describe()
year = data['Year'].unique()
years_count = data['Year'].value_counts(dropna = False).sort_index()
plt.figure(figsize = (18,10))
sns.barplot(x = year,
y = years_count,
palette = "tab10")
plt.xticks(rotation = 50)
plt.xlabel('Attacking Year',fontsize=20)
plt.ylabel('Number of Attacks Each Year',fontsize=20)
plt.title('Attacks In Years',fontsize=30)
plt.show()
pd.crosstab(data.Year, data.Region).plot(kind='area',stacked=False,figsize=(20,10))
plt.title('Terrorist Activities By Region In Each Year',fontsize=25)
plt.ylabel('Number of Attacks',fontsize=20)
plt.xlabel("Year",fontsize=20)
plt.show()
attack = data.Country.value_counts()[:10]
attack
data.Group.value_counts()[1:10]
plt.subplots(figsize=(20,10))
sns.barplot(data['Country'].value_counts()[:10].index,data['Country'].value_counts()[:10].values,palette='YlOrBr_r')
plt.title('Top Countries Affected')
plt.xlabel('Countries')
plt.ylabel('Count')
plt.xticks(rotation = 50)
plt.show()
df = data[['Year','kill']].groupby(['Year']).sum()
fig, ax4 = plt.subplots(figsize=(20,10))
df.plot(kind='bar',alpha=0.7,ax=ax4)
plt.xticks(rotation = 50)
plt.title("People Died Due To Attack",fontsize=25)
plt.ylabel("Number of killed peope",fontsize=20)
plt.xlabel('Year',fontsize=20)
top_side = ax4.spines["top"]
top_side.set_visible(False)
right_side = ax4.spines["right"]
right_side.set_visible(False)
data['City'].value_counts().to_frame().sort_values('City',axis=0,ascending=False).head(10).plot(kind='bar',figsize=(20,10),color='blue')
plt.xticks(rotation = 50)
plt.xlabel("City",fontsize=15)
plt.ylabel("Number of attack",fontsize=15)
plt.title("Top 10 most effected city",fontsize=20)
plt.show()
data['Attacktype'].value_counts().plot(kind='bar',figsize=(20,10),color='magenta')
plt.xticks(rotation = 50)
plt.xlabel("Attacktype",fontsize=15)
plt.ylabel("Number of attack",fontsize=15)
plt.title("Name of attacktype",fontsize=20)
plt.show()
data[['Attacktype','kill']].groupby(["Attacktype"],axis=0).sum().plot(kind='bar',figsize=(20,10),color=['darkslateblue'])
plt.xticks(rotation=50)
plt.title("Number of killed ",fontsize=20)
plt.ylabel('Number of people',fontsize=15)
plt.xlabel('Attack type',fontsize=15)
plt.show()
data[['Attacktype','Wound']].groupby(["Attacktype"],axis=0).sum().plot(kind='bar',figsize=(20,10),color=['cyan'])
plt.xticks(rotation=50)
plt.title("Number of wounded ",fontsize=20)
plt.ylabel('Number of people',fontsize=15)
plt.xlabel('Attack type',fontsize=15)
plt.show()
plt.subplots(figsize=(20,10))
sns.countplot(data["Targettype"],order=data['Targettype'].value_counts().index,palette="gist_heat",edgecolor=sns.color_palette("mako"));
plt.xticks(rotation=90)
plt.xlabel("Attacktype",fontsize=15)
plt.ylabel("count",fontsize=15)
plt.title("Attack per year",fontsize=20)
plt.show()
data['Group'].value_counts().to_frame().drop('Unknown').head(10).plot(kind='bar',color='green',figsize=(20,10))
plt.title("Top 10 terrorist group attack",fontsize=20)
plt.xlabel("terrorist group name",fontsize=15)
plt.ylabel("Attack number",fontsize=15)
plt.show()
data[['Group','kill']].groupby(['Group'],axis=0).sum().drop('Unknown').sort_values('kill',ascending=False).head(10).plot(kind='bar',color='yellow',figsize=(20,10))
plt.title("Top 10 terrorist group attack",fontsize=20)
plt.xlabel("terrorist group name",fontsize=15)
plt.ylabel("No of killed people",fontsize=15)
plt.show()
df=data[['Group','Country','kill']]
df=df.groupby(['Group','Country'],axis=0).sum().sort_values('kill',ascending=False).drop('Unknown').reset_index().head(10)
df
kill = data.loc[:,'kill']
print('Number of people killed by terror attack:', int(sum(kill.dropna())))
typeKill = data.pivot_table(columns='Attacktype', values='kill', aggfunc='sum')
typeKill
countryKill = data.pivot_table(columns='Country', values='kill', aggfunc='sum')
countryKill
###Output
_____no_output_____ |
Statistik-II/sitzung-6.ipynb | ###Markdown
Session 6

These scripts are intended purely as supplementary material, especially for those of you who want to gain some insight into programming. So if you are tired of carrying out repetitive tasks and would rather leave them to a machine, you are in exactly the right place. The code here is not relevant for the exam; strictly speaking, it has __nothing at all__ to do with the Chair of Statistics.

---
###Code
import numpy as np
from scipy.special import binom
from matplotlib import pyplot as plt
from tqdm import trange
###Output
_____no_output_____
###Markdown
Distributions of Discrete Waiting Times

Is it possible to extend the reasoning behind the exponential distribution to the binomial distribution and the hypergeometric distribution?

Let $$T: \text{waiting time until the first success}$$ where $$X: \text{number of successes}$$

As a reminder: waiting means that nothing has happened so far, i.e. $X=0$. For $X \sim Pois(\lambda)$ we have:
\begin{equation}P(X=0) = \frac{\lambda^0}{0!}\exp(-\lambda) = \exp(-\lambda)\end{equation}
For independent and identically distributed time units:
\begin{align}P(X&=0 \text{ in 2 time units}) \\&= P(\{X=0\text{ in the first time unit}\} , \{ X=0\text{ in the second time unit} \}) \\ &= P(\{X=0\text{ in the first time unit}\}) \cdot P(\{ X=0\text{ in the second time unit} \}) \\ &= \exp(-\lambda) \cdot \exp(-\lambda) = \exp(-2\lambda)\end{align}
And in general:
$$P(X=0 \text{ in t time units}) = \exp(-\lambda t) = P(T \geq t)$$
Thus we can say:
\begin{equation}P(T \leq t) = 1 - \exp(-\lambda t)\end{equation}

--- Extension to the Binomial Distribution

For $X \sim Bin(n, p)$, with $n \in \mathbb{N}$ and $p \in (0,1)$, the same argument still applies:
$$P_n(X=0)=\underbrace{{n \choose 0}}_{=1} \overbrace{p^0}^{=1} (1-p)^{n-0} = (1-p)^n$$
and
$$P(T \leq n) = 1 - P_n(X=0) = 1 - (1-p)^n$$

Verification: waiting time until the first six when playing Mensch-ärgere-dich-nicht
###Code
trials = 1000000
n = np.arange(0, 100)
theoretical = 1 - (1-1/6)**n
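# P(T <= n) = 1 - (5/6)**n: the probability of rolling at least one six within n rolls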
samples = np.ones(trials)
for i in trange(trials):
while np.random.randint(low=1, high=7) != 6:
samples[i] += 1
values, counts = np.unique(samples, return_counts=True)
empirical = counts/trials
plt.figure(figsize=(10, 5))
plt.step(n, theoretical, where='post', label='$P_{theoretisch}(X < x)$')
plt.step(values, empirical.cumsum(), where='post', label='$P_{empirisch}(X < x)$')
plt.legend()
plt.title("Vergleich/รberprรผfung theoretischer Verteilungsfunktion")
plt.xlim([0, 40])
###Output
_____no_output_____
###Markdown
--- Extension to the Hypergeometric Distribution

For $X \sim Hyper(N, M, n)$:
\begin{align}P_n(X=0) &= \overbrace{\left(\frac{N-M}{N}\right) \cdot \left(\frac{N-M-1}{N-1}\right) \cdot \dots \cdot \left(\frac{N-M-(n-1)}{N-(n-1)}\right)}^{\textit{n factors}} \\&= \Large{\frac{\frac{(N-M)!}{(N-M-n)!}}{\frac{N!}{(N-n)!}}}= \Large{\frac{\frac{(N-M)! \color{red}{n!}}{(N-M-n)!\color{red}{n!}}}{\frac{N!\color{red}{n!}}{(N-n)!\color{red}{n!}}}}\\&= \Large{\frac{\color{red}{n!}{N-M \choose n}}{\color{red}{n!}{N \choose n}} = \frac{{N-M \choose n}}{{N \choose n}}}\end{align}
and
$$P(T < n) = 1 - P_n(X=0) = 1 - \frac{{N-M \choose n}}{{N \choose n}}$$

Verification: How likely is it to survive a round of Russian roulette?
###Code
N = 6
M = 1
n = np.arange(0, 6)
theoretical = 1 - binom(N-M, n)/binom(N, n)
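# P(T < n) = 1 - C(N-M, n)/C(N, n): the probability that the single bullet (M = 1)
# turns up within the first n pulls out of N chambers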
samples = np.zeros(trials)
for i in trange(trials):
x = [1, 2, 3, 4, 5, 6]
np.random.shuffle(x)
didi_mao = None
while didi_mao != 6:
didi_mao = x.pop()
samples[i] += 1
values, counts = np.unique(samples, return_counts=True)
empirical = counts/trials
plt.figure(figsize=(10, 5))
plt.step(n, theoretical, where='post', label='$P_{theoretisch}(X < x)$')
plt.step(values, empirical.cumsum(), where='post', label='$P_{empirisch}(X < x)$')
plt.legend()
plt.title("Vergleich/รberprรผfung theoretischer Verteilungsfunktion")
plt.xlim([0, 6])
###Output
_____no_output_____ |
nbs/dl1/lesson7-human-numbers_20190111.ipynb | ###Markdown
Lesson 7: Human numbers
###Code
from fastai.text import *
bs = 64
###Output
_____no_output_____
###Markdown
Data
###Code
path = untar_data(URLs.HUMAN_NUMBERS)
path.ls()
def readnums(d): return [', '.join(o.strip() for o in open(path / d).readlines())]
train_txt = readnums('train.txt')
train_txt[0][:80]
valid_txt = readnums('valid.txt')
valid_txt[0][-80:]
train = TextList(train_txt, path=path)
valid = TextList(valid_txt, path=path)
src = ItemLists(path=path, train=train, valid=valid).label_for_lm()
data = src.databunch(bs=bs)
train[0].text[:80]
len(data.valid_ds[0][0].data)
data.bptt, len(data.valid_dl)
13017/70/bs
it = iter(data.valid_dl)
x1, y1 = next(it)
x2, y2 = next(it)
x3, y3 = next(it)
it.close()
x1.numel() + x2.numel() + x3.numel()
x1.shape, y1.shape
x2.shape, y2.shape
x1[:, 0]
y1[:, 0]
v = data.valid_ds.vocab
x1[0].shape
v.textify(x1[0])
v.textify(y1[0])
v.textify(x2[0])
v.textify(x3[0])
v.textify(x1[1])
v.textify(x2[1])
v.textify(x3[1])
v.textify(x3[-1])
data.show_batch(ds_type=DatasetType.Valid)
###Output
_____no_output_____
###Markdown
Single fully connected model
###Code
data = src.databunch(bs=bs, bptt=3, max_len=0, p_bptt=1.)
x, y = data.one_batch()
x.shape, y.shape
nv = len(v.itos)
nv
nh = 64
def loss4(input, target): return F.cross_entropy(input, target[:, -1])
def acc4(input, target): return accuracy(input, target[:, -1])
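# loss4/acc4 compare the single prediction of these models with the last token of each target sequence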
class Model0(nn.Module):
def __init__(self):
super().__init__()
self.i_h = nn.Embedding(nv, nh) # green arrow
self.h_h = nn.Linear(nh, nh) # brown arrow
self.h_o = nn.Linear(nh, nv) # blue arrow
self.bn = nn.BatchNorm1d(nh)
def forward(self, x):
h = self.bn(F.relu(self.i_h(x[:,0])))
if x.shape[1] > 1:
h = h + self.i_h(x[:,1])
h = self.bn(F.relu(self.h_h(h)))
if x.shape[1] > 2:
h = h + self.i_h(x[:,2])
h = self.bn(F.relu(self.h_h(h)))
return self.h_o(h)
learn = Learner(data, Model0(), loss_func=loss4, metrics=acc4)
learn.fit_one_cycle(6, 1e-4)
###Output
_____no_output_____
###Markdown
Same thing with a loop
###Code
class Model1(nn.Module):
def __init__(self):
super().__init__()
self.i_h = nn.Embedding(nv, nh) # green arrow
self.h_h = nn.Linear(nh, nh) # brown arrow
self.h_o = nn.Linear(nh, nv) # blue arrow
self.bn = nn.BatchNorm1d(nh)
def forward(self, x):
h = torch.zeros(x.shape[0], nh).to(device=x.device)
for i in range(x.shape[1]):
h = h + self.i_h(x[:,i])
h = self.bn(F.relu(self.h_h(h)))
return self.h_o(h)
learn = Learner(data, Model1(), loss_func=loss4, metrics=acc4)
learn.fit_one_cycle(6, 1e-4)
###Output
_____no_output_____
###Markdown
Multi fully connected model
###Code
data = src.databunch(bs=bs, bptt=20)
x, y = data.one_batch()
x.shape, y.shape
class Model2(nn.Module):
def __init__(self):
super().__init__()
self.i_h = nn.Embedding(nv, nh) # green arrow
self.h_h = nn.Linear(nh, nh) # brown arrow
self.h_o = nn.Linear(nh, nv) # blue arrow
self.bn = nn.BatchNorm1d(nh)
def forward(self, x):
h = torch.zeros(x.shape[0], nh).to(device=x.device)
res = []
for i in range(x.shape[1]):
h = h + self.i_h(x[:,i])
h = F.relu(self.h_h(h))
res.append(self.h_o(self.bn(h)))
return torch.stack(res, dim=1)
learn = Learner(data, Model2(), metrics=accuracy)
learn.fit_one_cycle(10, 1e-4, pct_start=0.1)
###Output
_____no_output_____
###Markdown
Maintain state
###Code
class Model3(nn.Module):
def __init__(self):
super().__init__()
self.i_h = nn.Embedding(nv, nh) # green arrow
self.h_h = nn.Linear(nh, nh) # brown arrow
self.h_o = nn.Linear(nh, nv) # blue arrow
self.bn = nn.BatchNorm1d(nh)
self.h = torch.zeros(bs, nh).cuda()
def forward(self, x):
res = []
h = self.h
for i in range(x.shape[1]):
h = h + self.i_h(x[:,i])
h = F.relu(self.h_h(h))
res.append(self.bn(h))
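        # detach() keeps the hidden-state values but cuts the autograd graph, truncating backprop through time at the batch boundary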
self.h = h.detach()
res = torch.stack(res, dim=1)
res = self.h_o(res)
return res
learn = Learner(data, Model3(), metrics=accuracy)
learn.fit_one_cycle(20, 3e-3)
###Output
_____no_output_____
###Markdown
PyTorch `nn.RNN`
###Code
class Model4(nn.Module):
def __init__(self):
super().__init__()
self.i_h = nn.Embedding(nv, nh)
self.rnn = nn.RNN(nh, nh, batch_first=True)
self.h_o = nn.Linear(nh, nv)
self.bn = BatchNorm1dFlat(nh)
self.h = torch.zeros(1, bs, nh).cuda()
def forward(self, x):
res, h = self.rnn(self.i_h(x), self.h)
self.h = h.detach()
return self.h_o(self.bn(res))
learn = Learner(data, Model4(), metrics=accuracy)
learn.fit_one_cycle(20, 3e-3)
###Output
_____no_output_____
###Markdown
2-layer GRU
###Code
class Model5(nn.Module):
def __init__(self):
super().__init__()
self.i_h = nn.Embedding(nv, nh)
self.rnn = nn.GRU(nh, nh, 2, batch_first=True)
self.h_o = nn.Linear(nh, nv)
self.bn = BatchNorm1dFlat(nh)
self.h = torch.zeros(2, bs, nh).cuda()
def forward(self, x):
res, h = self.rnn(self.i_h(x), self.h)
self.h = h.detach()
return self.h_o(self.bn(res))
learn = Learner(data, Model5(), metrics=accuracy)
learn.fit_one_cycle(10, 1e-2)
###Output
_____no_output_____ |
analysis/Baseline.ipynb | ###Markdown
check sample submission
###Code
import os, pandas as pd
PATH = '../input'  # assumption: directory containing the competition CSV files (adjust to your setup)
sample_submission = pd.read_csv(os.path.join(PATH, 'sample_submission.csv'))
sample_submission.head()
###Output
_____no_output_____
###Markdown
check train
###Code
train = pd.read_csv(os.path.join(PATH, 'train.csv'))
target = train['target']
train.head()
train.shape
train.nunique()
###Output
_____no_output_____
###Markdown
check test
###Code
test = pd.read_csv(os.path.join(PATH, 'test.csv'))
test.head()
test.shape
test.nunique()
###Output
_____no_output_____
###Markdown
check new_merchant_transactions
###Code
new_merchant_transactions = pd.read_csv(os.path.join(PATH, 'new_merchant_transactions.csv'))
new_merchant_transactions.head()
new_merchant_transactions.shape
new_merchant_transactions['card_id'].nunique()
new_merchant_transactions.nunique()
new_merchant_transactions.groupby(['card_id']).size()  # e.g. number of new-merchant transactions per card
###Output
_____no_output_____
###Markdown
check merchants
###Code
merchants = pd.read_csv(os.path.join(PATH, 'merchants.csv'))
merchants.columns
merchants.head()
merchants.shape
merchants.nunique()
###Output
_____no_output_____
###Markdown
check historical_transactions
###Code
historical_transactions = pd.read_csv(os.path.join(PATH, 'historical_transactions.csv'))
historical_transactions.head()
historical_transactions.shape
historical_transactions['card_id'].nunique()
historical_transactions.nunique()
###Output
_____no_output_____ |
notebooks/03-ml_modelling_sklearn.ipynb | ###Markdown
ML - Fitting a scikit-learn regressor on EC2 Milestone 3 - Task 3 DSCI 525 - Group 10
###Code
import re
import os
import glob
import zipfile
import requests
from urllib.request import urlretrieve
import json
import pandas as pd
import numpy as np
# Modelling
from joblib import dump, load
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
# Visuals
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 8, 'axes.labelweight': 'bold', 'figure.figsize': (8,6)})
###Output
/opt/tljh/user/lib/python3.7/site-packages/requests/__init__.py:91: RequestsDependencyWarning: urllib3 (1.26.4) or chardet (3.0.4) doesn't match a supported version!
RequestsDependencyWarning)
###Markdown
Overview In our previous step (notebooks/02-ml_preprocessing.ipynb) we read in multiple weather model forecasts for Australia from parquet files, trimmed them to Sydney forecasts and then joined on actual observed rainfall data. We aggregated to *mean* daily rainfall from the models to compare to actuals. The final data frame has a separate column for each weather model to be used for machine learning modelling of the actual rainfall in Sydney. The target column is `observed_rainfall`. We will read this dataset from our S3 bucket, drop NA's and do an 80/20 train/test split. We will then compare the RMSE of the raw weather model predictions against that of an ensemble machine learning method.
###Code
# INPUTS ------------------------------------------
# path_to_input_datasets = "/srv/data/my_shared_data_folder/"
path_to_input = "s3://mds-s3-student71/output/"
# path_to_output = "/srv/data/my_shared_data_folder/"
path_to_output = "s3://mds-s3-student71/output/"
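# Note: pandas reads s3:// paths through s3fs, so the s3fs package and AWS credentials are assumed to be available here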
df_modelling = pd.read_csv(os.path.join(path_to_input, "ml_data_SYD.csv"), index_col=0, parse_dates=True)
model_names = [col for col in df_modelling.columns if col != "observed_rainfall"]
df_modelling.head()
df_modelling.shape
###Output
_____no_output_____
###Markdown
Data splitting and EDA
###Code
df_train, df_test = train_test_split(df_modelling.dropna(), test_size=0.2, random_state=123)
print(f"Train set size: {df_train.shape}")
print(f"Test set size: {df_test.shape}")
df_train["observed_rainfall"].plot(kind="hist",
title="Observed Rainfall in Training Set Data for Sydney AUS",
bins=50);
plt.xlabel("Observed Rainfall (mm)");
(df_train
.assign(month = lambda x:x.index.month,
decade = lambda x: 10*(x.index.year // 10))
.groupby(["decade","month"])
.mean()
.reset_index()
.pivot(index="month", columns="decade", values="observed_rainfall")
.plot(kind="line", cmap="viridis", title="Average Rainfall Near Sydney,AUS By Decade", ylabel="Mean Monthly Rainfall (mm)")
);
# Compare errors and predicted vs. actual for each basic weather model
rmse_train_dict = {}
for i in range(0,25):
rmse_train_dict[df_train.columns[i]] = np.sqrt(mean_squared_error(df_train.iloc[:,25],df_train.iloc[:,i]))
def plot_rain_models(df, title):
"""
Plot each column of weather model data vs. observed data. Assumes observed data is in column == 25
"""
fig,ax = plt.subplots(nrows=5, ncols=5, figsize=(20,20))
fig.suptitle(title, fontsize=15)
for i in range(0,25):
ax[i//5, i % 5].scatter(df.iloc[:,25], df.iloc[:,i], alpha=0.2)
ax[i//5, i % 5].set_title(f"Model : {df_test.columns[i]}")
ax[i//5, i % 5].set_xlabel("Observed")
ax[i//5, i % 5].set_ylabel("Predicted")
ax[i//5, i % 5].set_xlim([0, 200])
ax[i//5, i % 5].set_ylim([0, 200])
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plot_rain_models(df_train, title="Predicted vs. Actual Rainfall for Assorted Weather Models Around Sydney, AUS Training Set")
###Output
_____no_output_____
###Markdown
We can see there isn't a clear correlation between the underlying model predictions and the rainfall actuals. Let's hope we can clean this up. Model Building and Cross Validation on Training Set
###Code
%%time
X, y = df_train.loc[:,model_names].values, df_train["observed_rainfall"].values
param_grid = {"n_estimators":[50,100,150],
"max_depth": [3,4,5]}
grid_searcher = GridSearchCV(estimator=RandomForestRegressor(random_state=42),
n_jobs=-1,
param_grid=param_grid,
scoring="neg_mean_squared_error",
cv=3, refit=True, error_score="raise")
grid_searcher.fit(X,y)
(pd.DataFrame(grid_searcher.cv_results_)
.sort_values("mean_test_score", ascending=False)
[["mean_test_score","params"]]
.assign(mean_squared_error = lambda x: -x.mean_test_score)
.drop(columns="mean_test_score")
.style
.background_gradient(subset="mean_squared_error")
.set_caption("Grid Search Results Summary")
)
###Output
_____no_output_____
###Markdown
We'll compare the CV root mean squared error against the other models' root mean squared error scores; using the random forest's training-set error instead would inflate our estimate of its performance, since it was fit on the train set. We can see that our optimal parameters are very similar to the `max_depth`=5, `n_estimators`=100 we obtained in our work with PySpark.
###Code
rmse_train_dict["rf_ensemble"] = np.sqrt(-pd.DataFrame(grid_searcher.cv_results_)["mean_test_score"].max())
(pd.DataFrame.from_dict(rmse_train_dict,orient="index", columns=["RMSE"])
.sort_values("RMSE")
.style
.background_gradient()
.set_caption("Train Set RMSE - CV for Random Forest Model")
)
###Output
_____no_output_____
###Markdown
Evaluation on Test Set
###Code
plot_rain_models(df_test,title="Predicted vs. Actual Rainfall for Assorted Weather Models Around Sydney, AUS Test Set")
rmse_test_dict = {}
for i in range(0,25):
rmse_test_dict[df_test.columns[i]] = np.sqrt(mean_squared_error(df_test.iloc[:,25],df_test.iloc[:,i]))
X_test,y_test = df_test.loc[:,model_names].values, df_test["observed_rainfall"].values
y_test_pred = grid_searcher.best_estimator_.predict(X_test)
rmse_test_dict["rf_ensemble"] = np.sqrt(mean_squared_error(y_test,y_test_pred))
train_results = pd.DataFrame.from_dict(rmse_train_dict,orient="index", columns=["RMSE"])
test_results = pd.DataFrame.from_dict(rmse_test_dict,orient="index", columns=["RMSE_test"])
total_results = train_results.merge(test_results, left_index=True, right_index=True).sort_values("RMSE_test")
fig,ax = plt.subplots(figsize=(15,8))
(total_results
.plot(ax=ax, title="RMSE Comparison between ML Model Ensemble and Weather Models",
ylabel="Root Mean Squared Error (mm rain)",
xlabel="Model")
)
ax.set_xticks(np.arange(len(total_results.index)));
ax.set_xticklabels(total_results.index,rotation=45);
df_test["rf_predicted"] = y_test_pred
df_test.plot(kind="scatter", x="observed_rainfall", y="rf_predicted",
title="Predicted vs. Actual Rainfall for Sydney AUS \n Random Forest Ensemble of Weather Models \n Test Set",
xlabel="Observed Rainfall (mm)", ylabel="Predicted Rainfall (mm)");
###Output
_____no_output_____
###Markdown
Model Persistence
###Code
from joblib import dump
dump(grid_searcher.best_estimator_, "model.joblib")
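# The persisted estimator can later be restored with, e.g.:
# rf_model = load("model.joblib")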
###Output
_____no_output_____ |
Genmod1.0/general-models/UserGuide_General_Model_3_Create_the_Model--Multi_layer_GESS.ipynb | ###Markdown
Create a general MODFLOW model from the NHDPlus dataset Project specific variables are imported in the model_specs.py and gen_mod_dict.py files that must be included in the notebook directory. The first file includes pathnames to data sources that will be different for each user. The second file includes a dictionary of model-specific information such as cell size, default hydraulic parameter values, and scenario definition (e.g. include bedrock, number of layers, etc.). There are examples in the repository. Run the following cells up to the "Run to here" cell to get a pull-down menu of models in the model_dict. Then, without re-running that cell, run all the remaining cells. Re-running the following cell would re-set the model to the first one in the list, which you probably don't want. If you use the notebook option to run all cells below, it runs the cell you're in, so if you use that option, move to the next cell (below the pull-down menu of models) first.
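For orientation, the sketch below shows the kind of structure `gen_mod_dict.py` is expected to have. The values are placeholders (assumptions for illustration only), but the names (`model_dict`, `K_dict`, `rock_riv_dict`, `scenario_dir`, `num_surf_layers`, `add_bedrock`, `rech_fact`) are the ones used later in this notebook.

```python
# Hypothetical sketch of gen_mod_dict.py -- placeholder values, not calibrated settings
scenario_dir = 'layers'        # name of the scenario subdirectory
num_surf_layers = 3            # number of surficial layers
add_bedrock = True             # add a bedrock layer below the surficial layers
rech_fact = 2.0                # ratio of coarse- to fine-sediment recharge

K_dict = {'K_fine': 1.0, 'K_coarse': 10.0, 'K_lakes': 100.0, 'K_bedrock': 0.1}

rock_riv_dict = {'min_thk': 3.0, 'stream_width': 10.0, 'stream_bed_thk': 0.3,
                 'river_depth': 1.0, 'bedrock_thk': 100.0, 'stream_bed_kadjust': 1.0,
                 'coastal_sed_thk': 1.0, 'coastal_sed_kadjust': 1.0}

# one entry per model area; entries may override K_dict / rock_riv_dict defaults
model_dict = {'Assabet': {'ws': 'Assabet', 'K_fine': 0.5}}
```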
###Code
__author__ = 'Jeff Starn'
%matplotlib notebook
from model_specs import *
from gen_mod_dict import *
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy as fp
import pandas as pd
import gdal
gdal.UseExceptions()
import shutil
from ipywidgets import interact, Dropdown
from IPython.display import display
for key, value in model_dict.items(): # from "gen_mod_dict.py"
md = key
ms = model_dict[md]
print('trying {}'.format(md))
try:
pass
except:
pass
models = list(model_dict.keys())
models.sort()
model_area = Dropdown(
options=models,
description='Model:',
background_color='cyan',
border_color='black',
border_width=2)
display(model_area)
###Output
_____no_output_____
###Markdown
Run to here to initiate notebook First time using this notebook in this session (before restarting the notebook), run the cells up to this point. Then select your model from the dropdown list above. Move your cursor to this cell and use the toolbar menu Cell --> Run All Below. After the first time, if you want to run another model, select your model and start running from this cell--you don't need to re-run the cells from the beginning. Preliminary stuff
###Code
md = model_area.value
ms = model_dict[md]
print('The model being processed is {}\n'.format(md))
###Output
The model being processed is Assabet
###Markdown
Set pathnames and create workspace directories for geographic data (from Notebook 1) and this model.
###Code
geo_ws = os.path.join(proj_dir, ms['ws'])
model_ws = os.path.join(geo_ws, scenario_dir)
array_pth = os.path.join(model_ws, 'arrays')
try:
shutil.rmtree(array_pth)
except:
pass
try:
shutil.rmtree(model_ws)
except:
pass
os.makedirs(model_ws)
head_file_name = '{}.hds'.format(md)
head_file_pth = os.path.join(model_ws, head_file_name)
print (model_ws)
###Output
C:/General_Models_User_Guide/subprojects/siteGeneral\Assabet\layers
###Markdown
Replace entries from the default K_dict with the model specific K values from model_dict if they exist.
###Code
# from "gen_mod_dict.py"
for key, value in K_dict.items():
if key in ms.keys():
K_dict[key] = ms[key]
###Output
_____no_output_____
###Markdown
Replace entries from the default rock_riv_dict with the model specific values from model_dict if they exist. rock_riv_dict has various attributes of bedrock and stream geometry.
###Code
# from "gen_mod_dict.py"
for key, value in rock_riv_dict.items():
if key in ms.keys():
rock_riv_dict[key] = ms[key]
###Output
_____no_output_____
###Markdown
Assign values to variables used in this notebook using rock_riv_dict
###Code
min_thk = rock_riv_dict['min_thk']
stream_width = rock_riv_dict['stream_width']
stream_bed_thk = rock_riv_dict['stream_bed_thk']
river_depth = rock_riv_dict['river_depth']
bedrock_thk = rock_riv_dict['bedrock_thk']
# addition of stream_bed_kadjust factor by Leon Kauffman
stream_bed_kadjust = rock_riv_dict['stream_bed_kadjust']
#read in the marine boundary information
coastal_sed_kadjust = rock_riv_dict['coastal_sed_kadjust']
coastal_sed_thk = rock_riv_dict['coastal_sed_thk']
###Output
_____no_output_____
###Markdown
Read the information for a model domain processed using Notebook 1 Read the model_grid data frame from a csv file. Extract grid dimensions and ibound array.
###Code
model_file = os.path.join(geo_ws, 'model_grid.csv')
model_grid = pd.read_csv(model_file, index_col='node_num', na_values=['nan', hnoflo])
NROW = model_grid.row.max() + 1
NCOL = model_grid.col.max() + 1
num_cells = NROW * NCOL
ibound = model_grid.ibound.values.reshape(NROW, NCOL)
inactive = (ibound == 0)
###Output
_____no_output_____
###Markdown
Translate geologic information into hydrologic properties Method for geology mapping to zone number from dataset GESS_poly.gdb in"Glacial Environments and Surficial Sediments (GESS) Geodatabase for the Glaciated, Conterminous United States",https://doi.org/10.5066/F71R6PQGmodel_grid.gess_poly from CrseStratSed in GESS_poly.gdb = 0 represents fine sedimentsmodel_grid.gess_poly from CrseStratSed in GESS_poly.gdb = 1 represents coarse sediments Create a dictionary that maps the K_dict from gen_mod_dict to zone numbers (key=zone number, value=entry in K_dict). Make sure these correspond with the correct units. If you're using the defaults, it is correct.
###Code
zone_dict = {0 : 'K_fine', 1 : 'K_coarse', 2 : 'K_lakes', 3 : 'K_bedrock'}
###Output
_____no_output_____
###Markdown
Perform the mapping from zone number to K to create the Kh1d array.
###Code
zones1d = np.zeros(( NROW, NCOL ), dtype=np.int32)
gess = model_grid.gess_poly.values.reshape( NROW, NCOL )
zones1d[gess == 0] = 0
zones1d[gess == 1] = 1
la = model_grid.lake.values.reshape( NROW, NCOL )
zones1d[la == 1] = 2
Kh1d = np.zeros(( NROW, NCOL ), dtype=np.float32)
for key, val in zone_dict.items():
Kh1d[zones1d == key] = K_dict[val]
model_grid['K0'] = Kh1d.ravel()
###Output
_____no_output_____
###Markdown
Process boundary condition information Create a dictionary of stream information for the drain or river package. River package input also needs the elevation of the river bed. Don't use both packages. The choice is made by commenting/uncommenting sections of the modflow function. Replace segment_len (segment length) with the conductance. The river package has not been tested.
###Code
drn_flag = (model_grid.stage != np.nan) & (model_grid.ibound == 1)
drn_data = model_grid.loc[drn_flag, ['lay', 'row', 'col', 'stage', 'segment_len', 'K0']]
drn_data.columns = ['k', 'i', 'j', 'stage', 'segment_len', 'K0']
# dcond = drn_data.K0 * drn_data.segment_len * stream_width / stream_bed_thk
# addition of stream_bed_kadjust factor by Leon Kauffman
dcond = drn_data.K0 * stream_bed_kadjust * drn_data.segment_len * stream_width / stream_bed_thk
drn_data['segment_len'] = dcond
drn_data.rename(columns={'segment_len' : 'cond'}, inplace=True)
drn_data.drop('K0', axis=1, inplace=True)
drn_data.dropna(axis='index', inplace=True)
drn_data.insert(drn_data.shape[1], 'iface', 6)
drn_recarray = drn_data.to_records(index=False)
drn_dict = {0 : drn_recarray}
riv_flag = (model_grid.stage != np.nan) & (model_grid.ibound == 1)
riv_data = model_grid.loc[riv_flag, ['lay', 'row', 'col', 'stage', 'segment_len',
'reach_intermit', 'K0']]
riv_data.columns = ['k', 'i', 'j', 'stage', 'segment_len', 'rbot', 'K0']
riv_data[['rbot']] = riv_data.stage - river_depth
#rcond = riv_data.K0 * riv_data.segment_len * stream_width / stream_bed_thk
# addition of stream_bed_kadjust factor by Leon Kauffman
rcond = riv_data.K0 * stream_bed_kadjust * riv_data.segment_len * stream_width / stream_bed_thk
riv_data['segment_len'] = rcond
riv_data.rename(columns={'segment_len' : 'rcond'}, inplace=True)
riv_data.drop('K0', axis=1, inplace=True)
riv_data.dropna(axis='index', inplace=True)
riv_data.insert(riv_data.shape[1], 'iface', 6)
riv_recarray = riv_data.to_records(index=False)
riv_dict = {0 : riv_recarray}
###Output
_____no_output_____
###Markdown
Create a dictionary of information for the general-head boundary package. Similar to the above cell. Not tested.
###Code
if model_grid.ghb.sum() > 0:
ghb_flag = model_grid.ghb == 1
ghb_data = model_grid.loc[ghb_flag, ['lay', 'row', 'col', 'top', 'segment_len', 'K0']]
ghb_data.columns = ['k', 'i', 'j', 'stage', 'segment_len', 'K0']
gcond = ghb_data.K0 * L * L / stream_bed_thk
ghb_data['segment_len'] = gcond
ghb_data.rename(columns={'segment_len' : 'cond'}, inplace=True)
ghb_data.drop('K0', axis=1, inplace=True)
ghb_data.dropna(axis='index', inplace=True)
ghb_data.insert(ghb_data.shape[1], 'iface', 6)
ghb_recarray = ghb_data.to_records(index=False)
ghb_dict = {0 : ghb_recarray}
###Output
_____no_output_____
###Markdown
Create a dictionary for the marine general-head boundary.
###Code
if model_grid.ghb_sea.sum() > 0:
#currently the marine ghb would overwrite any existing ghb, therefore write an alert
if GHB & GHB_sea:
GHB = False
print("Code doesn't support multiple ghb's. Marine ghb will be implemented.")
ghb_flag = model_grid.ghb_sea == 1
ghb_sea_data = model_grid.loc[ghb_flag, ['lay', 'row', 'col', 'fresh_head', 'segment_len', 'K0']]
ghb_sea_data.columns = ['k', 'i', 'j', 'stage', 'segment_len', 'K0']
gcond = ghb_sea_data.K0 * L * L / coastal_sed_thk / coastal_sed_kadjust
ghb_sea_data['segment_len'] = gcond
ghb_sea_data.rename(columns={'segment_len' : 'cond'}, inplace=True)
ghb_sea_data.drop('K0', axis=1, inplace=True)
ghb_sea_data.dropna(axis='index', inplace=True)
ghb_sea_data.insert(ghb_sea_data.shape[1], 'iface', 6)
ghb_sea_recarray = ghb_sea_data.to_records(index=False)
ghb_sea_dict = {0 : ghb_sea_recarray}
###Output
_____no_output_____
###Markdown
Create 1-layer model to get initial top-of-aquifer on which to drape subsequent layering Get starting heads from top elevations. The top is defined as the model-cell-mean NED elevation except in streams, where it is interpolated between MaxElevSmo and MinElevSmo in the NHD (called 'stage' in model_grid). Make them a little higher than land so that drains don't accidentally go dry too soon.
###Code
top = model_grid.top.values.reshape(NROW, NCOL)
strt = top * 1.05
###Output
_____no_output_____
###Markdown
Modify the bedrock surface, ensuring that it is always at least min_thk below the top elevation. This calculation will be revisited for the multi-layer case.
###Code
bedrock = model_grid.bedrock_el.values.reshape(NROW, NCOL)
thk = top - bedrock
thk[thk < min_thk] = min_thk
bot = top - thk
###Output
_____no_output_____
###Markdown
Create recharge array This version replaces the Wolock/Yager recharge grid with the Reitz grid. An alternative recharge source can also be specified (Wolock).
###Code
## used in general models prior to 4/5/2016
# rech = model_grid.recharge.values.reshape(NROW, NCOL)
###Output
_____no_output_____
###Markdown
Replace rech array with values computed as follows (the identity below shows why this preserves the total):
* calculate total recharge for the model domain
* calculate areas of fine and coarse deposits
* apportion recharge according to the ratio specified in gen_mod_dict.py
* write the values to an array
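Note that with $R_c = f\,R_f$ (where $f$ is `rech_fact`), choosing $R_f = R_{tot}\,/\,(f A_c + A_f)$ gives $R_f A_f + R_c A_c = R_{tot}$: the apportionment preserves the domain-total recharge while making recharge on coarse deposits $f$ times that on fine deposits.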
###Code
r_Reitz = model_grid.rch_eff_m_Reitz_2013.values.reshape(NROW, NCOL) / 365.25
rech_ma = np.ma.MaskedArray(r_Reitz, mask=inactive)
coarse_ma = np.ma.MaskedArray(zones1d != 0, mask=inactive)
fine_ma = np.ma.MaskedArray(zones1d == 0, mask=inactive)
total_rech = rech_ma.sum()
Af = fine_ma.sum()
Ac = coarse_ma.sum()
Rf = total_rech / (rech_fact * Ac + Af)
Rc = rech_fact * Rf
rech = np.zeros_like(r_Reitz)
rech[zones1d != 0] = Rc
rech[zones1d == 0] = Rf
model_grid['recharge'] = model_grid.rch_eff_m_Reitz_2013
model_grid.to_csv(os.path.join(model_ws, 'model_grid.csv'))
##Alternative recharge, to use, comment-out the above cell and uncomment this cell
# r_Wolock = model_grid.rch_m_Wolock.values.reshape(NROW, NCOL) / 365.25
# rech_ma = np.ma.MaskedArray(r_Wolock, mask=inactive)
# coarse_ma = np.ma.MaskedArray(zones1d != 0, mask=inactive)
# fine_ma = np.ma.MaskedArray(zones1d == 0, mask=inactive)
# total_rech = rech_ma.sum()
# Af = fine_ma.sum()
# Ac = coarse_ma.sum()
# Rf = total_rech / (rech_fact * Ac + Af)
# Rc = rech_fact * Rf
# rech = np.zeros_like(r_Wolock)
# rech[zones1d != 0] = Rc
# rech[zones1d == 0] = Rf
# model_grid['recharge'] = model_grid.rch_m_Wolock
# model_grid.to_csv(os.path.join(model_ws, 'model_grid.csv'))
###Output
_____no_output_____
###Markdown
Define a function to create and run MODFLOW
###Code
def modflow(md, mfpth, model_ws, nlay=1, top=top, strt=strt, nrow=NROW, ncol=NCOL, botm=bedrock,
ibound=ibound, hk=Kh1d, rech=rech, stream_dict=drn_dict, delr=L, delc=L,
hnoflo=hnoflo, hdry=hdry, iphdry=1):
strt_dir = os.getcwd()
os.chdir(model_ws)
ml = fp.modflow.Modflow(modelname=md, exe_name=mfpth, version='mfnwt',
external_path='arrays')
# add packages (DIS has to come before either BAS or the flow package)
dis = fp.modflow.ModflowDis(ml, nlay=nlay, nrow=NROW, ncol=NCOL, nper=1, delr=L, delc=L,
laycbd=0, top=top, botm=botm, perlen=1.E+05, nstp=1, tsmult=1,
steady=True, itmuni=4, lenuni=2, extension='dis',
unitnumber=11)
bas = fp.modflow.ModflowBas(ml, ibound=ibound, strt=strt, ifrefm=True,
ixsec=False, ichflg=False, stoper=None, hnoflo=hnoflo, extension='bas',
unitnumber=13)
upw = fp.modflow.ModflowUpw(ml, laytyp=1, layavg=0, chani=1.0, layvka=1, laywet=0, ipakcb=53,
hdry=hdry, iphdry=iphdry, hk=hk, hani=1.0, vka=1.0, ss=1e-05,
sy=0.15, vkcb=0.0, noparcheck=False, extension='upw',
unitnumber=31)
rch = fp.modflow.ModflowRch(ml, nrchop=3, ipakcb=53, rech=rech, irch=1,
extension='rch', unitnumber=19)
drn = fp.modflow.ModflowDrn(ml, ipakcb=53, stress_period_data=drn_dict,
dtype=drn_dict[0].dtype,
extension='drn', unitnumber=21, options=['NOPRINT', 'AUX IFACE'])
riv = fp.modflow.ModflowRiv(ml, ipakcb=53, stress_period_data=riv_dict,
dtype=riv_dict[0].dtype,
extension='riv', unitnumber=18, options=['NOPRINT', 'AUX IFACE'])
if GHB:
ghb = fp.modflow.ModflowGhb(ml, ipakcb=53, stress_period_data=ghb_dict,
dtype=ghb_dict[0].dtype,
extension='ghb', unitnumber=23, options=['NOPRINT', 'AUX IFACE'])
if GHB_sea:
ghb = fp.modflow.ModflowGhb(ml, ipakcb=53, stress_period_data=ghb_sea_dict,
dtype=ghb_sea_dict[0].dtype,
extension='ghb', unitnumber=23, options=['NOPRINT', 'AUX IFACE'])
oc = fp.modflow.ModflowOc(ml, ihedfm=0, iddnfm=0, chedfm=None, cddnfm=None, cboufm=None,
compact=True, stress_period_data={(0, 0): ['save head', 'save budget']},
extension=['oc', 'hds', 'ddn', 'cbc'], unitnumber=[14, 51, 52, 53])
# nwt = fp.modflow.ModflowNwt(ml, headtol=0.0001, fluxtol=500, maxiterout=1000,
# thickfact=1e-05, linmeth=2, iprnwt=1, ibotav=0, options='COMPLEX')
# below was used prior to March 2018
# nwt = fp.modflow.ModflowNwt(ml, headtol=0.0001, fluxtol=500, maxiterout=100, thickfact=1e-05,
# linmeth=2, iprnwt=1, ibotav=1, options='SPECIFIED', dbdtheta =0.80,
# dbdkappa = 0.00001, dbdgamma = 0.0, momfact = 0.10, backflag = 1,
# maxbackiter=30, backtol=1.05, backreduce=0.4, iacl=2, norder=1,
# level=3, north=7, iredsys=1, rrctols=0.0,idroptol=1, epsrn=1.0E-3,
# hclosexmd= 1.0e-4, mxiterxmd=200)
#below reflects changes to make solving easier, March 2018
nwt = fp.modflow.ModflowNwt(ml, headtol=0.0001, fluxtol=500, maxiterout=1000, thickfact=1e-05,
linmeth=2, iprnwt=1, ibotav=1, options='SPECIFIED', dbdtheta =0.10,
dbdkappa = 0.00001, dbdgamma = 0.01, momfact = 0.10, backflag = 1,
maxbackiter=10, backtol=1.05, backreduce=0.4, iacl=2, norder=1,
level=3, north=7, iredsys=1, rrctols=0.0,idroptol=1, epsrn=1.0E-3,
hclosexmd= 1.0e-4, mxiterxmd=200)
ml.write_input()
ml.remove_package('RIV')
ml.write_input()
success, output = ml.run_model(silent=True)
os.chdir(strt_dir)
if success:
print(" Your {:0d} layer model ran successfully".format(nlay))
else:
print(" Your {:0d} layer model didn't work".format(nlay))
###Output
_____no_output_____
###Markdown
Run 1-layer MODFLOW Use the function to run MODFLOW for 1 layer to get an approximate top-of-aquifer elevation
###Code
modflow(md, mfpth, model_ws, nlay=1, top=top, strt=strt, nrow=NROW, ncol=NCOL, botm=bot, ibound=ibound,
hk=Kh1d, rech=rech, stream_dict=drn_dict, delr=L, delc=L, hnoflo=hnoflo, hdry=hdry, iphdry=0)
###Output
Util2d:delr: resetting 'how' to external
Util2d:delc: resetting 'how' to external
Util2d:vani layer 1: resetting 'how' to external
Util2d:delr: resetting 'how' to external
Util2d:delc: resetting 'how' to external
Util2d:vani layer 1: resetting 'how' to external
Your 1 layer model ran successfully
###Markdown
Read the head file and calculate new layer top (wt) and bottom (bot) elevations based on the estimated water table (wt) being the top of the top layer. Divide the surficial layer into NLAY equally thick layers between wt and the bedrock surface elevation (as computed using minimum surficial thickness).
###Code
hdobj = fp.utils.HeadFile(head_file_pth)
heads1 = hdobj.get_data(kstpkper=(0, 0))
heads1[heads1 == hnoflo] = np.nan
heads1[heads1 <= hdry] = np.nan
heads1 = heads1[0, :, :]
hdobj = None
###Output
C:\Miniconda2\envs\aug27b\lib\site-packages\ipykernel\__main__.py:4: RuntimeWarning: invalid value encountered in less_equal
###Markdown
Create layering using the scenario in gen_mod_dict Make new model with (possibly) multiple layers. If there are dry cells in the 1 layer model, they are converted to NaN (not a number). The minimum function in the first line returns NaN if the element of either input arrays is NaN. In that case, replace NaN in modeltop with the top elevation. The process is similar to the 1 layer case. Thickness is estimated based on modeltop and bedrock and is constrained to be at least min_thk (set in gen_mod_dict.py). This thickness is divided into num_surf_layers number of layers. The cumulative thickness of these layers is the distance from the top of the model to the bottom of the layers. This 3D array of distances (the same for each layer) is subtracted from modeltop.
###Code
modeltop = np.minimum(heads1, top)
nan = np.isnan(heads1)
modeltop[nan] = top[nan]
thk = modeltop - bedrock
thk[thk < min_thk] = min_thk
NLAY = num_surf_layers
lay_extrude = np.ones((NLAY, NROW, NCOL))
lay_thk = lay_extrude * thk / NLAY
bot = modeltop - np.cumsum(lay_thk, axis=0)
###Output
C:\Miniconda2\envs\aug27b\lib\site-packages\ipykernel\__main__.py:1: RuntimeWarning: invalid value encountered in minimum
if __name__ == '__main__':
###Markdown
Using the estimated water table as the new top-of-aquifer elevations sometimes leads to the situation, in usually a very small number of cells, that the drain elevation is below the bottom of the cell. The following procedure resets the bottom elevation to one meter below the drain elevation if that is the case.
###Code
stg = model_grid.stage.fillna(1.E+30, inplace=False)
tmpdrn = (lay_extrude * stg.values.reshape(NROW, NCOL)).ravel()
tmpbot = bot.ravel()
index = np.less(tmpdrn, tmpbot)
tmpbot[index] = tmpdrn[index] - 1.0
bot = tmpbot.reshape(NLAY, NROW, NCOL)
###Output
_____no_output_____
###Markdown
* If add_bedrock = True in gen_mod_dict.py, add a layer to the bottom and increment NLAY by 1.
* Assign the new bottom-most layer an elevation equal to the elevation of the bottom of the lowest surficial layer minus bedrock_thk, which is specified in rock_riv_dict (in gen_mod_dict.py).
* Concatenate the new bottom-of-bedrock layer to the bottom of the surficial bottom array.
* Compute the vertical midpoint of each cell. Make an array (bedrock_index) that is True if the bedrock surface is higher than the midpoint and False if it is not.
* lay_extrude replaces the old lay_extrude to account for the new bedrock layer. It is not used in this cell, but is used later to extrude other arrays.
###Code
sol_thk = model_grid.soller_thk.values.reshape(NROW, NCOL)
tmp = top - sol_thk
bedrock_4_K = bedrock.copy()
bedrock_4_K[bedrock > top] = tmp[bedrock > top]
if add_bedrock:
NLAY = num_surf_layers + 1
lay_extrude = np.ones((NLAY, NROW, NCOL))
bed_bot = bot[-1:,:,:] - bedrock_thk
bot = np.concatenate((bot, bed_bot), axis=0)
mids = bot + thk / NLAY / 2
bedrock_index = mids < bedrock_4_K
bedrock_index[-1:,:,:] = True
elif not add_bedrock:
print(' no bedrock')
pass
else:
print(' add_bedrock variable needs to True or False')
###Output
_____no_output_____
###Markdown
Extrude all arrays to NLAY number of layers. Create a top-of-aquifer elevation (fake_top) that is higher (20% in this case) than the simulated 1-layer water table because in doing this approximation, some stream elevations end up higher than top_of_aquifer and thus do not operate as drains. The fake_top shouldn't affect model computations if it is set high enough because the model uses convertible (confined or unconfined) layers.
###Code
fake_top = (modeltop * 1.2).astype(np.float32)
strt = (lay_extrude * modeltop * 1.05).astype(np.float32)
ibound = (lay_extrude * ibound).astype(np.int16)
###Output
_____no_output_____
###Markdown
Perform the mapping from zone number to K to create the Kh3d array.
###Code
zones3d = np.zeros(( NLAY, NROW, NCOL ), dtype=np.int32)
gess = model_grid.gess_poly.values.reshape(NROW, NCOL)
gess3d = (lay_extrude * gess).astype(np.int32)
zones3d[gess3d == 0] = 0
zones3d[gess3d == 1] = 1
if add_bedrock:
zones3d[bedrock_index] = 3
la = model_grid.lake.values.reshape(NROW, NCOL)
zones3d[0, la == 1] = 2
Kh3d = np.zeros(( NLAY, NROW, NCOL ), dtype=np.float32)
for key, val in zone_dict.items():
Kh3d[zones3d == key] = K_dict[val]
###Output
_____no_output_____
###Markdown
Run MODFLOW again using the new layer definitions. The difference from the first run is that the top-of-aquifer elevation is the 1-layer water table rather than land surface, and of course, the number of surficial layers and/or the presence of a bedrock layer is different.
###Code
modflow(md, mfpth, model_ws, nlay=NLAY, top=fake_top, strt=strt, nrow=NROW, ncol=NCOL,
botm=bot, ibound=ibound, hk=Kh3d, rech=rech, stream_dict=drn_dict, delr=L,
delc=L, hnoflo=hnoflo, hdry=hdry, iphdry=1)
###Output
Note: external_path arrays already exists
Util2d:delr: resetting 'how' to external
Util2d:delc: resetting 'how' to external
Util2d:vani layer 1: resetting 'how' to external
Util2d:vani layer 2: resetting 'how' to external
Util2d:vani layer 3: resetting 'how' to external
Util2d:vani layer 4: resetting 'how' to external
Util2d:delr: resetting 'how' to external
Util2d:delc: resetting 'how' to external
Util2d:vani layer 1: resetting 'how' to external
Util2d:vani layer 2: resetting 'how' to external
Util2d:vani layer 3: resetting 'how' to external
Util2d:vani layer 4: resetting 'how' to external
Your 4 layer model ran successfully
###Markdown
Read the new head array
###Code
hdobj = fp.utils.HeadFile(head_file_pth)
heads = hdobj.get_data()
hdobj = None
###Output
_____no_output_____
###Markdown
Make a 2D array of the heads in the highest active cells and call it the water_table
###Code
heads[heads == hnoflo] = np.nan
heads[heads <= hdry] = np.nan
hin = np.argmax(np.isfinite(heads), axis=0)
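# np.argmax over the layer axis of the boolean array returns, for each column, the index of the first (shallowest) layer with a finite head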
row, col = np.indices((hin.shape))
water_table = heads[hin, row, col]
water_table_ma = np.ma.MaskedArray(water_table, inactive)
###Output
C:\Miniconda2\envs\aug27b\lib\site-packages\ipykernel\__main__.py:2: RuntimeWarning: invalid value encountered in less_equal
from ipykernel import kernelapp as app
###Markdown
Save the head array to a geotiff file.
###Code
data = water_table_ma
src_pth = os.path.join(geo_ws, 'ibound.tif')
src = gdal.Open(src_pth)
dst_pth = os.path.join(model_ws, 'pre-heads.tif')
driver = gdal.GetDriverByName('GTiff')
dst = driver.CreateCopy(dst_pth, src, 0)
band = dst.GetRasterBand(1)
band.WriteArray(data)
band.SetNoDataValue(np.nan)
dst = None
src = None
###Output
_____no_output_____
###Markdown
Save the heads and K from the upper-most layer to model_grid.csv
###Code
model_grid['pre_cal_heads'] = water_table_ma.ravel()
model_grid['pre_cal_K'] = Kh3d[0,:,:].ravel()
if add_bedrock:
model_grid['thk'] = model_grid.top - bot[-1,:,:].ravel() + bedrock_thk
else:
model_grid['thk'] = model_grid.top - bot[-1,:,:].ravel()
model_grid['thkR'] = model_grid.thk / model_grid.recharge
model_grid.to_csv(os.path.join(model_ws, 'model_grid.csv'))
###Output
_____no_output_____
###Markdown
Save zone array for use in calibration.
###Code
zone_file = os.path.join(model_ws, 'zone_array.npz')
np.savez(zone_file, zone=zones3d)
###Output
_____no_output_____
###Markdown
Plot a cross-section to see what the layers look like. Change row_to_plot to see other rows. Columns could be easily added.
###Code
def calc_error(top, head, obs_type):
# an offset of 1 is used to eliminate counting heads that
# are within 1 m of their target as errors.
# count topo and hydro errors
t = top < (head - err_tol)
h = top > (head + err_tol)
tmp_df = pd.DataFrame({'head':head, 'ot':obs_type, 't':t, 'h':h})
tmp = tmp_df.groupby('ot').sum()
h_e_ = tmp.loc['hydro', 'h']
t_e_ = tmp.loc['topo', 't']
result = np.array([h_e_, t_e_])
return result
hydro, topo = calc_error(model_grid.top, water_table.ravel(), model_grid.obs_type)
num_hydro = model_grid.obs_type.value_counts()['hydro']
num_topo = model_grid.obs_type.value_counts()['topo']
num_cells = num_hydro + num_topo
hydro = hydro / num_hydro
topo = topo / num_topo
def ma2(data2D):
return np.ma.MaskedArray(data2D, mask=inactive)
def ma3(data3D):
return np.ma.MaskedArray(data3D, mask=(ibound == 0))
row_to_plot = NROW // 2  # integer division so the row index is an int under Python 3 as well
xplot = np.linspace( L / 2, NCOL * L - L / 2, NCOL)
mKh = ma3(Kh3d)
mtop = ma2(top)
mbed = ma2(bedrock)
mbot = ma3(bot)
colors = ['green', 'red', 'gray']
fig = plt.figure(figsize=(8,8))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax1.plot(xplot, mtop[row_to_plot, ], label='land surface', color='black', lw=0.5)
ax1.plot(xplot, water_table_ma[row_to_plot, ], label='water table', color='blue', lw=1.)
ax1.fill_between(xplot, mtop[row_to_plot, ], mbot[0, row_to_plot, :], alpha=0.25,
color='blue', label='layer 1', lw=0.75)
for lay in range(NLAY-1):
label = 'layer {}'.format(lay+2)
ax1.fill_between(xplot, mbot[lay, row_to_plot, :], mbot[lay+1, row_to_plot, :], label=label,
color=colors[lay], alpha=0.250, lw=0.75)
ax1.plot(xplot, mbed[row_to_plot, :], label='bedrock (Soller)', color='red', linestyle='dotted', lw=1.5)
ax1.plot(xplot, mbot[-1, row_to_plot, :], color='black', linestyle='solid', lw=0.5)
ax1.legend(loc=0, frameon=False, fontsize=10, ncol=3)#, bbox_to_anchor=(1.0, 0.5))
ax1.set_ylabel('Altitude, in meters')
ax1.set_xticklabels('')
ax1.set_title('Default section along row {}, {} model, weight {:0.1f}\nK fine = {:0.1f} K coarse = {:0.1f}\
K bedrock = {:0.1f}\nFraction dry drains {:0.2f} Fraction flooded cells {:0.2f}'.format(row_to_plot, \
md, 1, K_dict['K_fine'], K_dict['K_coarse'], K_dict['K_bedrock'], hydro, topo))
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax2.fill_between(xplot, 0, mKh[0, row_to_plot, :], alpha=0.25, color='blue',
label='layer 1', lw=0.75, step='mid')
ax2.set_xlabel('Distance in meters')
ax2.set_yscale('log')
ax2.set_ylabel('Hydraulic conductivity\n in layer 1, in meters / day')
line = '{}_{}_xs.png'.format(md, scenario_dir)
fig_name = os.path.join(model_ws, line)
plt.savefig(fig_name)
t = top < (water_table - err_tol)
h = top > (water_table + err_tol)
mt = np.ma.MaskedArray(t.reshape(NROW, NCOL), model_grid.obs_type != 'topo')
mh = np.ma.MaskedArray(h.reshape(NROW, NCOL), model_grid.obs_type != 'hydro')
from matplotlib import colors
cmap = colors.ListedColormap(['0.50', 'red'])
cmap2 = colors.ListedColormap(['blue'])
back = np.ma.MaskedArray(ibound[0,:,:], ibound[0,:,:] == 0)
fig, ax = plt.subplots(1,2)
ax[0].imshow(back, cmap=cmap2, alpha=0.2)
im0 = ax[0].imshow(mh, cmap=cmap, interpolation='None')
ax[0].axhline(row_to_plot)
# fig.colorbar(im0, ax=ax[0])
ax[1].imshow(back, cmap=cmap2, alpha=0.2)
im1 = ax[1].imshow(mt, cmap=cmap, interpolation='None')
ax[1].axhline(row_to_plot)
# fig.colorbar(im1, ax=ax[1])
fig.suptitle('Default model errors (in red) along row {}, {} model, weight {:0.1f}\nK fine = {:0.1f} K coarse = {:0.1f}\
K bedrock = {:0.1f}\nFraction dry drains {:0.2f} Fraction flooded cells {:0.2f}'.format(row_to_plot, \
md, 1.0, K_dict['K_fine'], K_dict['K_coarse'], K_dict['K_bedrock'], hydro, topo))
# fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
# wspace=None, hspace=None)
fig.set_size_inches(6, 6)
# line = '{}_{}_error_map_cal.png'.format(md, scenario_dir)
line = '{}_{}_error_map.png'.format(md, scenario_dir) #csc
fig_name = os.path.join(model_ws, line)
plt.savefig(fig_name)
###Output
_____no_output_____ |
Projects/Project2/Notebooks and figs/Project2_2.ipynb | ###Markdown
Installs
###Code
!pip3 install http://download.pytorch.org/whl/cu80/torch-0.3.0.post4-cp36-cp36m-linux_x86_64.whl
###Output
Requirement already satisfied: torch==0.3.0.post4 from http://download.pytorch.org/whl/cu80/torch-0.3.0.post4-cp36-cp36m-linux_x86_64.whl in /home/lucia/anaconda3/lib/python3.6/site-packages (0.3.0.post4)
Requirement already satisfied: pyyaml in /home/lucia/anaconda3/lib/python3.6/site-packages (from torch==0.3.0.post4) (3.12)
Requirement already satisfied: numpy in /home/lucia/anaconda3/lib/python3.6/site-packages (from torch==0.3.0.post4) (1.14.2)
[31mspacy 1.9.0 has requirement pip<10.0.0,>=9.0.0, but you'll have pip 10.0.1 which is incompatible.[0m
###Markdown
Implementation
###Code
import torch
from torch import Tensor
import numpy as np
import matplotlib.pyplot as plt
# import warnings
# warnings.filterwarnings('error')
###Output
_____no_output_____
###Markdown
Generate dataset
###Code
def generate_disc_set(nb):
data = torch.Tensor(nb, 2).uniform_(0, 1)
label = ((data - .5) ** 2).sum(1) <= 1 / (2 * np.pi)
return data, convert_to_one_hot_labels(data, label.long())
def convert_to_one_hot_labels(input_, target):
tmp = input_.new(target.size(0), max(0, target.max()) + 1).fill_(0)
tmp.scatter_(1, target.view(-1, 1), 1.0)
return tmp.long()
###Output
_____no_output_____
###Markdown
Modules
###Code
class Module(object):
def forward(self, *input):
raise NotImplementedError
def backward(self, *gradwrtoutput):
raise NotImplementedError
def param(self):
return []
def update(self, lr=None, values=None):
pass
###Output
_____no_output_____
###Markdown
Linear
###Code
class Linear(Module):
"""Implements the fully connected layer module
It requires the number of inputs and outputs.
Weights are initialized assuming that a ReLU module
will be used afterwards. If a Tanh module will be used
instead, it is recommended to set
std_w = 1 / np.sqrt(n_input)
It is possible to set a default learning rate that will be used
during backpropagation if no other learning rate is stated.
"""
def __init__(self, n_input, n_output, lr=1e-5,
std_w=None, bias=True, std_b=0):
if std_w is None:
# "Xavier" initialization
std_w = 1 / np.sqrt(.5 * n_input)
# Set parameters
self.lr = lr
self.w = Tensor(n_output, n_input).normal_(0, std_w)
self.dw = Tensor(self.w.shape).zero_()
self.bias = bias
if bias:
if not std_b:
self.b = Tensor(n_output, 1).fill_(0)
else:
self.b = Tensor(n_output, 1).normal_(0, std_b)
self.db = Tensor(self.b.shape).zero_()
def forward(self, x):
"""Carries out the forward pass for backpropagation."""
self.x = x
self.s = self.w.mm(x.t())
if self.bias:
self.s += self.b
return self.s.t()
def backward(self, grad):
"""Carries out the backward pass for backpropagation.
It does not update the parameters.
"""
out = grad.mm(self.w)
self.dw = grad.t().mm(self.x)
if self.bias:
self.db = grad.sum(0).view(self.b.shape)
return out
def param(self):
"""Returns the list of parameters and gradients."""
out = [(self.w, self.dw)]
if self.bias:
out.append((self.b, self.db))
return out
def update(self, lr=None, values=None):
"""Updates the parameters with the accumulated gradients.
It must be called explicitly. If no lr is stated, the
default lr of the module is used.
"""
if lr is None:
lr = self.lr
if values is None:
self.w.add_(-lr * self.dw)
if self.bias:
self.b.add_(-lr * self.db)
self.db = Tensor(self.b.shape).zero_()
else:
self.w.add_(-lr * values[0])
self.dw = Tensor(self.w.shape).zero_()
if self.bias:
self.b.add_(-lr * values[1])
self.db = Tensor(self.b.shape).zero_()
###Output
_____no_output_____
###Markdown
Sequential
###Code
class Sequential(Module):
"""Allows to combine several modules sequentially
It is possible to either include a loss module in the Sequential
module or to not include it and use a loss module defined outside
of the Sequential module instead.
"""
def __init__(self, layers, loss=None):
self.layers = layers
self.loss = loss
def forward(self, x, target=None):
"""Carries out the forward pass for backpropagation
To do it it calls the forward functions of each individual
module.
"""
if self.loss is not None:
assert target is not None, "Target required for loss module"
for l in self.layers:
x = l.forward(x)
if self.loss is not None:
x = self.loss.forward(x, target)
self.x = x
return x
def backward(self, grad=None):
"""Carries out the backward pass for backpropagation
To do it it calls the backward functions of each individual
module
"""
if self.loss is not None:
grad = self.loss.backward()
else:
assert grad is not None, "Initial gradient required when no loss module defined"
for l in reversed(self.layers):
grad = l.backward(grad)
def param(self):
return [p for l in self.layers for p in l.param()]
def update(self, lr=None, values=None):
if values is None:
for l in self.layers:
l.update(lr)
else:
init_p = 0
for l in self.layers:
len_p = len(l.param())
if len_p:
e_p = values[init_p:init_p + len_p]
l.update(lr, e_p)
init_p += len_p
else:
l.update(lr)
###Output
_____no_output_____
###Markdown
Dropout
###Code
class Dropout(Module):
"""Dropout module"""
def __init__(self, drop_prob):
self.drop_prob=drop_prob
def forward(self, X):
#Mask with size of input with random numbers between 0 and 1
self.mask = torch.Tensor(X.size()).uniform_()
# Everything with a probability bigger than the drop probability is shutdown
self.mask = (self.mask > self.drop_prob).float()
# Shutdown neurons and normalize
return (X * self.mask)/(1-self.drop_prob)
def backward(self, grad):
#Shutdowm same neurons as in forward pass and normalize
return (grad * self.mask)/(1-self.drop_prob)
###Output
_____no_output_____
###Markdown
Activation functions ReLU
###Code
class ReLU(Module):
"""Implements the Rectified Linear Unit activation layer"""
def forward(self, x):
"""Carries out the forward pass for backpropagation."""
self.x = x
return self.x.clamp(min=0)
def backward(self, grad):
"""Carries out the backward pass for backpropagation."""
return grad * Tensor(np.where(self.x <= 0, 0, 1)).view(grad.size())
class LeakyReLU(Module):
"""Implements the Leaky ReLU activation layer"""
def __init__(self, a=.001):
        self.a = a
    def forward(self, x):
        """Carries out the forward pass for backpropagation."""
        self.x = x
        return Tensor(np.where(x >= 0, x, self.a * x))
def backward(self, grad):
"""Carries out the backward pass for backpropagation."""
return grad * Tensor(np.where(self.x >= 0,
1, self.a)).view(grad.size())
###Output
_____no_output_____
###Markdown
Tanh
###Code
class Tanh(Module):
"""Implements the Tanh activation layer"""
def forward(self, x):
"""Carries out the forward pass for backpropagation."""
self.x_tanh = x.tanh()
return self.x_tanh
def backward(self, grad):
"""Carries out the backward pass for backpropagation."""
return grad * (1 - self.x_tanh ** 2).view(grad.size())
###Output
_____no_output_____
###Markdown
Sigmoid
###Code
class Sigmoid(Module):
"""Implements the Rectified Linear Unit activation layer
It is recommended to use the Tanh module instead.
"""
def forward(self, x):
"""Carries out the forward pass for backpropagation."""
self.sigmoid = (1 + (x / 2).tanh()) / 2 #With tanh to avoid overflow
return self.sigmoid
def backward(self, grad):
"""Carries out the backward pass for backpropagation."""
out = grad * (self.sigmoid * (1 - self.sigmoid)).view(grad.size())
return out
###Output
_____no_output_____
###Markdown
Loss functions MSE loss
###Code
class LossMSE(Module):
"""Implements the MSE loss computation"""
def forward(self, output, target):
"""Carries out the forward pass for backpropagation."""
self.diff = output.float() - target.float().view(output.size())
return (self.diff ** 2).sum()
def backward(self):
"""Carries out the backward pass for backpropagation."""
return self.diff * 2
class CrossEntropyLoss(Module):
"""Implements the Cross-Entropy loss computation"""
def forward(self, output, target):
# Forward pass
self.target = target
self.p = softmax(output)
return -np.log(self.p[self.target]).sum() / len(target)
def backward(self):
# Backward pass
self.p[self.target.numpy()] -= 1
return self.p / len(self.target)
def softmax(X):
exps = np.exp(X - X.max()) #Subtract max(X) to avoid overflow
return exps / exps.sum()
# class softmax(Module):
# '''
# Softmax loss, auxiliary for cross-entropy
# '''
# def forward(self, X):
# exps = np.exp(X - X.max()) #Substract max(X) to avoid overflow
# return exps / exps.sum()
# def backward(self):
###Output
_____no_output_____
###Markdown
Optimizers
###Code
def compute_labels(predicted):
#new = predicted.clone()
#new[:,0] = 1 - new[:,0]
#res = new.mean(1).round().long()
res = torch.max(predicted, 1, keepdim=False, out=None)[1]
lbl = convert_to_one_hot_labels(Tensor(), res)
return lbl
class Optimizer(object):
def __init__(self):
self.model = None
def step(self, *input):
raise NotImplementedError
def adaptive_lr(kappa=0.75, eta0=1e-5):
"""Adaptive learning rate. After creating the lr with the
values for kappa and eta0, it yields the value for the learning
rate of the next iteration. Used for (Stochastic) Gradient Descent
methods.
"""
t = 1
while True:
yield eta0 * t ** -kappa
t += 1
###Output
_____no_output_____
###Markdown
SGD
###Code
class SGD(Optimizer):
def __init__(self, a_lr=None):
if a_lr is not None:
self.a_lr = Optimizer.adaptive_lr(kappa=a_lr[0], eta0=a_lr[1])
else:
self.a_lr = None
def step(self, model, loss):
# Update gradients
if self.a_lr is not None:
next_a_lr = next(self.a_lr)
else:
next_a_lr = None
model.update(lr=next_a_lr)
###Output
_____no_output_____
###Markdown
Adam
###Code
def compute_adam_moment_estimates(m_t_old, v_t_old, gradient,
beta1, beta2, epsilon, t):
# compute bias-corrected first moment estimate
m_t = (beta1 * m_t_old + (1 - beta1) * gradient)
# compute bias-corrected second raw moment estimate
v_t = (beta2 * v_t_old + (1 - beta2) * gradient.pow(2))
out = (m_t / (1 - beta1 ** (t+1)) /
((v_t / (1 - beta2 ** (t+1))).sqrt() + epsilon))
return out, m_t, v_t
class Adam(Optimizer):
def __init__(self, alpha=1e-3, beta1=.9, beta2=.999, epsilon=1e-8):
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.t = 1
self.m_prev = None
self.v_prev = None
def restart(self):
self.t = 1
self.m_prev = None
self.v_prev = None
def step(self, model, loss):
if self.m_prev is None:
# 1st moment vector
self.m_prev = [x[1].clone().fill_(0) for x in model.param()]
if self.v_prev is None:
# 2nd moment vector
self.v_prev = [x[1].clone().fill_(0) for x in model.param()]
# Update parameters
m_e = [compute_adam_moment_estimates(m_p, v_p, g[1], self.beta1,
self.beta2, self.epsilon, self.t) for g, (m_p, v_p) in
zip(model.param(), zip(self.m_prev, self.v_prev))]
out = [p[0] for p in m_e]
self.m_prev = [p[1] for p in m_e]
self.v_prev = [p[2] for p in m_e]
model.update(lr=self.alpha, values=out)
self.t += 1
###Output
_____no_output_____
###Markdown
Run
###Code
def test(model, loss, test_input, test_target):
# Get output and loss
output = model.forward(test_input)
L = loss.forward(output, test_target)
# Get predicted labels
labels = compute_labels(output)[:,0]
# Compute accuracy
errors = (test_target[:,0] != labels).sum()
accuracy = (len(test_target) - errors) / len(test_target)
print('Loss {:.08f} Accuracy {:.02f} Errors {}'.format(
L, accuracy, errors))
return accuracy, labels
def predict(model, input_):
# Get output
output = model.forward(input_)
# Get predicted labels
labels = compute_labels(output)[:,0]
return labels
def train(optimizer, model, loss, n_epochs, mini_batch_size,
train_input, train_target, verbose=True):
output_vals = Tensor()
max_range = None
if train_input.size(0) % mini_batch_size == 0:
max_range = train_input.size(0)
else:
max_range = train_input.size(0) - mini_batch_size
for e in range(n_epochs):
L_tot = 0
errors_tot = 0
pred_acc = Tensor().long()
for b in range(0, max_range, mini_batch_size):
d = train_input.narrow(0, b, mini_batch_size)
l = train_target.narrow(0, b, mini_batch_size)
# Forward pass
output = model.forward(d)
L = loss.forward(output, l)
# Backward pass
grad = loss.backward()
model.backward(grad)
optimizer.step(model, loss)
# Compute total loss
L_tot += L
# Compute accuracy
r = compute_labels(output)[:,0]
pred_acc = torch.cat([pred_acc, r])
errors = (l[:,0] != r).sum()
errors_tot += errors
accuracy = (len(train_target) - errors_tot) / len(train_target)
if verbose:
print('Epoch {:d} Loss {:.08f} Accuracy {:.02f} Errors {}'.format(
e, L_tot, accuracy, errors_tot))
return accuracy, pred_acc
train_input, train_target = generate_disc_set(1000)
data_0 = train_input[train_target[:,0].nonzero(),:].view(-1,2)
data_1 = train_input[train_target[:,1].nonzero(),:].view(-1,2)
# Plot data points
import matplotlib.pyplot as plt
def plot_points(input_, target, pred=None, alpha=.5, highlight_errors=True,
errors_color="red"):
if highlight_errors:
assert pred is not None
input_0 = input_[target[:,0].nonzero(),:].view(-1,2)
input_1 = input_[target[:,1].nonzero(),:].view(-1,2)
plt.scatter(input_0[:,0], input_0[:,1], c="gray", alpha=1)
plt.scatter(input_1[:,0], input_1[:,1], c="lightgray", alpha=1)
if highlight_errors:
idx = (pred != target[:,0]).nonzero()
if len(idx.shape):
errors = input_[idx,:].view(-1,2)
plt.scatter(errors[:,0], errors[:,1], c=errors_color, alpha=alpha)
plt.show()
plot_points(train_input, train_target, highlight_errors=False)
###Output
_____no_output_____
###Markdown
Run with SGD:
###Code
k, e0, mb = (.6, 3.5e-2, 50)
hl1 = [Linear(2, 25, lr=0), ReLU()]
hl2 = [Linear(25, 25, lr=0), ReLU()]
hl3 = [Linear(25, 30, lr=0), ReLU()]
out = [Linear(30, 2, lr=0), Sigmoid()]
model = Sequential(hl1 + hl2 + hl3 + out)
loss = LossMSE()
#loss = CrossEntropyLoss()
sgd = SGD([k, e0])
tr_acc, train_pred = train(sgd, model, loss, 50, mb, train_input, train_target, verbose=False)
print("Training Accuracy: {}".format(tr_acc))
plot_points(train_input, train_target, train_pred, highlight_errors=True)
test_input, test_target = generate_disc_set(1000)
te_acc, test_pred = test(model, loss, test_input, test_target)
print("Testing Accuracy: {}".format(te_acc))
plot_points(test_input, test_target, test_pred, highlight_errors=True)
###Output
Loss 95.64481162 Accuracy 0.96 Errors 42
Testing Accuracy: 0.958
###Markdown
Run with Adam
###Code
k, e0, mb = (.6, 3.5e-2, 50)
hl1 = [Linear(2, 25, lr=0), ReLU()]
hl2 = [Linear(25, 25, lr=0), ReLU()]
hl3 = [Linear(25, 30, lr=0), ReLU()]
out = [Linear(30, 2, lr=0), Sigmoid()]
model = Sequential(hl1 + hl2 + hl3 + out)
loss = LossMSE()
adam = Adam()
tr_acc, train_pred = train(adam, model, loss, 50, mb, train_input, train_target, verbose=False)
print("Training Accuracy: {}".format(tr_acc))
plot_points(train_input, train_target, train_pred, highlight_errors=True)
test_input, test_target = generate_disc_set(1000)
te_acc, test_pred = test(model, loss, test_input, test_target)
print("Testing Accuracy: {}".format(te_acc))
plot_points(test_input, test_target, test_pred, highlight_errors=True)
###Output
Loss 45.99411059 Accuracy 0.98 Errors 16
Testing Accuracy: 0.984
|
Module6/CAM_with_conv_empty.ipynb | ###Markdown
Class Activation Map with convolutions In this exercise, we will code class activation maps as described in the paper [Learning Deep Features for Discriminative Localization](http://cnnlocalization.csail.mit.edu/). There is a GitHub repo associated with the paper: https://github.com/zhoubolei/CAM and even a demo in PyTorch: https://github.com/zhoubolei/CAM/blob/master/pytorch_CAM.py. The code below is adapted from this demo, but we will not use hooks, only convolutions...
###Code
import io
import requests
from PIL import Image
from IPython.display import Image as Img
import torch
import torch.nn as nn
from torchvision import models, transforms
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
import cv2
import pdb
from matplotlib.pyplot import imshow
# input image
LABELS_URL = 'https://s3.amazonaws.com/outcome-blog/imagenet/labels.json'
IMG_URL = 'http://media.mlive.com/news_impact/photo/9933031-large.jpg'
###Output
_____no_output_____
###Markdown
As in the demo, we will use the Resnet18 architecture. In order to get the CAM, we need to transform this network into a fully convolutional network: at all layers, we need to deal with images, i.e. with a shape $\text{Number of channels} \times W\times H$. In particular, we are interested in the last images as shown here:As we deal with a Resnet18 architecture, the image obtained before applying the `AdaptiveAvgPool2d` has size $512\times 7 \times 7$ if the input has size $3\times 224\times 224$:1- The first thing you will need to do is 'remove' the last layers of the resnet18 model, which are called `(avgpool)` and `(fc)`. Check that for an original image of size $3\times 224\times 224$, you obtain an image of size $512\times 7\times 7$.2- Then you need to retrieve the weights (and bias) of the `fc` layer, i.e. a matrix of size $1000\times 512$ transforming a vector of size 512 into a vector of size 1000 to make the prediction. Then you need to apply these weights and bias pixelwise in order to transform your $512\times 7\times 7$ image into a $1000\times 7\times 7$ output (Hint: use a convolution). You can interpret this output as follows: `output[i,j,k]` is the logit for 'pixel' `[j,k]` belonging to class `i`.3- From this $1000\times 7\times 7$ output, you can retrieve the original output given by `resnet18` by using an `AdaptiveAvgPool2d` (a sketch of one possible way to do steps 1-3 appears after the `returnCAM` helper in the code below).4- In addition, you can construct the Class Activation Map. Draw the activation map for the class mountain bike and for the class lake.5- Does your network work on an image which is not of size $224\times 224$? And what about `resnet18`?
###Code
net = models.resnet18(pretrained=True)
print(net)
x = torch.randn(5, 3, 224, 224)
y = net(x)
y.shape
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
preprocess = transforms.Compose([
#transforms.Resize((224,224)),
transforms.ToTensor(),
normalize
])
response = requests.get(IMG_URL)
img_pil = Image.open(io.BytesIO(response.content))
img_pil.save('test.jpg')
imshow(img_pil);
img_tensor = preprocess(img_pil)
net.eval()
logit = net(img_tensor.unsqueeze(0))
# download the imagenet category list
classes = {int(key):value for (key, value)
in requests.get(LABELS_URL).json().items()}
h_x = F.softmax(logit, dim=1).data.squeeze()
probs, idx = h_x.sort(0, True)
probs = probs.numpy()
idx = idx.numpy()
# output the prediction
for i in range(0, 5):
print('{:.3f} -> {}'.format(probs[i], classes[idx[i]]))
def returnCAM(feature_conv, idx):
# input: tensor feature_conv of dim 1000*W*H and idx between 0 and 999
# output: image W*H with entries rescaled between 0 and 255 for the display
cam = feature_conv[idx].detach().numpy()
cam = cam - np.min(cam)
cam_img = cam / np.max(cam)
cam_img = np.uint8(255 * cam_img)
return cam_img
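# ------------------------------------------------------------------------
# Added sketch (not part of the original demo): one possible way to do
# steps 1-3 of the exercise. The names `features_net`, `fc_as_conv` and
# `logits_check` are ours, introduced only for illustration.
# ------------------------------------------------------------------------
# 1- drop the (avgpool) and (fc) layers -> feature maps of size 512 x H x W
features_net = nn.Sequential(*list(net.children())[:-2])
# 2- reuse the fc weights and bias as a 1x1 convolution applied pixelwise
fc_as_conv = nn.Conv2d(512, 1000, kernel_size=1)
fc_as_conv.weight.data = net.fc.weight.data.view(1000, 512, 1, 1).clone()
fc_as_conv.bias.data = net.fc.bias.data.clone()
with torch.no_grad():
# shape 1 x 1000 x 7 x 7 for a 3 x 224 x 224 input; being fully
# convolutional, it also runs on other input sizes (cf. question 5)
feature_conv = fc_as_conv(features_net(img_tensor.unsqueeze(0)))
# 3- average pooling the pixelwise logits recovers the resnet18 logits
logits_check = nn.AdaptiveAvgPool2d(1)(feature_conv).view(1, -1)
# the CAM for the top predicted class could then be: returnCAM(feature_conv[0], idx[0])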
diag_CAM = returnCAM(torch.eye(7).unsqueeze(0),0)
img = cv2.imread('test.jpg')
height, width, _ = img.shape
heatmap = cv2.applyColorMap(cv2.resize(diag_CAM,(width, height)), cv2.COLORMAP_JET)
result = heatmap * 0.3 + img * 0.5
cv2.imwrite('diag_CAM.jpg', result)
Img('diag_CAM.jpg')
###Output
_____no_output_____ |
exercises/e-vis05-multiples-master.ipynb | ###Markdown
Vis: Small Multiples*Purpose*: A powerful idea in visualization is the *small multiple*. In this exercise you'll learn how to design and create small multiple graphs.> "At the heart of quantitative reasoning is a single question: *Compared to what?*" *Edward Tufte on visual comparison.* Setup
###Code
import grama as gr
DF = gr.Intention()
%matplotlib inline
###Output
_____no_output_____
###Markdown
Fundamentals of small multiples**Facets** in ggplot allow us to apply the ideas of [small multiples](https://en.wikipedia.org/wiki/Small_multiple). As an example, consider the following graph; this example introduces the new ggplot utility `facet_wrap()`. This visual depicts economic data across several decades.
###Code
## NOTE: No need to edit
from plotnine.data import economics as df_economics
(
df_economics
>> gr.tf_pivot_longer(
columns=["pce", "pop", "psavert", "uempmed", "unemploy"],
names_to="variable",
values_to="value",
)
>> gr.ggplot(gr.aes("date", "value"))
+ gr.geom_line()
## Faceting allows us to implement small multiples
+ gr.facet_wrap("variable", scales="free_y")
+ gr.theme(axis_text_x=gr.element_text(angle=270))
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/facets/facet.py:390: PlotnineWarning: If you need more space for the x-axis tick text use ... + theme(subplots_adjust={'wspace': 0.25}). Choose an appropriate value for 'wspace'.
###Markdown
The "multiples" are the different panels; above we've separated the different variables into their own panel, and plotted each one against the date. This allows us to compare trends simply by looking across at different panels. For instance, we can see that `pce` and `pop` exhibit smooth growth over time, while the other variables seem to exhibit cyclical trends.The faceting above works particularly well for *comparing trends*: It's clear by inspection whether the various trends are increasing or decreasing, and we can easily see how each trend compares with others by looking at a different panel.The `facet_wrap(var)` utility takes the name of a column `var` to use as a grouping variable; each unique value in the given column will be used to construct a small multiple. You'll practice using this utility in the next task.
###Code
## NOTE: Run this cell
from grama.data import df_stang
###Output
_____no_output_____
###Markdown
__q1__ Use `facet_wrap()`Use `gr.facet_wrap()` to create a small multiple for each material `"property"`. Make sure to free the `y` scale in order to scale both multiples to fit their data properly.
###Code
## TASK:
(
df_stang
>> gr.tf_pivot_longer(
columns=["E", "mu"],
names_to="property",
values_to="value",
)
>> gr.ggplot(gr.aes("thick", "value"))
+ gr.geom_point()
# task-begin
## TODO: Add a facet on "property"
# task-end
# solution-begin
+ gr.facet_wrap("property", scales="free_y")
# solution-end
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/facets/facet.py:390: PlotnineWarning: If you need more space for the x-axis tick text use ... + theme(subplots_adjust={'wspace': 0.25}). Choose an appropriate value for 'wspace'.
###Markdown
"Freeing" a scale allows it to adjust to the data. This can be a good idea if each group of values has very different numerical values (or if they have different units!). In the example above `E` and `mu` take very different numerical values (order 10,000 vs order 0.1), so freeing the scale is a good idea.Freeing the scales is *not* always a good idea. The next exercise will have you consider when *not* to free the scales.
###Code
from plotnine.data import mpg
df_mpg = (
mpg
>> gr.tf_rename(carclass="class")
)
###Output
_____no_output_____
###Markdown
__q2__ To free the scales? Or not?Run the following code as-is and inspect the results. Answer the questions under *observations* below. Re-run the code following the instructions below.
###Code
## TASK: Run this code, then try disabling the `scales` argument
(
df_mpg
>> gr.ggplot(gr.aes("displ", "hwy"))
+ gr.geom_point()
+ gr.facet_wrap(
"~carclass",
# task-begin
scales="free",
# task-end
)
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/facets/facet.py:390: PlotnineWarning: If you need more space for the x-axis tick text use ... + theme(subplots_adjust={'wspace': 0.25}). Choose an appropriate value for 'wspace'.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/facets/facet.py:396: PlotnineWarning: If you need more space for the y-axis tick text use ... + theme(subplots_adjust={'hspace': 0.25}). Choose an appropriate value for 'hspace'
###Markdown
*Observations*- Based on the plot above, how much **visual** variation is there among `2seater` vehicles, with respect to their `hwy` fuel economy and engine displacement (`displ`)? - (Your response here)- Now comment out the `scales="free"` argument, re-run the code, and inspect the new plot.- With the new plot, how much **visual** variation is there among `2seater` vehicles, with respect to their `hwy` fuel economy and engine displacement (`displ`)? - (Your response here)- Based on the plot above, how much **visual** variation is there among `2seater` vehicles, with respect to their `hwy` fuel economy and engine displacement (`displ`)? - In this version of the plot (`scales="free"`) the `hwy` and `displ` values fill up the entire panel, which gives the impression of large variability among the values.- Now comment out the `scales="free"` argument, re-run the code, and inspect the new plot.- With the new plot, how much **visual** variation is there among `2seater` vehicles, with respect to their `hwy` fuel economy and engine displacement (`displ`)? - In this version of the plot (`scales="fixed"`) the `hwy` and `displ` values are tightly clustered within their panel, which gives the impression of very small variability among the values. If the different groups have similar values, or if you are trying to encourage *numerical* comparisons rather than just trend comparisons, it may be a good idea to keep the scales fixed. Finer PointsWith the basics of facets under our belt, now we can move on to some finer points about constructing small multiple plots. "Ghost points"This version of the `df_mpg` plot is not as effective as it could be:
###Code
## NOTE: No need to edit
(
df_mpg
>> gr.ggplot(gr.aes("displ", "hwy"))
+ gr.geom_point()
+ gr.facet_wrap("~carclass")
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
###Markdown
With these scatterplots it's difficult to "keep in our heads" the absolute positions of the other points as we look across the multiples. Instead we could add some "ghost points":
###Code
## NOTE: No need to edit
(
df_mpg
>> gr.ggplot(gr.aes("displ", "hwy"))
## A bit of a trick; remove the facet variable to prevent faceting
+ gr.geom_point(
data=df_mpg >> gr.tf_drop("carclass"),
color="grey",
)
+ gr.geom_point()
+ gr.facet_wrap("carclass")
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/facets/facet.py:487: FutureWarning: Passing a set as an indexer is deprecated and will raise in a future version. Use a list instead.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
###Markdown
Here we're using *visual weight* to call attention to the black points within each panel, while using lower-weight grey points to de-emphasize the bulk of data. From this version of the plot we can see clearly that the `2seater` vehicles are tightly clustered **and** they tend to have higher `hwy` for similar `displ` vehicles. There's a trick to getting the visual above; removing the facet variable from an internal dataframe prevents the faceting of that layer. This combined with a second point layer gives the "ghost" point effect.The presence of these "ghost" points provides more context; they facilitate the "Compared to what?" question that Tufte puts at the center of quantitative reasoning.
###Code
from grama.data import df_diamonds
###Output
_____no_output_____
###Markdown
__q3__ Use the "ghost point" trickEdit the following figure to use the "ghost" point trick demonstrated above.
###Code
## TASK: Add "ghost points" to the following plot in order to show
## every observation within each panel
(
df_diamonds
>> gr.ggplot(gr.aes("carat", "price"))
# solution-begin
+ gr.geom_point(
data=df_diamonds >> gr.tf_drop("cut"),
color="grey",
)
# solution-end
+ gr.geom_point()
# solution-begin
+ gr.facet_wrap("cut")
# solution-end
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/facets/facet.py:487: FutureWarning: Passing a set as an indexer is deprecated and will raise in a future version. Use a list instead.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
###Markdown
Aside: Reordering factorsThe utility function `gr.fct_reorder()` allows us to "reorder" factor levels according to another variable. This is useful because it enables us to control the *order* in which factor levels are displayed on a plot. __q4__ Reorder the `"carclass"`Use `gr.fct_reorder()` to reorder the `"carclass"` variable according to `"hwy"`. Answer the questions under *observations* below.*Hint*: Remember to check the documentation for a new function to learn how to use it!
###Code
## TASK: Reorder `carclass` by `hwy`
(
df_mpg
# solution-begin
>> gr.tf_mutate(carclass=gr.fct_reorder(DF.carclass, DF.hwy))
# solution-end
>> gr.ggplot(gr.aes("carclass", "hwy"))
+ gr.geom_boxplot()
)
###Output
_____no_output_____
###Markdown
*Observations*- When you *do not* reorder `carclass`, what order are the classes listed along the horizontal axis? - (Your response here?)- When you *do* reorder `carclass`, what is changed about the plot? What do you notice about the boxplots? - (Your response here?)- When you *do not* reorder `carclass`, what order are the classes listed along the horizontal axis? - The `carclass` values are listed in alphabetical order.- When you *do* reorder `carclass`, what is changed about the plot? What do you notice about the boxplots? - Now the boxplots tend to "rise" across the plot; the `carclass` values are now ordered by their median `hwy` value. Controlling the facet axisSometimes you'll want to place facets along the horizontal or vertical axis *only*; this is helpful when seeking to make more direct comparisons across an axis. The utility `facet_grid()` allows you to specify whether to facet along the vertical or horizontal axis of the plot.For example, consider the following figure:
###Code
## NOTE: No need to edit
(
df_mpg
## Find highest fuel economy models within each manufacturer
>> gr.tf_group_by(DF.manufacturer)
>> gr.tf_filter(DF.hwy == gr.max(DF.hwy))
>> gr.tf_ungroup()
## Reorder manufacturers based on their fuel economy
>> gr.tf_mutate(manufacturer=gr.fct_reorder(DF.manufacturer, DF.hwy))
## Visualize
>> gr.ggplot(gr.aes("hwy", "model"))
+ gr.geom_point()
## Use facet_grid to control which axis gets the faceting
+ gr.facet_grid("manufacturer~.", scales="free_y")
+ gr.theme(strip_text_y=gr.element_text(angle=0, hjust=0))
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
###Markdown
For this visual to work I need all facets to share a common horizontal axis. Note what happens when I simply wrap the facets instead:
###Code
## NOTE: No need to edit
(
df_mpg
## Find highest fuel economy models within each manufacturer
>> gr.tf_group_by(DF.manufacturer)
>> gr.tf_filter(DF.hwy == gr.max(DF.hwy))
>> gr.tf_ungroup()
## Reorder manufacturers based on their fuel economy
>> gr.tf_mutate(manufacturer=gr.fct_reorder(DF.manufacturer, DF.hwy))
## Visualize
>> gr.ggplot(gr.aes("hwy", "model"))
+ gr.geom_point()
## Use facet_grid to control which axis gets the faceting
+ gr.facet_wrap("manufacturer", scales="free_y")
+ gr.theme(strip_text_y=gr.element_text(angle=0, hjust=0))
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/facets/facet.py:390: PlotnineWarning: If you need more space for the x-axis tick text use ... + theme(subplots_adjust={'wspace': 0.25}). Choose an appropriate value for 'wspace'.
###Markdown
This figure is essentially useless; without a common horizontal axis is it almost impossible to compare values across panels. __q5__ Facet along a single axisUse `gr.facet_grid()` to facet by the `"metric"` column. Experiment with both forms of the argument `"~metric"` and `"metric~."` to test faceting along the horizontal and vertical axes. Answer the questions under *observations* below.
###Code
## TASK: Facet by "metric" along a single axis
(
df_economics
>> gr.tf_select("date", "pce", "pop", "psavert")
>> gr.tf_pivot_longer(
columns=["pce", "pop", "psavert"],
names_to="metric",
values_to="value",
)
>> gr.ggplot(gr.aes("date", "value"))
+ gr.geom_line()
+ gr.facet_grid(
# task-begin
## TODO: Test both forms of the argument
# task-end
# solution-begin
"metric~.",
# solution-end
scales="free_y",
)
)
###Output
/Users/zach/opt/anaconda3/envs/evc/lib/python3.9/site-packages/plotnine/utils.py:371: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
###Markdown
*Observations*- Which axis does the argument `"metric~."` facet along? - (Your response here)- Which axis does the argument `"~metric"` facet along? - (Your response here)- Which version of the faceting---along the horizontal or along the vertical---do you find more effective? - (Your response here)- Which axis does the argument `"metric~."` facet along? - Along the vertical axis (panels stacked vertically)- Which axis does the argument `"~metric"` facet along? - Along the horizontal axis (panels stacked horizontally)- Which version of the faceting---along the horizontal or along the vertical---do you find more effective? - I find vertical faceting more effective; this form gives all of the panels a common horizontal axis that eases comparison. The `facet_grid()` utility also allows us to facet by *multiple* columns through the syntax `gr.facet_grid("var1~var2")`. This functionality is used by a number of functions in grama. Sinew plots and facetsFacets show up in a variety of grama functions; for instance, the `ev_sinews()` utility has autoplot functionality that makes heavy use of facets. The autoplot facets along the horizontal axis by the inputs, and along the vertical axis by the outputs. This allows us to quickly assess how every model input affects every model output.
###Code
## NOTE: No need to edit
from grama.models import make_plate_buckle
md_plate = make_plate_buckle()
(
md_plate
>> gr.ev_sinews(df_det="nom")
>> gr.pt_auto()
)
###Output
Calling plot_sinew_outputs....
|
LSTM_Stock_final.ipynb | ###Markdown
Code
###Code
# Mount local drive
from google.colab import drive
drive.mount('/content/gdrive/', force_remount=True)
import torch.nn as nn
import torch.optim as optim
import random
import numpy as np
import torch
import csv
import matplotlib.pyplot as plt
import time
# multivariate data preparation
from numpy import array
from numpy import hstack
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assuming that we are on a CUDA machine, this should print a CUDA device:
print(device)
# Hyperparameters: split sizes (percentages), note: testsize = 100 - train_sz
train_sz = 80 # currently 4/5 of the data (before val-data is removed)
val_sz = 20 # currently 1/5 of training
n_timesteps = 5 # this is number of timesteps (tau)
learn_rate = 0.001 #Hyperparam
train_episodes = 1000 # Hyperparam
batch_size = 16
num_hidden_states = 200
num_hidden_layers = 3
PATH = "/content/gdrive/My Drive/Colab Notebooks/FMCC.csv"
SAVE_PATH = "/content/gdrive/My Drive/Colab Notebooks"
with open(PATH, newline='') as csvfile:
dictreader = csv.DictReader(csvfile, delimiter = ',')
feature_names = dictreader.fieldnames
all_data = list(dictreader) # creates a list of dicts (1 per sample) using first row of csv for feature-names
del all_data[-1] # delete last item
del all_data[0] # delete first item
del all_data[1506] # Volume == 0 causing problems (div by 0)
del all_data[1507] # Volume == 0 causing problems (div by 0)
data_length = len(all_data)
inp_feats = ['Percentage Change (Low)','Percentage Change (High)' ,'Percentage Change (Close)', 'Percentage Change(Volume)', 'Percentage Change(Vix)', 'Percentage Change(S&P)']
alternate_inp_feats = ['Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume', 'VIX Closed', 'S&P Close']
outp_feat = 'Next Day Percentage Change (High)'
for i, row in enumerate(all_data):
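    # (added note) the percentage columns appear to be stored as strings such as '1.23%';
    # the slice [0:-1] drops the trailing character before converting to float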
try:
all_data[i][outp_feat] = float(all_data[i][outp_feat][0:-1])
except ValueError:
print ('Line {} is corrupt!'.format(i))
break
for mat_feat in inp_feats:
all_data[i][mat_feat] = float(all_data[i][mat_feat][0:-1])
# alternate features
# for mat_feat in alternate_inp_feats:
# all_data[i][mat_feat] = float(all_data[i][mat_feat])
all_inps = np.array([[all_data[samp][feat] for feat in inp_feats] for samp in range(data_length)])
all_outps = np.array([all_data[samp][outp_feat] for samp in range(data_length)]).reshape(data_length, 1)
# alternate features
# all_inps = np.array([[all_data[samp][feat] for feat in alternate_inp_feats] for samp in range(data_length)])
n_features = len(inp_feats)
dataset = hstack((all_inps, all_outps))
print(dataset.shape)
# split a multivariate sequence into samples
def split_sequences(sequences, n_steps):
X, y = list(), list()
for i in range(len(sequences)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the dataset
if end_ix > len(sequences):
break
# gather input and output parts of the pattern
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
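# (added note) e.g. with n_steps=3, X[i] stacks the feature rows i..i+2 and y[i] is the
# target taken from row i+2, so X has shape (num_samples, n_steps, n_features) and y (num_samples,)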
class MV_LSTM(torch.nn.Module):
def __init__(self,n_features,seq_length):
super(MV_LSTM, self).__init__()
self.n_features = n_features
self.seq_len = seq_length
self.n_hidden = num_hidden_states # number of hidden states
self.n_layers = num_hidden_layers # number of LSTM layers (stacked)
self.l_lstm = torch.nn.LSTM(input_size = n_features,
hidden_size = self.n_hidden,
num_layers = self.n_layers,
batch_first = True)
# according to pytorch docs LSTM output is
# (batch_size,seq_len, num_directions * hidden_size)
# when considering batch_first = True
self.l_linear = torch.nn.Linear(self.n_hidden*self.seq_len, 1)
def init_hidden(self, batch_size):
# even with batch_first = True this remains same as docs
hidden_state = torch.zeros(self.n_layers,batch_size,self.n_hidden)
cell_state = torch.zeros(self.n_layers,batch_size,self.n_hidden)
self.hidden = (hidden_state, cell_state)
def forward(self, x):
batch_size, seq_len, _ = x.size()
lstm_out, self.hidden = self.l_lstm(x,self.hidden)
# lstm_out(with batch_first = True) is
# (batch_size,seq_len,num_directions * hidden_size)
# for following linear layer we want to keep batch_size dimension and merge rest
# .contiguous() -> solves tensor compatibility error
x = lstm_out.contiguous().view(batch_size,-1)
return self.l_linear(x)
# convert dataset into input/output
X, y = split_sequences(dataset, n_timesteps)
# split data into training/test/val
train_test_split = int(train_sz * X.shape[0] // 100)
val_split = (val_sz * train_test_split // 100)
valX = X[:val_split]
valy = y[:val_split]
trainX = X[val_split:train_test_split]
trainy = y[val_split:train_test_split]
testX = X[train_test_split:]
testy = y[train_test_split:]
# create NN
mv_net = MV_LSTM(n_features, n_timesteps)
criterion = torch.nn.MSELoss() # reduction='sum' created huge loss value
optimizer = torch.optim.Adam(mv_net.parameters(), lr=learn_rate)
# mv_net.train()
num_epoch = []
loss_list_train = []
loss_list_val = []
loss_order = 10
temp_lr = learn_rate
start_time = time.time()
for t in range(train_episodes):
# train
mv_net.train()
for b in range(0, len(trainX), batch_size):
inpt = trainX[b:b+batch_size,:,:]
target = trainy[b:b+batch_size]
x_batch = torch.tensor(inpt,dtype=torch.float32)
y_batch = torch.tensor(target,dtype=torch.float32)
mv_net.init_hidden(x_batch.size(0))
# lstm_out, _ = mv_net.l_lstm(x_batch,nnet.hidden)
# lstm_out.contiguous().view(x_batch.size(0),-1)
output = mv_net(x_batch)
loss = criterion(output.view(-1), y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# experiment
if loss.item() < (loss_order / 100):
loss_order /= 100
temp_lr /= 5
for param_group in optimizer.param_groups:
param_group['lr'] = temp_lr
print('cut lr to', temp_lr)
if t % 20 == 0:
print('step : ' , t , 'loss : ' , loss.item())
num_epoch.append(t)
loss_list_train.append(loss.item())
# estimate validation error per epoch
for b in range(0,len(valX),batch_size):
inpt = valX[b:b+batch_size,:,:]
target = valy[b:b+batch_size]
x_batch_val = torch.tensor(inpt,dtype=torch.float32)
y_batch_val = torch.tensor(target,dtype=torch.float32)
mv_net.init_hidden(x_batch_val.size(0))
# lstm_out, _ = mv_net.l_lstm(x_batch,nnet.hidden)
# lstm_out.contiguous().view(x_batch.size(0),-1)
output = mv_net(x_batch_val)
loss_val = criterion(output.view(-1), y_batch_val)
loss_list_val.append(loss_val.item())
minutes = (time.time() - start_time)
print("--- %s ---" % minutes)
# plot two error curves
d_epoch = np.array(num_epoch)
d_train = np.array(loss_list_train)
d_val = np.array(loss_list_val)
val_min = np.argmin(loss_list_val)
fig, ax = plt.subplots()
ax.plot(d_epoch, d_train, 'k--', label = 'train loss')
ax.plot(d_epoch, d_val, 'r--', label = 'validation loss')
plt.axvline(val_min)
ax.set_title("Training and validation losses")
ax.set_xlabel("Number of epochs")
ax.set_ylabel("Loss")
ax.legend(loc='best')
fig.show()
# test - print out the output
batch_size = 1
num_correct = 0
num_incorrect = 0
mae = []
for b in range(0,len(testX),batch_size):
inpt = testX[b:b+batch_size,:,:]
target = testy[b:b+batch_size]
x_batch = torch.tensor(inpt,dtype=torch.float32)
y_batch = torch.tensor(target,dtype=torch.float32)
mv_net.init_hidden(x_batch.size(0))
output = mv_net(x_batch)
abs_diff = np.absolute(output.detach().numpy() - target)
mae.append(abs_diff)
# print('ground_truth: ', target, 'output: ', output)
if(np.sign(target) == np.sign(output.detach().numpy())):
num_correct += 1
else:
num_incorrect += 1
mae = np.array(mae)
mae_avg = np.mean(mae)
print('Mean Absolute Error: ', mae_avg)
print('correct:', num_correct)
print('incorrect:', num_incorrect)
print(num_correct/ (num_correct + num_incorrect))
print('train size: ', len(trainX))
print('test size: ', len(testX))
# model_save_name = 'mvnet_state_CCL_moredata'
# save_path = F'{SAVE_PATH}/{model_save_name}'
# torch.save(mv_net.state_dict(), save_path)
# model.load_state_dict(torch.load(save_path))
# dollar calculation
day = []
dollar_list = []
dollars = 100000
threshold = 1.0
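# a long position is taken only on days when the predicted next-day percentage change exceeds this threshold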
# number of actually trading
count = 0
count_list = []
batch_size = 1
for b in range(0,len(testX),batch_size):
inpt = testX[b:b+batch_size,:,:]
target = testy[b:b+batch_size]
x_batch = torch.tensor(inpt,dtype=torch.float32)
y_batch = torch.tensor(target,dtype=torch.float32)
mv_net.init_hidden(x_batch.size(0))
# lstm_out, _ = mv_net.l_lstm(x_batch,nnet.hidden)
# lstm_out.contiguous().view(x_batch.size(0),-1)
output = mv_net(x_batch)
if output.detach().numpy() > threshold:
# print(target)
day.append(b)
# print(b)
dollars *= (1 + (target[0])/100)
# print(type(int(dollars)))
count += 1
dollar_list.append(dollars)
else:
day.append(b)
# print(b)
# print(type(int(dollars)))
dollar_list.append(dollars)
# print(day)
# print(dollar_list)
fig, ax = plt.subplots()
ax.plot(day, dollar_list, 'b--', label = 'dollar amount movement')
ax.set_xlabel("trading days")
ax.set_ylabel("dollars")
ax.set_title("Dollar amount movement")
fig.show()
###Output
_____no_output_____ |
8 recurrent-neural-networks/rnn-gluon.ipynb | ###Markdown
Concise Implementation of Recurrent Neural Networks:label:`chapter_rnn_gluon`While :numref:`chapter_rnn_scratch` was instructive to see how recurrent neural networks are implemented, this isn't convenient or fast. The current section will show how to implement the same language model more efficiently using functions provided by Gluon. We begin as before by reading the "Time Machine" corpus.
###Code
import d2l
import math
from mxnet import gluon, init, np, npx
from mxnet.gluon import nn, rnn
npx.set_np()
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
###Output
_____no_output_____
###Markdown
Defining the ModelGluon's `rnn` module provides a recurrent neural network implementation (beyond many other sequence models). We construct the recurrent neural network layer `rnn_layer` with a single hidden layer and 256 hidden units, and initialize the weights.
###Code
num_hiddens = 256
rnn_layer = rnn.RNN(num_hiddens)
rnn_layer.initialize()
###Output
_____no_output_____
###Markdown
Initializing the state is straightforward. We invoke the member function `rnn_layer.begin_state(batch_size)`. This returns an initial state for each element in the minibatch. That is, it returns an object that is of size (hidden layers, batch size, number of hidden units). The number of hidden layers defaults to 1. In fact, we haven't even discussed yet what it means to have multiple layers - this will happen in :numref:`chapter_deep_rnn`. For now, suffice it to say that multiple layers simply amount to the output of one RNN being used as the input for the next RNN.
###Code
batch_size = 1
state = rnn_layer.begin_state(batch_size=batch_size)
len(state), state[0].shape
###Output
_____no_output_____
###Markdown
With a state variable and an input, we can compute the output with the updated state.
###Code
num_steps = 1
X = np.random.uniform(size=(num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
Y.shape, len(state_new), state_new[0].shape
###Output
_____no_output_____
###Markdown
Similar to :numref:`chapter_rnn_scratch`, we define an `RNNModel` block by subclassing the `Block` class for a complete recurrent neural network. Note that `rnn_layer` only contains the hidden recurrent layers, we need to create a separate output layer. While in the previous section, we have the output layer within the `rnn` block.
###Code
# Save to the d2l package.
class RNNModel(nn.Block):
def __init__(self, rnn_layer, vocab_size, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.rnn = rnn_layer
self.vocab_size = vocab_size
self.dense = nn.Dense(vocab_size)
def forward(self, inputs, state):
X = npx.one_hot(inputs.T, self.vocab_size)
Y, state = self.rnn(X, state)
# The fully connected layer will first change the shape of Y to
# (num_steps * batch_size, num_hiddens)
# Its output shape is (num_steps * batch_size, vocab_size)
output = self.dense(Y.reshape(-1, Y.shape[-1]))
return output, state
def begin_state(self, *args, **kwargs):
return self.rnn.begin_state(*args, **kwargs)
###Output
_____no_output_____
###Markdown
TrainingLet's make a prediction with the model that has random weights.
###Code
ctx = d2l.try_gpu()
model = RNNModel(rnn_layer, len(vocab))
model.initialize(force_reinit=True, ctx=ctx)
d2l.predict_ch8('time traveller', 10, model, vocab, ctx)
###Output
_____no_output_____
###Markdown
As is quite obvious, this model doesn't work at all (just yet). Next, we call just `train_ch8` defined in :numref:`chapter_rnn_scratch` with the same hyper-parameters to train our model.
###Code
num_epochs, lr = 500, 1
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, ctx)
###Output
Perplexity 1.2, 189795 tokens/sec on gpu(0)
time traveller you can show black is white by argumentative it
traveller you can shis not howe that ulurious after dinner
|
utils/.ipynb_checkpoints/PICES_Regional_Ecosystem_Tool-checkpoint.ipynb | ###Markdown
PICES Regional Ecosystem Tool Data acquisition, analysis, plotting & saving to facilitate IEA report Developed by Chelle Gentemann ([email protected]) & Marisol Garcia-Reyes ([email protected])****** Instructions To configure: In the cell marked by `* Configuration *`, specify: Region, Variable, Date Period To execute: On the top menu click: `Run/Cell` -> `Run All` ****** Regions
| Region Number | Region Name |
|---|---|
| 11 | California Current |
| 12 | Gulf of Alaska |
| 13 | East Bering Sea |
| 14 | North Bering Sea |
| 15 | Aleutian Islands |
| 16 | West Bering Sea |
| 17 | Sea of Okhotsk |
| 18 | Oyashio Current |
| 19 | Sea of Japan |
| 20 | Yellow Sea |
| 21 | East China Sea |
| 22 | Kuroshio Current |
| 23 | West North Pacific |
| 24 | East North Pacific |
*** Variables 1) SST: Sea Surface Temperature ('1981-01-01' - present) 2) Chl: Chlorophyll-a Concentration ('1997-01-01' - '2018-06-30') 3) Wind: Wind Speed Vectors ('1997-01-01' - present) 4) Current: Sea Surface Currents Vectors ('1992-01-01' - present)****** *** Configuration ***
###Code
#######################
#### Configuration ####
#######################
## Region to analyze ##
region = 11 # <<<----- Use number (11 to 24) based on table above
## Variable ##
## Select the variable to analyze from the list above
var = 'Chl' # <<<----- Use short name given above. upper or lower case accepted.
## Date Period to analize ##
## Specify the period using the format: #### YYYY-MM-DD #####
## Data available specified above
## All data in monthly resolution
initial_date = '1981-01-01'
final_date = '2019-09-30'
##############################
#### End of configuration ####
##############################
#### Do not modify ####
%matplotlib inline
import sys
sys.path.append('./subroutines/')
from pices import analyze_PICES_Region
analyze_PICES_Region(region, var, initial_date, final_date)
#### End of script ####
###Output
_____no_output_____ |
2022/introPython.ipynb | ###Markdown
Course: Computational Thinking for Governance Analytics Prof. José Manuel Magallanes, PhD * Visiting Professor of Computational Policy at Evans School of Public Policy and Governance, and eScience Institute Senior Data Science Fellow, University of Washington.* Professor of Government and Political Methodology, Pontificia Universidad Católica del Perú. Session 0: Introduction to Python Data Structures_____ 1. Data Structures Python has basic native structures, like lists, tuples and dictionaries. A. **LISTS** Lists are the most flexible structure to save or contain data elements.
###Code
names=["Qing", "Françoise", "Raúl", "Bjork","Marie"]
ages=[32,33,28,30,29]
country=["China", "Senegal", "España", "Norway","Korea"]
education=["Bach", "Bach", "Master", "PhD","PhD"]
###Output
_____no_output_____
###Markdown
Above we have created some lists. Lists can contain any values. Lists support different operations: * **Accessing**:Keep in mind the positions in Python start in **0**.
###Code
# one element
ages[0]
# several, using slices:
ages[1:-1] #second to before last
# several, using slices:
ages[:-2] #all but two last ones
# non consecutive
from operator import itemgetter
list(itemgetter(0,2,3)(ages))
ages
# difficult to understand? the same selection using a slice plus the element at position 3
ages
ages[0:4:2] + [ages[3]]
type(ages[3])
###Output
_____no_output_____
###Markdown
* **Modifying**:
###Code
# by position
country[2]="Spain"
# list changed:
country
# by value
country=["PR China" if x == "China" else x for x in country]
# list changed:
country
###Output
_____no_output_____
###Markdown
* **Deleting**
###Code
# by position
del country[-1] #last value
# list changed:
country
# by position
names.pop() #last value by default
# list changed:
names
# only 'del' works for several positions
lista=[1,2,3,4,5,6]
del lista[1:3]
#now:
lista
# by value
ages.remove(29)
# list changed:
ages # just first occurrence of the value!!
# by value
education.remove('PhD')
# list changed:
education # just first occurrence!!
# deleting every value:
lista=[1,'a',45,'b','a']
lista=[x for x in lista if x!='a']
# you get:
lista
###Output
_____no_output_____
###Markdown
* **Inserting values**
###Code
# at the end
lista.append("abc")
lista
# PART ONE:
# first delete a position
education.pop(2)
education
# PART TWO:
# now insert in that position
education.insert(2,"Master")
education
###Output
_____no_output_____
###Markdown
B. **TUPLES**Tuples are immutable structures in Python; they look like lists but do not share much of their functionality:
###Code
# new list:
weekend=("Friday", "Saturday", "Sunday")
###Output
_____no_output_____
###Markdown
You can access:
###Code
weekend[0]
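# note (added): tuples are immutable, so item assignment fails, e.g.
# weekend[0] = "Thursday"   # -> TypeError: 'tuple' object does not support item assignment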
###Output
_____no_output_____
###Markdown
But no other operation is allowed. Python itself uses tuples as output of some important functions:
###Code
zip(names,ages)
###Output
_____no_output_____
###Markdown
The **zip** function creates tuples by combining the lists in parallel. You can see it if you turn the result into a list:
###Code
list(zip(names,ages)) # a list of tuples
###Output
_____no_output_____
###Markdown
C. **DICTIONARIES** *Dicts* work in a more sophisticated way, as they have a **'key'**:**'value'** structure:
###Code
classroom={'student':names,'age':ages,'edu':education}
# see it:
classroom
###Output
_____no_output_____
###Markdown
Dicts do not use indexes to access values:
###Code
#classroom[0]
###Output
_____no_output_____
###Markdown
Dicts use keys:
###Code
classroom['student']
###Output
_____no_output_____
###Markdown
Notice I created a dictionary where the value is not ONE but a LIST of values. Once you access a value, you can modify it. You can also use _pop_ or _del_ with the **keys** (a commented example is shown in the next code cell). But you cannot use _append_ to add an element; you need **update**:
###Code
classroom.update({'country':country})
# now:
classroom
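# (added illustration) entries can also be removed by key, for example:
# classroom.pop('country')    # removes the key and returns its value
# del classroom['country']    # removes the key in place
# (kept commented out so later cells still find the 'country' key)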
###Output
_____no_output_____
###Markdown
D. DATA FRAMES **Data frames** are more complex containers of values. The most common analogy is a spreadsheet. To create a data frame, we need to call **pandas**:
###Code
import pandas
###Output
_____no_output_____
###Markdown
We can prepare a data frame from a dictionary immediately, but ONLY if you have the same number of elements in each list representing a column.
###Code
# our data frame:
students=pandas.DataFrame(classroom)
## see it:
students
###Output
_____no_output_____
###Markdown
But, let me update the dictionary with:
###Code
names=["Qing", "Françoise", "Raúl", "Bjork","Marie"]
#
classroom.update({'student':names})
#
classroom
###Output
_____no_output_____
###Markdown
We have five students, but only data for four of them. Then this does not work:
###Code
#pandas.DataFrame(classroom)
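# (note) if uncommented, the line above raises a ValueError because the lists now have different lengths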
classroom.items()
[(a,pandas.Series(b)) for a, b in classroom.items()]
###Output
_____no_output_____
###Markdown
In that case, you need this:
###Code
#then
students=pandas.DataFrame({key:pandas.Series(value) for key, value in classroom.items()})
# seeing it:
students
###Output
_____no_output_____
###Markdown
Sometimes, Python users code like this:
###Code
import pandas as pd # renaming the library
students=pd.DataFrame({key:pd.Series(value) for key, value in classroom.items()})
students
###Output
_____no_output_____
###Markdown
Data frame basic operations
###Code
# data of structure: list? tuple? dataframe?
type(students)
# type of data in data frame column
students.dtypes
# details of data frame
students.info()
# number of rows and columns
students.shape
# number of rows:
len(students)
# first rows
students.head(2) # compare with: students.tail(2)
# name of columns
students.columns
###Output
_____no_output_____
###Markdown
If you need the column names as a list:
###Code
students.columns.tolist()# or simply: list(students)
###Output
_____no_output_____
###Markdown
If you need a column's values as a list:
###Code
students.age.tolist()# list(students.ages)
###Output
_____no_output_____
###Markdown
Accessing elements in a DF:The data frames in pandas behave much like those in R:
###Code
#one particular column
students.student
# or
students['student']
# it is not the same as:
students[['student']] # a data frame, not a column (or series)
# this is also a DF
students[['country','student']]
# and this, using loc:
columnNames=['country','student']
students.loc[:,columnNames]
## Using positions is very common:
columnPositions=[1,3,0]
students.iloc[:,columnPositions]
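# reminder: .loc selects by label, .iloc selects by integer position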
###Output
_____no_output_____
###Markdown
Changing valuesIf you have a position, you can update values:
###Code
students.iloc[4,1]=23 # change is immediate! (no warning)
students
###Output
_____no_output_____
###Markdown
Deleting columns You can modify any values in a data frame, but let me create a **deep** copy of this data frame to play with:
###Code
studentsCopy=students.copy()
studentsCopy
# This is what you want get rid of:
byeColumns=['edu'] # you can delete more than one
#this is the result
studentsCopy.drop(columns=byeColumns)
###Output
_____no_output_____
###Markdown
Notice you have not saved the previous result:
###Code
studentsCopy
#NOW we do
studentsCopy.drop(columns=byeColumns,inplace=True)
#then:
studentsCopy
###Output
_____no_output_____
###Markdown
Deleting a rowLet me delete a row:
###Code
# dropping by index label deletes a row (axis 0)
studentsCopy.drop(index=2,inplace=True)
studentsCopy
###Output
_____no_output_____
###Markdown
As you see, index 2 disappeared, leaving a gap in the index. Then, you should reset the indexes:
###Code
studentsCopy.reset_index(drop=False,inplace=True)
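# drop=False keeps the old index as a new 'index' column; drop=True would discard it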
studentsCopy
?studentsCopy.reset_index
###Output
_____no_output_____ |
grurl.ipynb | ###Markdown
RL Trading based on GRU
###Code
import torch
import torch.nn as nn
from sklearn.preprocessing import StandardScaler
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
# def setup_seed(seed):
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
# np.random.seed(seed)
# random.seed(seed)
# torch.backends.cudnn.deterministic = True
# # set the random seed
# setup_seed(20)
###Output
_____no_output_____
###Markdown
1. Loading data and data preprocessing
###Code
raw_prices = pd.read_csv("BTC.csv", header=None)
prices = np.array(raw_prices[1])
OFFSET = 200 # starting point in the data
M = 10 # size of the history window fed to the network, i.e. the lookback used at each time step
T = 2000 # length of the time series the trader is trained on
N = 600 # size of the validation set
prices = prices[OFFSET:OFFSET+M+T+N+1]
# asset_returns = torch.tensor(np.log(prices[1:]) - np.log(prices[:-1])).to(torch.float32)
# asset_returns = torch.tensor((prices[1:] - prices[:-1]) / prices[:-1]).to(torch.float32)
asset_returns = torch.tensor(prices[1:] - prices[:-1]).to(torch.float32)
print('asset_returns',asset_returns)
scaler = StandardScaler()
normalized_asset_returns = torch.tensor(scaler.fit_transform(asset_returns[:M+T][:, None])[:, 0]).to(torch.float32)
###Output
asset_returns tensor([-1.6900e+00, -6.6000e-01, -2.1400e+00, ..., 3.0003e+02,
-1.5075e+03, 3.1755e+02])
###Markdown
2. Create NN model
###Code
class GRURL(nn.Module):
def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
super(GRURL, self).__init__()
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.gru = nn.GRU(input_dim, hidden_dim, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_dim, output_dim)
    def init_hidden(self, batch_size, use_gpu=True):
        # returns a zero initial hidden state for a batch of the given size
        if use_gpu: return torch.zeros(self.num_layers, batch_size, self.hidden_dim).requires_grad_().cuda()
        else: return torch.zeros(self.num_layers, batch_size, self.hidden_dim).requires_grad_()
def forward(self, x):
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_().cuda()
out, (hn) = self.gru(x, (h0.detach()))
out = self.fc(out[:, -1, :])
return torch.tanh(out)
input_dim = 1
hidden_dim = 64
num_layers = 3
output_dim = 1
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
model = GRURL(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers).to(device)
print(model)
###Output
GRURL(
(gru): GRU(1, 64, num_layers=3, batch_first=True)
(fc): Linear(in_features=64, out_features=1, bias=True)
)
###Markdown
3. Train the model
###Code
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
rewards = []
max_iter = 1000
miu = 1
delta = 0.04
eps = 1e-6
for epoch in range(max_iter):
optimizer.zero_grad()
Ft = torch.zeros(T).to(normalized_asset_returns.device)
for i in range(1, T):
data = normalized_asset_returns[i-1:i+M-1]
input = data.view(1,M,1).to(device)
Ft[i] = model(input)
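    # per-step trading return: previous position Ft[t-1] times the asset return, minus
    # transaction costs delta * |change in position|; the Sharpe ratio of these returns
    # is the quantity maximized below (by minimizing its negative)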
returns = miu * (Ft[:T-1] * asset_returns[M:M+T-1]) - (delta * torch.abs(Ft[1:] - Ft[:T-1]))
expected_return = torch.mean(returns, dim=-1)
std_return = torch.std(returns, dim=-1)
sharpe = expected_return / (torch.sqrt(std_return) + eps)
(-1 * sharpe).backward()
optimizer.step()
rewards.append(sharpe.detach().cpu())
Cum_returns = returns.cumsum(dim=-1)
print("Epoch ", epoch, "Sharpe: ", sharpe.detach().cpu())
print('Cum_return_train',Cum_returns[-1])
###Output
Epoch 0 Sharpe: tensor(0.0822)
Epoch 1 Sharpe: tensor(0.1102)
Epoch 2 Sharpe: tensor(0.1335)
Epoch 3 Sharpe: tensor(0.1545)
Epoch 4 Sharpe: tensor(0.1741)
Epoch 5 Sharpe: tensor(0.1928)
Epoch 6 Sharpe: tensor(0.2107)
Epoch 7 Sharpe: tensor(0.2278)
Epoch 8 Sharpe: tensor(0.2441)
Epoch 9 Sharpe: tensor(0.2598)
Epoch 10 Sharpe: tensor(0.2753)
Epoch 11 Sharpe: tensor(0.2915)
Epoch 12 Sharpe: tensor(0.3097)
Epoch 13 Sharpe: tensor(0.3303)
Epoch 14 Sharpe: tensor(0.3527)
Epoch 15 Sharpe: tensor(0.3742)
Epoch 16 Sharpe: tensor(0.3938)
Epoch 17 Sharpe: tensor(0.4145)
Epoch 18 Sharpe: tensor(0.4418)
Epoch 19 Sharpe: tensor(0.4788)
Epoch 20 Sharpe: tensor(0.5196)
Epoch 21 Sharpe: tensor(0.5518)
Epoch 22 Sharpe: tensor(0.5752)
Epoch 23 Sharpe: tensor(0.5941)
Epoch 24 Sharpe: tensor(0.6049)
Epoch 25 Sharpe: tensor(0.6077)
Epoch 26 Sharpe: tensor(0.6082)
Epoch 27 Sharpe: tensor(0.6146)
Epoch 28 Sharpe: tensor(0.6312)
Epoch 29 Sharpe: tensor(0.6529)
Epoch 30 Sharpe: tensor(0.6683)
Epoch 31 Sharpe: tensor(0.6779)
Epoch 32 Sharpe: tensor(0.6877)
Epoch 33 Sharpe: tensor(0.6983)
Epoch 34 Sharpe: tensor(0.7034)
Epoch 35 Sharpe: tensor(0.7045)
Epoch 36 Sharpe: tensor(0.7102)
Epoch 37 Sharpe: tensor(0.7251)
Epoch 38 Sharpe: tensor(0.7457)
Epoch 39 Sharpe: tensor(0.7583)
Epoch 40 Sharpe: tensor(0.7613)
Epoch 41 Sharpe: tensor(0.7644)
Epoch 42 Sharpe: tensor(0.7718)
Epoch 43 Sharpe: tensor(0.7836)
Epoch 44 Sharpe: tensor(0.7652)
Epoch 45 Sharpe: tensor(0.7890)
Epoch 46 Sharpe: tensor(0.7883)
Epoch 47 Sharpe: tensor(0.7872)
Epoch 48 Sharpe: tensor(0.7950)
Epoch 49 Sharpe: tensor(0.8073)
Epoch 50 Sharpe: tensor(0.8035)
Epoch 51 Sharpe: tensor(0.8042)
Epoch 52 Sharpe: tensor(0.8117)
Epoch 53 Sharpe: tensor(0.8094)
Epoch 54 Sharpe: tensor(0.8103)
Epoch 55 Sharpe: tensor(0.8148)
Epoch 56 Sharpe: tensor(0.8205)
Epoch 57 Sharpe: tensor(0.8270)
Epoch 58 Sharpe: tensor(0.8355)
Epoch 59 Sharpe: tensor(0.8474)
Epoch 60 Sharpe: tensor(0.8506)
Epoch 61 Sharpe: tensor(0.8621)
Epoch 62 Sharpe: tensor(0.8622)
Epoch 63 Sharpe: tensor(0.8655)
Epoch 64 Sharpe: tensor(0.8693)
Epoch 65 Sharpe: tensor(0.8704)
Epoch 66 Sharpe: tensor(0.8774)
Epoch 67 Sharpe: tensor(0.8778)
Epoch 68 Sharpe: tensor(0.8818)
Epoch 69 Sharpe: tensor(0.8839)
Epoch 70 Sharpe: tensor(0.8851)
Epoch 71 Sharpe: tensor(0.8882)
Epoch 72 Sharpe: tensor(0.8885)
Epoch 73 Sharpe: tensor(0.8906)
Epoch 74 Sharpe: tensor(0.8919)
Epoch 75 Sharpe: tensor(0.8922)
Epoch 76 Sharpe: tensor(0.8941)
Epoch 77 Sharpe: tensor(0.8957)
Epoch 78 Sharpe: tensor(0.8969)
Epoch 79 Sharpe: tensor(0.8993)
Epoch 80 Sharpe: tensor(0.9000)
Epoch 81 Sharpe: tensor(0.9022)
Epoch 82 Sharpe: tensor(0.9029)
Epoch 83 Sharpe: tensor(0.9049)
Epoch 84 Sharpe: tensor(0.9056)
Epoch 85 Sharpe: tensor(0.9077)
Epoch 86 Sharpe: tensor(0.9087)
Epoch 87 Sharpe: tensor(0.9105)
Epoch 88 Sharpe: tensor(0.9111)
Epoch 89 Sharpe: tensor(0.9124)
Epoch 90 Sharpe: tensor(0.9130)
Epoch 91 Sharpe: tensor(0.9138)
Epoch 92 Sharpe: tensor(0.9144)
Epoch 93 Sharpe: tensor(0.9150)
Epoch 94 Sharpe: tensor(0.9157)
Epoch 95 Sharpe: tensor(0.9164)
Epoch 96 Sharpe: tensor(0.9170)
Epoch 97 Sharpe: tensor(0.9176)
Epoch 98 Sharpe: tensor(0.9180)
Epoch 99 Sharpe: tensor(0.9187)
Epoch 100 Sharpe: tensor(0.9191)
Epoch 101 Sharpe: tensor(0.9199)
Epoch 102 Sharpe: tensor(0.9209)
Epoch 103 Sharpe: tensor(0.9228)
Epoch 104 Sharpe: tensor(0.9269)
Epoch 105 Sharpe: tensor(0.9383)
Epoch 106 Sharpe: tensor(0.9458)
Epoch 107 Sharpe: tensor(0.9630)
Epoch 108 Sharpe: tensor(0.9791)
Epoch 109 Sharpe: tensor(0.9976)
Epoch 110 Sharpe: tensor(1.0223)
Epoch 111 Sharpe: tensor(1.0511)
Epoch 112 Sharpe: tensor(1.0728)
Epoch 113 Sharpe: tensor(1.0945)
Epoch 114 Sharpe: tensor(1.1243)
Epoch 115 Sharpe: tensor(1.1473)
Epoch 116 Sharpe: tensor(1.1615)
Epoch 117 Sharpe: tensor(1.1763)
Epoch 118 Sharpe: tensor(1.1812)
Epoch 119 Sharpe: tensor(1.1864)
Epoch 120 Sharpe: tensor(1.1876)
Epoch 121 Sharpe: tensor(1.1872)
Epoch 122 Sharpe: tensor(1.1793)
Epoch 123 Sharpe: tensor(1.1908)
Epoch 124 Sharpe: tensor(1.1479)
Epoch 125 Sharpe: tensor(1.1187)
Epoch 126 Sharpe: tensor(1.0587)
Epoch 127 Sharpe: tensor(1.0911)
Epoch 128 Sharpe: tensor(1.1070)
Epoch 129 Sharpe: tensor(1.1148)
Epoch 130 Sharpe: tensor(1.1538)
Epoch 131 Sharpe: tensor(1.1425)
Epoch 132 Sharpe: tensor(1.1608)
Epoch 133 Sharpe: tensor(1.1647)
Epoch 134 Sharpe: tensor(1.1660)
Epoch 135 Sharpe: tensor(1.1681)
Epoch 136 Sharpe: tensor(1.1721)
Epoch 137 Sharpe: tensor(1.1804)
Epoch 138 Sharpe: tensor(1.1775)
Epoch 139 Sharpe: tensor(1.1813)
Epoch 140 Sharpe: tensor(1.1880)
Epoch 141 Sharpe: tensor(1.1830)
Epoch 142 Sharpe: tensor(1.1898)
Epoch 143 Sharpe: tensor(1.1872)
Epoch 144 Sharpe: tensor(1.1905)
Epoch 145 Sharpe: tensor(1.1904)
Epoch 146 Sharpe: tensor(1.1930)
Epoch 147 Sharpe: tensor(1.1978)
Epoch 148 Sharpe: tensor(1.1191)
Epoch 149 Sharpe: tensor(1.1649)
Epoch 150 Sharpe: tensor(0.9294)
Epoch 151 Sharpe: tensor(0.9012)
Epoch 152 Sharpe: tensor(0.9040)
Epoch 153 Sharpe: tensor(0.9086)
Epoch 154 Sharpe: tensor(0.9240)
Epoch 155 Sharpe: tensor(0.9353)
Epoch 156 Sharpe: tensor(0.9377)
Epoch 157 Sharpe: tensor(0.9450)
Epoch 158 Sharpe: tensor(0.9578)
Epoch 159 Sharpe: tensor(0.9608)
Epoch 160 Sharpe: tensor(0.9606)
Epoch 161 Sharpe: tensor(0.9598)
Epoch 162 Sharpe: tensor(0.9584)
Epoch 163 Sharpe: tensor(0.9575)
Epoch 164 Sharpe: tensor(0.9564)
Epoch 165 Sharpe: tensor(0.9558)
Epoch 166 Sharpe: tensor(0.9552)
Epoch 167 Sharpe: tensor(0.9552)
Epoch 168 Sharpe: tensor(0.9554)
Epoch 169 Sharpe: tensor(0.9559)
Epoch 170 Sharpe: tensor(0.9567)
Epoch 171 Sharpe: tensor(0.9575)
Epoch 172 Sharpe: tensor(0.9585)
Epoch 173 Sharpe: tensor(0.9593)
Epoch 174 Sharpe: tensor(0.9602)
Epoch 175 Sharpe: tensor(0.9608)
Epoch 176 Sharpe: tensor(0.9614)
Epoch 177 Sharpe: tensor(0.9617)
Epoch 178 Sharpe: tensor(0.9620)
Epoch 179 Sharpe: tensor(0.9621)
Epoch 180 Sharpe: tensor(0.9621)
Epoch 181 Sharpe: tensor(0.9621)
Epoch 182 Sharpe: tensor(0.9620)
Epoch 183 Sharpe: tensor(0.9619)
Epoch 184 Sharpe: tensor(0.9619)
Epoch 185 Sharpe: tensor(0.9619)
Epoch 186 Sharpe: tensor(0.9619)
Epoch 187 Sharpe: tensor(0.9620)
Epoch 188 Sharpe: tensor(0.9620)
Epoch 189 Sharpe: tensor(0.9621)
Epoch 190 Sharpe: tensor(0.9622)
Epoch 191 Sharpe: tensor(0.9624)
Epoch 192 Sharpe: tensor(0.9624)
Epoch 193 Sharpe: tensor(0.9625)
Epoch 194 Sharpe: tensor(0.9626)
Epoch 195 Sharpe: tensor(0.9627)
Epoch 196 Sharpe: tensor(0.9627)
Epoch 197 Sharpe: tensor(0.9628)
Epoch 198 Sharpe: tensor(0.9628)
Epoch 199 Sharpe: tensor(0.9629)
Epoch 200 Sharpe: tensor(0.9629)
Epoch 201 Sharpe: tensor(0.9629)
Epoch 202 Sharpe: tensor(0.9629)
Epoch 203 Sharpe: tensor(0.9629)
Epoch 204 Sharpe: tensor(0.9629)
Epoch 205 Sharpe: tensor(0.9629)
Epoch 206 Sharpe: tensor(0.9629)
Epoch 207 Sharpe: tensor(0.9629)
Epoch 208 Sharpe: tensor(0.9629)
Epoch 209 Sharpe: tensor(0.9629)
Epoch 210 Sharpe: tensor(0.9629)
Epoch 211 Sharpe: tensor(0.9629)
Epoch 212 Sharpe: tensor(0.9629)
Epoch 213 Sharpe: tensor(0.9629)
Epoch 214 Sharpe: tensor(0.9630)
Epoch 215 Sharpe: tensor(0.9630)
Epoch 216 Sharpe: tensor(0.9630)
Epoch 217 Sharpe: tensor(0.9630)
Epoch 218 Sharpe: tensor(0.9630)
Epoch 219 Sharpe: tensor(0.9630)
Epoch 220 Sharpe: tensor(0.9630)
Epoch 221 Sharpe: tensor(0.9630)
Epoch 222 Sharpe: tensor(0.9630)
Epoch 223 Sharpe: tensor(0.9630)
Epoch 224 Sharpe: tensor(0.9630)
Epoch 225 Sharpe: tensor(0.9630)
Epoch 226 Sharpe: tensor(0.9630)
Epoch 227 Sharpe: tensor(0.9630)
Epoch 228 Sharpe: tensor(0.9630)
Epoch 229 Sharpe: tensor(0.9631)
Epoch 230 Sharpe: tensor(0.9631)
Epoch 231 Sharpe: tensor(0.9631)
Epoch 232 Sharpe: tensor(0.9631)
Epoch 233 Sharpe: tensor(0.9631)
Epoch 234 Sharpe: tensor(0.9631)
Epoch 235 Sharpe: tensor(0.9631)
Epoch 236 Sharpe: tensor(0.9631)
Epoch 237 Sharpe: tensor(0.9631)
Epoch 238 Sharpe: tensor(0.9631)
Epoch 239 Sharpe: tensor(0.9631)
Epoch 240 Sharpe: tensor(0.9631)
Epoch 241 Sharpe: tensor(0.9631)
Epoch 242 Sharpe: tensor(0.9631)
Epoch 243 Sharpe: tensor(0.9631)
Epoch 244 Sharpe: tensor(0.9631)
Epoch 245 Sharpe: tensor(0.9631)
Epoch 246 Sharpe: tensor(0.9631)
Epoch 247 Sharpe: tensor(0.9631)
Epoch 248 Sharpe: tensor(0.9631)
Epoch 249 Sharpe: tensor(0.9631)
Epoch 250 Sharpe: tensor(0.9631)
Epoch 251 Sharpe: tensor(0.9631)
Epoch 252 Sharpe: tensor(0.9631)
Epoch 253 Sharpe: tensor(0.9631)
Epoch 254 Sharpe: tensor(0.9631)
Epoch 255 Sharpe: tensor(0.9631)
Epoch 256 Sharpe: tensor(0.9631)
Epoch 257 Sharpe: tensor(0.9631)
Epoch 258 Sharpe: tensor(0.9631)
Epoch 259 Sharpe: tensor(0.9631)
Epoch 260 Sharpe: tensor(0.9631)
Epoch 261 Sharpe: tensor(0.9631)
Epoch 262 Sharpe: tensor(0.9631)
Epoch 263 Sharpe: tensor(0.9632)
Epoch 264 Sharpe: tensor(0.9632)
Epoch 265 Sharpe: tensor(0.9632)
Epoch 266 Sharpe: tensor(0.9632)
Epoch 267 Sharpe: tensor(0.9632)
Epoch 268 Sharpe: tensor(0.9632)
Epoch 269 Sharpe: tensor(0.9632)
Epoch 270 Sharpe: tensor(0.9632)
Epoch 271 Sharpe: tensor(0.9632)
Epoch 272 Sharpe: tensor(0.9632)
Epoch 273 Sharpe: tensor(0.9632)
Epoch 274 Sharpe: tensor(0.9632)
Epoch 275 Sharpe: tensor(0.9632)
Epoch 276 Sharpe: tensor(0.9632)
Epoch 277 Sharpe: tensor(0.9632)
Epoch 278 Sharpe: tensor(0.9632)
Epoch 279 Sharpe: tensor(0.9632)
Epoch 280 Sharpe: tensor(0.9632)
Epoch 281 Sharpe: tensor(0.9632)
Epoch 282 Sharpe: tensor(0.9632)
Epoch 283 Sharpe: tensor(0.9632)
Epoch 284 Sharpe: tensor(0.9632)
Epoch 285 Sharpe: tensor(0.9632)
Epoch 286 Sharpe: tensor(0.9632)
Epoch 287 Sharpe: tensor(0.9632)
Epoch 288 Sharpe: tensor(0.9632)
Epoch 289 Sharpe: tensor(0.9632)
Epoch 290 Sharpe: tensor(0.9632)
Epoch 291 Sharpe: tensor(0.9632)
Epoch 292 Sharpe: tensor(0.9632)
Epoch 293 Sharpe: tensor(0.9632)
Epoch 294 Sharpe: tensor(0.9632)
Epoch 295 Sharpe: tensor(0.9632)
Epoch 296 Sharpe: tensor(0.9632)
Epoch 297 Sharpe: tensor(0.9632)
Epoch 298 Sharpe: tensor(0.9632)
Epoch 299 Sharpe: tensor(0.9632)
Epoch 300 Sharpe: tensor(0.9632)
Epoch 301 Sharpe: tensor(0.9632)
Epoch 302 Sharpe: tensor(0.9632)
Epoch 303 Sharpe: tensor(0.9632)
Epoch 304 Sharpe: tensor(0.9632)
Epoch 305 Sharpe: tensor(0.9632)
Epoch 306 Sharpe: tensor(0.9632)
Epoch 307 Sharpe: tensor(0.9632)
Epoch 308 Sharpe: tensor(0.9632)
Epoch 309 Sharpe: tensor(0.9632)
Epoch 310 Sharpe: tensor(0.9632)
Epoch 311 Sharpe: tensor(0.9632)
Epoch 312 Sharpe: tensor(0.9632)
Epoch 313 Sharpe: tensor(0.9632)
Epoch 314 Sharpe: tensor(0.9632)
Epoch 315 Sharpe: tensor(0.9632)
Epoch 316 Sharpe: tensor(0.9632)
Epoch 317 Sharpe: tensor(0.9632)
Epoch 318 Sharpe: tensor(0.9632)
Epoch 319 Sharpe: tensor(0.9632)
Epoch 320 Sharpe: tensor(0.9632)
Epoch 321 Sharpe: tensor(0.9632)
Epoch 322 Sharpe: tensor(0.9632)
Epoch 323 Sharpe: tensor(0.9632)
Epoch 324 Sharpe: tensor(0.9632)
Epoch 325 Sharpe: tensor(0.9633)
Epoch 326 Sharpe: tensor(0.9633)
Epoch 327 Sharpe: tensor(0.9633)
Epoch 328 Sharpe: tensor(0.9633)
Epoch 329 Sharpe: tensor(0.9633)
Epoch 330 Sharpe: tensor(0.9633)
Epoch 331 Sharpe: tensor(0.9633)
Epoch 332 Sharpe: tensor(0.9633)
Epoch 333 Sharpe: tensor(0.9633)
Epoch 334 Sharpe: tensor(0.9633)
Epoch 335 Sharpe: tensor(0.9633)
Epoch 336 Sharpe: tensor(0.9633)
Epoch 337 Sharpe: tensor(0.9633)
Epoch 338 Sharpe: tensor(0.9633)
Epoch 339 Sharpe: tensor(0.9633)
Epoch 340 Sharpe: tensor(0.9633)
Epoch 341 Sharpe: tensor(0.9633)
Epoch 342 Sharpe: tensor(0.9633)
Epoch 343 Sharpe: tensor(0.9633)
Epoch 344 Sharpe: tensor(0.9633)
Epoch 345 Sharpe: tensor(0.9633)
Epoch 346 Sharpe: tensor(0.9633)
Epoch 347 Sharpe: tensor(0.9633)
Epoch 348 Sharpe: tensor(0.9633)
Epoch 349 Sharpe: tensor(0.9633)
Epoch 350 Sharpe: tensor(0.9633)
Epoch 351 Sharpe: tensor(0.9633)
Epoch 352 Sharpe: tensor(0.9633)
Epoch 353 Sharpe: tensor(0.9633)
Epoch 354 Sharpe: tensor(0.9633)
Epoch 355 Sharpe: tensor(0.9633)
Epoch 356 Sharpe: tensor(0.9633)
Epoch 357 Sharpe: tensor(0.9633)
Epoch 358 Sharpe: tensor(0.9633)
Epoch 359 Sharpe: tensor(0.9633)
Epoch 360 Sharpe: tensor(0.9633)
Epoch 361 Sharpe: tensor(0.9633)
Epoch 362 Sharpe: tensor(0.9633)
Epoch 363 Sharpe: tensor(0.9633)
Epoch 364 Sharpe: tensor(0.9633)
Epoch 365 Sharpe: tensor(0.9633)
Epoch 366 Sharpe: tensor(0.9633)
Epoch 367 Sharpe: tensor(0.9633)
Epoch 368 Sharpe: tensor(0.9633)
Epoch 369 Sharpe: tensor(0.9633)
Epoch 370 Sharpe: tensor(0.9633)
Epoch 371 Sharpe: tensor(0.9633)
Epoch 372 Sharpe: tensor(0.9633)
Epoch 373 Sharpe: tensor(0.9633)
Epoch 374 Sharpe: tensor(0.9633)
Epoch 375 Sharpe: tensor(0.9633)
Epoch 376 Sharpe: tensor(0.9633)
Epoch 377 Sharpe: tensor(0.9633)
Epoch 378 Sharpe: tensor(0.9633)
Epoch 379 Sharpe: tensor(0.9633)
Epoch 380 Sharpe: tensor(0.9633)
Epoch 381 Sharpe: tensor(0.9633)
Epoch 382 Sharpe: tensor(0.9633)
Epoch 383 Sharpe: tensor(0.9633)
Epoch 384 Sharpe: tensor(0.9633)
Epoch 385 Sharpe: tensor(0.9633)
Epoch 386 Sharpe: tensor(0.9633)
Epoch 387 Sharpe: tensor(0.9633)
Epoch 388 Sharpe: tensor(0.9633)
Epoch 389 Sharpe: tensor(0.9633)
Epoch 390 Sharpe: tensor(0.9633)
Epoch 391 Sharpe: tensor(0.9633)
Epoch 392 Sharpe: tensor(0.9633)
Epoch 393 Sharpe: tensor(0.9633)
Epoch 394 Sharpe: tensor(0.9633)
Epoch 395 Sharpe: tensor(0.9633)
Epoch 396 Sharpe: tensor(0.9633)
Epoch 397 Sharpe: tensor(0.9633)
Epoch 398 Sharpe: tensor(0.9633)
Epoch 399 Sharpe: tensor(0.9633)
Epoch 400 Sharpe: tensor(0.9633)
Epoch 401 Sharpe: tensor(0.9633)
Epoch 402 Sharpe: tensor(0.9633)
Epoch 403 Sharpe: tensor(0.9633)
Epoch 404 Sharpe: tensor(0.9633)
Epoch 405 Sharpe: tensor(0.9633)
Epoch 406 Sharpe: tensor(0.9633)
Epoch 407 Sharpe: tensor(0.9633)
Epoch 408 Sharpe: tensor(0.9633)
Epoch 409 Sharpe: tensor(0.9633)
Epoch 410 Sharpe: tensor(0.9633)
Epoch 411 Sharpe: tensor(0.9633)
Epoch 412 Sharpe: tensor(0.9633)
Epoch 413 Sharpe: tensor(0.9633)
Epoch 414 Sharpe: tensor(0.9633)
Epoch 415 Sharpe: tensor(0.9633)
Epoch 416 Sharpe: tensor(0.9633)
Epoch 417 Sharpe: tensor(0.9633)
Epoch 418 Sharpe: tensor(0.9633)
Epoch 419 Sharpe: tensor(0.9633)
Epoch 420 Sharpe: tensor(0.9633)
Epoch 421 Sharpe: tensor(0.9633)
Epoch 422 Sharpe: tensor(0.9633)
Epoch 423 Sharpe: tensor(0.9633)
Epoch 424 Sharpe: tensor(0.9633)
Epoch 425 Sharpe: tensor(0.9633)
Epoch 426 Sharpe: tensor(0.9633)
Epoch 427 Sharpe: tensor(0.9633)
Epoch 428 Sharpe: tensor(0.9633)
Epoch 429 Sharpe: tensor(0.9633)
Epoch 430 Sharpe: tensor(0.9633)
Epoch 431 Sharpe: tensor(0.9633)
Epoch 432 Sharpe: tensor(0.9633)
Epoch 433 Sharpe: tensor(0.9633)
Epoch 434 Sharpe: tensor(0.9633)
Epoch 435 Sharpe: tensor(0.9633)
Epoch 436 Sharpe: tensor(0.9633)
Epoch 437 Sharpe: tensor(0.9633)
Epoch 438 Sharpe: tensor(0.9633)
Epoch 439 Sharpe: tensor(0.9633)
Epoch 440 Sharpe: tensor(0.9633)
Epoch 441 Sharpe: tensor(0.9633)
Epoch 442 Sharpe: tensor(0.9633)
Epoch 443 Sharpe: tensor(0.9633)
Epoch 444 Sharpe: tensor(0.9633)
Epoch 445 Sharpe: tensor(0.9633)
Epoch 446 Sharpe: tensor(0.9633)
Epoch 447 Sharpe: tensor(0.9633)
Epoch 448 Sharpe: tensor(0.9633)
Epoch 449 Sharpe: tensor(0.9633)
Epoch 450 Sharpe: tensor(0.9633)
Epoch 451 Sharpe: tensor(0.9633)
Epoch 452 Sharpe: tensor(0.9633)
Epoch 453 Sharpe: tensor(0.9633)
Epoch 454 Sharpe: tensor(0.9633)
Epoch 455 Sharpe: tensor(0.9633)
Epoch 456 Sharpe: tensor(0.9633)
Epoch 457 Sharpe: tensor(0.9633)
Epoch 458 Sharpe: tensor(0.9633)
Epoch 459 Sharpe: tensor(0.9633)
Epoch 460 Sharpe: tensor(0.9633)
Epoch 461 Sharpe: tensor(0.9633)
Epoch 462 Sharpe: tensor(0.9633)
Epoch 463 Sharpe: tensor(0.9633)
Epoch 464 Sharpe: tensor(0.9633)
Epoch 465 Sharpe: tensor(0.9633)
Epoch 466 Sharpe: tensor(0.9634)
Epoch 467 Sharpe: tensor(0.9634)
Epoch 468 Sharpe: tensor(0.9634)
Epoch 469 Sharpe: tensor(0.9634)
Epoch 470 Sharpe: tensor(0.9634)
Epoch 471 Sharpe: tensor(0.9634)
Epoch 472 Sharpe: tensor(0.9634)
Epoch 473 Sharpe: tensor(0.9634)
Epoch 474 Sharpe: tensor(0.9634)
Epoch 475 Sharpe: tensor(0.9634)
Epoch 476 Sharpe: tensor(0.9634)
Epoch 477 Sharpe: tensor(0.9634)
Epoch 478 Sharpe: tensor(0.9634)
Epoch 479 Sharpe: tensor(0.9634)
Epoch 480 Sharpe: tensor(0.9634)
Epoch 481 Sharpe: tensor(0.9634)
Epoch 482 Sharpe: tensor(0.9634)
Epoch 483 Sharpe: tensor(0.9634)
Epoch 484 Sharpe: tensor(0.9634)
Epoch 485 Sharpe: tensor(0.9634)
Epoch 486 Sharpe: tensor(0.9634)
Epoch 487 Sharpe: tensor(0.9634)
Epoch 488 Sharpe: tensor(0.9634)
Epoch 489 Sharpe: tensor(0.9634)
Epoch 490 Sharpe: tensor(0.9634)
Epoch 491 Sharpe: tensor(0.9634)
Epoch 492 Sharpe: tensor(0.9634)
Epoch 493 Sharpe: tensor(0.9634)
Epoch 494 Sharpe: tensor(0.9634)
Epoch 495 Sharpe: tensor(0.9634)
Epoch 496 Sharpe: tensor(0.9634)
Epoch 497 Sharpe: tensor(0.9634)
Epoch 498 Sharpe: tensor(0.9634)
Epoch 499 Sharpe: tensor(0.9634)
Epoch 500 Sharpe: tensor(0.9634)
Epoch 501 Sharpe: tensor(0.9634)
Epoch 502 Sharpe: tensor(0.9634)
Epoch 503 Sharpe: tensor(0.9634)
Epoch 504 Sharpe: tensor(0.9634)
Epoch 505 Sharpe: tensor(0.9634)
Epoch 506 Sharpe: tensor(0.9634)
Epoch 507 Sharpe: tensor(0.9634)
Epoch 508 Sharpe: tensor(0.9634)
Epoch 509 Sharpe: tensor(0.9634)
Epoch 510 Sharpe: tensor(0.9634)
Epoch 511 Sharpe: tensor(0.9634)
Epoch 512 Sharpe: tensor(0.9634)
Epoch 513 Sharpe: tensor(0.9634)
Epoch 514 Sharpe: tensor(0.9634)
Epoch 515 Sharpe: tensor(0.9634)
Epoch 516 Sharpe: tensor(0.9634)
Epoch 517 Sharpe: tensor(0.9634)
Epoch 518 Sharpe: tensor(0.9634)
Epoch 519 Sharpe: tensor(0.9634)
Epoch 520 Sharpe: tensor(0.9634)
Epoch 521 Sharpe: tensor(0.9634)
Epoch 522 Sharpe: tensor(0.9634)
Epoch 523 Sharpe: tensor(0.9634)
Epoch 524 Sharpe: tensor(0.9634)
Epoch 525 Sharpe: tensor(0.9634)
Epoch 526 Sharpe: tensor(0.9634)
Epoch 527 Sharpe: tensor(0.9634)
Epoch 528 Sharpe: tensor(0.9634)
Epoch 529 Sharpe: tensor(0.9634)
Epoch 530 Sharpe: tensor(0.9634)
Epoch 531 Sharpe: tensor(0.9634)
Epoch 532 Sharpe: tensor(0.9634)
Epoch 533 Sharpe: tensor(0.9634)
Epoch 534 Sharpe: tensor(0.9634)
Epoch 535 Sharpe: tensor(0.9634)
Epoch 536 Sharpe: tensor(0.9634)
Epoch 537 Sharpe: tensor(0.9634)
Epoch 538 Sharpe: tensor(0.9634)
Epoch 539 Sharpe: tensor(0.9634)
Epoch 540 Sharpe: tensor(0.9634)
Epoch 541 Sharpe: tensor(0.9634)
Epoch 542 Sharpe: tensor(0.9634)
Epoch 543 Sharpe: tensor(0.9634)
Epoch 544 Sharpe: tensor(0.9634)
Epoch 545 Sharpe: tensor(0.9634)
Epoch 546 Sharpe: tensor(0.9634)
Epoch 547 Sharpe: tensor(0.9634)
Epoch 548 Sharpe: tensor(0.9634)
Epoch 549 Sharpe: tensor(0.9634)
Epoch 550 Sharpe: tensor(0.9634)
Epoch 551 Sharpe: tensor(0.9634)
Epoch 552 Sharpe: tensor(0.9634)
Epoch 553 Sharpe: tensor(0.9634)
Epoch 554 Sharpe: tensor(0.9634)
Epoch 555 Sharpe: tensor(0.9634)
Epoch 556 Sharpe: tensor(0.9634)
Epoch 557 Sharpe: tensor(0.9634)
Epoch 558 Sharpe: tensor(0.9634)
Epoch 559 Sharpe: tensor(0.9634)
Epoch 560 Sharpe: tensor(0.9634)
Epoch 561 Sharpe: tensor(0.9634)
Epoch 562 Sharpe: tensor(0.9634)
Epoch 563 Sharpe: tensor(0.9634)
Epoch 564 Sharpe: tensor(0.9634)
Epoch 565 Sharpe: tensor(0.9634)
Epoch 566 Sharpe: tensor(0.9634)
Epoch 567 Sharpe: tensor(0.9634)
Epoch 568 Sharpe: tensor(0.9634)
Epoch 569 Sharpe: tensor(0.9634)
Epoch 570 Sharpe: tensor(0.9634)
Epoch 571 Sharpe: tensor(0.9634)
Epoch 572 Sharpe: tensor(0.9634)
Epoch 573 Sharpe: tensor(0.9634)
Epoch 574 Sharpe: tensor(0.9634)
Epoch 575 Sharpe: tensor(0.9634)
Epoch 576 Sharpe: tensor(0.9634)
Epoch 577 Sharpe: tensor(0.9634)
Epoch 578 Sharpe: tensor(0.9634)
Epoch 579 Sharpe: tensor(0.9634)
Epoch 580 Sharpe: tensor(0.9634)
Epoch 581 Sharpe: tensor(0.9634)
Epoch 582 Sharpe: tensor(0.9634)
Epoch 583 Sharpe: tensor(0.9634)
Epoch 584 Sharpe: tensor(0.9634)
Epoch 585 Sharpe: tensor(0.9634)
Epoch 586 Sharpe: tensor(0.9634)
Epoch 587 Sharpe: tensor(0.9634)
Epoch 588 Sharpe: tensor(0.9634)
Epoch 589 Sharpe: tensor(0.9634)
Epoch 590 Sharpe: tensor(0.9634)
Epoch 591 Sharpe: tensor(0.9634)
Epoch 592 Sharpe: tensor(0.9634)
Epoch 593 Sharpe: tensor(0.9634)
Epoch 594 Sharpe: tensor(0.9634)
Epoch 595 Sharpe: tensor(0.9634)
Epoch 596 Sharpe: tensor(0.9634)
Epoch 597 Sharpe: tensor(0.9634)
Epoch 598 Sharpe: tensor(0.9634)
Epoch 599 Sharpe: tensor(0.9634)
Epoch 600 Sharpe: tensor(0.9634)
Epoch 601 Sharpe: tensor(0.9634)
Epoch 602 Sharpe: tensor(0.9634)
Epoch 603 Sharpe: tensor(0.9634)
Epoch 604 Sharpe: tensor(0.9634)
Epoch 605 Sharpe: tensor(0.9634)
Epoch 606 Sharpe: tensor(0.9634)
Epoch 607 Sharpe: tensor(0.9634)
Epoch 608 Sharpe: tensor(0.9634)
Epoch 609 Sharpe: tensor(0.9634)
Epoch 610 Sharpe: tensor(0.9634)
Epoch 611 Sharpe: tensor(0.9634)
Epoch 612 Sharpe: tensor(0.9634)
Epoch 613 Sharpe: tensor(0.9634)
Epoch 614 Sharpe: tensor(0.9634)
Epoch 615 Sharpe: tensor(0.9634)
Epoch 616 Sharpe: tensor(0.9634)
Epoch 617 Sharpe: tensor(0.9634)
Epoch 618 Sharpe: tensor(0.9634)
Epoch 619 Sharpe: tensor(0.9634)
Epoch 620 Sharpe: tensor(0.9634)
Epoch 621 Sharpe: tensor(0.9634)
Epoch 622 Sharpe: tensor(0.9634)
Epoch 623 Sharpe: tensor(0.9634)
Epoch 624 Sharpe: tensor(0.9634)
Epoch 625 Sharpe: tensor(0.9634)
Epoch 626 Sharpe: tensor(0.9634)
Epoch 627 Sharpe: tensor(0.9634)
Epoch 628 Sharpe: tensor(0.9634)
Epoch 629 Sharpe: tensor(0.9634)
Epoch 630 Sharpe: tensor(0.9634)
Epoch 631 Sharpe: tensor(0.9634)
Epoch 632 Sharpe: tensor(0.9634)
Epoch 633 Sharpe: tensor(0.9634)
Epoch 634 Sharpe: tensor(0.9634)
Epoch 635 Sharpe: tensor(0.9634)
Epoch 636 Sharpe: tensor(0.9634)
Epoch 637 Sharpe: tensor(0.9634)
Epoch 638 Sharpe: tensor(0.9634)
Epoch 639 Sharpe: tensor(0.9634)
Epoch 640 Sharpe: tensor(0.9634)
Epoch 641 Sharpe: tensor(0.9634)
Epoch 642 Sharpe: tensor(0.9634)
Epoch 643 Sharpe: tensor(0.9634)
Epoch 644 Sharpe: tensor(0.9634)
Epoch 645 Sharpe: tensor(0.9634)
Epoch 646 Sharpe: tensor(0.9634)
Epoch 647 Sharpe: tensor(0.9634)
Epoch 648 Sharpe: tensor(0.9634)
Epoch 649 Sharpe: tensor(0.9634)
Epoch 650 Sharpe: tensor(0.9634)
Epoch 651 Sharpe: tensor(0.9634)
Epoch 652 Sharpe: tensor(0.9634)
Epoch 653 Sharpe: tensor(0.9634)
Epoch 654 Sharpe: tensor(0.9634)
Epoch 655 Sharpe: tensor(0.9634)
Epoch 656 Sharpe: tensor(0.9634)
Epoch 657 Sharpe: tensor(0.9634)
Epoch 658 Sharpe: tensor(0.9634)
Epoch 659 Sharpe: tensor(0.9634)
Epoch 660 Sharpe: tensor(0.9634)
Epoch 661 Sharpe: tensor(0.9634)
Epoch 662 Sharpe: tensor(0.9634)
Epoch 663 Sharpe: tensor(0.9634)
Epoch 664 Sharpe: tensor(0.9634)
Epoch 665 Sharpe: tensor(0.9634)
Epoch 666 Sharpe: tensor(0.9634)
Epoch 667 Sharpe: tensor(0.9634)
Epoch 668 Sharpe: tensor(0.9634)
Epoch 669 Sharpe: tensor(0.9634)
Epoch 670 Sharpe: tensor(0.9634)
Epoch 671 Sharpe: tensor(0.9634)
Epoch 672 Sharpe: tensor(0.9634)
Epoch 673 Sharpe: tensor(0.9634)
Epoch 674 Sharpe: tensor(0.9634)
Epoch 675 Sharpe: tensor(0.9634)
Epoch 676 Sharpe: tensor(0.9634)
Epoch 677 Sharpe: tensor(0.9634)
Epoch 678 Sharpe: tensor(0.9634)
Epoch 679 Sharpe: tensor(0.9634)
Epoch 680 Sharpe: tensor(0.9634)
Epoch 681 Sharpe: tensor(0.9634)
Epoch 682 Sharpe: tensor(0.9634)
Epoch 683 Sharpe: tensor(0.9634)
Epoch 684 Sharpe: tensor(0.9634)
Epoch 685 Sharpe: tensor(0.9634)
Epoch 686 Sharpe: tensor(0.9634)
Epoch 687 Sharpe: tensor(0.9634)
Epoch 688 Sharpe: tensor(0.9634)
Epoch 689 Sharpe: tensor(0.9634)
Epoch 690 Sharpe: tensor(0.9634)
Epoch 691 Sharpe: tensor(0.9634)
Epoch 692 Sharpe: tensor(0.9634)
Epoch 693 Sharpe: tensor(0.9634)
Epoch 694 Sharpe: tensor(0.9634)
Epoch 695 Sharpe: tensor(0.9634)
Epoch 696 Sharpe: tensor(0.9634)
Epoch 697 Sharpe: tensor(0.9634)
Epoch 698 Sharpe: tensor(0.9634)
Epoch 699 Sharpe: tensor(0.9634)
Epoch 700 Sharpe: tensor(0.9634)
Epoch 701 Sharpe: tensor(0.9634)
Epoch 702 Sharpe: tensor(0.9634)
Epoch 703 Sharpe: tensor(0.9634)
Epoch 704 Sharpe: tensor(0.9634)
Epoch 705 Sharpe: tensor(0.9634)
Epoch 706 Sharpe: tensor(0.9634)
Epoch 707 Sharpe: tensor(0.9634)
Epoch 708 Sharpe: tensor(0.9634)
Epoch 709 Sharpe: tensor(0.9634)
Epoch 710 Sharpe: tensor(0.9634)
Epoch 711 Sharpe: tensor(0.9634)
Epoch 712 Sharpe: tensor(0.9634)
Epoch 713 Sharpe: tensor(0.9634)
Epoch 714 Sharpe: tensor(0.9634)
Epoch 715 Sharpe: tensor(0.9634)
Epoch 716 Sharpe: tensor(0.9634)
Epoch 717 Sharpe: tensor(0.9634)
Epoch 718 Sharpe: tensor(0.9634)
Epoch 719 Sharpe: tensor(0.9634)
Epoch 720 Sharpe: tensor(0.9634)
Epoch 721 Sharpe: tensor(0.9634)
Epoch 722 Sharpe: tensor(0.9634)
Epoch 723 Sharpe: tensor(0.9634)
Epoch 724 Sharpe: tensor(0.9634)
Epoch 725 Sharpe: tensor(0.9634)
Epoch 726 Sharpe: tensor(0.9634)
Epoch 727 Sharpe: tensor(0.9634)
Epoch 728 Sharpe: tensor(0.9634)
Epoch 729 Sharpe: tensor(0.9634)
Epoch 730 Sharpe: tensor(0.9634)
Epoch 731 Sharpe: tensor(0.9634)
Epoch 732 Sharpe: tensor(0.9634)
Epoch 733 Sharpe: tensor(0.9634)
Epoch 734 Sharpe: tensor(0.9634)
Epoch 735 Sharpe: tensor(0.9634)
Epoch 736 Sharpe: tensor(0.9634)
Epoch 737 Sharpe: tensor(0.9634)
Epoch 738 Sharpe: tensor(0.9634)
Epoch 739 Sharpe: tensor(0.9634)
Epoch 740 Sharpe: tensor(0.9634)
Epoch 741 Sharpe: tensor(0.9634)
Epoch 742 Sharpe: tensor(0.9634)
Epoch 743 Sharpe: tensor(0.9634)
Epoch 744 Sharpe: tensor(0.9634)
Epoch 745 Sharpe: tensor(0.9634)
Epoch 746 Sharpe: tensor(0.9634)
Epoch 747 Sharpe: tensor(0.9634)
Epoch 748 Sharpe: tensor(0.9634)
Epoch 749 Sharpe: tensor(0.9634)
Epoch 750 Sharpe: tensor(0.9634)
Epoch 751 Sharpe: tensor(0.9634)
Epoch 752 Sharpe: tensor(0.9634)
Epoch 753 Sharpe: tensor(0.9634)
Epoch 754 Sharpe: tensor(0.9634)
Epoch 755 Sharpe: tensor(0.9634)
Epoch 756 Sharpe: tensor(0.9634)
Epoch 757 Sharpe: tensor(0.9634)
Epoch 758 Sharpe: tensor(0.9634)
Epoch 759 Sharpe: tensor(0.9634)
Epoch 760 Sharpe: tensor(0.9634)
Epoch 761 Sharpe: tensor(0.9634)
Epoch 762 Sharpe: tensor(0.9634)
Epoch 763 Sharpe: tensor(0.9634)
Epoch 764 Sharpe: tensor(0.9634)
Epoch 765 Sharpe: tensor(0.9634)
Epoch 766 Sharpe: tensor(0.9634)
Epoch 767 Sharpe: tensor(0.9634)
Epoch 768 Sharpe: tensor(0.9634)
Epoch 769 Sharpe: tensor(0.9634)
Epoch 770 Sharpe: tensor(0.9634)
Epoch 771 Sharpe: tensor(0.9634)
Epoch 772 Sharpe: tensor(0.9634)
Epoch 773 Sharpe: tensor(0.9634)
Epoch 774 Sharpe: tensor(0.9634)
Epoch 775 Sharpe: tensor(0.9634)
Epoch 776 Sharpe: tensor(0.9634)
Epoch 777 Sharpe: tensor(0.9634)
Epoch 778 Sharpe: tensor(0.9634)
Epoch 779 Sharpe: tensor(0.9634)
Epoch 780 Sharpe: tensor(0.9634)
Epoch 781 Sharpe: tensor(0.9634)
Epoch 782 Sharpe: tensor(0.9634)
Epoch 783 Sharpe: tensor(0.9634)
Epoch 784 Sharpe: tensor(0.9634)
Epoch 785 Sharpe: tensor(0.9634)
Epoch 786 Sharpe: tensor(0.9634)
Epoch 787 Sharpe: tensor(0.9634)
Epoch 788 Sharpe: tensor(0.9634)
Epoch 789 Sharpe: tensor(0.9634)
Epoch 790 Sharpe: tensor(0.9634)
Epoch 791 Sharpe: tensor(0.9634)
Epoch 792 Sharpe: tensor(0.9634)
Epoch 793 Sharpe: tensor(0.9634)
Epoch 794 Sharpe: tensor(0.9634)
Epoch 795 Sharpe: tensor(0.9634)
Epoch 796 Sharpe: tensor(0.9634)
Epoch 797 Sharpe: tensor(0.9634)
Epoch 798 Sharpe: tensor(0.9634)
Epoch 799 Sharpe: tensor(0.9634)
Epoch 800 Sharpe: tensor(0.9634)
Epoch 801 Sharpe: tensor(0.9634)
Epoch 802 Sharpe: tensor(0.9634)
Epoch 803 Sharpe: tensor(0.9634)
Epoch 804 Sharpe: tensor(0.9634)
Epoch 805 Sharpe: tensor(0.9634)
Epoch 806 Sharpe: tensor(0.9634)
Epoch 807 Sharpe: tensor(0.9634)
Epoch 808 Sharpe: tensor(0.9634)
Epoch 809 Sharpe: tensor(0.9634)
Epoch 810 Sharpe: tensor(0.9634)
Epoch 811 Sharpe: tensor(0.9634)
Epoch 812 Sharpe: tensor(0.9634)
Epoch 813 Sharpe: tensor(0.9634)
Epoch 814 Sharpe: tensor(0.9634)
Epoch 815 Sharpe: tensor(0.9634)
Epoch 816 Sharpe: tensor(0.9634)
Epoch 817 Sharpe: tensor(0.9634)
Epoch 818 Sharpe: tensor(0.9634)
Epoch 819 Sharpe: tensor(0.9634)
Epoch 820 Sharpe: tensor(0.9634)
Epoch 821 Sharpe: tensor(0.9634)
Epoch 822 Sharpe: tensor(0.9634)
Epoch 823 Sharpe: tensor(0.9634)
Epoch 824 Sharpe: tensor(0.9634)
Epoch 825 Sharpe: tensor(0.9634)
Epoch 826 Sharpe: tensor(0.9634)
Epoch 827 Sharpe: tensor(0.9634)
Epoch 828 Sharpe: tensor(0.9634)
Epoch 829 Sharpe: tensor(0.9634)
Epoch 830 Sharpe: tensor(0.9634)
Epoch 831 Sharpe: tensor(0.9634)
Epoch 832 Sharpe: tensor(0.9634)
Epoch 833 Sharpe: tensor(0.9634)
Epoch 834 Sharpe: tensor(0.9634)
Epoch 835 Sharpe: tensor(0.9634)
Epoch 836 Sharpe: tensor(0.9634)
Epoch 837 Sharpe: tensor(0.9634)
Epoch 838 Sharpe: tensor(0.9634)
Epoch 839 Sharpe: tensor(0.9634)
Epoch 840 Sharpe: tensor(0.9634)
Epoch 841 Sharpe: tensor(0.9634)
Epoch 842 Sharpe: tensor(0.9634)
Epoch 843 Sharpe: tensor(0.9634)
Epoch 844 Sharpe: tensor(0.9634)
Epoch 845 Sharpe: tensor(0.9634)
Epoch 846 Sharpe: tensor(0.9634)
Epoch 847 Sharpe: tensor(0.9634)
Epoch 848 Sharpe: tensor(0.9634)
Epoch 849 Sharpe: tensor(0.9634)
Epoch 850 Sharpe: tensor(0.9634)
Epoch 851 Sharpe: tensor(0.9634)
Epoch 852 Sharpe: tensor(0.9634)
Epoch 853 Sharpe: tensor(0.9634)
Epoch 854 Sharpe: tensor(0.9634)
Epoch 855 Sharpe: tensor(0.9634)
Epoch 856 Sharpe: tensor(0.9634)
Epoch 857 Sharpe: tensor(0.9634)
Epoch 858 Sharpe: tensor(0.9634)
Epoch 859 Sharpe: tensor(0.9634)
Epoch 860 Sharpe: tensor(0.9634)
Epoch 861 Sharpe: tensor(0.9634)
Epoch 862 Sharpe: tensor(0.9634)
Epoch 863 Sharpe: tensor(0.9634)
Epoch 864 Sharpe: tensor(0.9634)
Epoch 865 Sharpe: tensor(0.9634)
Epoch 866 Sharpe: tensor(0.9634)
Epoch 867 Sharpe: tensor(0.9634)
Epoch 868 Sharpe: tensor(0.9634)
Epoch 869 Sharpe: tensor(0.9634)
Epoch 870 Sharpe: tensor(0.9634)
Epoch 871 Sharpe: tensor(0.9634)
Epoch 872 Sharpe: tensor(0.9634)
Epoch 873 Sharpe: tensor(0.9634)
Epoch 874 Sharpe: tensor(0.9634)
Epoch 875 Sharpe: tensor(0.9634)
Epoch 876 Sharpe: tensor(0.9634)
Epoch 877 Sharpe: tensor(0.9634)
Epoch 878 Sharpe: tensor(0.9634)
Epoch 879 Sharpe: tensor(0.9634)
Epoch 880 Sharpe: tensor(0.9634)
Epoch 881 Sharpe: tensor(0.9634)
Epoch 882 Sharpe: tensor(0.9634)
Epoch 883 Sharpe: tensor(0.9634)
Epoch 884 Sharpe: tensor(0.9634)
Epoch 885 Sharpe: tensor(0.9634)
Epoch 886 Sharpe: tensor(0.9634)
Epoch 887 Sharpe: tensor(0.9634)
Epoch 888 Sharpe: tensor(0.9634)
Epoch 889 Sharpe: tensor(0.9634)
Epoch 890 Sharpe: tensor(0.9634)
Epoch 891 Sharpe: tensor(0.9634)
Epoch 892 Sharpe: tensor(0.9634)
Epoch 893 Sharpe: tensor(0.9634)
Epoch 894 Sharpe: tensor(0.9634)
Epoch 895 Sharpe: tensor(0.9634)
Epoch 896 Sharpe: tensor(0.9634)
Epoch 897 Sharpe: tensor(0.9634)
Epoch 898 Sharpe: tensor(0.9634)
Epoch 899 Sharpe: tensor(0.9634)
Epoch 900 Sharpe: tensor(0.9634)
Epoch 901 Sharpe: tensor(0.9634)
Epoch 902 Sharpe: tensor(0.9634)
Epoch 903 Sharpe: tensor(0.9634)
Epoch 904 Sharpe: tensor(0.9634)
Epoch 905 Sharpe: tensor(0.9634)
Epoch 906 Sharpe: tensor(0.9634)
Epoch 907 Sharpe: tensor(0.9634)
Epoch 908 Sharpe: tensor(0.9634)
Epoch 909 Sharpe: tensor(0.9634)
Epoch 910 Sharpe: tensor(0.9634)
Epoch 911 Sharpe: tensor(0.9634)
Epoch 912 Sharpe: tensor(0.9634)
Epoch 913 Sharpe: tensor(0.9634)
Epoch 914 Sharpe: tensor(0.9634)
Epoch 915 Sharpe: tensor(0.9634)
Epoch 916 Sharpe: tensor(0.9634)
Epoch 917 Sharpe: tensor(0.9634)
Epoch 918 Sharpe: tensor(0.9634)
Epoch 919 Sharpe: tensor(0.9634)
Epoch 920 Sharpe: tensor(0.9634)
Epoch 921 Sharpe: tensor(0.9634)
Epoch 922 Sharpe: tensor(0.9634)
Epoch 923 Sharpe: tensor(0.9634)
Epoch 924 Sharpe: tensor(0.9634)
Epoch 925 Sharpe: tensor(0.9634)
Epoch 926 Sharpe: tensor(0.9634)
Epoch 927 Sharpe: tensor(0.9634)
Epoch 928 Sharpe: tensor(0.9634)
Epoch 929 Sharpe: tensor(0.9634)
Epoch 930 Sharpe: tensor(0.9634)
Epoch 931 Sharpe: tensor(0.9634)
Epoch 932 Sharpe: tensor(0.9634)
Epoch 933 Sharpe: tensor(0.9634)
Epoch 934 Sharpe: tensor(0.9634)
Epoch 935 Sharpe: tensor(0.9634)
Epoch 936 Sharpe: tensor(0.9634)
Epoch 937 Sharpe: tensor(0.9634)
Epoch 938 Sharpe: tensor(0.9634)
Epoch 940 Sharpe: tensor(0.9634)
Epoch 941 Sharpe: tensor(0.9634)
Epoch 942 Sharpe: tensor(0.9634)
Epoch 943 Sharpe: tensor(0.9634)
Epoch 944 Sharpe: tensor(0.9634)
Epoch 945 Sharpe: tensor(0.9634)
Epoch 946 Sharpe: tensor(0.9634)
Epoch 947 Sharpe: tensor(0.9634)
Epoch 948 Sharpe: tensor(0.9634)
Epoch 949 Sharpe: tensor(0.9634)
Epoch 950 Sharpe: tensor(0.9634)
Epoch 951 Sharpe: tensor(0.9634)
Epoch 952 Sharpe: tensor(0.9634)
Epoch 953 Sharpe: tensor(0.9634)
Epoch 954 Sharpe: tensor(0.9634)
Epoch 955 Sharpe: tensor(0.9634)
Epoch 956 Sharpe: tensor(0.9634)
Epoch 957 Sharpe: tensor(0.9634)
Epoch 958 Sharpe: tensor(0.9634)
Epoch 959 Sharpe: tensor(0.9634)
Epoch 960 Sharpe: tensor(0.9634)
Epoch 961 Sharpe: tensor(0.9634)
Epoch 962 Sharpe: tensor(0.9634)
Epoch 963 Sharpe: tensor(0.9634)
Epoch 964 Sharpe: tensor(0.9634)
Epoch 965 Sharpe: tensor(0.9634)
Epoch 966 Sharpe: tensor(0.9634)
Epoch 967 Sharpe: tensor(0.9634)
Epoch 968 Sharpe: tensor(0.9634)
Epoch 969 Sharpe: tensor(0.9634)
Epoch 970 Sharpe: tensor(0.9634)
Epoch 971 Sharpe: tensor(0.9634)
Epoch 972 Sharpe: tensor(0.9634)
Epoch 973 Sharpe: tensor(0.9634)
Epoch 974 Sharpe: tensor(0.9634)
Epoch 975 Sharpe: tensor(0.9634)
Epoch 976 Sharpe: tensor(0.9634)
Epoch 977 Sharpe: tensor(0.9634)
Epoch 978 Sharpe: tensor(0.9634)
Epoch 979 Sharpe: tensor(0.9634)
Epoch 980 Sharpe: tensor(0.9634)
Epoch 981 Sharpe: tensor(0.9634)
Epoch 982 Sharpe: tensor(0.9634)
Epoch 983 Sharpe: tensor(0.9634)
Epoch 984 Sharpe: tensor(0.9634)
Epoch 985 Sharpe: tensor(0.9634)
Epoch 986 Sharpe: tensor(0.9634)
Epoch 987 Sharpe: tensor(0.9634)
Epoch 988 Sharpe: tensor(0.9634)
Epoch 989 Sharpe: tensor(0.9634)
Epoch 990 Sharpe: tensor(0.9634)
Epoch 991 Sharpe: tensor(0.9634)
Epoch 992 Sharpe: tensor(0.9634)
Epoch 993 Sharpe: tensor(0.9634)
Epoch 994 Sharpe: tensor(0.9634)
Epoch 995 Sharpe: tensor(0.9634)
Epoch 996 Sharpe: tensor(0.9634)
Epoch 997 Sharpe: tensor(0.9634)
Epoch 998 Sharpe: tensor(0.9634)
Epoch 999 Sharpe: tensor(0.9634)
Cum_return_train tensor(31921.8359, grad_fn=<SelectBackward>)
###Markdown
4. Visualization
###Code
plt.rcParams['figure.figsize'] = [7, 3]
# plt.title("sharp's ratio optimization GRURL")
plt.plot(rewards)
# plt.ylim((-0.5,0.5))
# plt.legend(['sharp\'s ratio(GRL):M=25,layer=3'],loc='upper left')
plt.savefig("SRGRL10-3.png", dpi=300)
plt.show()
plt.rcParams['figure.figsize'] = [15, 8]
f, axes = plt.subplots(3, 1, sharex=True)
# plt.suptitle("BTC GRURL Train", fontsize=16)
axes[0].set_ylabel("Prices")
axes[0].plot(prices[M:M+T])
axes[1].set_ylabel("Strategy Returns")
axes[1].plot(Cum_returns.detach().numpy())
axes[2].set_ylabel("Ft")
axes[2].bar(list(range(len(Cum_returns))), Ft[:-1].detach().numpy())
# plt.legend(['Train(GRL):M=25,layer=3'],loc='upper left')
plt.savefig("TrainGRL10-3.png", dpi=300)
plt.show()
# data:M+T:M+T+N+1
prices_test = np.array(raw_prices[1])[OFFSET+M+T:]
# OFFSET = 0   # starting index of the data
# M = 25       # size of the history window fed to the network at each time step
# T = 2000     # length of the time series fed to the trader
# N = 600      # size of the validation set
# prices_test = np.array(raw_prices[1])[OFFSET+M+T:]
# print(prices[OFFSET+M+T:])
asset_returns_test = asset_returns[:M+N+1]
scaler = StandardScaler()
normalized_asset_returns_test = torch.tensor(scaler.fit_transform(asset_returns_test[:M+N+1][:, None])[:, 0]).to(torch.float32)
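# A fresh StandardScaler is fit on the evaluation slice above; the commented-out line below would instead reuse the normalization of the training series.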
# normalized_asset_returns_test = normalized_asset_returns[M+T:T+M+N+1]
# print(asset_returns_test,normalized_asset_returns_test)
rewards_test = []
Ft_test = torch.zeros(N).to(normalized_asset_returns_test.device)
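# Slide an M-step window of normalized returns through the trained model to obtain the position F_t for each evaluation step.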
for i in range(1, N):
data_test = normalized_asset_returns_test[i-1:i+M-1]
input_test = data_test.view(1,M,1).to(device)
Ft_test[i] = model(input_test)
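# Strategy returns: position times asset return (scaled by miu) minus transaction costs proportional to the change in position (delta); the Sharpe-style reward below mirrors the training objective.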
returns_test = miu * (Ft_test[:N-1] * asset_returns_test[M:M+N-1]) - (delta * torch.abs(Ft_test[1:] - Ft_test[:N-1]))
expected_return_test = torch.mean(returns_test, dim=-1)
std_return_test = torch.std(returns_test, dim=-1)
sharpe_test = expected_return_test / (torch.sqrt(std_return_test) + eps)
rewards_test.append(sharpe_test.detach().cpu())
Cum_return_tests = returns_test.cumsum(dim=-1)
print(Cum_return_tests[-1])
plt.rcParams['figure.figsize'] = [15, 8]
f, axes = plt.subplots(4, 1, sharex=True)
axes[0].set_ylabel("Prices")
axes[0].plot(prices_test[M:M+N+1])
axes[1].set_ylabel("Sharpe ratio")
axes[1].plot(rewards_test)
axes[2].set_ylabel("Strategy Returns")
axes[2].plot(Cum_return_tests.detach().numpy())
axes[3].set_ylabel("Ft")
axes[3].bar(list(range(len(Cum_return_tests))), Ft_test[:-1].detach().numpy())
plt.savefig("TestGRL10-3.png", dpi=300)
plt.show()
###Output
_____no_output_____ |
notebooks/CompareSparseMixtureGraph.ipynb | ###Markdown
Properties:* The number of nodes increases as tau decreases (minimum > 0).* The number of nodes increases as alpha increases.* Expected number of dense nodes is: -alpha / sigma * tau ^ sigma. Basic parameter configs (sparse alpha, sigma, tau + dense alpha, sigma, tau):* 100, 0.5, 1, 100, -1, 0.1 (generates the largest graph among the basic configurations)* 100, 0.5, 1, 100, -1, 1. Additional parameter configurations:* 100, 0, 1 + 100, -1, 1* 100, 0.5, 0.1 + 100, -1, 0.1
###Code
mdest = '../result/random_network/mixture/'
sdest = '../result/random_network/sparse/'
m_f = '%d_%.2f_%.2f_%.2f_%.2f_%.2f_%.2f.pkl'
s_f = '%d_%.2f_%.2f_%.2f.pkl'
colors = cm.rainbow(np.linspace(0, 1, 7))
np.random.shuffle(colors)
colors = itertools.cycle(colors)
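# Quick sanity check (illustrative sketch) of the formula quoted in the markdown above:
# expected number of dense nodes = -alpha / sigma * tau ** sigma,
# evaluated here for the two basic dense-part configurations (alpha=100, sigma=-1, tau in {0.1, 1}).
for _alpha, _sigma, _tau in [(100, -1, 0.1), (100, -1, 1)]:
    _expected = -_alpha / _sigma * _tau ** _sigma
    print('dense part alpha=%g sigma=%g tau=%g -> expected dense nodes: %g' % (_alpha, _sigma, _tau, _expected))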
def degree_dist_list(graph, ddist):
_ddict = nx.degree(graph)
_ddist = defaultdict(int)
for k, v in _ddict.items():
_ddist[v] += 1
for k, v in _ddist.items():
ddist[k].append(v)
del _ddict, _ddist
return ddist
def avg_degree_dist(path_list):
""" Compute average degree distribution over repeated simulations
"""
ddist = defaultdict(list)
for path in path_list:
sample = pickle.load(open(path, 'rb'))
G = sparse_to_networkx(sample[0])
degree_dist_list(G, ddist)
del G, sample
avg_dist = dict()
for k, v in ddist.items():
avg_dist[k] = sum(ddist[k])/len(ddist[k])
return avg_dist
def scatter(_ddist, path, color=None):
""" print scatter plot of given degree distribution dictionary
"""
plt.scatter(list(_ddist.keys()), list(_ddist.values()), label=os.path.basename(path), color=color)
def degree_dist(graph):
""" Compute digree distribution of given graph
"""
_ddict = nx.degree(graph)
_ddist = defaultdict(int)
for k, v in _ddict.items():
_ddist[v] += 1
return _ddist
###Output
_____no_output_____
###Markdown
Comparison between sparse and mixed graphs
###Code
alpha = 100
sigma = 0.5
tau = 1
d_alpha = 100
d_sigma = -1
d_taus = [0.1, 1]
n_samples = 5
plt.figure(figsize=(12, 8))
for d_tau in d_taus:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
alphas = [100, 150]
for alpha in alphas:
path_list = list()
for i in range(n_samples):
path_list.append(os.path.join(sdest, s_f % (i, alpha, sigma, tau)))
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
###Output
_____no_output_____
###Markdown
Varying sigma in the sparse part of the mixed graph
###Code
sigmas = [0, 0.5, 0.9]
alpha = 100
tau = 1
d_alpha = 100
d_sigma = -1
d_tau = 1
plt.figure(figsize=(12, 8))
for sigma in sigmas:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
###Output
_____no_output_____
###Markdown
Varying tau in the sparse part of the mixed graph
###Code
alpha = 100
sigma = 0.5
taus = [0.1, 0.5, 1]
d_alpha = 100
d_sigma = -1
d_tau = 1
plt.figure(figsize=(12, 8))
for tau in taus:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
###Output
_____no_output_____
###Markdown
Varying sigma in the dense part of the mixed graph
###Code
alpha = 100
sigma = 0.5
tau = 1
d_alpha = 100
d_tau = 1
sigmas = [-0.5, -1, -2]
plt.figure(figsize=(12, 8))
for d_sigma in sigmas:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
###Output
_____no_output_____
###Markdown
Varying tau in the dense part of the mixed graph
###Code
alpha = 100
sigma = 0.5
tau = 1
d_alpha = 100
d_sigma = -1
taus = [0.1, 0.5, 1]
plt.figure(figsize=(12, 8))
for d_tau in taus:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
# for d_tau in taus:
# mfile = os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau))
# if os.path.exists(mfile):
# sample = pickle.load(open(mfile, 'rb'))
# G = sparse_to_networkx(sample[0])
# ddist = degree_dist(G)
# scatter(ddist, mfile, next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
###Output
_____no_output_____ |
DSA/tree/pathSum.ipynb | ###Markdown
You are given a binary tree in which each node contains an integer value. Find the number of paths that sum to a given value. The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes). The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000. Example: root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8 10 / \ 5 -3 / \ \ 3 2 11 / \ \ 3 -2 1 Return 3. The paths that sum to 8 are: 1. 5 -> 3 2. 5 -> 2 -> 1 3. -3 -> 11
###Code
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
def pathSum(self, root: TreeNode, sum: int) -> int:
if not root:
return 0
return self.cnt(root, sum) + self.pathSum(root.left, sum) + self.pathSum(root.right, sum)
def cnt(self, root,sum):
if not root:
return 0
top = 1 if root.val == sum else 0
left = self.cnt(root.left,sum-root.val)
right = self.cnt(root.right, sum-root.val)
return top + left + right
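# Quick check with the example tree from the prompt above (assumes the TreeNode definition at the top of this cell):
root = TreeNode(10)
root.left, root.right = TreeNode(5), TreeNode(-3)
root.left.left, root.left.right = TreeNode(3), TreeNode(2)
root.right.right = TreeNode(11)
root.left.left.left, root.left.left.right = TreeNode(3), TreeNode(-2)
root.left.right.right = TreeNode(1)
print(Solution().pathSum(root, 8))  # expected output: 3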
###Output
_____no_output_____ |
helloworld3.ipynb | ###Markdown
###Code
print("This line will be printed.")
###Output
This line will be printed.
|
projectC_output_final_sol.ipynb | ###Markdown
Project 2C Let's start by importing the pandas and the Numpy libraries.
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Let's read the dataset into a pandas dataframe.
###Code
concrete_data = pd.read_csv('concrete_data.csv')
concrete_data.head()
concrete_data.shape
###Output
_____no_output_____
###Markdown
So, there are approximately 1000 samples to train our model on. With so few samples, we have to be careful not to overfit the training data. Let's check the dataset for any missing values.
###Code
concrete_data.describe()
concrete_data.isnull().sum()
###Output
_____no_output_____
###Markdown
The data looks very clean and is ready to be used to build our model. The target variable in this problem is the concrete sample strength. Therefore, our predictors will be all the other columns.
###Code
concrete_data_columns = concrete_data.columns
predictors = concrete_data[concrete_data_columns[concrete_data_columns != 'Strength']] # all columns except Strength
target = concrete_data['Strength'] # Strength column
###Output
_____no_output_____
###Markdown
Let's do a quick sanity check of the predictors and the target dataframes.
###Code
predictors.head()
target.head()
###Output
_____no_output_____
###Markdown
Finally, the last step is to normalize the data by subtracting the mean and dividing by the standard deviation.
###Code
predictors_norm = (predictors - predictors.mean()) / predictors.std()
predictors_norm.head()
n_cols = predictors_norm.shape[1] # number of predictors
###Output
_____no_output_____
###Markdown
Building Keras model
###Code
import keras
###Output
_____no_output_____
###Markdown
As you can see, the Keras library uses the TensorFlow backend. Let's import the rest of the packages from the Keras library that we will need to build our regression model.
###Code
from keras.models import Sequential
from keras.layers import Dense
# define the model itself
def regression_model():
# creating the model
model = Sequential()
model.add(Dense(10, activation='relu', input_shape=(n_cols,)))
model.add(Dense(1))
# compiling model
model.compile(optimizer='adam', loss='mean_squared_error')
return model
###Output
_____no_output_____
###Markdown
The above function creates a model that has one hidden layer with 10 neurons and a ReLU activation function. It uses the Adam optimizer and the mean squared error as the loss function. Let's import scikit-learn in order to randomly split the data into training and test sets
###Code
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Splitting the data into training and test sets by holding out 30% of the data for testing
###Code
X_train, X_test, y_train, y_test = train_test_split(predictors_norm, target, test_size=0.3, random_state=42)
###Output
_____no_output_____
###Markdown
Train and Test Let's call the function now to create our model.
###Code
# build the model
model = regression_model()
###Output
_____no_output_____
###Markdown
Next, we will train the model for 100 epochs.
###Code
# fit the model
epochs = 100
model.fit(X_train, y_train, epochs=epochs, verbose=2)
###Output
Epoch 1/100
23/23 - 0s - loss: 1639.9762
Epoch 2/100
23/23 - 0s - loss: 1622.0172
Epoch 3/100
23/23 - 0s - loss: 1604.0128
Epoch 4/100
23/23 - 0s - loss: 1586.4027
Epoch 5/100
23/23 - 0s - loss: 1568.1458
Epoch 6/100
23/23 - 0s - loss: 1550.0372
Epoch 7/100
23/23 - 0s - loss: 1530.8644
Epoch 8/100
23/23 - 0s - loss: 1511.4619
Epoch 9/100
23/23 - 0s - loss: 1491.4005
Epoch 10/100
23/23 - 0s - loss: 1470.5549
Epoch 11/100
23/23 - 0s - loss: 1449.0493
Epoch 12/100
23/23 - 0s - loss: 1427.0072
Epoch 13/100
23/23 - 0s - loss: 1404.1825
Epoch 14/100
23/23 - 0s - loss: 1380.7894
Epoch 15/100
23/23 - 0s - loss: 1356.4185
Epoch 16/100
23/23 - 0s - loss: 1331.5461
Epoch 17/100
23/23 - 0s - loss: 1305.6908
Epoch 18/100
23/23 - 0s - loss: 1279.4860
Epoch 19/100
23/23 - 0s - loss: 1252.4337
Epoch 20/100
23/23 - 0s - loss: 1225.2687
Epoch 21/100
23/23 - 0s - loss: 1197.2546
Epoch 22/100
23/23 - 0s - loss: 1168.2578
Epoch 23/100
23/23 - 0s - loss: 1139.4092
Epoch 24/100
23/23 - 0s - loss: 1109.3973
Epoch 25/100
23/23 - 0s - loss: 1079.7864
Epoch 26/100
23/23 - 0s - loss: 1049.3912
Epoch 27/100
23/23 - 0s - loss: 1018.6868
Epoch 28/100
23/23 - 0s - loss: 988.2154
Epoch 29/100
23/23 - 0s - loss: 957.0792
Epoch 30/100
23/23 - 0s - loss: 926.6161
Epoch 31/100
23/23 - 0s - loss: 895.8762
Epoch 32/100
23/23 - 0s - loss: 865.6386
Epoch 33/100
23/23 - 0s - loss: 835.4490
Epoch 34/100
23/23 - 0s - loss: 806.1541
Epoch 35/100
23/23 - 0s - loss: 777.1281
Epoch 36/100
23/23 - 0s - loss: 748.5349
Epoch 37/100
23/23 - 0s - loss: 720.6773
Epoch 38/100
23/23 - 0s - loss: 693.2247
Epoch 39/100
23/23 - 0s - loss: 666.8769
Epoch 40/100
23/23 - 0s - loss: 640.7676
Epoch 41/100
23/23 - 0s - loss: 615.5834
Epoch 42/100
23/23 - 0s - loss: 591.1942
Epoch 43/100
23/23 - 0s - loss: 567.4484
Epoch 44/100
23/23 - 0s - loss: 544.8269
Epoch 45/100
23/23 - 0s - loss: 522.6080
Epoch 46/100
23/23 - 0s - loss: 501.4168
Epoch 47/100
23/23 - 0s - loss: 481.4689
Epoch 48/100
23/23 - 0s - loss: 461.6960
Epoch 49/100
23/23 - 0s - loss: 443.0972
Epoch 50/100
23/23 - 0s - loss: 425.2880
Epoch 51/100
23/23 - 0s - loss: 408.1208
Epoch 52/100
23/23 - 0s - loss: 391.5843
Epoch 53/100
23/23 - 0s - loss: 376.3988
Epoch 54/100
23/23 - 0s - loss: 361.5271
Epoch 55/100
23/23 - 0s - loss: 347.8190
Epoch 56/100
23/23 - 0s - loss: 334.6894
Epoch 57/100
23/23 - 0s - loss: 322.5668
Epoch 58/100
23/23 - 0s - loss: 310.9607
Epoch 59/100
23/23 - 0s - loss: 300.3114
Epoch 60/100
23/23 - 0s - loss: 290.2910
Epoch 61/100
23/23 - 0s - loss: 280.9143
Epoch 62/100
23/23 - 0s - loss: 272.5145
Epoch 63/100
23/23 - 0s - loss: 264.3984
Epoch 64/100
23/23 - 0s - loss: 257.1267
Epoch 65/100
23/23 - 0s - loss: 250.1771
Epoch 66/100
23/23 - 0s - loss: 243.7432
Epoch 67/100
23/23 - 0s - loss: 237.9279
Epoch 68/100
23/23 - 0s - loss: 232.6532
Epoch 69/100
23/23 - 0s - loss: 227.5508
Epoch 70/100
23/23 - 0s - loss: 223.0684
Epoch 71/100
23/23 - 0s - loss: 218.9074
Epoch 72/100
23/23 - 0s - loss: 215.0144
Epoch 73/100
23/23 - 0s - loss: 211.2430
Epoch 74/100
23/23 - 0s - loss: 207.9448
Epoch 75/100
23/23 - 0s - loss: 204.7307
Epoch 76/100
23/23 - 0s - loss: 201.8097
Epoch 77/100
23/23 - 0s - loss: 199.0492
Epoch 78/100
23/23 - 0s - loss: 196.4989
Epoch 79/100
23/23 - 0s - loss: 194.0759
Epoch 80/100
23/23 - 0s - loss: 191.9000
Epoch 81/100
23/23 - 0s - loss: 189.8580
Epoch 82/100
23/23 - 0s - loss: 187.9676
Epoch 83/100
23/23 - 0s - loss: 186.0655
Epoch 84/100
23/23 - 0s - loss: 184.3512
Epoch 85/100
23/23 - 0s - loss: 182.6721
Epoch 86/100
23/23 - 0s - loss: 181.1197
Epoch 87/100
23/23 - 0s - loss: 179.5754
Epoch 88/100
23/23 - 0s - loss: 178.2276
Epoch 89/100
23/23 - 0s - loss: 176.7040
Epoch 90/100
23/23 - 0s - loss: 175.3513
Epoch 91/100
23/23 - 0s - loss: 174.0276
Epoch 92/100
23/23 - 0s - loss: 172.8292
Epoch 93/100
23/23 - 0s - loss: 171.5767
Epoch 94/100
23/23 - 0s - loss: 170.5147
Epoch 95/100
23/23 - 0s - loss: 169.4660
Epoch 96/100
23/23 - 0s - loss: 168.3748
Epoch 97/100
23/23 - 0s - loss: 167.3183
Epoch 98/100
23/23 - 0s - loss: 166.3222
Epoch 99/100
23/23 - 0s - loss: 165.3357
Epoch 100/100
23/23 - 0s - loss: 164.3899
###Markdown
Next we need to evaluate the model on the test data.
###Code
loss_val = model.evaluate(X_test, y_test)
y_pred = model.predict(X_test)
loss_val
###Output
10/10 [==============================] - 0s 796us/step - loss: 163.8517
###Markdown
Now we need to compute the mean squared error between the predicted concrete strength and the actual concrete strength. Let's import the mean_squared_error function from Scikit-learn.
###Code
from sklearn.metrics import mean_squared_error
mean_square_error = mean_squared_error(y_test, y_pred)
mean = np.mean(mean_square_error)
standard_deviation = np.std(mean_square_error)
print(mean, standard_deviation)
###Output
163.85169936599985 0.0
###Markdown
Create a list of 50 mean squared errors and report the mean and the standard deviation of the mean squared errors.
###Code
total_mean_squared_errors = 50
epochs = 100
mean_squared_errors = []
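# Note: the same model instance keeps training across iterations (it is not re-created inside the loop), so each new split continues from the previously learned weights.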
for i in range(0, total_mean_squared_errors):
X_train, X_test, y_train, y_test = train_test_split(predictors_norm, target, test_size=0.3, random_state=i)
model.fit(X_train, y_train, epochs=epochs, verbose=0)
MSE = model.evaluate(X_test, y_test, verbose=0)
print("MSE "+str(i+1)+": "+str(MSE))
y_pred = model.predict(X_test)
mean_square_error = mean_squared_error(y_test, y_pred)
mean_squared_errors.append(mean_square_error)
mean_squared_errors = np.array(mean_squared_errors)
mean = np.mean(mean_squared_errors)
standard_deviation = np.std(mean_squared_errors)
print('\n')
print("Below is the mean and standard deviation of " +str(total_mean_squared_errors) + " mean squared errors with normalized data. Total number of epochs for each training is: " +str(epochs) + "\n")
print("Mean: "+str(mean))
print("Standard Deviation: "+str(standard_deviation))
###Output
MSE 1: 92.90280151367188
MSE 2: 72.89657592773438
MSE 3: 45.128334045410156
MSE 4: 41.41788864135742
MSE 5: 40.89964294433594
MSE 6: 43.26124954223633
MSE 7: 43.950382232666016
MSE 8: 33.9612922668457
MSE 9: 37.5832633972168
MSE 10: 36.8336296081543
MSE 11: 36.9637336730957
MSE 12: 32.54510498046875
MSE 13: 41.16047286987305
MSE 14: 40.655967712402344
MSE 15: 35.097721099853516
MSE 16: 30.85770606994629
MSE 17: 33.12465286254883
MSE 18: 33.22416687011719
MSE 19: 32.20165252685547
MSE 20: 35.859432220458984
MSE 21: 31.51734733581543
MSE 22: 32.2318229675293
MSE 23: 27.29950714111328
MSE 24: 32.86106872558594
MSE 25: 33.01205825805664
MSE 26: 34.990657806396484
MSE 27: 29.890884399414062
MSE 28: 30.436325073242188
MSE 29: 35.37593078613281
MSE 30: 35.04522705078125
MSE 31: 31.48307228088379
MSE 32: 30.4407958984375
MSE 33: 31.57286834716797
MSE 34: 32.47333908081055
MSE 35: 34.725189208984375
MSE 36: 40.25605773925781
MSE 37: 27.05064582824707
MSE 38: 34.96195602416992
MSE 39: 30.34077262878418
MSE 40: 29.424448013305664
MSE 41: 33.697383880615234
MSE 42: 28.469573974609375
MSE 43: 33.258872985839844
MSE 44: 32.80592727661133
MSE 45: 37.69941329956055
MSE 46: 30.771860122680664
MSE 47: 32.15293884277344
MSE 48: 32.5310173034668
MSE 49: 31.6402645111084
MSE 50: 37.60192108154297
Below is the mean and standard deviation of 50 mean squared errors with normalized data. Total number of epochs for each training is: 100
Mean: 36.33089598386751
Standard Deviation: 10.573438053434236
|
Introduction-to-jupyter/Import-csv-data.ipynb | ###Markdown
Using the CSV format in Jupyter The data collected by the CMS detector can be handled in many different file formats. One easy way is to handle the data in CSV files (comma-separated values). A CSV file is basically a regular text file in which the values are separated by commas and the records by line breaks. Reading the CSV file CSV files can be read for example with the function _read_\__csv( )_ of the _pandas_ module. Let's read the file _Zmumu_\__Run2011A.csv_ which is in the folder _Data_ in the parent directory. Let's also save the content of the file to the variable _dataset_. The file contains events from the CMS primary dataset [1] with the specific selection criteria [2]. [1] CMS collaboration (2016). DoubleMu primary dataset in AOD format from RunA of 2011 (/DoubleMu/Run2011A-12Oct2013-v1/AOD). CERN Open Data Portal. DOI: [10.7483/OPENDATA.CMS.RZ34.QR6N](http://doi.org/10.7483/OPENDATA.CMS.RZ34.QR6N). [2] Thomas McCauley (2016). Jpsimumu. Jupyter Notebook file. https://github.com/tpmccauley/cmsopendata-jupyter/blob/hst-0.1/Jpsimumu.ipynb.
###Code
import pandas
dataset = pandas.read_csv('../Data/Zmumu_Run2011A.csv')
###Output
_____no_output_____
###Markdown
We can check what kind of information the file we read contains. Let's use the _head( )_ method of the _pandas_ module, which prints the first five rows of the DataFrame it is called on ([pandas documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html)).
###Code
dataset.head()
###Output
_____no_output_____
###Markdown
Notice that there are more rows in the variable _dataset_ than the five printed. We can check the number of rows with the function _len( )_, which returns the length of the variable given in the brackets.
###Code
len(dataset)
###Output
_____no_output_____
###Markdown
Observing and selecting the values From the print above, we can see that the content of the file has been saved into a table (DataFrame tabular data structure). Each row of the table represents a different collision event and the columns contain the different saved values for the event. Some of the values are measured by the detector and some have been calculated from the measured values. Values in the table can be accessed with the _pandas_ module. For example, the data we are using contains the charges of the two muons, marked as _Q1_ and _Q2_. We can select certain columns from the table, e.g. the charges of the first muon for all of the events, by referring to the column name:
###Code
dataset['Q1']
###Output
_____no_output_____
###Markdown
Now the code printed the values of the column _Q1_ of the variable _dataset_. Of course not all of the values are printed (there are over 10 000 of them), and on the last line of the print you can see the name, length and type of the information printed. The numbers on the left give the index of the row and the numbers on the right are the values of the charges. By replacing the _Q1_ in the code it is possible to select any of the columns of the _dataset_ (e.g. _pt1_, _eta1_, _phi2_, ...). If, for example, only the first ten values of the charges are wanted, they can be selected with the _.loc_ method. In the method the brackets first include the indexes of the rows to be selected (here rows 0--10) and after those the name of the column from which the values will be selected (here _Q1_). With Python 2, the method is _.ix_.
###Code
dataset.loc[0:10, 'Q1']
# If you use Python 2, use
# dataset.ix[0:10, 'Q1']
###Output
_____no_output_____
###Markdown
Also individual values can be picked. Let's say we want to see the charges from indices 0, 1, 5 and 10. This can be done with
###Code
dataset.loc[[0,1,5,10],'Q1']
# If you use Python 2, use
# dataset.ix[[0,1,5,10],'Q1']
###Output
_____no_output_____ |
Python/.ipynb_checkpoints/ADS_Chart_Assignment2-portfolio-checkpoint.ipynb | ###Markdown
Assignment 2Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment.An NOAA dataset has been stored in the file `data/C2A2_data/BinnedCsvs_d400/fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv`. This is the dataset to use for this assignment. Note: The data for this assignment comes from a subset of The National Centers for Environmental Information (NCEI) [Daily Global Historical Climatology Network](https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt) (GHCN-Daily). The GHCN-Daily is comprised of daily climate records from thousands of land surface stations across the globe.Each row in the assignment datafile corresponds to a single observation.The following variables are provided to you:* **id** : station identification code* **date** : date in YYYY-MM-DD format (e.g. 2012-01-24 = January 24, 2012)* **element** : indicator of element type * TMAX : Maximum temperature (tenths of degrees C) * TMIN : Minimum temperature (tenths of degrees C)* **value** : data value for element (tenths of degrees C)For this assignment, you must:1. Read the documentation and familiarize yourself with the dataset, then write some python code which returns a line graph of the record high and record low temperatures by day of the year over the period 2005-2014. The area between the record high and record low temperatures for each day should be shaded.2. Overlay a scatter of the 2015 data for any points (highs and lows) for which the ten year record (2005-2014) record high or record low was broken in 2015.3. Watch out for leap days (i.e. February 29th), it is reasonable to remove these points from the dataset for the purpose of this visualization.4. Make the visual nice! Leverage principles from the first module in this course when developing your solution. Consider issues such as legends, labels, and chart junk.The data you have been given is near **Ann Arbor, Michigan, United States**, and the stations the data comes from are shown on the map below. Start my work
###Code
# initial imports for backend and scripting layer.
%matplotlib notebook
import matplotlib as mpl
mpl.get_backend() # this backend is very important to be able to access objects
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
#will probably need numpy
import numpy as np # matplotlib is really based on np arrays
import pandas as pd
from random import randint # if you want some random numbers
from datetime import datetime
raw = pd.read_csv('fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')
trans = raw.copy()
trans['Date'] = pd.to_datetime(trans['Date'])
#convert temperature data from tenths of a degree to whole degrees
trans['Data_Value'] = trans['Data_Value']/10
# create date
trans['Month_day'] = trans['Date'].dt.strftime('%m-%d')
#create 4 data sets 05 to 14 max/min & 15 max/min
mask_max5_14 = (trans['Date'] >= '2005-01-01') & (trans['Date'] <= '2014-12-31') & \
(trans['Month_day'] != '02-29') & (trans['Element'] == 'TMAX')
mask_min5_14 = (trans['Date'] >= '2005-01-01') & (trans['Date'] <= '2014-12-31') & \
(trans['Month_day'] != '02-29') & (trans['Element'] == 'TMIN')
trans5_14mx = trans.loc[mask_max5_14]
trans5_14mn = trans.loc[mask_min5_14]
#groupby sets index to grouping column
trans5_14mx = trans5_14mx.groupby(['Month_day']).agg({'Data_Value':np.max}).rename(columns={'Data_Value': 'Max Temp'})
trans5_14mn = trans5_14mn.groupby(['Month_day']).agg({'Data_Value':np.min}).rename(columns={'Data_Value': 'Min Temp'})
mask15_mx = (trans['Date'] >='2015-01-01') & (trans['Date'] <= '2015-12-31') & \
(trans['Month_day'] != '2015-02-29') & (trans['Element'] == 'TMAX')
mask15_mn = (trans['Date']>='2015-01-01') & (trans['Date'] <= '2015-12-31') & \
(trans['Month_day'] != '2015-02-29') & (trans['Element'] == 'TMIN')
trans15_mx = trans.loc[mask15_mx]
trans15_mn = trans.loc[mask15_mn]
trans15_mx = trans15_mx.groupby(['Month_day']).agg({'Data_Value':np.max}).rename(columns={'Data_Value': 'Max Temp 2015'})
trans15_mn = trans15_mn.groupby(['Month_day']).agg({'Data_Value': np.min}).rename(columns={'Data_Value': 'Min Temp 2015'})
df = trans5_14mx.join(trans5_14mn, how = 'inner')
df_2015 = trans15_mx.join(trans15_mn, how = 'inner')
df = df.join(df_2015, how = 'inner')
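# df now holds, for each calendar day, the 2005-2014 record high/low and the 2015 high/low temperatures.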
print('trans head \n', trans.head())
print('\n trans describe \n', trans.describe())
print('trans5_14mx head \n', trans5_14mx.head())
print('\n trans5_14mx describe \n', trans5_14mx.describe())
print('trans5_14mn head \n', trans5_14mn.head())
print('\n trans5_14mn describe \n', trans5_14mn.describe())
print('trans15_mx head \n', trans15_mx.head())
print('\n trans15_mx describe \n', trans15_mx.describe())
print('trans15_mn head \n', trans15_mn.head())
print('\n trans5_14mn describe \n', trans15_mn.describe())
# smsk = (trans['Month_day'] == '01-01')
# s = trans.loc[smsk]
# s
# filter for 2015 max temps > max for '05 to '14 & 2015 min temps < '05 to '14 min temps
mask = (df['Max Temp 2015'] > df['Max Temp'])
df_mx = df.loc[mask]
df_mx = df_mx.drop(['Max Temp', 'Min Temp', 'Min Temp 2015'], axis = 1)
# But, changes will show if run code at this point
mask = (df['Min Temp 2015'] < df['Min Temp'])
df_min = df.loc[mask]
df_min = df_min.drop(['Max Temp', 'Min Temp', 'Max Temp 2015'], axis = 1)
df_2015 = df_mx.join(df_min, how = 'outer')
df = df.drop(['Max Temp 2015', 'Min Temp 2015'], axis = 1)
df = df.join(df_2015, how = 'outer')
df.head()
plt.figure(figsize=(9,6))
xvals = df.index.tolist()
#loop to convert Month_day index to date time
xvals = [datetime.strptime(x, '%m-%d').date() for x in xvals]
ax = plt.gcf().gca()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.xaxis.set_major_locator(mdates.MonthLocator())
## You can set tick marks to anything you want ....
# #set spacing of tick labels relative to dataframe axis i.e. 365/12 = ~30
# ax.get_xaxis().set_ticks([1, 31, 61, 92, 123, 154, 184, 214, 244, 274, 304, 334])
# # set x tik labels
# Xtk = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec']
# ax.set_xticklabels(Xtk)
#zorder shows which is in the front or in the back
plt.scatter(xvals, df['Max Temp 2015'], color = '#7B241C', zorder = 3, label = "2015 High Temperature Record")
plt.scatter(xvals, df['Min Temp 2015'], color = '#1A5276', zorder = 3, label = '2015 Low Temperature Record')
plt.plot(xvals, df['Max Temp'], color='#CD6155', zorder=2, label="High Temperature 2005-2014")
plt.plot(xvals, df['Min Temp'], color='#5DADE2', zorder = 2, label = 'Low Temperature 2005-2014')
plt.xlabel('Day of the Year')
plt.ylabel('Degrees Celsius')
plt.title('High and Low Temperature Records from 2005-2015; Ann Arbor, MI, USA')
plt.gca().fill_between(xvals, df['Max Temp'], df['Min Temp'], facecolor='#95A5A6', alpha=0.25, zorder=1)
for spine in ["top", "right"]:
ax.spines[spine].set_visible(False)
plt.savefig('Temperature.png', bbox_inches='tight')
plt.show()
###Output
_____no_output_____ |
notebooks/MLS_split/MLS_split_features_extractor.ipynb | ###Markdown
Imports
###Code
import os
import librosa
import matplotlib.pyplot as plt
import numpy as np
import json
import pandas as pd
from scipy.io import wavfile
import scipy.stats as stats
import re
import asyncio
import time
import nest_asyncio
import parselmouth
###Output
_____no_output_____
###Markdown
Extract Frequency Features Get Metadata
###Code
project_root = os.path.dirname(os.path.dirname(os.getcwd()))
source = os.path.join(project_root, "MLS", "Full_split")
metadata = pd.read_csv(os.path.join(source, "metainfo.csv"))
data = metadata[['SPEAKER','GENDER']]
data_dict = dict(zip(data.SPEAKER, data.GENDER))
data_dict
###Output
_____no_output_____
###Markdown
Extract Features
###Code
async def get_frequencies(file):
file_path = os.path.join(source, file)
audio_data = parselmouth.Sound(file_path)
audio_data = audio_data.values[0]
sample_rate = 22050
splited_file = file.split('_')
if data_dict[int(splited_file[0])] == 'F':
gender = 0
if data_dict[int(splited_file[0])] == 'M':
gender = 1
step = int(sample_rate/5) # one window every 1/5 second (4410 samples at 22,050 Hz)
window_frequencies = []
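    # For each 1/5-second window, take the FFT and record the frequency bin with the largest magnitude (the dominant frequency in Hz).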
for i in range(0,len(audio_data),step):
ft = np.fft.fft(audio_data[i:i+step]) #fft returns the list N complex numbers
freqs = librosa.fft_frequencies(sr=sample_rate, n_fft=len(ft))
freqs = np.fft.fftfreq(len(ft)) #fftq tells you the frequencies associated with the coefficients
imax = np.argmax(np.abs(ft))
freq = freqs[imax]
freq_in_hz = abs(freq *sample_rate)
window_frequencies.append(freq_in_hz)
return window_frequencies, gender, file
async def get_features(count, file):
async with sem:
frequencies, gender, file_name = await get_frequencies(file)
nobs, minmax, mean, variance, skew, kurtosis = stats.describe(frequencies)
median = np.median(frequencies)
mode = stats.mode(frequencies).mode[0]
std = np.std(frequencies)
low,peak = minmax
q75,q25 = np.percentile(frequencies, [75 ,25])
iqr = q75 - q25
features_list.append([file_name, nobs, mean, skew, kurtosis, median, mode, std, low, peak, q25, q75, iqr, gender])
print(f"\r{count}/{len(audio_files)}", end='')
return
# Timing the dispatch
start_time = time.time()
# Start the event loop
loop = asyncio.get_event_loop()
# Limit the number of concurrent requests
sem = asyncio.Semaphore(600)
# List of tasks
sents = []
nest_asyncio.apply()
# Collect the tasks to dispatch
gender_list = []
file_list = []
features_list = []
audio_files = os.listdir(source)
for k, file in enumerate(audio_files):
if file.endswith('.wav'):
sent = asyncio.ensure_future(get_features(count=k+1, file=file))
sents.append(sent)
done, _ = loop.run_until_complete(asyncio.wait(sents))
dataframe_features = pd.DataFrame(features_list, columns = ['FileName', 'nobs', 'mean', 'skew', 'kurtosis', 'median', 'mode', 'std', 'low', 'peak', 'q25', 'q75', 'iqr', 'Gender'])
dataframe_features.to_csv('D:\dev\Speaker-Gender-Recognition\data\MLS_split\Features_data.csv', index=False)
###Output
_____no_output_____
###Markdown
Extract MFCCs Get Metadata
###Code
project_root = os.path.dirname(os.path.dirname(os.getcwd()))
source = os.path.join(project_root, "MLS", "Full_split")
metadata = pd.read_csv(os.path.join(source, "metainfo.csv"))
data = metadata[['SPEAKER','GENDER']]
data_dict = dict(zip(data.SPEAKER, data.GENDER))
###Output
_____no_output_____
###Markdown
Extract Features
###Code
async def extract_MFCCs(count, file):
async with sem:
file_path = os.path.join(source, file)
audio_data, sample_rate = librosa.load(file_path)
mfccs = librosa.feature.mfcc(y=audio_data, sr=sample_rate)
mfccs_mean = list(np.mean(mfccs.T, axis= 0))
splited_file = file.split('_')
if data_dict[int(splited_file[0])] == 'F':
gender = 0
if data_dict[int(splited_file[0])] == 'M':
gender = 1
audio_data = parselmouth.Sound(file_path)
audio_data = audio_data.values[0]
sample_rate = 22050
mfccs = librosa.feature.mfcc(y=audio_data, sr=sample_rate)
mfccs_mean = list(np.mean(mfccs.T, axis= 0))
sample_features = mfccs_mean
sample_features.insert(0,str(file))
sample_features.append(gender)
print(f"\r{count}/{len(audio_files)}",end='')
features_list.append(sample_features)
return
# Measure the total elapsed time
start_time = time.time()
# Get the asyncio event loop
loop = asyncio.get_event_loop()
# Limit the number of concurrent tasks
sem = asyncio.Semaphore(600)
# List of tasks
sents = []
nest_asyncio.apply()
# Containers for the collected results
gender_list = []
file_list = []
features_list = []
audio_files = os.listdir(source)
for k, file in enumerate(audio_files):
if file.endswith('.wav'):
sent = asyncio.ensure_future(extract_MFCCs(count=k+1, file=file))
sents.append(sent)
done, _ = loop.run_until_complete(asyncio.wait(sents))
dataframe_features = pd.DataFrame(features_list, columns = ['FileName','MFCC_1','MFCC_2','MFCC_3','MFCC_4','MFCC_5',
'MFCC_6','MFCC_7','MFCC_8','MFCC_9','MFCC_10','MFCC_11',
'MFCC_12','MFCC_13','MFCC_14','MFCC_15','MFCC_16','MFCC_17',
'MFCC_18','MFCC_19','MFCC_20','Gender'])
dataframe_features.to_csv(r'D:\dev\Speaker-Gender-Recognition\data\MLS_split\MFCCs_data.csv', index=False)
###Output
_____no_output_____
###Markdown
Extract f0 Get Metadata
###Code
project_root = os.path.dirname(os.path.dirname(os.getcwd()))
source = os.path.join(project_root, "MLS", "Full_split")
metadata = pd.read_csv(os.path.join(source, "metainfo.csv"))
data = metadata[['SPEAKER','GENDER']]
data_dict = dict(zip(data.SPEAKER, data.GENDER))
###Output
_____no_output_____
###Markdown
Extract Features
###Code
async def extract_F0(count, file):
async with sem:
file_path = os.path.join(source, file)
audio_data = parselmouth.Sound(file_path)
pitch = audio_data.to_pitch()
pitch_values = pitch.selected_array['frequency']
nobs_pitch, minmax_pitch, mean_pitch, variance_pitch, skew_pitch, kurtosis_pitch = stats.describe(pitch_values)
median_pitch = np.median(pitch_values)
mode_pitch = stats.mode(pitch_values).mode[0]
std_pitch = np.std(pitch_values)
low_pitch,peak_pitch = minmax_pitch
q75_pitch,q25_pitch = np.percentile(pitch_values, [75 ,25])
iqr_pitch = q75_pitch - q25_pitch
splited_file = file.split('_')
if data_dict[int(splited_file[0])] == 'F':
gender = 0
if data_dict[int(splited_file[0])] == 'M':
gender = 1
sample_features = [nobs_pitch, mean_pitch, skew_pitch, kurtosis_pitch, median_pitch, mode_pitch, std_pitch, low_pitch, peak_pitch, q25_pitch, q75_pitch, iqr_pitch]
sample_features.insert(0,str(file))
sample_features.append(gender)
string = ','.join(str(item) for item in sample_features)
print(f"\r{count}/{len(audio_files)}",end='')
features_list.append(sample_features)
return
# Measure the total elapsed time
start_time = time.time()
# Get the asyncio event loop
loop = asyncio.get_event_loop()
# Limit the number of concurrent tasks
sem = asyncio.Semaphore(600)
# List of tasks
sents = []
nest_asyncio.apply()
# Containers for the collected results
gender_list = []
file_list = []
features_list = []
audio_files = os.listdir(source)
for k, file in enumerate(audio_files):
if file.endswith('.wav'):
sent = asyncio.ensure_future(extract_F0(count=k+1, file=file))
sents.append(sent)
done, _ = loop.run_until_complete(asyncio.wait(sents))
dataframe_features = pd.DataFrame(features_list, columns = ['FileName', 'nobs_pitch', 'mean_pitch', 'skew_pitch', 'kurtosis_pitch',
'median_pitch', 'mode_pitch', 'std_pitch', 'low_pitch', 'peak_pitch', 'q25_pitch', 'q75_pitch', 'iqr_pitch', 'Gender'])
dataframe_features.to_csv(r'D:\dev\Speaker-Gender-Recognition\data\MLS_split\F0_data.csv', index=False)
###Output
2336/4913 |
OSIC Pulmonary Fibrosis Progression/inference-rnn-and-features.ipynb | ###Markdown
CT scan pre-processing Credits to https://www.kaggle.com/gzuidhof/full-preprocessing-tutorial and https://www.kaggle.com/arnavkj95/candidate-generation-and-luna16-preprocessing
###Code
def load_scan(path):
slices = [pydicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: float(x.InstanceNumber))
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except (AttributeError, IndexError):
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
except:
slice_thickness = slices[0].SliceThickness
if slice_thickness==0:
slice_thickness=slices[0].SliceThickness
for s in slices:
s.SliceThickness = slice_thickness
return slices
def get_pixels_hu(slices):
image = np.stack([s.pixel_array for s in slices])
# Convert to int16
image = image.astype(np.int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
def window_image(image, window_center, window_width):
img_min = window_center - window_width // 2
img_max = window_center + window_width // 2
window_image = image.copy()
window_image[window_image < img_min] = img_min
window_image[window_image > img_max] = img_max
return window_image
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = np.array([scan[0].SliceThickness] + list(scan[0].PixelSpacing), dtype=np.float32)
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
    image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest')
return image, new_spacing
def get_segmented_lungs(im, threshold):
'''
Step 1: Convert into a binary image.
'''
binary = np.array(im < threshold, dtype=np.int8)
'''
Step 2: Remove the blobs connected to the border of the image.
'''
cleared = clear_border(binary)
'''
Step 3: Label the image.
'''
label_image = label(cleared)
'''
Step 4: Keep the labels with 2 largest areas.
'''
areas = [r.area for r in regionprops(label_image)]
areas.sort()
if len(areas) > 2:
for region in regionprops(label_image):
if region.area < areas[-2]:
for coordinates in region.coords:
label_image[coordinates[0], coordinates[1]] = 0
binary = label_image > 0
'''
    Step 5: Erosion operation with a disk of radius 2. This operation
    separates the lung nodules attached to the blood vessels.
'''
selem = disk(2)
binary = binary_erosion(binary, selem)
'''
Step 6: Closure operation with a disk of radius 10. This operation is
to keep nodules attached to the lung wall.
'''
selem = disk(10)
binary = binary_closing(binary, selem)
'''
Step 7: Fill in the small holes inside the binary mask of lungs.
'''
edges = roberts(binary)
binary = ndi.binary_fill_holes(edges)
'''
Step 8: Superimpose the binary mask on the input image.
'''
# get_high_vals = binary == 0
# im[get_high_vals] = 0
im = binary* im
return im, binary.astype(int)
#MIN_BOUND = -1000.0
#MAX_BOUND = 320.0
def normalize(image, MIN_BOUND, MAX_BOUND):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
def lung_volume(masks, spacing):
slice_thickness = spacing[0]
pixel_spacing = (spacing[1], spacing[2])
return np.round(np.sum(masks) * slice_thickness * pixel_spacing[0]*pixel_spacing[1], 3)
def lung_process(image, spacing, threshold):
segmented = []
masks = []
for im in image:
segment,mask = get_segmented_lungs(im,threshold)
masks.append(mask.astype(int))
segmented.append(segment)
#vol = lung_volume(np.asarray(masks), spacing)
return np.asarray(segmented), np.asarray(masks)
def compute_stats(img):
kurt = kurtosis(img.ravel()[img.ravel() <0.6])
ske = skew(img.ravel()[img.ravel() <0.6])
std_i = img.ravel()[img.ravel() <0.6].std()
mean_i = img.ravel()[img.ravel() <0.6].mean()
median_i = np.median(img.ravel()[img.ravel() <0.6])
return kurt, ske, std_i, mean_i, median_i
def chunks(l, n):
# Credit: Ned Batchelder
# Link: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def mean(l):
return sum(l) / len(l)
def reduce_slices(slices):
new_slices = []
chunk_sizes = math.ceil(len(slices) / HM_SLICES)
for slice_chunk in chunks(slices, chunk_sizes):
slice_chunk = list(map(mean, zip(*slice_chunk)))
new_slices.append(slice_chunk)
if len(new_slices) == HM_SLICES-1:
new_slices.append(new_slices[-1])
if len(new_slices) == HM_SLICES-2:
new_slices.append(new_slices[-1])
new_slices.append(new_slices[-1])
if len(new_slices) == HM_SLICES+2:
new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1],new_slices[HM_SLICES],])))
del new_slices[HM_SLICES]
new_slices[HM_SLICES-1] = new_val
if len(new_slices) == HM_SLICES+1:
new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1],new_slices[HM_SLICES],])))
del new_slices[HM_SLICES]
new_slices[HM_SLICES-1] = new_val
return new_slices
def preprocess_file(patient_id):
patient = load_scan(test_dir + patient_id)
patient_pixels = get_pixels_hu(patient)
if patient_pixels.mean()<-1500 and patient_pixels.mean()>=-1800:
lung_image = window_image(patient_pixels, -1500, 3000)
pix_resampled, spacing = resample(lung_image, patient, [1,1,1])
segmented, mask = lung_process(pix_resampled, spacing, -1400)
normalized = normalize(segmented, -3000, 1500)
elif patient_pixels.mean()<-1800:
lung_image = window_image(patient_pixels, -3000, 4500)
pix_resampled, spacing = resample(lung_image, patient, [1,1,1])
segmented, mask = lung_process(pix_resampled, spacing, -2200)
normalized = normalize(segmented, -4000, 300)
else:
lung_image = window_image(patient_pixels, -300, 1200)
pix_resampled, spacing = resample(lung_image, patient, [1,1,1])
segmented, mask = lung_process(pix_resampled, spacing, -200)
normalized = normalize(segmented, -1500, 900)
return normalized.astype(np.float16), mask
save_img = dicom_arrays_dir
save_mask = mask_dir
def save_arrays(patient_ids):
segmented, mask = preprocess_file(patient_ids)
array_path = f'{save_img}/{patient_ids}.npy'
mask_path = f'{save_mask}/{patient_ids}_mask.npy'
np.save(str(array_path), segmented)
np.save(str(mask_path), mask)
gc.collect()
def cache_dataset():
patient_ids = test_df.drop_duplicates(subset=['Patient']).Patient
with Pool(processes=4) as pool:
show_run_results = list(
tqdm(pool.imap(save_arrays, patient_ids), total = len(patient_ids))
)
patient_df = test_df.copy()
patient_df = patient_df.drop_duplicates(subset=['Patient'])
print(len(patient_df))
if volume_array_file.exists() and kurts_array_file.exists() and skews_array_file.exists():
print('loading pre-calculated arrays')
volumes = torch.load(volume_array_file)
kurts = torch.load(kurts_array_file)
skews = torch.load(skews_array_file)
means = torch.load(means_array_file)
stds = torch.load(stds_array_file)
medians = torch.load(medians_array_file)
else:
print('Processing dicom images and caching dataset...')
volumes = []
kurts = []
skews = []
means = []
stds = []
medians = []
cache_dataset()
print('Calculating image statistics...')
for i, patient_id in tqdm(enumerate(patient_df.Patient), total=len(patient_df.Patient)):
segmented = []
cached_img_path = f'{dicom_arrays_dir}/{patient_id}.npy'
cached_mask_file = mask_dir/f'{patient_id}_mask.npy'
img_array = np.load(cached_img_path)
mask = np.load(cached_mask_file)
vol = lung_volume(np.asarray(mask), (1,1,1))
kurt, ske, std_i, mean_i, median_i = compute_stats(img_array)
volumes.append(vol)
means.append(mean_i)
stds.append(std_i)
medians.append(median_i)
kurts.append(kurt)
skews.append(ske)
gc.collect()
torch.save(volumes, 'volume_array.pt')
torch.save(kurts, 'kurts_array.pt')
torch.save(skews, 'skews_array.pt')
torch.save(means, 'mean_array.pt')
torch.save(stds, 'std_array.pt')
torch.save(medians, 'median_array.pt')
patient_df["volume"] = np.asarray(volumes)/1e6
patient_df["kurts"] = kurts
patient_df["skews"] = skews
patient_df["mean_vals"] = means
#patient_df["std_vals"] = stds
#patient_df["median_vals"] = medians
patient_df['kurts'].fillna((patient_df['kurts'].mean()), inplace=True)
patient_df['skews'].fillna((patient_df['skews'].mean()), inplace=True)
patient_df['mean_vals'].fillna((patient_df['mean_vals'].mean()), inplace=True)
#patient_df['median_vals'].fillna((patient_df['median_vals'].mean()), inplace=True)
FE += ['kurts','skews','mean_vals']
patient_df.head()
test_df=test_df.merge(patient_df[['Patient','kurts','skews','mean_vals','volume']],how='left',on='Patient')
test_df.head()
class AutoEncoder(nn.Module):
def __init__(self, latent_features=10):
super(AutoEncoder, self).__init__()
# Encoder
self.conv1 = nn.Conv3d(1, 16, 3)
self.conv2 = nn.Conv3d(16, 32, 3)
self.conv3 = nn.Conv3d(32, 96, 2)
self.conv4 = nn.Conv3d(96, 1, 1)
self.pool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.pool2 = nn.MaxPool3d(kernel_size=3, stride=3, return_indices=True)
self.pool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.pool4 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.fc1 = nn.Linear(10 * 10, latent_features)
# Decoder
self.fc2 = nn.Linear(latent_features, 10 * 10)
self.deconv0 = nn.ConvTranspose3d(1, 96, 1)
self.deconv1 = nn.ConvTranspose3d(96, 32, 2)
self.deconv2 = nn.ConvTranspose3d(32, 16, 3)
self.deconv3 = nn.ConvTranspose3d(16, 1, 3)
self.unpool0 = nn.MaxUnpool3d(kernel_size=2, stride=2)
self.unpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
self.unpool2 = nn.MaxUnpool3d(kernel_size=3, stride=3)
self.unpool3 = nn.MaxUnpool3d(kernel_size=2, stride=2)
def encode(self, x, return_partials=True):
# Encoder
x = self.conv1(x)
up3out_shape = x.shape
x, i1 = self.pool1(x)
x = self.conv2(x)
up2out_shape = x.shape
x, i2 = self.pool2(x)
x = self.conv3(x)
up1out_shape = x.shape
x, i3 = self.pool3(x)
x = self.conv4(x)
up0out_shape = x.shape
x, i4 = self.pool4(x)
x = x.view(-1, 10 * 10)
x = F.relu(self.fc1(x))
if return_partials:
return x, up3out_shape, i1, up2out_shape, i2, up1out_shape, i3, \
up0out_shape, i4
else:
return x
def forward(self, x):
x, up3out_shape, i1, up2out_shape, i2, \
up1out_shape, i3, up0out_shape, i4 = self.encode(x)
# Decoder
x = F.relu(self.fc2(x))
x = x.view(-1, 1, 1, 10, 10)
x = self.unpool0(x, output_size=up0out_shape, indices=i4)
x = self.deconv0(x)
x = self.unpool1(x, output_size=up1out_shape, indices=i3)
x = self.deconv1(x)
x = self.unpool2(x, output_size=up2out_shape, indices=i2)
x = self.deconv2(x)
x = self.unpool3(x, output_size=up3out_shape, indices=i1)
x = self.deconv3(x)
return x
class RNNFeatures(nn.Module):
def __init__(self, input_dim, hidden_dim, layer_dim, output_dim, in_ctscan_features=10):
super().__init__()
self.hidden_dim = hidden_dim
self.layer_dim = layer_dim
self.in_ctscan_features = in_ctscan_features
self.match_sz = nn.Linear(in_ctscan_features, input_dim)
self.rnn = nn.RNN(input_dim*2, hidden_dim, layer_dim, batch_first=True, nonlinearity='relu',dropout=0.1)
self.fc = nn.Linear(hidden_dim, hidden_dim)
self.fc_out = nn.Linear(hidden_dim, output_dim)
#self.batch_size = None
#self.hidden = None
def forward(self, x1, x2):
x1 = x1.view(-1, len(x1), len(x1[0]))
x2 = F.relu(self.match_sz(x2))
x2 = x2.view(-1, len(x2), len(x2[0]))
x = torch.cat([x1, x2], dim=2)
h0 = self.init_hidden(x)
out, hn = self.rnn(x, h0)
out = F.relu(self.fc(out[:, -1, :]))
out = self.fc_out(out)
return out
def init_hidden(self, x):
h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
return h0
autoencoder_models = []
for path in MODELS1:
state_dict = torch.load(path,map_location=torch.device('cpu'))
model = AutoEncoder()
model.load_state_dict(state_dict)
model.to(device)
model.float()
model.eval()
autoencoder_models.append(model)
models = []
for path in MODELS:
#state_dict = torch.load(path,map_location=torch.device('cpu'))
model = RNNFeatures(12, 150, 2, 3).to(device)
model.load_state_dict(torch.load(path))
model.to(device)
model.float()
model.eval()
models.append(model)
# Helper function that generates all latent features
class GenerateLatentFeatures:
def __init__(self, autoencoder_models, latent_dir):
#self.df = df.drop_duplicates(subset=['Patient'])
self.latent_dir = Path(latent_dir)
#self.cache_dir = Path(cache_dir)
def __call__(self, img_id, img_array):
cached_latent_file = self.latent_dir/f'{img_id}_lat.npy'
if cached_latent_file.is_file():
#latent_features = torch.load(cached_latent_file, map_location=torch.device('cpu'))
latent_features = np.load(cached_latent_file)
else:
latent_features = []
if len(img_array)>HM_SLICES:
img_array = np.asarray(reduce_slices(img_array))
if len(img_array) < HM_SLICES:
img_array = np.pad(img_array,[[0,HM_SLICES-len(img_array)],[0,0],[0,0]],constant_values=0.0)
else:
if len(img_array) < HM_SLICES:
img_array = np.pad(img_array,[[0,HM_SLICES-len(img_array)],[0,0],[0,0]],constant_values=0.0)
img = torch.tensor(img_array).unsqueeze(0).float()
img = F.interpolate(img, size=256)
img = img.view(img.shape[0], 1, img.shape[1], img.shape[2], img.shape[3])
            img = img.to(device)
preds = 0.0
with torch.no_grad():
for model in autoencoder_models:
pred = model.encode(img, return_partials=False).squeeze(0)
preds+=pred.detach().cpu().numpy()
preds = preds/len(autoencoder_models)
latent_features.append(preds)
latent_features = np.concatenate(latent_features)
np.save(cached_latent_file, latent_features)
return latent_features
class fibrosisDataset(Dataset):
def __init__(self,
df,
rand=False,
mode='train',
extract_features=None,
):
self.df = df.sort_values(by=['Patient','Weeks'],ascending=True).reset_index(drop=True)
self.rand = rand
self.mode = mode
self.extract_features = extract_features
def __len__(self):
return len(self.df)
def __getitem__(self, index):
row = self.df.iloc[index]
img_id = row.Patient
label = row.FVC
file_path = f'{dicom_arrays_dir}/{img_id}.npy'
img_array = np.load(file_path)
tabular_data = row[FE]
if self.extract_features:
features = self.extract_features(img_id, img_array)
if self.mode=='train' or self.mode=='valid':
return torch.tensor(tabular_data), torch.tensor(label), torch.tensor(features)
else:
return torch.tensor(tabular_data), torch.tensor(features)
test_df.head()
def test():
test_dataset = fibrosisDataset(test_df, mode='test', extract_features=GenerateLatentFeatures(autoencoder_models, latent_dir))
avg_preds = np.zeros((len(test_dataset), len(quantiles)))
PREDS = []
dataloader = DataLoader(test_dataset, batch_size=CFG.batch_size, shuffle=False,
num_workers=num_workers, pin_memory=False)
bar = tqdm(enumerate(dataloader), total=len(dataloader))
preds = []
for i, batch in bar:
preds = 0
inputs = batch[0].float()
features = batch[1].float()
with torch.no_grad():
for model in models:
x = model(inputs, features)
preds+=x
preds /= len(models)
PREDS.append(preds)
avg_preds = torch.cat(PREDS, dim=0).numpy()
df = pd.DataFrame(data=avg_preds, columns=list(quantiles))
return df
df = test()
sub_file = sub_csv[['Patient_Week', 'FVC', 'Confidence']]
sub_file['FVC'] = df[quantiles[1]]
sub_file['Confidence'] = df[quantiles[2]] - df[quantiles[0]]
sub_file.head()
sub_file.to_csv('submission.csv', index=False)
sub = pd.read_csv('submission.csv')
sub.head()
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/02---Munging Atlantic Data-checkpoint.ipynb | ###Markdown
`HURDAT2` Data Munge**NOTE**: This notebook is a mirror copy of notebook `01`, with one important difference: it runs through Atlantic data instead of Pacific data. This means that variables named `pacific` there are renamed `atlantic` here, and the origin and destination URLs are different. But otherwise everything is exactly the same.This isn't the most elegant way of doing this, but it's the fastest way of doing this, within the time allotted. IntroductionThis notebook acquires, cleans up, and saves a copy of the United States National Oceanic and Atmospheric Administration's (NOAA) HURDAT2 dataset.HURDAT2 is the NOAA's current data export of historical hurricane tracking data. It's split into two files, one for the Atlantic Ocean and one for the Pacific. These two files have different start dates (1851 and 1949 respectively). Original TextFrom its [description](http://www.nhc.noaa.gov/data/hurdat) on the NOAA's data web page:---Best Track Data (HURDAT2)Atlantic hurricane database (HURDAT2) 1851-2015 (5.9MB download)This dataset was provided on 6 July 2016 to include the 1956 to 1960 revisions to the best tracks.This dataset (known as Atlantic HURDAT2) hasa comma-delimited, text format with six-hourly information on the location,maximum winds, central pressure, and (beginning in 2004) size of all known tropical cyclones and subtropical cyclones.The original HURDAT database has been retired.Detailed information regarding the Atlantic Hurricane Database Re-analysis Project is available from theHurricane Research Division.Northeast and North Central Pacific hurricane database (HURDAT2)1949-2015 (3.2MB download)This dataset was provided on 9 May 2016 to include the remaining 2014 best tracks for Genevieve, Iselle, and Julio in the Central Pacific Hurricane Center (CPHC) areaof responsibility. Note that the 2015 best tracks from CPHC are not yet available and are not currently included. Once CPHCcompletes their post-storm analyses, this dataset will be updated.This dataset (known as NE/NC Pacific HURDAT2) has a comma-delimited, text format with six-hourly information on the location, maximum winds, central pressure, and (beginning in 2004)size of all known tropical cyclones and subtropical cyclones. Theoriginal HURDAT database has been retired.--- Data DictionaryThe dataset's [data dictionary](http://www.nhc.noaa.gov/data/hurdat/hurdat2-format-atlantic.pdf) shows that the files follow a modified CSV format, with individual hurricanes and storms getting their own subheadings:
###Code
from IPython.display import IFrame
IFrame("http://www.nhc.noaa.gov/data/hurdat/hurdat2-format-atlantic.pdf", width=900, height=600)
###Output
_____no_output_____
###Markdown
Initial Read Because of the non-standard format, a naive `pandas.read_csv` won't get usable data. It will be confused by the storm subheadings, for example, the first row in the following block:```EP202015, PATRICIA, 19,20151020, 0600, , TD, 13.4N, 94.0W, 25, 1007, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20151020, 1200, , TD, 13.3N, 94.2W, 30, 1006, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20151020, 1800, , TD, 13.2N, 94.6W, 30, 1006, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...```Curiously, the file doesn't seem to quite follow the format specified in the data dictionary, either, as it doesn't have any of the homogenized data lines mentioned there. So for instance, the following (used as an example in the data dictionary) never actually shows up:```AL092011, IRENE, 39,1234567890123456789012345768901234567```It looks just like the example line above instead.Since the start of each subheader line is `AL` or `EP`, whilst the start of a line of data is a date starting with the year (`2` or `1`), we could remove the subheadings by telling `pandas.read_csv` to ignore lines starting with the characters `A` or `E` (via `comment="E"`). But then we lose the position of those lines!It's easiest to just build our own parser.
###Code
import requests
atlantic_raw = requests.get("http://www.nhc.noaa.gov/data/hurdat/hurdat2-1851-2015-070616.txt")
atlantic_raw.raise_for_status() # check that we actually got something back
###Output
_____no_output_____
###Markdown
Double-checking the sentinels:
###Code
import io
from collections import Counter
c = Counter()
for line in io.StringIO(atlantic_raw.text).readlines():
c[line[:2]] += 1
c
import io
atlantic_storms_r = []
atlantic_storm_r = {'header': None, 'data': []}
for i, line in enumerate(io.StringIO(atlantic_raw.text).readlines()):
if line[:2] == 'AL':
atlantic_storms_r.append(atlantic_storm_r.copy())
atlantic_storm_r['header'] = line
atlantic_storm_r['data'] = []
else:
atlantic_storm_r['data'].append(line)
atlantic_storms_r = atlantic_storms_r[1:]
len(atlantic_storms_r)
atlantic_storms_r[0]
atlantic_storms_r[0]['data']
import pandas as pd
atlantic_storm_dfs = []
for storm_dict in atlantic_storms_r:
storm_id, storm_name, storm_entries_n = storm_dict['header'].split(",")[:3]
# remove hanging newline ('\n'), split fields
data = [[entry.strip() for entry in datum[:-1].split(",")] for datum in storm_dict['data']]
frame = pd.DataFrame(data)
frame['id'] = storm_id
frame['name'] = storm_name
atlantic_storm_dfs.append(frame)
len(atlantic_storm_dfs)
atlantic_storm_dfs[0]
atlantic_storms = pd.concat(atlantic_storm_dfs)
atlantic_storms.head(10)
len(atlantic_storms)
###Output
_____no_output_____
###Markdown
Setting columns Now we read the column headers out of the data dictionary and assign them appropriate variable names.
###Code
atlantic_storms = atlantic_storms.reindex(columns=list(atlantic_storms.columns[-2:]) + list(atlantic_storms.columns[:-2]))
atlantic_storms.head()
atlantic_storms.iloc[0]
atlantic_storms.columns
atlantic_storms.columns = [
"id",
"name",
"date",
"hours_minutes",
"record_identifier",
"status_of_system",
"latitude",
"longitude",
"maximum_sustained_wind_knots",
"maximum_pressure",
"34_kt_ne",
"34_kt_se",
"34_kt_sw",
"34_kt_nw",
"50_kt_ne",
"50_kt_se",
"50_kt_sw",
"50_kt_nw",
"64_kt_ne",
"64_kt_se",
"64_kt_sw",
"64_kt_nw",
"na"
]
del atlantic_storms['na']
pd.set_option("max_columns", None)
atlantic_storms.head()
###Output
_____no_output_____
###Markdown
Inserting sentinels -999 is used as a sentinel value in old records for which that data point is actually unknown. It'd be better to represent those as blank fields (e.g. `,,`) instead, so let's convert them to `NaN` accordingly.
###Code
atlantic_storms.iloc[0]['34_kt_sw']
import numpy as np
atlantic_storms = atlantic_storms.replace(to_replace='-999', value=np.nan)
###Output
_____no_output_____
###Markdown
The variables are all string types:
###Code
atlantic_storms.dtypes
###Output
_____no_output_____
###Markdown
There are some empty strings present:
###Code
atlantic_storms.iloc[0]['record_identifier']
atlantic_storms['record_identifier'].value_counts()
###Output
_____no_output_____
###Markdown
Which we `nan`-ify:
###Code
atlantic_storms = atlantic_storms.replace(to_replace="", value=np.nan)
atlantic_storms['record_identifier'].value_counts(dropna=False)
atlantic_storms.head()
###Output
_____no_output_____
###Markdown
Datafying columnsSome of the columns could be better formatted.To start with, the latitude and longitude include `N` and `W` indicators, which we don't really want. We can just use negatives to indicate `S` and `W` (we'll upconvert dtype later).
###Code
atlantic_storms['latitude'] = atlantic_storms['latitude'].map(lambda lat: lat[:-1] if lat[-1] == "N" else "-" + lat[:-1])
atlantic_storms['longitude']= atlantic_storms['longitude'].map(lambda long: long[:-1] if long[-1] == "E" else "-" + long[:-1])
atlantic_storms.head()
###Output
_____no_output_____
###Markdown
Next let's store the date in a more standard format. Output to ISO 8601 is automatically covered when we convert a column to `datetime` dtype.
###Code
atlantic_storms['date'] = pd.to_datetime(atlantic_storms['date'])
atlantic_storms['date'] = atlantic_storms\
.apply(
lambda srs: srs['date'].replace(hour=int(srs['hours_minutes'][:2]), minute=int(srs['hours_minutes'][2:])),
axis='columns'
)
del atlantic_storms['hours_minutes']
atlantic_storms.head()
###Output
_____no_output_____
###Markdown
Final fixes These were detected by inspecting the saved files. Fix an issue with stray whitespace in the names:
###Code
atlantic_storms['name'].iloc[0]
atlantic_storms['name'] = atlantic_storms['name'].map(lambda n: n.strip())
atlantic_storms['name'].iloc[0]
###Output
_____no_output_____
###Markdown
Reindex, and attach a name to the index:
###Code
atlantic_storms.index = range(len(atlantic_storms.index))
atlantic_storms.index.name = "index"
###Output
_____no_output_____
###Markdown
The data is printable as is.
###Code
atlantic_storms.to_csv("../data/atlantic_storms.csv", encoding='utf-8')
###Output
_____no_output_____ |
Lessons/15_Model_Finalization.ipynb | ###Markdown
Finalize your Model---
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
Finalize Your Model with pickle Pickle is the standard way of serializing objects in Python. You can use the pickle library to serialize your machine learning algorithms and save the serialized format to a file. Later you can load this file to deserialize your model and use it to make new predictions.
###Code
# Save Model Using Pickle
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from pickle import dump
from pickle import load
filename = "/content/drive/MyDrive/Colab Notebooks/ML Mastery python/Dataset/pima-indians-diabetes.csv"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=7)
# Fit the model on 33%
model = LogisticRegression()
model.fit(X_train, Y_train)
# save the model to disk
filename = 'finalized_model.sav'
dump(model, open(filename, 'wb'))
# some time later...
# load the model from disk
loaded_model = load(open(filename, 'rb'))
result = loaded_model.score(X_test, Y_test)
print(result)
###Output
_____no_output_____
###Markdown
Finalize Your Model with Joblib The Joblib library is part of the SciPy ecosystem and provides utilities for pipelining Python jobs. It also provides utilities for saving and loading Python objects that make use of NumPy data structures efficiently. This can be useful for machine learning algorithms that require a lot of parameters or store the entire dataset (e.g. k-Nearest Neighbors).
###Code
# Save Model Using joblib
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from joblib import dump
from joblib import load
filename = "/content/drive/MyDrive/Colab Notebooks/ML Mastery python/Dataset/pima-indians-diabetes.csv"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=7)
# Fit the model on 33%
model = LogisticRegression(max_iter=10000)
model.fit(X_train, Y_train)
# save the model to disk
filename = 'finalized_model.sav'
dump(model, filename)
# some time later...
# load the model from disk
loaded_model = load(filename)
result = loaded_model.score(X_test, Y_test)
print(result)
###Output
0.7874015748031497
|
my_ml_recipes/text/mle/intro.ipynb | ###Markdown
Maximum Likelihood EstimateSuppose we are given a problem where we can assume the _parametric class_ of distribution (e.g. Normal Distribution) that generates a set of data, and we want to determine the most likely parameters of this distribution using the given data. Since this class of distribution has a finite number of parameters (e.g. mean $\mu$ and standard deviation $\sigma$, in case of normal distribution) that need to be figured out in order to identify the particular member of the class, we will use the given data to do so.The obtained parameter estimates will be called **Maximum Likelihood Estimates**.Let us consider a Random Variable $X$ to be normally distributed with some mean $\mu$ and standard deviation $\sigma$. We need to estimate $\mu$ and $\sigma$ using our samples which accurately represent the actual $X$ and not just the samples that we have drawn out. Estimating ParametersLet's have a look at the Probability Density Function (PDF) for the Normal Distribution and see what they mean.$$\begin{equation}f(x; \mu, \sigma) = \frac{e^{-(x - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}}\end{equation}$$ (eq_normal_dist)This equation is used to obtain the probability of our sample $x$ being from our random variable $X$, when the true parameters of the distribution are $\mu$ and $\sigma$. Normal distributions with different $\mu$ and $\sigma$ are shown below.
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
np.random.seed(10)
plt.style.use('seaborn')
plt.rcParams['figure.figsize'] = (12, 8)
def plot_normal(x_range, mu=0, sigma=1, **kwargs):
'''
https://emredjan.github.io/blog/2017/07/19/plotting-distributions/
'''
x = x_range
y = norm.pdf(x, mu, sigma)
plt.plot(x, y, **kwargs)
mus = np.linspace(-6, 6, 6)
sigmas = np.linspace(1, 3, 6)
assert len(mus) == len(sigmas)
x_range = np.linspace(-10, 10, 200)
for mu, sigma in zip(mus, sigmas):
plot_normal(x_range, mu, sigma, label=f'$\mu$ = {mu:.2f}, $\sigma$ = {sigma:.2f}')
plt.legend();
###Output
_____no_output_____
###Markdown
Let us consider that our sample = 5. Then what is the probability that it comes from a normal distribution with $\mu = 4$ and $\sigma = 1$? To get this probability, we only need to plug the values of $x, \mu$ and $\sigma$ into Equation {eq}`eq_normal_dist`. Scipy has a handy function [`norm.pdf()`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html) that we can use to obtain this easily.
###Code
from scipy.stats import norm
norm.pdf(5, 4, 1)
###Output
_____no_output_____
###Markdown
What if our sample came from a different distribution with $\mu = 3$ and $\sigma = 2$?
###Code
norm.pdf(5, 3, 2)
###Output
_____no_output_____
###Markdown
As we can see, the PDF equation {eq}`eq_normal_dist` shows us how likely our sample is to have come from a distribution with certain parameters. The current results show that our sample is more likely to have come from the first distribution. But this is with just a single sample. What if we had multiple samples and wanted to estimate the parameters? Let us assume we have multiple samples from $X$, which we assume to have come from some normal distribution, and that all the samples are mutually independent of one another. In this case, we can get the total probability of observing all samples by multiplying the probabilities of observing each sample individually. E.g., the probability that both $7$ and $1$ are drawn from a normal distribution with $\mu = 4$ and $\sigma=2$ is equal to:
###Code
norm.pdf(7, 4, 2) * norm.pdf(1, 4, 2)
###Output
_____no_output_____
###Markdown
Likelihood of many samples
###Code
x_data = np.random.randint(-9, high=9, size=5)
print(x_data)
###Output
[ 0 -5 6 -9 8]
###Markdown
In maximum likelihood estimation (MLE), we specify a distribution of unknown parameters and then use our data to obtain the actual parameter values. In essence, MLE's aim is to find the set of parameters for the probability distribution that maximizes the likelihood of the data points. This can be formally expressed as:$$\begin{equation}\hat{\mu}, \hat{\sigma} = \operatorname*{argmax}_{\mu, \sigma} \prod_{i=1}^n f(x_i)\end{equation}$$ (eq_likelihood)However, it is difficult to optimize this product of probabilities because of long and messy calculations. Thus, we use log-likelihood which is the logarithm of the probability that the data point is observed. Formally Equation {eq}`eq_likelihood` can be re-written as,$$\begin{equation}\hat{\mu}, \hat{\sigma} = \operatorname*{argmax}_{\mu, \sigma}\Sigma_{i=1}^n \ln f(x_i)\end{equation}$$ (eq_log_likelihood)This is because logarithmic function is a monotonically increasing function. Thus, taking the log of another function does not change the point where the original function peaks. There are two main advantages of using log-likelihood:1. The exponential terms in the probability density function are more manageable and easily optimizable.1. The product of all likelihoods become a sum of individual likelihoods which allows these individual components to be maximized rather than working with the product of $n$ probability density functions. Now, let us find the maximum likelihood estimates using the log likelihood function.$$\begin{align*}\begin{split}& \ln\left[ L(\mu, \sigma|x_1, ..., x_n)\right] \\ &= \ln \left( \frac{e^{-(x_1 - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \times \frac{e^{-(x_2 - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \times ... \times \frac{e^{-(x_n - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \right) \\ &= \ln\left( \frac{e^{-(x_1 - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \right) + \ln\left( \frac{e^{-(x_2 - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \right) + ... \\ &\quad + \ln\left( \frac{e^{-(x_n - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \right)\end{split}\end{align*} $$ (eq_mle_all_terms)Here,$$\begin{align*}&\ln\left( \frac{e^{-(x_1 - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \right) \\ &= \ln\left( \frac{1} {\sigma\sqrt{2\pi}} \right) + \ln\left( e^{-(x_1 - \mu)^{2}/(2\sigma^{2}) } \right) \\ &= \ln\left[ (2\pi\sigma^2)^{\frac{-1}{2}} \right] - \frac{(x_1 - \mu)^2}{2\sigma^2}\ln(e) \\&= -\frac{1}{2}\ln ( 2\pi\sigma^2) - \frac{(x_1-\mu)^2}{2\sigma^2} \\&= -\frac{1}{2}\ln(2\pi) - \frac{1}{2}\ln(\sigma^2) - \frac{(x_1 - \mu)^2}{2\sigma^2} \\\end{align*} $$$$\begin{align*}&= -\frac{1}{2}\ln(2\pi) - \ln(\sigma) - \frac{(x_1 - \mu)^2}{2\sigma^2} \\\end{align*}$$ (eq_mle_single_term)Thus, Equation {eq}`eq_mle_all_terms` can be written as:$$\begin{align*}\ln\left[ L(\mu, \sigma|x_1, ..., x_n)\right] &= \ln\left( \frac{e^{-(x_1 - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \right) + \ln\left( \frac{e^{-(x_2 - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \right) + ... \\ &\quad + \ln\left( \frac{e^{-(x_n - \mu)^{2}/(2\sigma^{2}) }} {\sigma\sqrt{2\pi}} \right) \\&= \left[ -\frac{1}{2}\ln(2\pi) - \ln(\sigma) - \frac{(x_1 - \mu)^2}{2\sigma^2} \right] \\ &\quad + \left[ -\frac{1}{2}\ln(2\pi) - \ln(\sigma) - \frac{(x_2- \mu)^2}{2\sigma^2} \right] \\ &\quad + ... + \left[ -\frac{1}{2}\ln(2\pi) - \ln(\sigma) - \frac{(x_n - \mu)^2}{2\sigma^2} \right] \\\end{align*} $$$$\begin{align*}&= -\frac{n}{2}\ln(2\pi) - n\ln(\sigma) - \frac{(x_1-\mu)^2}{2\sigma^2} - \frac{(x_2-\mu)^2}{2\sigma^2} - ... 
- \frac{(x_n-\mu)^2}{2\sigma^2}\end{align*}$$ (eq_mle_simplified) Values of parametersNow, we will use Equation {eq}`eq_mle_simplified` to find the values of $\mu$ and $\sigma$. For this purpose, we take the partial derivative of Equation {eq}`eq_mle_simplified` with respect to $\mu$ and $\sigma$.$$\begin{align*}\frac{\partial}{\partial \mu}\ln\left[L(\mu, \sigma|x_1, x_2, ..., x_n) \right] &= 0 - 0 + \frac{x_1 - \mu}{\sigma^2} + \frac{x_2 - \mu}{\sigma^2} + ... + \frac{x_n - \mu}{\sigma^2} \\\end{align*} $$$$\begin{align*}\frac{\partial}{\partial \mu}\ln\left[L(\mu, \sigma|x_1, x_2, ..., x_n) \right] &= \frac{1}{\sigma^2}\left[ (x_1 + x_2 + ... + x_n) - n\mu \right]\end{align*} $$ (eq_mu)$$\begin{align*}\frac{\partial}{\partial \sigma}\ln\left[L(\mu, \sigma|x_1, x_2, ..., x_n) \right] &= 0 - \frac{n}{\sigma} + \frac{(x_1 - \mu)^2}{\sigma^3} + \frac{(x_2 - \mu)^2}{\sigma^3} + ... + \frac{(x_n - \mu)^2}{\sigma^3} \\ \end{align*} $$$$\begin{align*}\frac{\partial}{\partial \sigma}\ln\left[L(\mu, \sigma|x_1, x_2, ..., x_n) \right] &= -\frac{n}{\sigma} + \frac{1}{\sigma^3}\left[ (x_1 - \mu)^2 + (x_2 - \mu)^2 + ...+ (x_n - \mu)^2 \right]\end{align*}$$ (eq_sigma)Now, to find the maximum likelihood estimate for $\mu$ and $\sigma$, we need to solve for the derivative with respect to $\mu = 0$ and $\sigma = 0$, because the slope is 0 at the peak of the curve.Thus, using Equation {eq}`eq_mu` and setting $\frac{\partial}{\partial \mu}\ln\left[L(\mu, \sigma|x_1, x_2, ..., x_n) \right] = 0$, we get,$$\begin{align*}0 &= \frac{1}{\sigma^2}\left[ (x_1 + x_2 + ... + x_n) - n\mu \right] \\0 &= (x_1+x_2 + ... + x_n) - n\mu \\\end{align*} $$$$\begin{align*}\mu &= \frac{(x_1+x_2+...+x_n)}{n}\end{align*} $$ (eq_mu_final)Thus, the maximum likelihood estimate for $\mu$ is the mean of the samples.Simialrly, using Equation {eq}`eq_sigma` and setting $\frac{\partial}{\partial \sigma}\ln\left[L(\mu, \sigma|x_1, x_2, ..., x_n) \right] = 0$, we get,$$\begin{align*}0 &= -\frac{n}{\sigma} + \frac{1}{\sigma^3}\left[ (x_1 - \mu)^2 + (x_2 - \mu)^2 + ...+ (x_n - \mu)^2 \right] \\0 &= -n + \frac{1}{\sigma^2}\left[ (x_1-\mu)^2 + (x_2-\mu)^2 + ...+ (x_n-\mu)^2 \right] \\n\sigma^2 &= (x_1-\mu)^2 + (x_2-\mu)^2 + ...+ (x_n-\mu)^2 \\ \end{align*} $$$$\begin{align*}\sigma &= \sqrt{\frac{(x_1-\mu)^2 + (x_2-\mu)^2 + ...+ (x_n-\mu)^2}{n}} \\\end{align*} $$ (eq_sigma_final)Thus, the maximum likelihood estimate for $\sigma$ is the standard deviation of the samples. An Example Let us now consider 5 samples with values 0, -5, 6, -9 and 8. We want to know the normal distribution from which all of these samples were most likely to be drawn. In other words, we would like to maximize the value of $f(0, -5, 6, -9, 8)$ as given in Equation {eq}`eq_normal_dist`. Since we do not know the values of $\mu$ and $\sigma$ for the required distribution, we need to estimate them using Equations {eq}`eq_mu_final` and {eq}`eq_sigma_final` respectively.Using the formulae of $\mu$ and $\sigma$, we get,
###Code
samples = np.array([0, -5, 6, -9, 8])
mu = np.mean(samples)
sigma = np.std(samples)
print(f'mu = {mu:.2f} and sigma = {sigma:.2f}')
###Output
mu = 0.00 and sigma = 6.42
###Markdown
Let us plot the normal distribution with these values and also mark the given points.
###Code
x_range = np.linspace(-20, 20, 200)
plot_normal(x_range, mu, sigma)
plt.axhline(y=0)
plt.vlines(samples, ymin=0, ymax=norm.pdf(samples, mu, sigma), linestyle=':')
plt.plot(samples, [0]*samples.shape[0], 'o', zorder=10, clip_on=False);
###Output
_____no_output_____ |
sphinx/datascience/source/lda.ipynb | ###Markdown
Latent Dirichlet AllocationThe purpose of this notebook is to demonstrate how to simulate data appropriate for use with [Latent Dirichlet Allocation](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation) (LDA) to learn topics. There are a lot of moving parts involved with LDA, and it makes very strong assumptions about how word, topics and documents are distributed. In a nutshell, the distributions are all based on the [Dirichlet-Multinomial distribution](https://en.wikipedia.org/wiki/Dirichlet-multinomial_distribution), and so if you understand that compound distribution, you will have an easier time understanding how to sample the topics (from the document) and the words (from the topic). At any rate, the Wikipedia site does a good enough job to enumerate the moving parts; here they are for completeness.* $K$ is the number of topics* $N$ is the number of words in a document; sometimes also denoted as $V$; when the number of words vary from document to document, then $N_d$ is the number of words for the $d$ document; here we assume $N$, $V$ and $N_d$ are all the same* $M$ is the number of documents* $\alpha$ is a vector of length $K$ on the priors of the $K$ topics; these alpha are `sparse` (less than 1)* $\beta$ is a vector of length $N$ on the priors of the $N$ words; typically these are `symmetric` (all set to the same value e.g. 0.001)* $\theta$ is the $M$ by $K$ matrix of document-topic (documents to topics) where each element is $P(K=k|D=d)$* $\varphi$ is the $K$ by $V$ matrix of topic-word (topics to words) where each element is $P(W=w|K=k)$The Wikipedia article states the sampling as follows.$\begin{align}\boldsymbol\varphi_{k=1 \dots K} &\sim \operatorname{Dirichlet}_V(\boldsymbol\beta) \\\boldsymbol\theta_{d=1 \dots M} &\sim \operatorname{Dirichlet}_K(\boldsymbol\alpha) \\z_{d=1 \dots M,w=1 \dots N_d} &\sim \operatorname{Categorical}_K(\boldsymbol\theta_d) \\w_{d=1 \dots M,w=1 \dots N_d} &\sim \operatorname{Categorical}_V(\boldsymbol\varphi_{z_{dw}})\end{align}$Note the following.* $z_{dw} \in [1 \ldots K]$ ($z_{dw}$ is an integer between 1 and $K$) and serves as a pointer back to $\varphi_k$ (the k-th row in $\varphi$ that you will use as priors to sample the words)* $w_{dw} \in [1 \ldots N]$ ($w_{dw}$ is an integer between 1 and $N$) which is the n-th word* $z_{dw}$ is actually sampled from $\operatorname{Multinomial}(\boldsymbol\theta_d)$ taking the arg max, e.g. $z_{dw} \sim \underset{\theta_d}{\operatorname{arg\,max}}\ \operatorname{Multinomial}(\boldsymbol\theta_d)$* $w_{dw}$ is actually sampled from $\operatorname{Multinomial}(\boldsymbol\varphi_{z_{dw}})$ taking the arg max, e.g. $z_{dw} \sim \underset{\boldsymbol\varphi_{w_{dw}}}{\operatorname{arg\,max}}\ \operatorname{Multinomial}(\boldsymbol\varphi_{z_{dw}})$The code below should make it clear as there are a lot of sub-scripts and moving parts. Simulate the dataLet's get ready to sample. Note the following.* $K = 10$ (ten topics)* $N = 100$ (one hundred words)* $M = 1000$ (one thousand documents)* $\alpha = [0.1, 0.2, 0.3, 0.4, 0.025, 0.015, 0.37, 0.88, 0.03, 0.08]$ (10 sparse priors on topics)* $\beta = [0.001 \ldots 0.001]$ (100 symetric priors on words)Below, we store the sampled documents and associated words in* `texts` as string literal (e.g. w1 w1 w83 ....)* `docs` as a dictionary of counts (e.g. { 1: 2, 83: 1, ...})The matrices* `C` stores the counts* `X` stores the [tf-idf](https://en.wikipedia.org/wiki/Tf%E2%80%93idf) values
###Code
%matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import dirichlet, multinomial
from scipy.sparse import lil_matrix
import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
np.random.seed(37)
# number of topics
K = 10
# number of words
N = 100
# number of documents
M = 1000
# priors on K topics
a = np.array([0.1, 0.2, 0.3, 0.4, 0.025, 0.015, 0.37, 0.88, 0.03, 0.08])
# priors on N words
b = np.full((1, N), 0.001, dtype=float)[0]
# distribution of words in topic k
phi = np.array([dirichlet.rvs(b)[0] for _ in range(K)])
# distribution of topics in document d
theta = np.array([dirichlet.rvs(a)[0] for _ in range(M)])
# simulate the documents
texts = []
docs = []
# for each document
for i in range(M):
d = {}
t = []
# for each word
for j in range(N):
# sample the possible topics
z_ij = multinomial.rvs(1, theta[i])
# get the identity of the topic; the one with the highest probability
topic = np.argmax(z_ij)
# sample the possible words from the topic
w_ij = multinomial.rvs(1, phi[topic])
# get the identity of the word; the one with the highest probability
word = np.argmax(w_ij)
if word not in d:
d[word] = 0
d[word] = d[word] + 1
t.append('w{}'.format(word))
docs.append(d)
texts.append(' '.join(t))
# make a nice matrix
# C is a matrix of word counts (rows are documents, columns are words, elements are count values)
C = lil_matrix((M, N), dtype=np.int16)
for i, d in enumerate(docs):
counts = sorted(list(d.items()), key=lambda tup: tup[0])
for tup in counts:
C[i, tup[0]] = tup[1]
# X is a matrix of tf-idf (rows are documents, columns are words, elements are tf-idf values)
X = TfidfTransformer().fit_transform(C)
###Output
_____no_output_____
###Markdown
Gaussian mixture models (GMMs)Let's see if GMMs can help us recover the number of topics using the [AIC](https://en.wikipedia.org/wiki/Akaike_information_criterion) score to guide us.
###Code
from scipy.sparse.linalg import svds
from sklearn.mixture import GaussianMixture
def get_gmm_labels(X, k):
gmm = GaussianMixture(n_components=k, max_iter=200, random_state=37)
gmm.fit(X)
aic = gmm.aic(X)
print('{}: aic={}'.format(k, aic))
return k, aic
U, S, V = svds(X, k=20)
gmm_scores = [get_gmm_labels(U, k) for k in range(2, 26)]
###Output
2: aic=-91377.4925931899
3: aic=-115401.48064693023
4: aic=-140093.33933540556
5: aic=-140323.78987370015
6: aic=-141875.7608870883
7: aic=-148775.55233751616
8: aic=-144864.34044251204
9: aic=-145063.4922621106
10: aic=-150715.19037699007
11: aic=-152996.5234889565
12: aic=-155759.24880410862
13: aic=-154738.52657589084
14: aic=-155298.3570419242
15: aic=-155273.86266190943
16: aic=-158229.54424744606
17: aic=-158801.92826365907
18: aic=-158146.93107164893
19: aic=-157399.88209837917
20: aic=-158964.20247723104
21: aic=-156443.29839085325
22: aic=-156545.28924475564
23: aic=-156265.51016605442
24: aic=-155860.4914350854
25: aic=-157396.56289736537
###Markdown
k-means clustering (KMC)Let's see if KMC can help us to recover the number of topics using the [Silhouette score](https://en.wikipedia.org/wiki/Silhouette_%28clustering%29) to guide us.
###Code
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
def get_kmc(X, k):
model = KMeans(k, random_state=37)
model.fit(X)
labels = model.predict(X)
score = silhouette_score(X, labels)
print('{}: score={}'.format(k, score))
return k, score
kmc_scores = [get_kmc(X, k) for k in range(2, 26)]
###Output
2: score=0.22136552497539078
3: score=0.2606191325546754
4: score=0.2985364557161296
5: score=0.32764563696557253
6: score=0.34711980577628615
7: score=0.36212754809252495
8: score=0.3693035922796191
9: score=0.3118628444238988
10: score=0.32070416934016466
11: score=0.3056882384904699
12: score=0.28297903762485543
13: score=0.28462816984240946
14: score=0.2747613933318139
15: score=0.2787478862359055
16: score=0.27452088253304896
17: score=0.2548015324435892
18: score=0.25961952207924777
19: score=0.25650479556223627
20: score=0.251690199350559
21: score=0.2566617758778615
22: score=0.25866268014756943
23: score=0.24607465357359543
24: score=0.24936289940720038
25: score=0.2579644562276278
###Markdown
LDA modelingHere, we will use LDA topic modeling technique and the [coherence score](https://radimrehurek.com/gensim/models/coherencemodel.html) to guide us recovering the number of topics.
###Code
from gensim import corpora
from gensim.models import LdaModel
from gensim.models.coherencemodel import CoherenceModel
def learn_lda_model(corpus, dictionary, k):
lda = LdaModel(corpus,
id2word=dictionary,
num_topics=k,
random_state=37,
iterations=100,
passes=5,
per_word_topics=False)
cm = CoherenceModel(model=lda, corpus=corpus, coherence='u_mass')
coherence = cm.get_coherence()
print('{}: {}'.format(k, coherence))
return k, coherence
T = [t.split(' ') for t in texts]
dictionary = corpora.Dictionary(T)
corpus = [dictionary.doc2bow(text) for text in T]
lda_scores = [learn_lda_model(corpus, dictionary, k) for k in range(2, 26)]
###Output
2: -7.112621491925517
3: -6.770771537876562
4: -6.654850158110881
5: -6.495525290205532
6: -6.592127872424598
7: -6.4394384370150854
8: -6.431505215171467
9: -6.376827700591723
10: -6.207008469326988
11: -6.235774265382583
12: -6.289107652710713
13: -6.254881861190534
14: -6.550148968159432
15: -6.6008249817300415
16: -6.560176401338963
17: -6.607477085524114
18: -6.707151535098344
19: -6.712047152650457
20: -6.723101440691804
21: -6.906780797634873
22: -6.6622351856878375
23: -6.773847370134338
24: -6.735329093161339
25: -6.676802294304821
###Markdown
Visualize the techniques and scores versus the number of topics Here, we visualize the scores (GMM AIC, KMC Silhouette and LDA Coherence) versus the number of topics (k). For AIC, the lower the score, the better; for silhouette, the higher the better; for coherence, the higher the better. It seems that KMC's silhouette does not really agree with AIC or coherence, while AIC and coherence (although negatively correlated) seem to hint at the same number of topics. When relying on LDA and coherence, k=10 has the highest coherence, as we'd expect since we simulated the data from 10 latent/hidden topics.
###Code
def plot_scores(scores, ax, ylabel):
_x = [s[0] for s in scores]
_y = [s[1] for s in scores]
ax.plot(_x, _y, color='tab:blue')
ax.set_xlabel('k')
ax.set_ylabel(ylabel)
ax.set_title('{} vs k'.format(ylabel))
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
plot_scores(gmm_scores, ax[0], 'GMM AIC')
plot_scores(kmc_scores, ax[1], 'KMC Sillhouette')
plot_scores(lda_scores, ax[2], 'LDA Coherence')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Visualize the topics This visualization tool allows us to `interrogate` the topics. As we hover over each topic, the words most strongly associated with it are shown.
###Code
import pyLDAvis.gensim
import warnings
warnings.filterwarnings('ignore')
lda = LdaModel(corpus,
id2word=dictionary,
num_topics=10,
random_state=37,
iterations=100,
passes=5,
per_word_topics=False)
lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=False)
pyLDAvis.display(lda_display)
###Output
_____no_output_____
###Markdown
Close to real-world exampleHere's a list of 10 book titles when searching on `programming` and `economics` from Amazon (5 each). Again, when the number of topics is k=2, that model has the highest coherence score.
###Code
import nltk
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer
def clean(text):
t = text.lower().strip()
t = t.split()
t = remove_stop_words(t)
t = [get_lemma(w) for w in t]
t = [get_stem(w) for w in t]
return t
def get_stem(w):
return PorterStemmer().stem(w)
def get_lemma(w):
lemma = wn.morphy(w)
return w if lemma is None else lemma
def remove_stop_words(tokens):
stop_words = nltk.corpus.stopwords.words('english')
return [token for token in tokens if token not in stop_words]
texts = [
'The Art of Computer Programming',
'Computer Programming Learn Any Programming Language In 2 Hours',
'The Self-Taught Programmer The Definitive Guide to Programming Professionally',
'The Complete Software Developers Career Guide How to Learn Your Next Programming Language',
'Cracking the Coding Interview 189 Programming Questions and Solutions',
'The Economics Book Big Ideas Simply Explained',
'Economics in One Lesson The Shortest and Surest Way to Understand Basic Economics',
'Basic Economics',
'Aftermath Seven Secrets of Wealth Preservation in the Coming Chaos',
'Economics 101 From Consumer Behavior to Competitive Markets Everything You Need to Know About Economics'
]
texts = [clean(t) for t in texts]
dictionary = corpora.Dictionary(texts)
dictionary.filter_extremes(no_below=3)
corpus = [dictionary.doc2bow(text) for text in texts]
lda_scores = [learn_lda_model(corpus, dictionary, k) for k in range(2, 10)]
###Output
2: -26.8263021597115
3: -26.863492751597203
4: -26.88208804754005
5: -26.848616514842924
6: -26.9006833434829
7: -26.874118634993117
8: -26.88208804754005
9: -26.863492751597203
###Markdown
Learn the model with 2 topics.
###Code
lda = LdaModel(corpus,
id2word=dictionary,
num_topics=2,
random_state=37,
iterations=100,
passes=20,
per_word_topics=False)
###Output
_____no_output_____
###Markdown
Print what the model predicts for each book title. Note that the 9th book title is a tie (50/50); otherwise, all the predictions (based on the highest probabilities) are correct.
###Code
corpus_lda = lda[corpus]
for d in corpus_lda:
print(d)
###Output
[(0, 0.25178078), (1, 0.7482192)]
[(0, 0.16788824), (1, 0.8321117)]
[(0, 0.25178385), (1, 0.74821615)]
[(0, 0.25177962), (1, 0.7482204)]
[(0, 0.2517812), (1, 0.7482188)]
[(0, 0.7482479), (1, 0.25175208)]
[(0, 0.83213073), (1, 0.16786925)]
[(0, 0.74824756), (1, 0.2517524)]
[(0, 0.5), (1, 0.5)]
[(0, 0.8321298), (1, 0.16787016)]
###Markdown
The first topic is about `econom` (economics) and the second about `programming`, as we'd expect. Observe how each topic contains a little of the other's words. This is the result of LDA's assumption that documents are mixtures of topics and topics are distributions over words.
###Code
lda.print_topics()
###Output
_____no_output_____
###Markdown
This book title is a `holdout` title from the economics search result. It is correctly placed in the 0-th topic (economics).
###Code
lda[dictionary.doc2bow(clean('Naked Economics Undressing the Dismal Science'))]
###Output
_____no_output_____
###Markdown
This book title is a `holdout` title from the programming search result. It is correctly placed in the 1-st topic (programming).
###Code
lda[dictionary.doc2bow(clean('Elements of Programming Interviews in Python The Insiders Guide'))]
###Output
_____no_output_____
###Markdown
Since this example is trivial, the visualization is not very interesting, but displayed below anyways.
###Code
lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=False)
pyLDAvis.display(lda_display)
###Output
_____no_output_____ |
python3.6/python22.ipynb | ###Markdown
****** 22. Weak References, Iterators, and Generators****** *** 1 Weak References*** 1-1 Definition of a weak reference- Weak Reference - a reference that is not counted toward an object's reference count 1-2 Why weak references are needed - 1) Because the reference count does not increase, cyclic references are avoided. - Cyclic Reference - a situation in which references between different objects are linked in a circular fashion - A group of objects that only reference each other in a cycle cannot be reclaimed by reference counting alone, even when nothing else uses it. - Python does periodically inspect for cyclic references and garbage-collect them, but this costs extra CPU time. - If that garbage collection runs infrequently, many cyclically referenced objects end up occupying memory needlessly- 2) Weak references are also useful for building a kind of cache for objects shared among various instances. 1-3 The weakref module 1) weakref.ref(o)- The ref(o) function of the weakref module - creates a weak reference to the object o - lets you check whether the object still remains in memory - returns None if the object is no longer in memory- How to reach the actual object from a weak reference - call the weak reference object like a function
###Code
import sys
import weakref                                # import the weakref module
class C:
    pass
c = C()                                       # create an instance of class C
c.a = 1                                       # set a test attribute on instance c
print("refcount -", sys.getrefcount(c))       # check the reference count of object c
print()
d = c                                         # the usual way a reference count increases
print("refcount -", sys.getrefcount(c))       # check the reference count of object c
print()
r = weakref.ref(c)                            # create a weak reference object r
print("refcount -", sys.getrefcount(c))       # check the reference count of object c --> unchanged
print()
print(r)                                      # the weakref object itself
print(r())                                    # reach the real object from the weak reference: call it like a function
print(c)
print(r().a)                                  # access a member of the real object through the weak reference
print()
del c                                         # remove one reference
print(d)
del d                                         # remove the last strong reference
print(r())                                    # now returns None
print(r().a)                                  # the attribute can no longer be accessed (raises AttributeError)
###Output
<weakref at 0x10e767b88; to 'C' at 0x10e71fe80>
<__main__.C object at 0x10e71fe80>
<__main__.C object at 0x10e71fe80>
1
<__main__.C object at 0x10e71fe80>
None
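###Markdown
weakref.ref also accepts an optional callback that fires when the referent is garbage collected, which is handy for cleanup or cache-eviction hooks; a minimal illustrative sketch:
###Code
import weakref
class C:
    pass
def on_collect(ref):
    print("referent was collected:", ref)
obj = C()
w = weakref.ref(obj, on_collect)   # on_collect runs when obj is garbage collected
del obj                            # in CPython the object is collected right here, so the callback prints
print(w())                         # None: the referent is gone
###Output
_____no_output_____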
###Markdown
- Weak references cannot be created for instances of the built-in container types (list, tuple, dict, etc.) directly; trying to do so raises TypeError. (Subclasses of these types do support weak references.)
###Code
d = {'one': 1, 'two': 2}
wd = weakref.ref(d)   # raises TypeError: cannot create weak reference to 'dict' object
###Output
_____no_output_____
###Markdown
2) weakref.proxy(o)- weakref.proxy(o) creates a weak-reference proxy for object o. - With a proxy, the real object can be used directly, without the function-call form. - It is often preferred over the ref(o) function.
###Code
import sys
import weakref
class C:
    pass
c = C()
c.a = 2
print("refcount -", sys.getrefcount(c))   # check the reference count of object c
p = weakref.proxy(c)                      # create a proxy object
print("refcount -", sys.getrefcount(c))   # check the reference count of object c --> unchanged
print( )
print(p)
print(c)
print(p.a)
import weakref
class C:
    pass
c = C()                                   # create the object to be referenced
r = weakref.ref(c)                        # create a weak reference
p = weakref.proxy(c)                      # create a weak-reference proxy
print(weakref.getweakrefcount(c))         # number of weak references to c
print(weakref.getweakrefs(c))             # list of weak references to c
###Output
2
[<weakref at 0x10e79b228; to 'C' at 0x10e71f978>, <weakproxy at 0x10e7678b8 to C at 0x10e71f978>]
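###Markdown
Once the referent disappears, the proxy raises ReferenceError on any attribute access; a quick check reusing c and p from the cell above:
###Code
del c
try:
    print(p.a)
except ReferenceError as err:
    print("ReferenceError:", err)
###Output
_____no_output_____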
###Markdown
1-4 Weak dictionaries - A weak dictionary (Weak Dictionary) is a dictionary that holds weak references to other objects as its keys or values - it is mainly used as a cache of other objects. - Differences from an ordinary dictionary - the objects used as keys or values are only weakly referenced - when the real object is deleted, the corresponding (key, value) pair is removed from the weak dictionary automatically. - In other words, once the real object is gone the cached item disappears too, which makes object lifetime management efficient. - The weakref.WeakValueDictionary class - its constructor creates a weak dictionary whose values are weakly referenced.
###Code
import weakref
class C:
    pass
c = C()
c.a = 4
d = weakref.WeakValueDictionary()   # create a WeakValueDictionary object
print(d)
d[1] = c                            # store an item that weakly references the real object
print(list(d.items()))              # inspect the dictionary contents
print(d[1].a)                       # access an attribute of the real object
del c                               # delete the real object
print(list(d.items()))              # the item has been removed from the weak dictionary
###Output
<WeakValueDictionary at 0x10e71f978>
[(1, <__main__.C object at 0x10e7a0278>)]
4
[]
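###Markdown
The mirror image is weakref.WeakKeyDictionary, whose keys are weakly referenced; it is useful for attaching metadata to objects without keeping them alive (a small illustrative sketch).
###Code
import weakref
class C:
    pass
owner = C()
meta = weakref.WeakKeyDictionary()
meta[owner] = "extra data attached to owner"
print(list(meta.items()))
del owner                  # the key object goes away ...
print(list(meta.items()))  # ... and its entry disappears with it
###Output
_____no_output_____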
###Markdown
- If the same example is run with an ordinary dictionary, the entry is still there at the end even after the object has been deleted.
###Code
class C:
    pass
c = C()
c.a = 4
d = {}                     # create an ordinary dictionary
print(d)
d[1] = c                   # store an ordinary (strong) reference to the real object
print(list(d.items()))     # inspect the dictionary contents
print(d[1].a)              # access an attribute of the real object
del c                      # delete the name c (the dictionary still holds a reference, so the object is not actually freed)
print(list(d.items()))     # the item is still present in the ordinary dictionary
###Output
{}
[(1, <__main__.C object at 0x10e7a01d0>)]
4
[(1, <__main__.C object at 0x10e7a01d0>)]
###Markdown
*** 2 Iterators (Iterator)*** 2-1 Iterator objects- An iterator object - an object that implements \_\_next\_\_(self) internally - it hands back, one at a time, the items of the sequence it wraps - it raises the StopIteration exception when there is nothing left to hand over - it pairs with the built-in next() function. - Getting an iterator for an arbitrary object - the built-in iter(o) returns an iterator object for object o.- Collection object A --> iter(A) --> returns iterator object B --> next(B) --> returns the elements of the collection one by one- Memory efficiency of iterators - an iterator does not keep a copy of the original object's elements.
###Code
L = [1,2,3]
# print(next(L)) <-- raises TypeError: a list is not an iterator
I = iter(L)
print(I)
# In Python 3, use the next() built-in function
print(next(I))
print(next(I))
print(next(I))
print(next(I))
K = {1: "aaa", 2: "bbb", 3: "ccc"}
J = iter(K)
print(next(J))
print(next(J))
print(next(J))
###Output
1
2
3
###Markdown
- An example that uses an iterator over a list object
###Code
t = iter([1, 2, 3])
while True:
try:
x = next(t)
except StopIteration:
break
print(x)
###Output
1
2
3
###Markdown
- An example that uses an ordinary for ~ in loop over a list object
###Code
def f(x):
print(x + 1)
for x in [1,2,3]:
f(x)
###Output
2
3
4
###Markdown
- A for ~ in statement can work with an iterator directly. - On every pass of the for loop, next() is called on the iterator object automatically, giving sequential access to each item. - When StopIteration is raised, the for ~ in loop stops automatically.
###Code
def f(x):
print(x + 1)
t = iter([1, 2, 3])
for x in t:
f(x)
def f(x):
print(x + 1)
for x in iter([1, 2, 3]):
f(x)
def f(x):
print(x + 1)
for x in iter((1, 2, 3)):
f(x)
###Output
2
3
4
###Markdown
2-2 Implementing an iterator in a class
###Code
class Seq:
def __getitem__(self, n):
if n == 10:
raise IndexError()
return n
s = Seq()
for line in s:
print(line)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
- Implementing \_\_iter\_\_(self) (the counterpart of the built-in iter()) and \_\_next\_\_(self) (the counterpart of next()) - calling iter() on object o automatically invokes \_\_iter\_\_(self) - \_\_iter\_\_(self) returns an iterator object that has a \_\_next\_\_(self) method - in a for ~ in loop, \_\_next\_\_() takes precedence over \_\_getitem\_\_().
###Code
class Seq:
    def __init__(self, file):
        self.file = open(file)
    def __getitem__(self, n):        # hidden by __next__() when for ~ in is used
        if n == 10:
            raise StopIteration()
        return n
    def __iter__(self):
        return self
    def __next__(self):
        line = self.file.readline()  # read one line
        if not line:
            raise StopIteration      # nothing left to read: raise the exception
        return line                  # return the line that was read
s = Seq('readme.txt')     # the s instance has __next__(), so s itself is the iterator
for line in s:            # __iter__() is called first to get the iterator, then for ~ in calls __next__() on it
    print(line)
print()
print(Seq('readme.txt'))
# when the list() built-in receives an object, it gets the object's iterator and calls next() repeatedly to collect the elements
print(list(Seq('readme.txt')))
# when the tuple() built-in receives an object, it gets the object's iterator and calls next() repeatedly to collect the elements
print(tuple(Seq('readme.txt')))
class Seq:
    def __init__(self, fname):
        self.file = open(fname)
    #def __getitem__(self, n):
    #    if n == 10:
    #        raise StopIteration()
    #    return n
    def __iter__(self):
        return self
    def __next__(self):
        line = self.file.readline()  # read one line
        if not line:
            raise StopIteration()    # nothing left to read: raise the exception
        return line                  # return the line that was read
s = Seq('readme.txt')     # the s instance has __next__(), so s itself is the iterator
for line in s:            # __iter__() is called first to get the iterator, then for ~ in calls __next__() on it
    print(line),
print()
print(Seq('readme.txt'))
print(list(Seq('readme.txt')))   # list() gets the iterator and calls next() repeatedly to collect the elements
print(tuple(Seq('readme.txt')))  # tuple() gets the iterator and calls next() repeatedly to collect the elements
class Seq:
    def __init__(self, fname):
        self.file = open(fname)
    def __getitem__(self, n):
        if n == 10:
            raise StopIteration()
        return n
    # def __iter__(self):
    #     return self
    # def __next__(self):
    #     line = self.file.readline()  # read one line
    #     if not line:
    #         raise StopIteration()    # nothing left to read: raise the exception
    #     return line                  # return the line that was read
s = Seq('readme.txt')     # without __next__(), iteration falls back to __getitem__() with increasing indices
for line in s:            # here for ~ in drives __getitem__() with 0, 1, 2, ... until the exception is raised
    print(line),
print()
print(Seq('readme.txt'))
print(list(Seq('readme.txt')))   # list() gets the iterator and calls next() repeatedly to collect the elements
print(tuple(Seq('readme.txt')))  # tuple() gets the iterator and calls next() repeatedly to collect the elements
###Output
0
1
2
3
4
5
6
7
8
9
<__main__.Seq object at 0x10e6da438>
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
###Markdown
2-3 Dictionaries and iterators - For a dictionary, the for ~ in statement iterates over its keys.
###Code
d = {'one':1, 'two':2, 'three':3, 'four':4, 'five':5}
for key in d:
print(key, d[key])
d = {'one':1, 'two':2, 'three':3, 'four':4, 'five':5}
for key in iter(d):
print(key, d[key])
###Output
one 1
two 2
three 3
four 4
five 5
###Markdown
- Python 2.x - the d.iterkeys() method returns an iterator over the keys of dictionary d.- Python 3.x - use iter(d) or iter(d.keys()) instead.
###Code
print(type(d.keys()))
print(type(iter(d.keys())))
next(d.keys())        # TypeError: a dict_keys view is not itself an iterator
L = [1, 2, 3]
next(L)               # TypeError: a list is not an iterator either
next(iter(d.keys()))  # this works: iter() returns a real iterator
#python3.x
for key in d.keys():        # iterate over the keys; __next__(self) is called repeatedly on the iterator returned by iter(d.keys())
    print(key, end=" ")
print()
for key in iter(d.keys()):  # iterate over the keys; __next__(self) is called repeatedly on the iterator returned by iter(d.keys())
    print(key, end=" ")
keyset = iter(d)
print(next(keyset))         # an iterator can always be advanced with the next() built-in (which calls __next__(self))
for key in keyset:          # next() is called repeatedly on the keyset iterator
    print(key, end=" ")
print(type(d.values()))
print(type(iter(d.values())))
#python3.x
for key in d.values():      # iterate over the values through the iterator returned by iter(d.values())
    print(key, end=" ")
print()
for key in iter(d.values()):  # iterate over the values through the iterator returned by iter(d.values())
    print(key, end=" ")
print(type(d.items()))
print(type(iter(d.items())))
#python3.x
for key, value in iter(d.items()):  # iterate over (key, value) pairs through the iterator returned by iter(d.items())
    print(key, value)
###Output
one 1
two 2
three 3
four 4
five 5
###Markdown
2-4 File objects and iterators - A file object is itself an iterator - next() reads the lines one after another.
###Code
#python3.x
f = open('readme.txt')
print("next(f) - ", next(f))
for line in f:   # next(f) is called on each pass of the loop
    print(line)
###Output
next(f) - abc
def
ghi
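###Markdown
A related idiom worth knowing is the two-argument form iter(callable, sentinel), which keeps calling the callable until the sentinel value comes back; with readline and the empty string it reproduces the line-by-line behaviour shown above (a small illustrative sketch).
###Code
f = open('readme.txt')
for line in iter(f.readline, ''):   # stop once readline() returns the empty string
    print(line, end="")
f.close()
###Output
_____no_output_____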
###Markdown
*** 3 Generators*** 3-1 What is a generator?- A generator (Generator) is a function that can be resumed from the point where it was suspended. - The function f() below has a, b, c and d as its arguments and internal local variables. - Those a, b, c, d variables all disappear when the function finishes and returns.- A generator, unlike f(), is a function implemented so that its local variables are not released from memory when it (temporarily) finishes, and the next call continues execution from the point where the previous run stopped.
###Code
def f(a, b):
c = a * b
d = a + b
return c, d
x, y = f(1, 2)
print(x, y)
###Output
2 3
###Markdown
- The yield keyword - a function that returns values with yield instead of return is a generator. - Like return, yield hands a value back, but it preserves the function's execution state while returning control to the caller. - A generator is itself an iterator - that is, next() can be called on a generator.
###Code
def f(a, b):
c = a * b
d = a + b
yield c, d
g = f(1, 2)
x, y = next(g)
print(x, y)
x, y = next(g)
print(x, y)
def f(a, b):
for _ in range(2):
c = a * b
d = a + b
yield c, d
g = f(1, 2)
x, y = next(g)
print(x, y)
x, y = next(g)
print(x, y)
def f(a, b):
c = 0
d = 0
for _ in range(2):
c += a
d += b
yield c, d
g = f(1, 2)
x, y = next(g)
print(x, y)
x, y = next(g)
print(x, y)
def generate_ints(N):
    for i in range(N):
        yield i
gen = generate_ints(3)   # get a generator object; an initial frame for generate_ints() is created but execution is suspended
print(gen)
# print(gen.next())
# print(gen.next())
# print(gen.next())
# print(gen.next())
print(next(gen))   # generators implement the iterator interface; execution starts, yield returns a value and suspends
print(next(gen))   # execution resumes, yield returns the next value and suspends again
print(next(gen))   # execution resumes, yield returns the next value and suspends again
print(next(gen))   # execution resumes, but there is nothing left to yield, so StopIteration is raised
###Output
<generator object generate_ints at 0x10e6fa1a8>
0
1
2
###Markdown
- Because it behaves as shown above, a generator can be used directly in a for ~ in statement, as follows.
###Code
for i in generate_ints(5):
print(i, end=" ")
###Output
0 1 2 3 4
###Markdown
- Differences between a generator function and an ordinary function - an ordinary function, once called, finishes all of the work defined inside it and then returns the result - a generator function can hand back intermediate results while it is still running. - Generators are useful when other code needs to consume the intermediate results of a computation - that is, when results should be received and used one at a time instead of all at once - and when a sequence should be produced memory-efficiently. 3-2 Generator expressions- A list comprehension (List Comprehension) creates a new list object - the memory for the whole list is actually allocated while it is being built.
###Code
a = [k for k in range(100) if k % 5 == 0]
print(a)
type(a)
###Output
[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95]
###Markdown
- Using () instead of [] in the comprehension syntax creates a generator instead of a list - that is, the elements are not all created up front; each element is produced only at the moment it is needed - so memory is used more efficiently.
###Code
a = (k for k in range(100) if k % 5 == 0)
print(a)
type(a)
# print(a.next())
# print(a.next())
# print(a.next())
print(next(a))
print(next(a))
print(next(a))
for i in a:
print(i, end=" ")
###Output
0
5
10
15 20 25 30 35 40 45 50 55 60 65 70 75 80 85 90 95
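###Markdown
A quick, illustrative way to make the memory claim concrete is sys.getsizeof: the fully built list grows with the number of elements, while the generator object stays tiny regardless of the range.
###Code
import sys
full_list = [k for k in range(1000000) if k % 5 == 0]
lazy_gen = (k for k in range(1000000) if k % 5 == 0)
print(sys.getsizeof(full_list), "bytes for the materialised list")
print(sys.getsizeof(lazy_gen), "bytes for the generator object")
###Output
_____no_output_____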
###Markdown
- In the example below, a generator is passed to the built-in sum function. - At the moment sum is called, the generator has not been driven yet, so none of its elements exist yet. - Inside sum, the generator's next() is called repeatedly, so each element is created exactly when it is used. - Memory usage is therefore efficient.
###Code
a = [1, 2, 3]
print(sum(a))
a = (k for k in range(100) if k % 5 == 0)
print(sum(a))
print(a)
print(next(a))
###Output
950
<generator object <genexpr> at 0x10e7a51a8>
###Markdown
3-3 Generator example 1 - the Fibonacci sequence
###Code
def fibonacci(a = 1, b = 1):
while 1:
yield a
a, b = b, a + b
for k in fibonacci():   # use the generator directly in a for ~ in statement
if k > 100:
break
print(k, end=" ")
###Output
1 1 2 3 5 8 13 21 34 55 89
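###Markdown
Because the generator is a regular iterator, itertools can consume it too; for example, itertools.islice takes a fixed number of Fibonacci numbers without the explicit break (a small illustrative variation).
###Code
import itertools
print(list(itertools.islice(fibonacci(), 11)))   # the first 11 Fibonacci numbers
###Output
_____no_output_____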
###Markdown
3-4 Generator example 2 - building the set of odd numbers - first, a version using an iterator
###Code
#python3.x
class Odds:
    def __init__(self, limit = None):   # constructor
        self.data = -1                   # initial value
        self.limit = limit               # upper bound
    def __iter__(self):                  # special method returning the iterator for an Odds object
        return self
    def __next__(self):                  # required method of an iterator
        self.data += 2
        if self.limit and self.limit <= self.data:
            raise StopIteration()
        return self.data
for k in Odds(20):
    print(k, end=" ")
print()
print(list(Odds(20)))   # list() gets the object's iterator and calls __next__(self) repeatedly to collect the elements
###Output
1 3 5 7 9 11 13 15 17 19
[1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
###Markdown
- The nicest version, using a generator
###Code
def odds(limit=None):
k = 1
while not limit or limit >= k:
yield k
k += 2
for k in odds(20):
print(k, end=" ")
print()
print(list(odds(20)))   # list() calls next() on the generator repeatedly to collect the elements
###Output
1
3
5
7
9
11
13
15
17
19
[1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
|
data/Siamese_plots.ipynb | ###Markdown
Siamese Network with simulated scatter plot data.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import random
from PIL import Image
import torch
from torch.autograd import Variable
import PIL.ImageOps
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import torchvision
import glob
from torchviz import *
POS_LABEL = 0 # Pair of Images that match
NEG_LABEL = 1 # Pair of Images that do not match
#If you reverse the labels, you have to change the Contrastive Loss function.
SZ = 128
MARGIN = 5.0
###Output
_____no_output_____
###Markdown
Model
###Code
class SiameseNetwork(nn.Module):
def __init__(self):
super(SiameseNetwork, self).__init__()
self.cnn1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(1, 4, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(4),
nn.ReflectionPad2d(1),
nn.Conv2d(4, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8),
nn.ReflectionPad2d(1),
nn.Conv2d(8, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8)
)
self.fc1 = nn.Sequential(
nn.Linear(8*SZ*SZ, 500),
nn.ReLU(inplace=True),
nn.Linear(500, 500),
nn.ReLU(inplace=True),
nn.Linear(500, 5))
def feature_extract(self, x):
output = self.cnn1(x)
output = output.view(output.size()[0], -1)
output = self.fc1(output)
return output
def forward(self, input1, input2):
output1 = self.feature_extract(input1) #extract features from image0
output2 = self.feature_extract(input2) #extract features from image1
return output1, output2
net = SiameseNetwork()
print(net)
X0 = torch.zeros((2,1, SZ, SZ)) #channel first (after batch)
X1 = torch.zeros((2,1, SZ, SZ)) #channel first (after batch)
d1,d2 = net(X0,X1)
###Output
SiameseNetwork(
(cnn1): Sequential(
(0): ReflectionPad2d((1, 1, 1, 1))
(1): Conv2d(1, 4, kernel_size=(3, 3), stride=(1, 1))
(2): ReLU(inplace=True)
(3): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(4): ReflectionPad2d((1, 1, 1, 1))
(5): Conv2d(4, 8, kernel_size=(3, 3), stride=(1, 1))
(6): ReLU(inplace=True)
(7): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(8): ReflectionPad2d((1, 1, 1, 1))
(9): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1))
(10): ReLU(inplace=True)
(11): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(fc1): Sequential(
(0): Linear(in_features=131072, out_features=500, bias=True)
(1): ReLU(inplace=True)
(2): Linear(in_features=500, out_features=500, bias=True)
(3): ReLU(inplace=True)
(4): Linear(in_features=500, out_features=5, bias=True)
)
)
###Markdown
Contrastive Loss function
###Code
class ContrastiveLoss(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=MARGIN):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2, keepdim = True)
loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +
(label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
pred = (self.margin < euclidean_distance).type(torch.float)
return loss_contrastive, euclidean_distance, pred
###Output
_____no_output_____
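###Markdown
A quick CPU-only sanity check of the loss on random embeddings, to confirm the three return values and their shapes (the tensors here are illustrative, not real data).
###Code
loss_fn = ContrastiveLoss()
emb1, emb2 = torch.randn(4, 5), torch.randn(4, 5)
labels = torch.randint(0, 2, (4, 1)).float()
loss, dist, pred = loss_fn(emb1, emb2, labels)
print(loss.item(), dist.shape, pred.shape)
###Output
_____no_output_____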
###Markdown
BCELoss (did not do well)
###Code
#did not work well, you can try by setting criterion = SimpleBCELoss() below.
class SimpleBCELoss(torch.nn.Module):
def __init__(self):
super(SimpleBCELoss,self).__init__()
self.bce_loss = nn.BCELoss()
def forward(self,output1,output2,label):
edist = nn.PairwiseDistance(p=2,keepdim=True)(output1,output2)
edist = torch.sigmoid(edist)
loss_bce = self.bce_loss(edist,label)
return loss_bce
###Output
_____no_output_____
###Markdown
Make paired data
###Code
def get_positive_pairs(path='./data/mol_data/*'): # both images come from the same family directory
positive_pairs = []
all_fam_dirs = glob.glob(path)
for famdir in all_fam_dirs:
mol_files = glob.glob(famdir+'/*.png')
for ff1 in mol_files:
for ff2 in mol_files:
if ff1 < ff2:
positive_pairs.append((ff1,ff2))
return positive_pairs
def get_negative_pairs(path='./data/mol_data/*',cnt=100): # the two images come from different family directories
negative_pairs = []
all_fam_dirs = glob.glob(path)
random.shuffle(all_fam_dirs)
all_fam_dirs_rev = all_fam_dirs[::-1] #reversed
for famdir1,famdir2 in zip(all_fam_dirs,all_fam_dirs_rev):
if famdir1!=famdir2:
mol_files_1 = glob.glob(famdir1+'/*.png')
mol_files_2 = glob.glob(famdir2+'/*.png')
for ff1 in mol_files_1:
for ff2 in mol_files_2:
negative_pairs.append((ff1,ff2))
if len(negative_pairs) >= cnt:
break
return negative_pairs
def read_img(img_path):
img = Image.open(img_path)
img = img.convert('L')
img = img.resize((SZ,SZ))
img = np.asarray(img,dtype=np.float32)/255.0
return img
def build_paired_data(path,shuffle):
positive_pairs = get_positive_pairs(path)
negative_pairs = get_negative_pairs(path,len(positive_pairs))
print('Got ',len(positive_pairs),'positive_pairs')
print('Got ',len(negative_pairs),'negative_pairs')
if shuffle:
random.shuffle(positive_pairs)
random.shuffle(negative_pairs)
positive_labels = [POS_LABEL]*len(positive_pairs)
negative_labels = [NEG_LABEL]*len(negative_pairs)
all_pairs = positive_pairs + negative_pairs
all_labels = positive_labels + negative_labels
data = list(zip(all_pairs,all_labels))
random.shuffle(data)
print('Loading data size',len(data))
pairImages = []
pairLabels = []
pairNames = []
for image_pair,label in data:
img0 = read_img(image_pair[0])
img1 = read_img(image_pair[1])
pairImages.append([img0,img1])
pairLabels.append([label]) #very important to have labels as shape `batch_size` x 1
pairNames.append([image_pair[0],image_pair[1]])
return np.expand_dims(np.array(pairImages),axis=2), np.array(pairLabels), np.array(pairNames)
pairTrain, labelTrain, pairNames = build_paired_data('./data/mol_data/*',True)
print(pairTrain.shape, labelTrain.shape)
###Output
Got 200 positive_pairs
Got 200 negative_pairs
Loading data size 400
(400, 2, 1, 128, 128) (400, 1)
###Markdown
Create Siamese network and train
###Code
new_net = SiameseNetwork().cuda()
criterion = ContrastiveLoss()
optimizer = optim.Adam(new_net.parameters(),lr = 0.0005)
num_epochs = 10
batch_size = 64
num_batches = len(pairTrain) // batch_size
counter = []
loss_history = []
itr_no = 0
for epoch in range(num_epochs):
epoch_loss = [] # Sum of training loss, no. of tokens
epoch_accuracy = []
for batch_no in range(num_batches):
optimizer.zero_grad()
# Local batches and labels
X = pairTrain[batch_no*batch_size:(batch_no+1)*batch_size,]
y = labelTrain[batch_no*batch_size:(batch_no+1)*batch_size,]
X0 = torch.tensor(X[:, 0]).float().cuda()
X1 = torch.tensor(X[:, 1]).float().cuda()
Y = torch.tensor(y).float().cuda()
output1,output2 = new_net(X0,X1)
loss_contrastive, edist, predictions = criterion(output1,output2,Y)
loss_contrastive.backward()
optimizer.step()
epoch_loss.append(loss_contrastive.item())
acc = (Y==predictions).type(torch.float).cpu().numpy()
epoch_accuracy.extend(acc)
epoch_loss = np.mean(epoch_loss)
print('epoch',epoch,'loss=',epoch_loss,'acc=',np.mean(epoch_accuracy))
loss_history.append(epoch_loss)
counter.append(epoch)
###Output
epoch 0 loss= 8.871821959813436 acc= 0.8203125
epoch 1 loss= 4.87911335627238 acc= 0.9505208
epoch 2 loss= 3.5122199058532715 acc= 0.9244792
epoch 3 loss= 0.9253520170847574 acc= 0.9661458
epoch 4 loss= 0.33208129554986954 acc= 0.984375
epoch 5 loss= 0.1407537336150805 acc= 0.9895833
epoch 6 loss= 0.07411744073033333 acc= 1.0
epoch 7 loss= 0.033875590190291405 acc= 0.9895833
epoch 8 loss= 0.019601694618662197 acc= 1.0
epoch 9 loss= 0.015237660147249699 acc= 1.0
###Markdown
Plot training loss
###Code
def show_plot(iteration,loss):
plt.plot(iteration,loss)
plt.show()
show_plot(counter,loss_history)
###Output
_____no_output_____
###Markdown
Visualize dynamic graph
###Code
make_dot(loss_contrastive)
###Output
Warning: Could not load "/opt/conda/lib/graphviz/libgvplugin_pango.so.6" - file not found
###Markdown
Plot some images and their predictions
###Code
def imshow(img,text=None,should_save=False):
npimg = img.numpy()
plt.axis("off")
if text:
plt.text(280, 10, text, style='italic',fontweight='bold',
bbox={'facecolor':'white', 'alpha':0.8, 'pad':10})
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
X = pairTrain[100:110,]
y = labelTrain[100:110,]
X0 = torch.tensor(X[:, 0]).float().cuda()
X1 = torch.tensor(X[:, 1]).float().cuda()
Y = torch.tensor(y).float().cuda()
output1,output2 = new_net(X0,X1)
loss_contrastive, edist, predictions = criterion(output1,output2,Y)
#edist = F.pairwise_distance(output1, output2, keepdim = True)
for i in range(10):
z = torch.cat((X0[i:i+1],X1[i:i+1]),0).cpu()
d = edist[i].cpu().item()
pred = int(predictions[i].cpu().item())
imshow(torchvision.utils.make_grid(z),'Dissimilarity: {:.2f} {}'.format(d,pred))
###Output
_____no_output_____ |
examples/ilp.ipynb | ###Markdown
ILP solving in PythonThis is a small example to show how you use the [Gurobi Optimizer](https://www.gurobi.com/) using Python to do ILP solving.First, [install Gurobi](https://www.gurobi.com/downloads/) (e.g., using `conda install -c gurobi gurobi`). Get a [(free academic) license](https://www.gurobi.com/academia/academic-program-and-licenses/), and then install the license using `grbgetkey` (e.g., with `grbgetkey ae36ac20-16e6-acd2-f242-4da6e765fa0a`, where `ae...0a` is replaced by your license number).Let's start with importing the gurobipy library:
###Code
import gurobipy as gp
from gurobipy import GRB
###Output
_____no_output_____
###Markdown
We will then create an optimization model, add some binary integer variables to the model, and add some linear constraints to the model.
###Code
# Create a model
model = gp.Model();
# Add binary variables v[0], v[1], v[2], v[3]
v = model.addVars(4, vtype=GRB.BINARY, name="v");
# Add some constraints
model.addConstr(v[0] + 2 * v[2] + 3 * v[3] <= 4, "constr0");
model.addConstr(gp.quicksum([v[i] for i in [0,1]]) == 1, "constr1");
###Output
_____no_output_____
###Markdown
We can also set an optimization objective. In this case, let's add the assignment to maximize the sum v[1] + v[2] + v[3].
###Code
# Add maximization objective
model.setObjective(gp.quicksum([v[i] for i in [1,2,3]]), GRB.MAXIMIZE);
###Output
_____no_output_____
###Markdown
Now, let's call the solver to find a model that satisfies the constraints and achieves the optimization objective.
###Code
model.optimize();
###Output
_____no_output_____
###Markdown
If `model.optimize()` found an (optimal) model, we can access it as follows, for example:
###Code
if model.status == GRB.OPTIMAL:
for v in model.getVars():
print("{}: {}".format(v.varName, v.x));
else:
print("No optimal model found!");
###Output
v[0]: 0.0
v[1]: 1.0
v[2]: 1.0
v[3]: 0.0
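###Markdown
Besides the variable values, the optimal objective value can also be read from the model (a one-line illustrative check).
###Code
if model.status == GRB.OPTIMAL:
    print("Objective value: {}".format(model.objVal));
###Output
_____no_output_____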
|
HopML/Logistic Regression.ipynb | ###Markdown
Logistic Regression SummaryA logistic regression model tries to model $P(Y|X)$. We use a logit transformation to make sure that $P(Y|X)$ is between 0 and 1. The linear function (used inside the logit) is trained via maximum likelihood estimation. Detailed summary$$\log\left(\frac{p(X)}{1 - p(X)}\right) = \beta_0 + \beta_1 X$$As $p(X)$ increases, the odds $\frac{p(X)}{1 - p(X)}$ increase monotonically, and so does the linear predictor $\beta_0 + \beta_1 X$; $p(x)$ itself is always between 0 and 1.We train this via MLE: describe the distribution of observing $Y | X$ where $$\begin{cases} 0 & \hbox{with probability } 1 - p(x)\\ 1 & \hbox{with probability } p(x) \\\end{cases}$$where $P(Y=1) = \frac{e^{\beta_0 + \beta_1 x}}{1 + e^{\beta_0 + \beta_1 x}} = p(x)$.If $Y_i \sim Bernoulli(p(X_i))$, then $p(Y=k) = p(x)^{k} (1 - p(x))^{1 - k}$.Given that each sample is independent, the likelihood is the product (over all samples) of $p(Y=k)$:$$L(\hat{\beta}) = \Pi_{i=1}^n p(y_i = k) = \Pi_{i=1}^n \{ p(x_i)^{y_i} [1 - p(x_i)]^{1 - y_i} \} = \Pi_{i: y_i = 1} p(x_i) \Pi_{i: y_i = 0} [1 - p(x_i)]$$From here, we can solve as usual by taking the log-likelihood and setting its derivative equal to zero.Maximum likelihood is the optimization problem whose solution is $\hat{\beta}$.Each coefficient describes the effect of its covariate $X$ on the log-odds of the class. Multiple classes $p_k(X) = Pr(Y=k|X) = \frac{e^{\beta_k^T X}}{\sum_{j=1}^K e^{\beta_j^T X}}$The softmax is unchanged if the same vector is added to every $\beta_k$, so one class (say $K$) is used as a reference: the log-odds of class $k$ relative to class $K$ depend only on $\beta_k^\star = \beta_k - \beta_K$.
###Code
import numpy as np
def softmax(X):
    return np.exp(X) / np.sum(np.exp(X), axis=0)
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
class LogisticRegression:
    # minimal sketch: binary logistic regression fit by gradient ascent on the log-likelihood
    def __init__(self):
        self.beta = None
    def fit(self, X, y, learning_rate=0.01, num_iters=100):
        Xb = np.column_stack([np.ones(len(X)), X])          # prepend an intercept column
        self.beta = np.zeros(Xb.shape[1])
        for _ in range(num_iters):
            p = sigmoid(Xb @ self.beta)                     # current P(Y=1|X)
            self.beta += learning_rate * (Xb.T @ (y - p))   # gradient of the log-likelihood
        return self
    def forward(self, X):
        Xb = np.column_stack([np.ones(len(X)), X])
        return sigmoid(Xb @ self.beta)                      # predicted probabilities
def logistic_regression(X, y, **fit_kwargs):
    return LogisticRegression().fit(X, y, **fit_kwargs)
###Output
_____no_output_____
###Markdown
Iterative Reweighted Least Squares (IRLS) Background IdeasIn a generalized linear model (glm), the idea is to relate a general response variable $y_i$ with a set of covariates, in order to get a predictive model similar to the one provided by simple regression.Assume $n$ observations of a response variable $y_1, y_2, ..., y_n$ and $k$ explanatory variables $x_1, x_2, ..., x_k$. with unknown parameters $\beta_0, \beta_1, ..., \beta_k$. 3 parts for the GLM:1. Random component ($y_i$) -> distribution of $y_i$2. Systematic component - explanatory variable form a linear predictor $\beta_0 + \beta_1 x_1 + ... + \beta_k x_k$3. Link between random component and systematic components.**Example 1:** In linear regression, $y_i \sim N(\mu, \sigma^2)$, where $\mu = E[Y_i]$ (which is the random component), and we have systematic component which is $\beta_0 + \beta_1 x_1 + ... + \beta_k x_k$. In linear regression, we set the random component and systematic component equal. Therefore, the link function is the identity function.**Example 2:** Let's say $y_i \sim Ber(\pi_i)$, so $E[y_i] = \pi_i$ and we know $\pi_i \in [0, 1]$. This is the random component. The systematic component again is $\beta_0 + \beta_1 x_1 + ... + \beta_k x_k$. However, we cannot set these equal (aka, use identity as link function) because linear regression does not constrict the domain to $[0, 1]$. Instead, we use the logit link function where$$logit(\pi) = \log(\frac{\pi}{1 - \pi})$$This is the log odds of success. Now, assume that $y_i | z_i \sim_{iid} Ber(\pi_i), i=1, ..., n$ where $\pi_i$ is related to the set of covariates $z_i$ by the logit link function, i.e.$$logit (\pi_i) = \log(\frac{\pi_i}{1 - \pi_i}) = \vec{Z}_i^\prime \vec{\beta}$$For simplicity, we may assume that $\vec{z}_i = (1, z_i)^\prime$ and $\vec{\beta} = (\beta_0, \beta_1)^\prime$. So, the link function relates the natural parameter of the Bernuolli as a member of the exponential family with the set of covariates.The log likelihood is $$l(\vec{\beta}) = \vec{y}^\prime \vec{z} \vec{\beta} - \vec{b}^\prime \vec{1}$$ where $\vec{1}$ is a vector of ones, $\vec{y} = (y_1, y_2, ..., y_n)^\prime$, $\vec{z}$ is the $n \times 2$ matrix whose $i$th row is $z_i^\prime$ and $b = \{ -\log(1 - \pi_i) \}^n_{i=1}$. PROVE THIS We want to use the Newton's method to find the MLE $\hat{\beta}$ which maximizes the likelihood.To do this, we need to find the first and second derivatives.The score function is $$l^\prime(\beta) = z^\prime(y - \vec{\pi})$$ where $\vec{\pi}$ is the column vector of the Bernuolli probability $\pi_i$.The Hessian matrix is given by $$l^{\prime \prime}(\beta) = \frac{d}{d \beta}( z^\prime (y - \pi)) = - \vec{z}^\prime \vec{w} \vec{z}$$ where $\vec{w}$ is the diagonal matrix with $i$th diagonal entry $\pi_i (1 - \pi_i)$. With these, we apply Newton-Raphson method (see root-finding page).The Newton's update is given by:$$\begin{align*}\beta^{(t+1)} &= \beta^{(t)} - [l^{\prime \prime} (\beta^{(t)}) ]^{-1} l^\prime(\beta^{(t)})\\&= \beta^{(t)} + (Z^\prime w^{(t)} Z)^{-1} (Z^\prime (y - \pi^{(t)}))\\\end{align*}$$where $\pi^{(t)}$ is the value of $\pi$ corresponding to $\beta^{(t)}$ and $w^{(t)}$ is evaluated at $\pi^{(t)}$.As a reminder, OLS would find that $\hat{\beta} = (Z^\prime Z))^{-1} Z^\prime y$. This shares the same structure as our Newton raphson update.**Remarks**1. 
From the update formula, it follows that the problem of finding the MLE in a GLM framework reduces to repeated weighted least squares fits in which the inverses of the diagonal values of $W$ are the appropriate weights.2. Since the Hessian does not depend on the response $y$ (the logit is the canonical link), the expected Fisher information matrix is equal to the observed Fisher information matrix.3. IRLS can be slow and unreliable unless the model fits the data well.**Example:** Example 2.5 on page 37 (G + H), irls.R
###Code
# Do exercise.
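# One possible numpy sketch of the IRLS / Newton update derived above (the exercise itself is left open).
# It assumes a design matrix Z whose first column is all ones and a 0/1 response vector y;
# the synthetic data below is purely illustrative.
import numpy as np

def irls_logistic(Z, y, num_iters=25):
    beta = np.zeros(Z.shape[1])
    for _ in range(num_iters):
        pi = 1.0 / (1.0 + np.exp(-Z @ beta))                          # fitted probabilities
        W = np.diag(pi * (1.0 - pi))                                  # diagonal weight matrix
        beta = beta + np.linalg.solve(Z.T @ W @ Z, Z.T @ (y - pi))    # Newton / IRLS step
    return beta

rng = np.random.default_rng(0)
z1 = rng.normal(size=200)
Z = np.column_stack([np.ones(200), z1])
y = (rng.uniform(size=200) < 1.0 / (1.0 + np.exp(-(0.5 + 2.0 * z1)))).astype(float)
print(irls_logistic(Z, y))   # should recover roughly (0.5, 2.0)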
###Output
_____no_output_____ |
Design_Patterns.ipynb | ###Markdown
Observer Pattern https://www.cnblogs.com/lizhitai/p/4459126.html Two roles: 1. the subject publisher 2. the subject subscriber. Benefits: less coupling between pieces of code, and features are easier to extend and maintain. A common application scenario is an event-driven engine: an event-based processing engine listens for events and changes state accordingly, for example listening for stock buy/sell events and updating the position state and the cash-holding state. A simple Python implementation: let us implement an example in which different users publish technical posts on TechForum; whenever any user publishes a new post, the other users receive a notification of it. From an object point of view, we should have a TechForum object, and a number of user objects need to register with TechForum; when a new post is announced, the post title should be sent out. A simple analogy is the relationship between a recruitment agency and job seekers and employers, which is an extension of the publisher and subscriber relationship: the agency publishes all kinds of job information, applicants look for the job information relevant to them, and the agency also notifies the applicants who have registered with it. Publisher abstract class
###Code
class Publisher:
def __init__(self):
pass
def register(self):
pass
def unregister(self):
pass
def notifyAll(self):
pass
###Output
_____no_output_____
###Markdown
Publisher implementation
###Code
class TechForum(Publisher):
def __init__(self):
self._listOfUsers = []
self.postname = None
def register(self, userObj):
if userObj not in self._listOfUsers:
self._listOfUsers.append(userObj)
def unregister(self, userObj):
self._listOfUsers.remove(userObj)
def notifyAll(self):
for objects in self._listOfUsers:
objects.notify(self.postname)
def writeNewPost(self , postname):
self.postname = postname
self.notifyAll()
###Output
_____no_output_____
###Markdown
Subscriber abstract class
###Code
class Subscriber:
def __init__(self):
pass
def notify(self):
pass
###Output
_____no_output_____
###Markdown
Subscriber implementations
###Code
class User1(Subscriber):
def notify(self, postname):
print("User1 notified of a new post %s" % postname)
class User2(Subscriber):
def notify(self, postname):
print("User2 notified of a new post %s" % postname)
class SisterSites(Subscriber):
def __init__(self):
self._sisterWebsites = ["Site1" , "Site2", "Site3"]
def notify(self, postname):
for site in self._sisterWebsites:
            print("Send notification to site:%s " % site)
if __name__ == "__main__":
techForum = TechForum()
user1 = User1()
user2 = User2()
sites = SisterSites()
techForum.register(user1)
techForum.register(user2)
techForum.register(sites)
techForum.writeNewPost("Observe Pattern in Python")
techForum.unregister(user2)
techForum.writeNewPost("MVC Pattern in Python")
###Output
User1 notified of a new post Observe Pattern in Python
User2 notified of a new post Observe Pattern in Python
Send notification to site:Site1
Send notification to site:Site2
Send notification to site:Site3
User1 notified of a new post MVC Pattern in Python
Send notification to site:Site1
Send notification to site:Site2
Send notification to site:Site3
###Markdown
trade
###Code
# (left unfinished in the original; a hypothetical sketch of the trade example follows below)
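# A hypothetical sketch of the event-driven trading engine mentioned at the top of the notebook:
# an engine publishes fill events and a position tracker subscribes to them.
# All class and method names below are illustrative assumptions, not an existing API.
class TradeEventEngine(Publisher):
    def __init__(self):
        self._listeners = []
    def register(self, listener):
        if listener not in self._listeners:
            self._listeners.append(listener)
    def unregister(self, listener):
        self._listeners.remove(listener)
    def notifyAll(self, symbol, qty, price):
        for listener in self._listeners:
            listener.notify(symbol, qty, price)
    def on_fill(self, symbol, qty, price):   # called whenever an order is filled
        self.notifyAll(symbol, qty, price)

class PositionTracker(Subscriber):
    def __init__(self):
        self.positions = {}
    def notify(self, symbol, qty, price):
        self.positions[symbol] = self.positions.get(symbol, 0) + qty
        print("position update: %s -> %s @ %s" % (symbol, self.positions[symbol], price))

engine = TradeEventEngine()
engine.register(PositionTracker())
engine.on_fill("AAPL", 100, 151.2)
engine.on_fill("AAPL", -40, 152.0)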
###Output
_____no_output_____ |
1 - Python/2 - Python Function [exercise-functions-and-getting-help].ipynb | ###Markdown
**This notebook is an exercise in the [Python](https://www.kaggle.com/learn/python) course. You can reference the tutorial at [this link](https://www.kaggle.com/colinmorris/functions-and-getting-help).**--- Functions are powerful. Try writing some yourself.As before, don't forget to run the setup code below before jumping into question 1.
###Code
# SETUP. You don't need to worry for now about what this code does or how it works.
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex2 import *
print('Setup complete.')
###Output
Setup complete.
###Markdown
1.Complete the body of the following function according to its docstring.HINT: Python has a built-in function `round`.
###Code
def round_to_two_places(num):
"""Return the given number rounded to two decimal places.
>>> round_to_two_places(3.14159)
3.14
"""
# Replace this body with your own code.
# ("pass" is a keyword that does literally nothing. We used it as a placeholder
# because after we begin a code block, Python requires at least one line of code)
return round(num,2)
# Check your answer
q1.check()
# Uncomment the following for a hint
#q1.hint()
# Or uncomment the following to peek at the solution
#q1.solution()
###Output
_____no_output_____
###Markdown
2.The help for `round` says that `ndigits` (the second argument) may be negative.What do you think will happen when it is? Try some examples in the following cell.
###Code
# Put your test code here
print(round(3.1258,-1))
###Output
0.0
###Markdown
Can you think of a case where this would be useful? Once you're ready, run the code cell below to see the answer and to receive credit for completing the problem.
###Code
# Check your answer (Run this code cell to receive credit!)
q2.solution()
###Output
_____no_output_____
###Markdown
3.In the previous exercise, the candy-sharing friends Alice, Bob and Carol tried to split candies evenly. For the sake of their friendship, any candies left over would be smashed. For example, if they collectively bring home 91 candies, they'll take 30 each and smash 1.Below is a simple function that will calculate the number of candies to smash for *any* number of total candies.Modify it so that it optionally takes a second argument representing the number of friends the candies are being split between. If no second argument is provided, it should assume 3 friends, as before.Update the docstring to reflect this new behaviour.
###Code
def to_smash(total_candies, n=3):
    """Return the number of leftover candies that must be smashed after distributing
    the given number of candies evenly between n friends (3 by default).

    >>> to_smash(91)
    1
    """
    return total_candies % n
# Check your answer
q3.check()
#q3.hint()
#q3.solution()
###Output
_____no_output_____
###Markdown
4. (Optional)It may not be fun, but reading and understanding error messages will be an important part of your Python career.Each code cell below contains some commented buggy code. For each cell...1. Read the code and predict what you think will happen when it's run.2. Then uncomment the code and run it to see what happens. (**Tip**: In the kernel editor, you can highlight several lines and press `ctrl`+`/` to toggle commenting.)3. Fix the code (so that it accomplishes its intended purpose without throwing an exception)
###Code
round_to_two_places(9.9999)
x = -10
y = 5
# # Which of the two variables above has the smallest absolute value?
smallest_abs = min(abs(x), abs(y))
print(smallest_abs)
def f(x):
y = abs(x)
return y
print(f(5))
###Output
5
|
ensemble_subnetwork_performance_eval/dd2412_model_performance_analysis.ipynb | ###Markdown
Replication of results for optimal number of subnetworks
###Code
import functools
import os
import time
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from tensorflow.python.summary.summary_iterator import summary_iterator
%load_ext tensorboard
# %reload_ext tensorboard
from google.colab import drive
drive.mount('gdrive')
# Load the datasets - this directory contain folders of the model at each epoch
ROOT_PATH = 'gdrive/My Drive/KTH/MNIST_models/MNIST_NN/WRN28-2/'
# All model paths:
model_paths = [
'M1/summaries/events.out.tfevents.1637871015.deeplearning-1-vm.9861.0.v2',
'M2/summaries/events.out.tfevents.1637674717.deeplearning-1-vm.26676.0.v2',
'M3/summaries/events.out.tfevents.1637590374.deeplearning-1-vm.27110.0.v2',
'M4/summaries/events.out.tfevents.1637741894.deeplearning-1-vm.20526.0.v2',
'M5/summaries/events.out.tfevents.1637828638.deeplearning-1-vm.19571.0.v2',
'M6/summaries/events.out.tfevents.1637913700.deeplearning-1-vm.32648.0.v2',
'M10/summaries/events.out.tfevents.1638560063.deeplearning-1-vm.12253.0.v2'
]
MODEL_PATH = ROOT_PATH+'M1/summaries/events.out.tfevents.1637871015.deeplearning-1-vm.9861.0.v2'
model_folders = os.listdir(ROOT_PATH)
print(len(model_folders))
!tensorboard --inspect --event_file=MODEL_PATH
from tensorboard.backend.event_processing import event_accumulator
ea = event_accumulator.EventAccumulator(MODEL_PATH)
ea.Reload()
ea.Tags()
ensemble_accuracies = []
ensemble_nll = []
member_accuracies = []
member_nlls = []
ensemble_members = [1,2,3,4,5,6,10]
all_member_accuracies = []
for i in range(7):
model_path = model_paths[i]
a = summary_iterator(ROOT_PATH + model_path)
TAG_NAME = "test/accuracy"
NLL_NAME = 'test/negative_log_likelihood'
MEMBER_TAG_NAMES = []
MEMBER_NLL_NAMES = []
for j in range(ensemble_members[i]): # collect the data for each of the ensemble members in the current model
MEMBER_TAG_NAMES.append('test/accuracy_member_'+str(j))
MEMBER_NLL_NAMES.append('test/nll_member_'+str(j))
acc_list = []
nll_list = []
member_acc = {}
member_nll = {}
for e in a:
for v in e.summary.value:
if v.tag == TAG_NAME:
value = tf.make_ndarray(v.tensor)
acc_list.append(value)
elif v.tag == NLL_NAME:
value = tf.make_ndarray(v.tensor)
nll_list.append(value)
elif v.tag in MEMBER_TAG_NAMES:
value = tf.make_ndarray(v.tensor)
if not v.tag in member_acc:
member_acc[v.tag] = []
member_acc[v.tag].append(value)
elif v.tag in MEMBER_NLL_NAMES:
value = tf.make_ndarray(v.tensor)
if not v.tag in member_nll:
member_nll[v.tag] = []
member_nll[v.tag].append(value)
ensemble_accuracies.append(np.max(np.array(acc_list)))
ensemble_nll.append(np.min(np.array(nll_list)))
member_accuracies.append(member_acc)
member_nlls.append(member_nll)
print('ensemble accs: ', ensemble_accuracies)
# parse the member accuracies and nlls
# for each of the different network architectures
accs = []
nlls = []
for i in range(len(member_accuracies)):
current_accuracies = member_accuracies[i]
current_nll = member_nlls[i]
accs_curr = []
nlls_curr = []
for key, value in current_accuracies.items():
# member_accuracies[i][key] = value[-1]
accs_curr.append(np.max(np.array(value)))
for key, value in current_nll.items():
# member_nlls[i][key] = value[-1]
nlls_curr.append(np.min(np.array(value)))
accs.append(accs_curr)
nlls.append(nlls_curr)
print(accs)
print(nlls)
# compute means and stds of each number of members
acc_means = []
acc_std = []
nll_means = []
nll_std = []
for i in range(7):
acc_means.append(np.mean(np.array(accs[i])))
acc_std.append(np.std(np.array(accs[i])))
nll_means.append(np.mean(np.array(nlls[i])))
nll_std.append(np.std(np.array(nlls[i])))
def plot_results(num_members, ensemble, member_means, member_stds, title, include_var=True):
plt.plot(num_members, member_means, label='Members')
plt.plot(num_members, ensemble, label="Ensemble")
if include_var:
plt.errorbar(num_members, member_means, member_stds, linestyle='None', marker='^', label='variance')
plt.title(title)
plt.xlabel('M')
plt.legend()
    plt.savefig(ROOT_PATH + 'acc_comparison_'+title+'.png')   # save before show(), otherwise the saved figure is blank
    plt.show()
num_members = [1, 2, 3, 4, 5, 6, 10]
plt.plot(num_members, ensemble_accuracies, label="Ensemble")
plt.title('Ensemble accuracy')
plt.xlabel('M')
plt.ylabel('Accuracy')
plt.savefig(ROOT_PATH + 'ensemble_accuracy_M1_to_M10.png')   # save before show(), otherwise the saved figure is blank
plt.show()
print('ensemble accs:', ensemble_accuracies)
print('ensemble nlls: ', ensemble_nll)
print('member accs:', acc_means)
print('member nlls: ', nll_means)
plot_results(num_members[:6], ensemble_accuracies[:6], acc_means[:6], acc_std[:6], 'Accuracy M=1-6')
plot_results(num_members[:6], ensemble_nll[:6], nll_means[:6], nll_std[:6], 'NLL M=1-6')
###Output
_____no_output_____
###Markdown
Disagreement of subnetworks
###Code
# Get the disagreement between the subnetworks
d_kl = []
d_disagreement = []
d_cosine_sim = []
ensemble_members = ['1','2','3','4','5','6', '10']
# for M = 10 the disagreement is super high! must be wrong..
for i in range(7):
model_path = model_paths[i]
a = summary_iterator(ROOT_PATH + model_path)
DIS = "test/diversity/disagreement"
KL = 'test/diversity/average_kl'
COS = 'test/diversity/cosine_similarity'
dis_list = []
kl_list = []
cos_list = []
for e in a:
for v in e.summary.value:
if v.tag == DIS:
value = tf.make_ndarray(v.tensor)
dis_list.append(value)
elif v.tag == KL:
value = tf.make_ndarray(v.tensor)
kl_list.append(value)
elif v.tag == COS:
value = tf.make_ndarray(v.tensor)
cos_list.append(value)
d_kl.append(kl_list[-1])
d_disagreement.append(dis_list[-1])
d_cosine_sim.append(cos_list[-1])
print('KL: ', d_kl[2])
print('Disagreement: ', d_disagreement[2])
print('Cosine similarity: ', d_cosine_sim)
plt.plot(ensemble_members[1:6], d_kl[1:6], label="KL")
plt.plot(ensemble_members[1:6], d_disagreement[1:6], label="Disagreement")
# plt.plot(d_cosine_sim, label="Cosine similarity")
plt.title('Measure of disagreement')
plt.xlabel('M')
plt.legend()
plt.savefig(ROOT_PATH + 'disagreement_and_KL_M1_toM6.png')   # save before show(), otherwise the saved figure is blank
plt.show()
###Output
_____no_output_____ |
quests/data-science-on-gcp-edition1_tf2/07_sparkml_and_bqml/logistic_regression.ipynb | ###Markdown
Logistic Regression using Spark ML Set up bucket
###Code
BUCKET='cs358-bucket' # CHANGE ME
import os
os.environ['BUCKET'] = BUCKET
# Create spark session
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark import SparkContext
sc = SparkContext('local', 'logistic')
spark = SparkSession \
.builder \
.appName("Logistic regression w/ Spark ML") \
.getOrCreate()
print(spark)
print(sc)
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.regression import LabeledPoint
###Output
_____no_output_____
###Markdown
Read dataset
###Code
traindays = spark.read \
.option("header", "true") \
.csv('gs://{}/flights/trainday.csv'.format(BUCKET))
traindays.createOrReplaceTempView('traindays')
spark.sql("SELECT * from traindays LIMIT 5").show()
from pyspark.sql.types import StringType, FloatType, StructType, StructField
header = 'FL_DATE,UNIQUE_CARRIER,AIRLINE_ID,CARRIER,FL_NUM,ORIGIN_AIRPORT_ID,ORIGIN_AIRPORT_SEQ_ID,ORIGIN_CITY_MARKET_ID,ORIGIN,DEST_AIRPORT_ID,DEST_AIRPORT_SEQ_ID,DEST_CITY_MARKET_ID,DEST,CRS_DEP_TIME,DEP_TIME,DEP_DELAY,TAXI_OUT,WHEELS_OFF,WHEELS_ON,TAXI_IN,CRS_ARR_TIME,ARR_TIME,ARR_DELAY,CANCELLED,CANCELLATION_CODE,DIVERTED,DISTANCE,DEP_AIRPORT_LAT,DEP_AIRPORT_LON,DEP_AIRPORT_TZOFFSET,ARR_AIRPORT_LAT,ARR_AIRPORT_LON,ARR_AIRPORT_TZOFFSET,EVENT,NOTIFY_TIME'
def get_structfield(colname):
if colname in ['ARR_DELAY', 'DEP_DELAY', 'DISTANCE', 'TAXI_OUT']:
return StructField(colname, FloatType(), True)
else:
return StructField(colname, StringType(), True)
schema = StructType([get_structfield(colname) for colname in header.split(',')])
inputs = 'gs://{}/flights/tzcorr/all_flights-00000-*'.format(BUCKET) # 1/30th; you may have to change this to find a shard that has training data
#inputs = 'gs://{}/flights/tzcorr/all_flights-*'.format(BUCKET) # FULL
flights = spark.read\
.schema(schema)\
.csv(inputs)
# this view can now be queried ...
flights.createOrReplaceTempView('flights')
###Output
_____no_output_____
###Markdown
Clean up
###Code
trainquery = """
SELECT
f.*
FROM flights f
JOIN traindays t
ON f.FL_DATE == t.FL_DATE
WHERE
t.is_train_day == 'True'
"""
traindata = spark.sql(trainquery)
print(traindata.head(2)) # if this is empty, try changing the shard you are using.
traindata.describe().show()
###Output
+-------+----------+--------------+------------------+-------+------------------+------------------+---------------------+---------------------+------+------------------+-------------------+-------------------+------+-------------------+-------------------+------------------+-----------------+-------------------+-------------------+------------------+-------------------+-------------------+------------------+--------------------+-----------------+--------------------+-----------------+-----------------+------------------+--------------------+------------------+------------------+--------------------+-----+-----------+
|summary| FL_DATE|UNIQUE_CARRIER| AIRLINE_ID|CARRIER| FL_NUM| ORIGIN_AIRPORT_ID|ORIGIN_AIRPORT_SEQ_ID|ORIGIN_CITY_MARKET_ID|ORIGIN| DEST_AIRPORT_ID|DEST_AIRPORT_SEQ_ID|DEST_CITY_MARKET_ID| DEST| CRS_DEP_TIME| DEP_TIME| DEP_DELAY| TAXI_OUT| WHEELS_OFF| WHEELS_ON| TAXI_IN| CRS_ARR_TIME| ARR_TIME| ARR_DELAY| CANCELLED|CANCELLATION_CODE| DIVERTED| DISTANCE| DEP_AIRPORT_LAT| DEP_AIRPORT_LON|DEP_AIRPORT_TZOFFSET| ARR_AIRPORT_LAT| ARR_AIRPORT_LON|ARR_AIRPORT_TZOFFSET|EVENT|NOTIFY_TIME|
+-------+----------+--------------+------------------+-------+------------------+------------------+---------------------+---------------------+------+------------------+-------------------+-------------------+------+-------------------+-------------------+------------------+-----------------+-------------------+-------------------+------------------+-------------------+-------------------+------------------+--------------------+-----------------+--------------------+-----------------+-----------------+------------------+--------------------+------------------+------------------+--------------------+-----+-----------+
| count| 109502| 109502| 109502| 109502| 109502| 109502| 109502| 109502|109502| 109502| 109502| 109502|109502| 109502| 108731| 108731| 108690| 108690| 108632| 108632| 109502| 108632| 108357| 109502| 832| 109502| 109502| 109502| 109502| 109502| 109502| 109502| 109502| 0| 0|
| mean| null| null|19954.338824861647| null|2112.0671494584576|12673.419992328907| 1267344.7629814981| 31694.690480539168| null|12666.298095011964| 1266632.5732498036| 31690.008082044165| null| null| null|10.269656307768713|15.67514030729598| null| null|7.2230189999263565| null| null| 5.393403287281855|0.007598034739091523| null|0.002858395280451...|827.3461854577998|36.61749564165272|-95.34463998971351| -18603.638289711602|36.615216781709016|-95.34129966514209| -18591.04673887235| null| null|
| stddev| null| null| 405.7079879970824| null| 1718.929182142528|1525.1564464953865| 152515.3281104015| 1275.8107315629143| null|1523.6634323619962| 152366.02784252557| 1274.310123188187| null| null| null| 36.33808613526115|9.041035791097107| null| null| 5.026854743565597| null| null|38.619522453448084| 0.08683532384804916| null| 0.05338774097192427|608.1283282825307|5.990320091334113|18.184969050578424| 5036.738509262714| 5.989349480318269|18.173736914931528| 5052.552764921264| null| null|
| min|2015-05-16| AA| 19393| AA| 1| 10135| 1013503| 30070| ABE| 10135| 1013503| 30070| ABE|2015-05-16T09:40:00|2015-05-16T09:37:00| -41.0| 1.0|2015-05-16T09:46:00|2015-05-16T10:38:00| 1.00|2015-05-16T11:00:00|2015-05-16T10:44:00| -69.0| 0.00| A| 0.00| 31.0| -14.33000000| -100.49638889| -14400.0| -14.33000000| -100.49638889| -14400.0| null| null|
| max|2015-05-24| WN| 21171| WN| 999| 16218| 1621801| 35991| YUM| 16218| 1621801| 35991| YUM|2015-05-25T09:15:00|2015-05-25T09:09:00| 1492.0| 160.0|2015-05-25T09:23:00|2015-05-25T14:54:00| 9.00|2015-05-26T03:58:00|2015-05-25T14:58:00| 1480.0| 1.00| C| 1.00| 4983.0| 71.28472222| 144.79722222| 36000.0| 71.28472222| 144.79722222| 36000.0| null| null|
+-------+----------+--------------+------------------+-------+------------------+------------------+---------------------+---------------------+------+------------------+-------------------+-------------------+------+-------------------+-------------------+------------------+-----------------+-------------------+-------------------+------------------+-------------------+-------------------+------------------+--------------------+-----------------+--------------------+-----------------+-----------------+------------------+--------------------+------------------+------------------+--------------------+-----+-----------+
###Markdown
Note that the counts for the various columns are all different; We have to remove NULLs in the delay variables (these correspond to canceled or diverted flights). Logistic regression
###Code
trainquery = """
SELECT
DEP_DELAY, TAXI_OUT, ARR_DELAY, DISTANCE
FROM flights f
JOIN traindays t
ON f.FL_DATE == t.FL_DATE
WHERE
t.is_train_day == 'True' AND
f.dep_delay IS NOT NULL AND
f.arr_delay IS NOT NULL
"""
traindata = spark.sql(trainquery)
traindata.describe().show()
trainquery = """
SELECT
DEP_DELAY, TAXI_OUT, ARR_DELAY, DISTANCE
FROM flights f
JOIN traindays t
ON f.FL_DATE == t.FL_DATE
WHERE
t.is_train_day == 'True' AND
f.CANCELLED == '0.00' AND
f.DIVERTED == '0.00'
"""
traindata = spark.sql(trainquery)
traindata.describe().show()
def to_example(fields):
return LabeledPoint(\
float(fields['ARR_DELAY'] < 15), #ontime? \
[ \
fields['DEP_DELAY'], \
fields['TAXI_OUT'], \
fields['DISTANCE'], \
])
examples = traindata.rdd.map(to_example)
lrmodel = LogisticRegressionWithLBFGS.train(examples, intercept=True)
print(lrmodel.weights,lrmodel.intercept)
print(lrmodel.predict([6.0,12.0,594.0]))
print(lrmodel.predict([36.0,12.0,594.0]))
lrmodel.clearThreshold()
print(lrmodel.predict([6.0,12.0,594.0]))
print(lrmodel.predict([36.0,12.0,594.0]))
lrmodel.setThreshold(0.7) # cancel if prob-of-ontime < 0.7
print(lrmodel.predict([6.0,12.0,594.0]))
print(lrmodel.predict([36.0,12.0,594.0]))
###Output
1
0
###Markdown
Predict with the model First save the model
###Code
!gsutil -m rm -r gs://$BUCKET/flights/sparkmloutput/model
MODEL_FILE='gs://' + BUCKET + '/flights/sparkmloutput/model'
lrmodel.save(sc, MODEL_FILE)
print('{} saved'.format(MODEL_FILE))
lrmodel = 0
print(lrmodel)
###Output
0
###Markdown
Now retrieve the model
###Code
from pyspark.mllib.classification import LogisticRegressionModel
lrmodel = LogisticRegressionModel.load(sc, MODEL_FILE)
lrmodel.setThreshold(0.7)
print(lrmodel.predict([36.0,12.0,594.0]))
print(lrmodel.predict([8.0,4.0,594.0]))
###Output
1
###Markdown
Examine the model behavior For dep_delay=20 and taxiout=10, how does the distance affect prediction?
###Code
lrmodel.clearThreshold() # to make the model produce probabilities
print(lrmodel.predict([20, 10, 500]))
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
dist = np.arange(10, 2000, 10)
prob = [lrmodel.predict([20, 10, d]) for d in dist]
sns.set_style("whitegrid")
ax = plt.plot(dist, prob)
plt.xlabel('distance (miles)')
plt.ylabel('probability of ontime arrival')
delay = np.arange(-20, 60, 1)
prob = [lrmodel.predict([d, 10, 500]) for d in delay]
ax = plt.plot(delay, prob)
plt.xlabel('departure delay (minutes)')
plt.ylabel('probability of ontime arrival')
###Output
_____no_output_____
###Markdown
Evaluate model Evaluate on the test data
###Code
inputs = 'gs://{}/flights/tzcorr/all_flights-00001-*'.format(BUCKET) # you may have to change this to find a shard that has test data
flights = spark.read\
.schema(schema)\
.csv(inputs)
flights.createOrReplaceTempView('flights')
testquery = trainquery.replace("t.is_train_day == 'True'","t.is_train_day == 'False'")
print(testquery)
testdata = spark.sql(testquery)
examples = testdata.rdd.map(to_example)
testdata.describe().show() # if this is empty, change the shard you are using
def eval(labelpred):
'''
data = (label, pred)
data[0] = label
data[1] = pred
'''
cancel = labelpred.filter(lambda data: data[1] < 0.7)
nocancel = labelpred.filter(lambda data: data[1] >= 0.7)
corr_cancel = cancel.filter(lambda data: data[0] == int(data[1] >= 0.7)).count()
corr_nocancel = nocancel.filter(lambda data: data[0] == int(data[1] >= 0.7)).count()
cancel_denom = cancel.count()
nocancel_denom = nocancel.count()
if cancel_denom == 0:
cancel_denom = 1
if nocancel_denom == 0:
nocancel_denom = 1
return {'total_cancel': cancel.count(), \
'correct_cancel': float(corr_cancel)/cancel_denom, \
'total_noncancel': nocancel.count(), \
'correct_noncancel': float(corr_nocancel)/nocancel_denom \
}
# Evaluate model
lrmodel.clearThreshold() # so it returns probabilities
labelpred = examples.map(lambda p: (p.label, lrmodel.predict(p.features)))
print('All flights:')
print(eval(labelpred))
# keep only those examples near the decision threshold
print('Flights near decision threshold:')
labelpred = labelpred.filter(lambda data: data[1] > 0.65 and data[1] < 0.75)
print(eval(labelpred))
###Output
All flights:
{'total_cancel': 548, 'correct_cancel': 0.8175182481751825, 'total_noncancel': 4321, 'correct_noncancel': 0.9706086554038417}
Flights near decision threshold:
{'total_cancel': 54, 'correct_cancel': 0.3148148148148148, 'total_noncancel': 49, 'correct_noncancel': 0.6122448979591837}
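###Markdown
The same label/probability pairs can also be scored with Spark's built-in evaluator; BinaryClassificationMetrics expects (score, label) tuples, so the pair is swapped here (a short illustrative check).
###Code
from pyspark.mllib.evaluation import BinaryClassificationMetrics
lrmodel.clearThreshold()   # make sure predict() returns probabilities
score_and_label = examples.map(lambda p: (float(lrmodel.predict(p.features)), p.label))
metrics = BinaryClassificationMetrics(score_and_label)
print('Area under ROC:', metrics.areaUnderROC)
###Output
_____no_output_____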
|
Python/Arrays in Python.ipynb | ###Markdown
09/10/20 **Write a program to read a linear list of items and store it in an array.** **1. Copy the contents of one array to another array.**
###Code
arr = [1, 2, 3, 4, 3, 7, 6, 7, 8, 9]
arr.sort(reverse = False)
arr2 = arr.copy()
arr
print(arr)
print(arr2)
print(arr == arr2)
###Output
True
###Markdown
**2. Copy the contents from one array to another array in reverse order.**
###Code
arr
arr3 = arr.copy()[::-1]
print(arr3)
###Output
[9, 8, 7, 6, 5, 4, 3, 2, 1]
###Markdown
**3. Delete duplicate elements from an array.**
###Code
new_arr = [11, 66, 3, 2, 9, 1, 4, 4, 7, 7]
del_duplicate = set(new_arr)
del_duplicate
del_duplicate = list(del_duplicate)
del_duplicate
###Output
_____no_output_____
###Markdown
16/10/20
###Code
import array as a
arr = a.array("i", [1, 2, 3, 4, 5])
for element in arr:
print("arr =",arr.index(element), element)
for index, element in enumerate(arr):
    print("arr =", index, element)
import array as arr
a = arr.array('i', [1, 2, 3, 4, 5])
b = arr.array('i', [0, 9, 8, 7, 6])
print(a)
print(type(a))
a.insert(4, 100)
print(a)
a.append(500)
print(a)
for i in a:
print(i)
###Output
1
2
3
4
100
5
500
###Markdown
1. Copy the contents from one array to another array
###Code
source = arr.array('i', [10, 20, 30, 40])
dest = arr.array('i', [])
for index in range(0, len(source)):
dest.insert(index, source[index])
for i in dest:
print(i, end=" ")
print(type(dest))
###Output
10 20 30 40 <class 'array.array'>
###Markdown
2. Copy the contents from one array to another array in reverse order
###Code
dest = arr.array('i', [])
for i in range(len(source)-1, -1, -1):
    dest.append(source[i])  # copy the elements in reverse order
for value in dest:
    print(value, end=" ")
###Output
40 30 20 10
###Markdown
3. Delete the duplicate elements from an array
###Code
arr1 = arr.array('i', [1, 3, 6, 6, 8, 1, 9, 4, 3, 0, 4])
index1 = 0
while index1 < len(arr1):
    index2 = index1 + 1
    while index2 < len(arr1):
        if arr1[index1] == arr1[index2]:
            arr1.pop(index2)  # remove the later duplicate, keep the first occurrence
        else:
            index2 += 1
    index1 += 1
for x in arr1:
    print(x, end=" ")
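# A shorter, order-preserving alternative (assumes Python 3.7+ insertion-ordered dicts):
# deduped = arr.array('i', dict.fromkeys(arr1))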
###Output
1 3 6 8 9 4 0 |
dmu28/dmu28_GAMA-12/CIGALE_catalogue_preparation.ipynb | ###Markdown
This notebook prepares the catalogues that will be analysed by CIGALE for SED fitting and physical parameter estimation.
###Code
import numpy as np
import os
os.environ['LOG_LEVEL'] = 'INFO'
from astropy.table import Table
from herschelhelp.filters import correct_galactic_extinction
from herschelhelp.external import convert_table_for_cigale
SUFFIX = '20180218'
master_catalogue = Table.read("../../dmu32/dmu32_GAMA-12/data/GAMA-12_{}_cigale.fits".format(SUFFIX))
len(master_catalogue)
###Output
_____no_output_____
###Markdown
Best sources. Define a good far-IR measurement as: an existing flux in the band; the flag from XID+ must not be set; and the signal-to-noise ratio must be at least 2.
###Code
good = {}
for band in [ 'spire_250', 'spire_350', 'spire_500']:
good[band] = (~np.isnan(master_catalogue['f_{}'.format(band)]) &
~master_catalogue['flag_{}'.format(band)])
good[band][good[band]] &= (master_catalogue[good[band]]['f_{}'.format(band)] /
master_catalogue[good[band]]['ferr_{}'.format(band)] >= 2)
###Output
_____no_output_____
###Markdown
We will keep only sources with at least 2 good far-IR measurements (we may actually use fewer sources, as not all of them may have a redshift).
###Code
combined_good = np.sum(list(good.values()), axis=0) >= 2
print("Number of good sources: {}".format(np.sum(combined_good)))
# Only sources with at least two optical and at least two near infrared detections
optnir = ((master_catalogue['flag_optnir_det'] == 3)
| (master_catalogue['flag_optnir_det'] == 7))
###Output
_____no_output_____
###Markdown
Ldust catalogue for CIGALE
###Code
#best_catalogue = master_catalogue[combined_good].copy()
best_catalogue = master_catalogue[optnir].copy()
# Correction for galactic extinction
best_catalogue = correct_galactic_extinction(best_catalogue, inplace=True)
# Conversion to CIGALE format
best_catalogue = convert_table_for_cigale(best_catalogue, inplace=True, remove_zerofluxes=True)
###Output
INFO:herschelhelp.external:For 7118 sources, the band omegacam_u should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 6145 sources, the band omegacam_g should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 6131 sources, the band suprime_g should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 6140 sources, the band decam_g should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 6139 sources, the band gpc1_g should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 5728 sources, the band suprime_r should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 5697 sources, the band gpc1_r should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 5661 sources, the band omegacam_r should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 5570 sources, the band decam_r should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 54 sources, the band gpc1_i should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 145 sources, the band omegacam_i should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
INFO:herschelhelp.external:For 52 sources, the band suprime_i should not be used because it overlaps or is below the Lyman limit at the redshift of these sources. These fluxes were set to NaN.
###Markdown
Band selection. We want to use only one filter for similar bands. We define an order of preference and set to NaN the flux in the less preferred bands when a more preferred band is available. Some bands may have a 0 flux; we set those values to NaN.
###Code
u_bands = [ "omegacam_u"]
g_bands = ["suprime_g", "omegacam_g", "decam_g", "gpc1_g"]
r_bands = ["suprime_r", "omegacam_r", "decam_r", "gpc1_r"]
i_bands = ["suprime_i", "omegacam_i", "gpc1_i"]
z_bands = ["suprime_z", "decam_z", "gpc1_z"]
y_bands = ["suprime_y", "gpc1_y"]
def remove_unneeded_fluxes(list_of_bands):
for band_idx, band in enumerate(list_of_bands[:-1]):
mask = ~np.isnan(best_catalogue[band])
for lower_band in list_of_bands[band_idx+1:]:
best_catalogue[lower_band][mask] = np.nan
best_catalogue["{}_err".format(lower_band)][mask] = np.nan
remove_unneeded_fluxes(g_bands)
remove_unneeded_fluxes(u_bands)
remove_unneeded_fluxes(r_bands)
remove_unneeded_fluxes(i_bands)
remove_unneeded_fluxes(z_bands)
remove_unneeded_fluxes(y_bands)
best_catalogue.write("data_tmp/GAMA-12_cigale_optnir_extcor_{}.fits".format(SUFFIX), overwrite=True)
###Output
_____no_output_____
###Markdown
Main catalogue for CIGALE
###Code
#best_catalogue = master_catalogue[combined_good].copy()
best_catalogue = master_catalogue[combined_good].copy()
# Correction for galactic extinction
best_catalogue = correct_galactic_extinction(best_catalogue, inplace=True)
# Conversion to CIGALE format
best_catalogue = convert_table_for_cigale(best_catalogue, inplace=True, remove_zerofluxes=True)
remove_unneeded_fluxes(g_bands)
remove_unneeded_fluxes(u_bands)
remove_unneeded_fluxes(r_bands)
remove_unneeded_fluxes(i_bands)
remove_unneeded_fluxes(z_bands)
remove_unneeded_fluxes(y_bands)
best_catalogue.write("data_tmp/GAMA-12_cigale_best_extcor_{}.fits".format(SUFFIX), overwrite=True)
###Output
_____no_output_____
###Markdown
Catalogue using spectroscopic redshift
###Code
best_catalogue = master_catalogue[optnir].copy()
best_catalogue.remove_column("redshift")
best_catalogue["zspec"].name = "redshift"
best_catalogue = best_catalogue[~np.isnan(best_catalogue["redshift"])]
print("Number of sources with z-spec: {}".format(len(best_catalogue)))
# Correction for galactic extinction
best_catalogue = correct_galactic_extinction(best_catalogue, inplace=True)
# Conversion to CIGALE format
os.environ['LOG_LEVEL'] = 'INFO'
best_catalogue = convert_table_for_cigale(best_catalogue, inplace=True, remove_zerofluxes=True)
remove_unneeded_fluxes(g_bands)
remove_unneeded_fluxes(u_bands)
remove_unneeded_fluxes(r_bands)
remove_unneeded_fluxes(i_bands)
remove_unneeded_fluxes(z_bands)
remove_unneeded_fluxes(y_bands)
best_catalogue.write("data_tmp/GAMA-12_cigale_optnir_extcor_zspec_{}.fits".format(SUFFIX), overwrite=True)
###Output
_____no_output_____ |
tirgulim/tirgul3/tirgul3.ipynb | ###Markdown
Add nice equations using LaTeX syntax in Markdown. Sphere or quadratic equations: $x^2 + y^2 = z^2$, $x_{1,2} = \frac{-b \pm \sqrt{b^2-4ac}}{2a}$
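For reference, a minimal example of the raw LaTeX source you would type in a Markdown cell for the quadratic formula above:
$$
x_{1,2} = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}
$$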
###Code
import pandas as pd
data = pd.read_csv('titanic_short.csv')
data[10:19]
# not null
filteredData = data[pd.notnull(data['age'])]
filteredData[10:19]
# Mean
dataAge = data['age']
dataAge.mean()
dataAge.min()
dataAge.max()
dataAge.median()
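# note: if the expressions above share a single cell, only the last one (median) is displayed;
# wrap the others in print() to see all of the values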
## Numpy
# Matrices and much more, this is where the fun begins...
# [Numpy official docs](https://numpy.org/doc/stable/reference/index.html)
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
print(arr*arr)
#### vector multiplication (AKA dot product)
print("arr.dot(arr)\t=",arr.dot(arr))
# or by
print("arr@arr\t\t=",arr@arr)
# create ones matrix of specific size:
np.ones((2,3))
# self exercise: create a zeros matrix of a specific size:
np.zeros((2,3))
# The I matrix
d = np.eye(3)
print(d)
# self exercise: create the following 3X3 diagonal matrix:
#   [1., 0., 0.],
#   [0., 2., 0.],
#   [0., 0., 3.]
[1,2,3]*np.eye(3)
# random
e = np.random.random((2,4))
print(e)
# append
arr1 = np.array([1, 2, 3, 4, 5])
arr2 = np.array([11, 12, 13, 14, 15])
arr3 = np.append(arr1,arr2)
print(arr3)
# we can add arrays of same length
print(arr1+arr2)
# However arrays must have same length
arr3 = np.array([1,2])
print(arr1+arr3) # ERROR
# self exercise: assume a hypothetical case where, for each student, you wish to calculate the average of the highest 5 grades out of 6 assignments
# step 1. generate a 10X6 matrix of random integers between 70 and 100 to represent the students' grades
# step 2. calculate the mean of the top 5 grades for each student
numCols = 6
grades = np.random.randint(70,100,size=(10,numCols))
print(grades)
top5Avg = (grades.sum(axis=1)-grades.min(axis=1)) / (numCols-1)
top5Avg = top5Avg.reshape(len(top5Avg),1)
print(top5Avg)
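# An equivalent formulation (not run here): sort each row ascending, drop the lowest
# grade and average the remaining five
# top5Avg_alt = np.sort(grades, axis=1)[:, 1:].mean(axis=1, keepdims=True)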
###Output
[[84.2]
[89.4]
[90.6]
[82. ]
[90. ]
[83.2]
[86. ]
[86.8]
[92. ]
[93.4]]
|
tutorials/3 QComponent Designer/3.1 Creating a QComponent - Basic.ipynb | ###Markdown
Creating a QComponent - Basic Now that you have become familiar with Qiskit Metal and feel comfortable using the available aspects and functionality, the next step is learning how to make your own circuit component in Metal.We will start off by going over the sample `my_qcomponent` in `qiskit_metal>qlibrary>user_components` as a basis, which we will walk through below. Reviewing my_qcomponent
###Code
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
###Output
_____no_output_____
###Markdown
Always be sure to include the proper copyright and license information, and give yourself credit for any components you create!
###Code
from qiskit_metal import draw, Dict
from qiskit_metal.toolbox_metal import math_and_overrides
from qiskit_metal.qlibrary.base.base import QComponent
###Output
_____no_output_____
###Markdown
Import any classes or functions you will be wanting to use when creating your component. The geometries in Metal are shapely objects, which can be readily generated and manipulated through functions in `draw`. Mathematical functions can be accessed via `math_and_overrides`. Any imports that are part of the Metal requirement list can also be used.The key import is what the parent class of your new component will be. For this example, we are using `QComponent` as the parent for `MyQComponent`, which is the base component class in Metal and contains a great deal of automated functionality. All component hierarchy must have QComponent as the top base class.
###Code
dir(QComponent)
###Output
_____no_output_____
###Markdown
`MyQComponent` creates a simple rectangle of a variable width, height, position and rotation.
###Code
class MyQComponent(QComponent):
"""
Use this class as a template for your components - have fun
Description:
Options:
"""
    # Edit these to define your own template options for creation
# Default drawing options
default_options = Dict(width='500um',
height='300um',
pos_x='0um',
pos_y='0um',
orientation='0',
layer='1')
"""Default drawing options"""
# Name prefix of component, if user doesn't provide name
component_metadata = Dict(short_name='component',
_qgeometry_table_poly='True')
"""Component metadata"""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
# EDIT HERE - Replace the following with your code
# Create some raw geometry
# Use autocompletion for the `draw.` module (use tab key)
rect = draw.rectangle(p.width, p.height, p.pos_x, p.pos_y)
rect = draw.rotate(rect, p.orientation)
rect = draw.translate(rect,p.pos_x,p.pos_y)
geom = {'my_polygon': rect}
self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
###Output
_____no_output_____
###Markdown
The docstring at the start of the class should clearly explain what the component is, what the parameterized values of the component refer to (in a sense the 'inputs'), and any other information you believe would be relevant for a user making use of your component.`default_options` is a dictionary to be included in the class of all components. The keywords, of type string, in the default dictionary are the parameters the front-end user is allowed to modify. The keywords in the above indicate that the width and height can be modified via the components options, but have a default value of 500um and 300 um respectively. Further, the position and rotation can also be changed. The `layer` is an expected keyword in a default dictionary, as it is used by renderers to help determine further properties of the `qgeometry` of the component when rendered, eg. GDS QRenderer uses the layer to define which layer the qgeometry is on.`component_metadata` is a dictionary which contains some important pieces of information, such as the default/shorthand name of the component (`short_name`), or indicating what types of qgeometry tables are included in this component, eg. `_qgeometry_table_poly='True'`.The `component_metadata` must contain the flags for each type of qgeometry table being used via `add_qgeometry` methods at the end of the `make()` function, in order for renderer options to be updated correctly. Currently the options are:`_qgeometry_table_path='True'``_qgeometry_table_poly='True'``_qgeometry_table_junction='True'`The `make()` method is where the actual generation of the component's layout is written. The make() method Although not required, a good first line is `p = self.parse_options()` to cut down on your amount of typing. The `parse_options()` translates the keywords in `self.options` from strings into their appropriate value with respect to the prefix included, e.g.,`p.width`=> "500um" -> 0.0005Following this, all code generating the shapely geometries that are to represent your circuit layout should be written, via the `draw` module or even written in directly. It is a good practice to play around with the geometries in a jupyter notebook first for quick visual feedback, such as:
###Code
draw.rectangle(1,2,0,0)
draw.rotate(draw.rectangle(1,2,0,0), 45)
###Output
_____no_output_____
###Markdown
Or for a little more complexity:
###Code
face = draw.shapely.geometry.Point(0, 0).buffer(1)
eye = draw.shapely.geometry.Point(0, 0).buffer(0.2)
eye_l = draw.translate(eye, -0.4, 0.4)
eye_r = draw.translate(eye, 0.4, 0.4)
smile = draw.shapely.geometry.Point(0, 0).buffer(0.8)
cut_sq = draw.shapely.geometry.box(-1, -0.3, 1, 1)
smile = draw.subtract(smile, cut_sq)
face = draw.subtract(face, smile)
face = draw.subtract(face, eye_r)
face = draw.subtract(face, eye_l)
face
###Output
_____no_output_____
###Markdown
Once you are happy with your geometries, and have them properly parameterized to allow the Front End User as much customization of your component as you wish, it is time to convert the geometries into Metal `qgeometries` via `add_qgeometry` add_qgeometry
###Code
import qiskit_metal as metal
?metal.qlibrary.base.QComponent.add_qgeometry
###Output
_____no_output_____
###Markdown
`add_qgeometry` is the method by which the shapely geometries you have drawn are converted into Metal qgeometries, the format which allows for the easy translatability between different renderers and the variable representation of quantum elements such as Josephson junctions.Currently there are three kinds of qgeometries, `path`, `poly` and `junction`.`path` -> shapely LineString`poly` -> any other shapely geometry (currently)`junction` -> shapely LineStringBoth `path` and `junction` also take an input of `width`, which is added to the qgeometry table to inform renderers of, as an example, how much to buffer the LineString of a cpw transmission line to turn it into a proper 2D sheet.`subtract` indicates this qgeometry is to be subtracted from the ground plane of that layer. A ground plane is automatically included for that layer at the dimension of the chip size if any qgeometry has `subtract = True`. As an example, a cpw transmission line's dielectric gap could be drawn by using the same LineString as previously, having the `width = cpw_width + 2*cpw_gap` and setting `subtract = True`.`fillet` is an option that informs a renderer that the vertices of this qgeometry are to be filleted by that value (e.g. `fillet = "100um"`). add_pin
###Code
?metal.qlibrary.base.QComponent.add_pin
###Output
_____no_output_____
###Markdown
The final step for creating your QComponent is adding of pins. This is not necessarily a requirement for your component, but to have full functionality with Metal and be able to make use of auto-routing components with it, you will want to indicate where the "ports" of your component are.Following from the above docstring, pins can be added from two coordinates indicating either an orthogonal vector to the port plane of your component, or a tangent to the port plane of your component. For the former, you want the vector to be pointing to the middle point of your intended plane, with the latter being across the length of your intended plane (as indicated in the above figure). The `width` should be the size of the plane (say, in the case of a CPW transmission line, the trace width), with the `gap` following the same logic (though this value can be ignored for any non-coplanar structure). Example Below is a simple QComponent that implements everything we have reviewed.
###Code
from qiskit_metal import draw, Dict
from qiskit_metal.toolbox_metal import math_and_overrides
from qiskit_metal.qlibrary.base.base import QComponent
class MySimpleGapCapacitor(QComponent):
"""
Inherits 'QComponent' class.
Description:
A simple CPW style gap capacitor, with endcap islands each coupled to their own
cpw transmission line that ends in a pin.
Options:
* cpw_width: width of the cpw trace of the transmission line
* cpw_gap: dielectric gap of the cpw transmission line
* cap_width: width of the gap capacitor (size of the charge islands)
* cap_gap: dielectric space between the two islands
* pos_x/_y: position of the capacitor on chip
* orientation: 0-> is parallel to x-axis, with rotation (in degrees) counterclockwise.
* layer: the layer number for the layout
"""
    # Edit these to define your own template options for creation
# Default drawing options
default_options = Dict(cpw_width='15um',
cpw_gap='9um',
cap_width='35um',
cap_gap='3um',
pos_x='0um',
pos_y='0um',
orientation='0',
layer='1')
"""Default drawing options"""
# Name prefix of component, if user doesn't provide name
component_metadata = Dict(short_name='component',
_qgeometry_table_poly='True',
_qgeometry_table_path='True')
"""Component metadata"""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
pad = draw.rectangle(p.cpw_width, p.cap_width, 0, 0)
pad_left = draw.translate(pad,-(p.cpw_width+p.cap_gap)/2,0)
pad_right = draw.translate(pad,(p.cpw_width+p.cap_gap)/2,0)
pad_etch = draw.rectangle(2*p.cpw_gap+2*p.cpw_width+p.cap_gap,2*p.cpw_gap+p.cap_width)
cpw_left = draw.shapely.geometry.LineString([[-(p.cpw_width+p.cap_gap/2),0],[-(p.cpw_width*3 +p.cap_gap/2),0]])
cpw_right = draw.shapely.geometry.LineString([[(p.cpw_width+p.cap_gap/2),0],[(p.cpw_width*3 +p.cap_gap/2),0]])
geom_list = [pad_left,pad_right,cpw_left,cpw_right,pad_etch]
geom_list = draw.rotate(geom_list,p.orientation)
geom_list = draw.translate(geom_list,p.pos_x,p.pos_y)
[pad_left,pad_right,cpw_left,cpw_right,pad_etch] = geom_list
self.add_qgeometry('path', {'cpw_left':cpw_left, 'cpw_right':cpw_right}, layer=p.layer, width = p.cpw_width)
self.add_qgeometry('path', {'cpw_left_etch':cpw_left, 'cpw_right_etch':cpw_right}, layer=p.layer, width = p.cpw_width+2*p.cpw_gap, subtract=True)
self.add_qgeometry('poly', {'pad_left':pad_left, 'pad_right':pad_right}, layer=p.layer)
self.add_qgeometry('poly', {'pad_etch':pad_etch}, layer=p.layer, subtract=True)
self.add_pin('cap_left', cpw_left.coords, width = p.cpw_width, gap = p.cpw_gap, input_as_norm=True)
self.add_pin('cap_right', cpw_right.coords, width = p.cpw_width, gap = p.cpw_gap, input_as_norm=True)
design = metal.designs.DesignPlanar()
gui = metal.MetalGUI(design)
my_cap = MySimpleGapCapacitor(design,'my_cap')
gui.rebuild()
###Output
_____no_output_____
###Markdown
You should now see *my_cap* in the Metal gui. One can work on the layout of the component through these cells by changing the above class, such as how the parameterized values are used. By enabling `overwrite_enabled` (which should normally be kept at False), the code can quickly be iterated through until you, the component designer, are happy with the qcomponent you have just created.
###Code
design.overwrite_enabled = True
###Output
_____no_output_____
###Markdown
We will delve into more complex QComponent topics in the next notebook, `Creating a QComponent - Advanced` Use the command below to close Metal GUI.
###Code
gui.main_window.close()
###Output
_____no_output_____ |
warez/ml-training/recommender.ipynb | ###Markdown
Convert Customer and Product Ids to integers
###Code
# now we have the dataset like the actual dataset with strings in customerId and productId
# replace each with integers
customerIds = dataset['customerId'].unique().tolist()
customerMapping = dict( zip(customerIds, range(len(customerIds))) )
dataset.replace({'customerId': customerMapping},inplace=True)
productIds = dataset['productId'].unique().tolist()
productMapping = dict( zip(productIds, range(len(productIds))) )
dataset.replace({'productId': productMapping},inplace=True)
customer_idxs = np.array(dataset.customerId, dtype = np.int)
product_idxs = np.array(dataset.productId, dtype = np.int)
ratings = np.array(dataset.scaled_purchase_freq)
###Output
_____no_output_____
###Markdown
Data Pre-processing
###Code
n_customers = int(dataset['customerId'].drop_duplicates().max()) + 1
n_products = int(dataset['productId'].drop_duplicates().max()) + 1
n_factors = 50
input_shape = (1,)
print(n_customers)
print(n_products)
###Output
1895
2
###Markdown
Tensorflow Session
###Code
# create TF session and set it in Keras
sess = tf.Session()
K.set_session(sess)
K.set_learning_phase(1)
###Output
_____no_output_____
###Markdown
The Model
###Code
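# The model below learns one embedding per customer and one per product; the two embedding
# vectors are concatenated and passed through a small dense network to predict the
# (scaled) purchase-frequency rating.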
class DeepCollaborativeFiltering(Model):
def __init__(self, n_customers, n_products, n_factors, p_dropout = 0.2):
x1 = Input(shape = (1,), name="user")
P = Embedding(n_customers, n_factors, input_length = 1)(x1)
P = Reshape((n_factors,))(P)
x2 = Input(shape = (1,), name="product")
Q = Embedding(n_products, n_factors, input_length = 1)(x2)
Q = Reshape((n_factors,))(Q)
x = concatenate([P, Q], axis=1)
x = Dropout(p_dropout)(x)
x = Dense(n_factors)(x)
x = Activation('relu')(x)
x = Dropout(p_dropout)(x)
output = Dense(1)(x)
super(DeepCollaborativeFiltering, self).__init__([x1, x2], output)
def rate(self, customer_idxs, product_idxs):
if (type(customer_idxs) == int and type(product_idxs) == int):
return self.predict([np.array(customer_idxs).reshape((1,)), np.array(product_idxs).reshape((1,))])
if (type(customer_idxs) == str and type(product_idxs) == str):
return self.predict([np.array(customerMapping[customer_idxs]).reshape((1,)), np.array(productMapping[product_idxs]).reshape((1,))])
return self.predict([
np.array([customerMapping[customer_idx] for customer_idx in customer_idxs]),
np.array([productMapping[product_idx] for product_idx in product_idxs])
])
###Output
_____no_output_____
###Markdown
Hyperparameters
###Code
bs = 64
val_per = 0.25
epochs = 1
model = DeepCollaborativeFiltering(n_customers, n_products, n_factors)
model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
user (InputLayer) (None, 1) 0
__________________________________________________________________________________________________
product (InputLayer) (None, 1) 0
__________________________________________________________________________________________________
embedding_1 (Embedding) (None, 1, 50) 94750 user[0][0]
__________________________________________________________________________________________________
embedding_2 (Embedding) (None, 1, 50) 100 product[0][0]
__________________________________________________________________________________________________
reshape_1 (Reshape) (None, 50) 0 embedding_1[0][0]
__________________________________________________________________________________________________
reshape_2 (Reshape) (None, 50) 0 embedding_2[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 100) 0 reshape_1[0][0]
reshape_2[0][0]
__________________________________________________________________________________________________
dropout_1 (Dropout) (None, 100) 0 concatenate_1[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 50) 5050 dropout_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 50) 0 dense_1[0][0]
__________________________________________________________________________________________________
dropout_2 (Dropout) (None, 50) 0 activation_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 1) 51 dropout_2[0][0]
==================================================================================================
Total params: 99,951
Trainable params: 99,951
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Training
###Code
model.compile(optimizer = 'adam', loss = mean_squared_logarithmic_error)
model.fit(x = [customer_idxs, product_idxs], y = ratings, batch_size = bs, epochs = epochs, validation_split = val_per)
model.rate(9, 0)
model.rate('e9a87a97-38df-4858-86df-9b02defccd5c', '9910eb1b-9d99-4025-badc-13ef455bb49a')
model.rate(25, 0)
model.output[1].name
###Output
_____no_output_____
###Markdown
Save Tensorflow Model
###Code
print('Done training!')
print ("input 0", model.input[0].name)
print ("input 1", model.input[1].name)
print ("input ", model.input)
print ("output 0", model.output[0].name)
print ("output 1", model.output[1].name)
print ("output", model.output)
# create the saver
# Saver op to save and restore all the variables
saver = tf.train.Saver()
# Save produced model
model_path = "/Users/debasishghosh/models/"
model_name = "ProductRecommender"
save_path = saver.save(sess, model_path+model_name+".ckpt")
print ("Saved model at ", save_path)
graph_path = tf.train.write_graph(sess.graph_def, model_path, model_name+".pb", as_text=True)
print ("Saved graph at :", graph_path)
###Output
Saved model at /Users/debasishghosh/models/ProductRecommender.ckpt
Saved graph at : /Users/debasishghosh/models/ProductRecommender.pb
###Markdown
Freeze Computation Graph
###Code
# Now freeze the graph (put variables into graph)
input_saver_def_path = ""
input_binary = False
output_node_names = "dense_2/BiasAdd" # Model result node
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_frozen_graph_name = model_path + 'frozen_' + model_name + '.pb'
clear_devices = True
freeze_graph.freeze_graph(graph_path, input_saver_def_path,
input_binary, save_path, output_node_names,
restore_op_name, filename_tensor_name,
output_frozen_graph_name, clear_devices, "")
print ("Model is frozen")
###Output
WARNING:tensorflow:From /Users/debasishghosh/anaconda3/lib/python3.6/site-packages/tensorflow/python/tools/freeze_graph.py:249: FastGFile.__init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.gfile.GFile.
INFO:tensorflow:Restoring parameters from /Users/debasishghosh/models/ProductRecommender.ckpt
INFO:tensorflow:Froze 6 variables.
INFO:tensorflow:Converted 6 variables to const ops.
Model is frozen
###Markdown
Optimize and Save Optimized Graph
###Code
# optimizing graph
input_graph_def = tf.GraphDef()
with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
data = f.read()
input_graph_def.ParseFromString(data)
output_graph_def = optimize_for_inference_lib.optimize_for_inference(
input_graph_def,
['user', 'product'], # an array of the input node(s)
["dense_2/BiasAdd"], # an array of output nodes
tf.float32.as_datatype_enum)
[node.op.name for node in model.outputs]
[node.op.name for node in model.inputs]
# Save the optimized graph
tf.train.write_graph(output_graph_def, model_path, "optimized_" + model_name + ".pb", as_text=False)
tf.train.write_graph(output_graph_def, model_path, "optimized_text_" + model_name + ".pb", as_text=True)
###Output
_____no_output_____
###Markdown
Read optimized graph as binary
###Code
with open(model_path + "optimized_" + model_name + ".pb", "rb") as f:
model_file_binary = f.read()
###Output
_____no_output_____
###Markdown
Generate Model Id
###Code
## generate a model Id based on current timestamp
model_id = 'recommender-model-' + '{:%Y-%m-%d-%H:%M:%S}'.format(datetime.datetime.now())
###Output
_____no_output_____
###Markdown
Generate Avro with Schema
###Code
## Generate avro directly
# Parse the schema file
schema = avro.schema.Parse(open("avro/RecommenderModel.avsc", "rb").read())
# Create a data file using DataFileWriter
dataFile = open(model_path + "recommender.avro", "wb")
writer = DataFileWriter(dataFile, DatumWriter(), schema)
# Write data using DatumWriter
writer.append({"modelId": model_id,
"tensorFlowModel": model_file_binary,
"productMap": productMapping,
"customerMap": customerMapping
})
writer.close()
reader = DataFileReader(open(model_path + "recommender.avro", "rb"), DatumReader())
for model in reader:
r = model
reader.close()
type(r)
r.keys()
r["modelId"]
r["productMap"]
###Output
_____no_output_____
###Markdown
Generate Avro Schemaless
###Code
writer = avro.io.DatumWriter(schema)
bytes_writer = io.BytesIO()
encoder = avro.io.BinaryEncoder(bytes_writer)
# Write data using DatumWriter
writer.write({"modelId": model_id,
"tensorFlowModel": model_file_binary,
"productMap": productMapping,
"customerMap": customerMapping
}, encoder)
raw_bytes = bytes_writer.getvalue()
open(model_path + "recommender-no-schema.avro", 'wb').write(raw_bytes)
bytes_reader = io.BytesIO(raw_bytes)
decoder = avro.io.BinaryDecoder(bytes_reader)
reader = avro.io.DatumReader(schema)
r = reader.read(decoder)
r["productMap"]
###Output
_____no_output_____ |
2020/02-python-bibliotecas-manipulacao-dados/Numpy.ipynb | ###Markdown
NumPy. NumPy is a fundamental package for scientific computing with Python. It comes with a wide variety of mathematical operations, mainly algebraic operations on N-dimensional data!
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
At the core of how it works is *np.array*, which returns the *array* object on which all of the functions are implemented.
###Code
a = np.array([1, 2, 3])
print(repr(a), a.shape, end="\n\n")
b = np.array([(1, 2, 3), (4, 5, 6)])
print(repr(b), b.shape)
###Output
_____no_output_____
###Markdown
The array comes with several operators already implemented:
###Code
print(b.T, end="\n\n") # transposes the matrix
print(a + b, end="\n\n") # adds a row/column vector to every row/column of the matrix
print(b - a, end="\n\n") # subtracts a row/column vector from every row/column of the matrix
# multiplies the elements of a row/column vector
# with all the elements of the rows/columns of the matrix
print(a * b, end="\n\n")
print(a**2, end="\n\n") # squares each element
###Output
_____no_output_____
###Markdown
NumPy comes with many mathematical operations already implemented, which can be applied to a single value or to an array of values. **Note:** applying these functions can be seen as a transformation operation (*map*)
###Code
print(10*np.sin(1)) # trigonometric sine of 1
print(10*np.sin(a)) # trigonometric sine of each element of a
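# conceptually this is a map over the elements, e.g. np.array([10*np.sin(x) for x in a]),
# but the vectorized NumPy call is much faster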
###Output
_____no_output_____
###Markdown
A boolean operation can be applied to every element of an array, returning an array of the same shape containing the result of the operation
###Code
b<35
###Output
_____no_output_____
###Markdown
There are also predefined utility operations available on an array
###Code
print(b,end="\n\n")
print('Axis 1: %s' % b[0], end="\n\n") # returns a vector
print(np.average(b), end="\n\n") # takes the mean of all the elements
print(np.average(b, axis=1), end="\n\n") # takes the mean of the elements of the vectors along axis 1
print(b.sum(), end="\n\n") # returns the sum of the values
print(b.sum(axis=1), end="\n\n") # returns the sums of the values along axis 1
print(b.min(), end="\n\n") # returns the smallest value
print(b.max(), end="\n\n") # returns the largest value
###Output
_____no_output_____
###Markdown
There are also functions for generating pre-initialized arrays
###Code
print(np.zeros((3, 5)), end="\n\n") # array of zeros with shape [3,5]
print(np.ones((2,3,4)), end="\n\n------------\n\n") # array of ones with shape [2,3,4]
print(np.full((2, 2), 10), end="\n\n") # array filled with 10, with shape [2,2]
print(np.arange(10, 30, 5), end="\n\n") # values from 10 to 30 with step 5
print(np.random.rand(2, 3), end="\n\n") # array with shape [2,3] of random values
###Output
_____no_output_____
###Markdown
We can select slices of the array, allowing us to retrieve only a portion of it
###Code
d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d
d[:, 0] # all rows (:) of the first column (0)
d[:, 1] # all rows (:) of the second column (1)
d[:, 0:2] # all rows (:) of the columns from 0 to 2
d[:, 2] # all rows (:) of the third column (2)
###Output
_____no_output_____
###Markdown
NumPy also provides functions to save/load arrays to/from files
###Code
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
np.save('/tmp/x.npy', x)
del(x)
x = np.load('/tmp/x.npy')
print(x)
###Output
_____no_output_____ |
Natural-Language-Processing/Transformers/Sentiment Analysis/airline_tweet_sentiment_analysis/notebooks/airline_tweet_distillBERT_fine_tuning.ipynb | ###Markdown
###Code
!pip install transformers
!wget -nc https://lazyprogrammer.me/course_files/AirlineTweets.csv
!pip install dask
!pip install 'fsspec>=0.3.3'
!pip install datasets
!pip install torchinfo
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Not connected to a GPU')
else:
print(gpu_info)
from transformers import pipeline
import torch
import spacy
import tqdm as notebook_tqdm
from torchinfo import summary
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report, f1_score, precision_score, recall_score,roc_auc_score,accuracy_score
from sklearn.metrics import plot_confusion_matrix,classification_report
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from datasets import load_dataset
from spacy.lang.en import English
import warnings
from tqdm import tqdm
import re
import string
warnings.filterwarnings("ignore")
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
import concurrent.futures
import dask.bag as db
import dask
import graphviz
from dask import visualize
data = pd.read_csv('./AirlineTweets.csv')
data.head(2)
data['text'].iloc[0:10]
###Output
_____no_output_____
###Markdown
Text Preprocessing
###Code
def load_stopwords(filename):
stopwords = []
with open(filename, "r") as f:
stopwords = []
for line in tqdm(f):
line = re.sub(r"\n","",line, flags=re.I)
stopwords.append(line)
return set(stopwords)
stopwords_file = "/content/drive/Shareddrives/MSML641 Project/msml_641_project_scripts/mallet_en_stoplist.txt"
stopwords= load_stopwords(stopwords_file)
nlp = English(parser=False)
def spacy_preprocessing(text):
'''
    text: accepts a string of text
    stopwords: set of stopwords
    proceduralwords: list of procedural words in politics
    exclude_list: custom list of words to exclude, e.g. ['mr', 'managers']
    clean_tokens: maps words like you're to you are
    returns a clean string
    Parameters
    remove_punctuations: 'yes' removes all punctuation
    remove_stopwords: 'yes' removes all stopwords
    remove_nonalpha: 'yes' removes all characters except uppercase and lowercase letters
    Example: text = "I am soooooo excited Mr. , to learn nlp. s123 2003 you're doing great. He will be awesome!! managers for life"
'''
exclude_list=[]
remove_punctuations='yes'
remove_stopwords='no'
remove_nonalpha='yes'
    # remove any website URL (http or https)
    text = re.sub(r"http[s]?://[a-zA-Z.\/0-9?=]*\b", "", text)
# replaces single random characters in the text with space
text = re.sub(r"\b([a-zA-Z]{1})\b", " ", text)
# replaces special characters with spaces
if remove_nonalpha == 'yes':
text = re.sub(r"[^a-zA-Z]", " ", text)
# replaces multiple character with a word with one like pooooost will be post
text = re.sub(r"(.)\1{3,}", r"\1", text)
# replaces multiple space in the line with single space
text = re.sub(r"\s{2,}", r" ", text)
clean_text = []
doc = nlp(text)
for token in doc:
if (remove_punctuations == 'yes') & (remove_stopwords == 'yes'):
if (token.orth_ not in string.punctuation) & (token.orth_.lower() not in stopwords) & (token.orth_.lower() not in exclude_list):
clean_text.append(token.orth_.lower())
elif (remove_punctuations == 'yes') & (remove_stopwords == 'no'):
if (token.orth_ not in string.punctuation):
clean_text.append(token.orth_.lower())
elif (remove_punctuations == 'no') & (remove_stopwords == 'yes') & (token.orth_.lower() not in exclude_list):
if (token.orth_ not in stopwords) & (
token.orth_ not in string.punctuation):
clean_text.append(token.orth_.lower())
else:
clean_text.append(token.orth_.lower())
continue
clean_string = " ".join(clean_text).lstrip()
return clean_string
data.shape
###Output
_____no_output_____
###Markdown
Comparing Dask vs Pandas for Text Preprocessing Pandas
###Code
%%time
data['clean_lines'] = data['text'].apply(lambda x: spacy_preprocessing(x))
###Output
CPU times: user 5.07 s, sys: 25.6 ms, total: 5.1 s
Wall time: 6.51 s
###Markdown
Dask
###Code
#Convert the list to a Dask bag
reviews_list = data['text'].to_list()
review_bag = db.from_sequence(reviews_list, npartitions=3)
clean_reviews = review_bag.map(spacy_preprocessing)
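# note: review_bag.map() is lazy - no preprocessing actually runs until .compute() is called below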
dask.visualize(clean_reviews)
%%time
clean_text = clean_reviews.compute()
type(clean_text)
###Output
_____no_output_____
###Markdown
Creating Dataset Object
###Code
data2 = data[['clean_lines','airline_sentiment']]
target_map = {'negative': 0, 'positive': 1,'neutral':2}
data2['label'] = data2['airline_sentiment'].map(target_map)
import seaborn as sns
label_hist = data['airline_sentiment'].value_counts().reset_index()
label_hist
ax = sns.barplot(x='index',y='airline_sentiment',data=label_hist)
plt.plot()
data2=data2[['clean_lines','label']]
# the target column must be named 'label', as per current Hugging Face documentation
data2.columns = ['sentence','label']
data2.to_csv('/content/drive/MyDrive/data/cleaned_tweets_airline.csv',index = None)
!head /content/drive/MyDrive/data/cleaned_tweets_airline.csv
#loading custom dataset
raw_datasets = load_dataset('csv',data_files='/content/drive/MyDrive/data/cleaned_tweets_airline.csv')
#dataset object
raw_datasets
#dataset object has method to split the data into test and train
split = raw_datasets['train'].train_test_split(test_size=0.3, seed=42)
#dataset object split in test and train
split
###Output
_____no_output_____
###Markdown
Tokenizing
###Code
from transformers import AutoTokenizer
checkpoint = 'distilbert-base-uncased'
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
###Output
_____no_output_____
###Markdown
Tokenization the entire dataset
###Code
def tokenize_fn(batch):
return tokenizer(batch['sentence'], truncation=True) #no padding option since it will be handled by the trainer in thsi case
tokenized_dataset = split.map(tokenize_fn, batched=True)
print(tokenize_fn(raw_datasets['train'][:3]))
tokenized_dataset
tokenized_dataset['test']['label'][0:5]
###Output
_____no_output_____
###Markdown
Config File
###Code
from transformers import AutoConfig
#Autoconfig can also be replaced by checkpoint specific configs
config = AutoConfig.from_pretrained(checkpoint)
config
config.id2label
config.label2id
config.id2label = {v:k for k,v in target_map.items()}
config.id2label
config.label2id = target_map
config.label2id
###Output
_____no_output_____
###Markdown
Modeling
###Code
from transformers import TrainingArguments, Trainer, AutoModelForSequenceClassification
from transformers import AutoModelForSequenceClassification
import torch
model_ckpt = 'distilbert-base-uncased'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_labels=3
model = (AutoModelForSequenceClassification.from_pretrained(model_ckpt, config=config).to(device))
#Note config and num_labels parameter cannot be passed together
summary(model)
###Output
_____no_output_____
###Markdown
Training Arguments
###Code
batch_size= 16
logging_steps = len(tokenized_dataset["train"]) // batch_size
model_name = "real_airline_tweet_analysis_model"
training_args = TrainingArguments(
'airline_tweet_lp_real',
evaluation_strategy='epoch',
num_train_epochs=2,
learning_rate=2e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
save_strategy='epoch',
logging_steps=logging_steps,
log_level="error",
push_to_hub=False,
disable_tqdm=False
)
###Output
_____no_output_____
###Markdown
Custom Metric Function - the output should always be a dictionary with each metric as a key
###Code
def compute_metrics(pred):
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    f1 = f1_score(labels, preds, average="weighted")
    acc = accuracy_score(labels, preds)
    return {"accuracy": acc, "f1": f1}
###Output
_____no_output_____
###Markdown
Training
###Code
from transformers import Trainer
trainer = Trainer(model=model, args=training_args,
compute_metrics=compute_metrics,
train_dataset=tokenized_dataset['train'],
eval_dataset = tokenized_dataset['test'],
tokenizer=tokenizer)
trainer.train()
tokenized_dataset['train']['input_ids'][0]
###Output
_____no_output_____
###Markdown
Loading in the Checkpoints
###Code
!ls -ltra airline_tweet_lp_real
###Output
total 20
drwxr-xr-x 1 root root 4096 Jun 18 16:02 ..
drwxr-xr-x 2 root root 4096 Jun 18 16:03 checkpoint-641
drwxr-xr-x 5 root root 4096 Jun 18 16:03 .
drwxr-xr-x 2 root root 4096 Jun 18 16:03 checkpoint-1282
drwxr-xr-x 4 root root 4096 Jun 18 16:37 runs
###Markdown
Creating a Pipeline for Prediction
###Code
from transformers import pipeline
#loading the trained model with a specific checkpoint
savedmodel = pipeline('text-classification',
model='airline_tweet_lp_real/checkpoint-1282',
device=0)
#loading the test set
split['test']
#prediction on test set
test_pred = savedmodel(split['test']['sentence'])
test_pred[0:2]
target_map
test_inx = [target_map[k['label']] for k in test_pred]
test_inx[0:2]
print(type(test_inx[0]))
print(type(split['test']['label'][0]))
cm = confusion_matrix(split['test']['label'], test_inx, labels=[0, 1,2])
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
display_labels=['negative', 'positive','neutral'])
disp.plot()
plt.grid(False)
plt.show()
print(classification_report(split['test']['label'], test_inx))
###Output
precision recall f1-score support
0 0.89 0.92 0.90 2811
1 0.74 0.80 0.77 664
2 0.70 0.60 0.65 917
accuracy 0.83 4392
macro avg 0.78 0.77 0.77 4392
weighted avg 0.83 0.83 0.83 4392
|
images/interactive/dotnet-spark/02-basic-example.ipynb | ###Markdown
A basic .NET for Apache Spark example Preparation Start the Backend in Debug mode**_Important_**: Before you run any cells in this example, please ensure that you have [started the .NET for Apache Spark DotnetBackend in Debug mode](01-start-spark-debug.ipynb). Install the Microsoft.Spark NuGet package
###Code
#r "nuget: Microsoft.Spark,1.0.0"
###Output
_____no_output_____
###Markdown
--- Coding Create a new SparkSession. The entry point to all .NET for Apache Spark functionality is a SparkSession. To create one, just use SparkSession.Builder():
###Code
using Microsoft.Spark.Sql;
using Microsoft.Spark.Sql.Types;
using static Microsoft.Spark.Sql.Functions;
var spark = SparkSession.Builder().GetOrCreate();
###Output
_____no_output_____
###Markdown
Create a new DataFrame. There are multiple ways of creating new DataFrames. Most of the time you will read data from another source. For this basic example, however, we simply define our DataFrame in the code below.
###Code
var data = new List<GenericRow>
{
new GenericRow(new object[] { "Batman", "M", 3093, true, new Date(1939, 5, 1) }),
new GenericRow(new object[] { "Superman", "M", 2496, true, new Date(1986, 10, 1) }),
new GenericRow(new object[] { "Wonder Woman", "F", 1231, true, new Date(1941, 12, 1) }),
new GenericRow(new object[] { "Lois Lane", "F", 934, true, new Date(1938, 6, 1) })
};
var schema = new StructType(new List<StructField>()
{
new StructField("Name", new StringType()),
new StructField("Sex", new StringType()),
new StructField("Appearances", new IntegerType()),
new StructField("Alive", new BooleanType()),
new StructField("FirstAppearance", new DateType())
});
DataFrame df = spark.CreateDataFrame(data, schema);
###Output
_____no_output_____
###Markdown
Get a quick overview of your data. To verify/display the Spark data types of a DataFrame use **PrintSchema()**
###Code
df.PrintSchema();
###Output
_____no_output_____
###Markdown
Use **Show()** to have a look at the first couple of rows of your DataFrame.
###Code
df.Show();
###Output
_____no_output_____
###Markdown
To get some basic DataFrame statistics, use **Describe()**.
###Code
df.Describe().Show();
###Output
_____no_output_____
###Markdown
Filtering Column style filtering
###Code
df.Filter(df.Col("Name") == "Batman").Show();
df.Filter(df["Appearances"] > 1000).Show();
###Output
_____no_output_____
###Markdown
SQL style Filtering
###Code
df.Filter("Sex == 'F'").Show();
df.Filter("FirstAppearance >= '1971-01-01'").Show();
df.Filter("Name not like '%man'").Show();
###Output
_____no_output_____
###Markdown
Grouping
###Code
df.GroupBy("Sex").Count().Show();
df.GroupBy("Sex")
.Agg(Count(df["Sex"]), Avg(df["Appearances"]), Min(df["Appearances"]), Max(df["Appearances"]))
.OrderBy(Desc("avg(Appearances)"))
.Show();
###Output
_____no_output_____
###Markdown
Cleanup. Stop your Spark session once you are done.
###Code
spark.Stop();
###Output
_____no_output_____ |
fillna method.ipynb | ###Markdown
fillna fills NaN values with the given value (input). Syntax: DataFrame.fillna()
###Code
import pandas as pd
demo=pd.read_csv('C:\\Users\\admin\\Desktop\\book1.csv')
demo
# fill with zero (0) where NaN value is Present
demo.fillna(0)
demo.fillna(2)
demo.fillna({'names':'none', 'Date': "not given" })
demo.fillna(method = "ffill") #ffill means forward fill
demo.fillna(method = "bfill") #bfill means backward fill
demo.fillna(axis = 0 ,method = "ffill" )
demo.fillna(axis = 1 ,method = "ffill" )
demo.fillna(axis = 1 ,method = "bfill" )
demo.fillna( 0,limit = 2 )
demo.fillna( method = 'ffill',limit = 1 )
demo.fillna(5 ,inplace = True)
demo
###Output
_____no_output_____ |
Chatbot_Tensorflow.ipynb | ###Markdown
Connect to the GDrive
###Code
#from google.colab import drive
#drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
Change root path
###Code
#root_path = '/content/gdrive/My Drive/Colab/'
import os
# Set your working directory to a folder in your Google Drive. This way, if your notebook times out,
# your files will be saved in your Google Drive!
# the base Google Drive directory
root_dir = "/content/drive/My Drive/"
# choose where you want your project files to be saved
project_folder = "Colab/"
def create_and_set_working_directory(project_folder):
# check if your project folder exists. if not, it will be created.
if os.path.isdir(root_dir + project_folder) == False:
os.mkdir(root_dir + project_folder)
print(root_dir + project_folder + ' did not exist but was created.')
# change the OS to use your project folder as the working directory
os.chdir(root_dir + project_folder)
# create a test file to make sure it shows up in the right place
!touch 'test_file.txt'
print('\nYour working directory was changed to ' + root_dir + project_folder + \
"\n\nAn empty text file was created there. You can also run !pwd to confirm the current working directory." )
create_and_set_working_directory(project_folder)
###Output
Your working directory was changed to /content/drive/My Drive/Colab/
An empty text file was created there. You can also run !pwd to confirm the current working directory.
###Markdown
Download dataset
###Code
#!wget http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip
#!unzip cornell_movie_dialogs_corpus.zip
###Output
_____no_output_____
###Markdown
Firstly import all necessaries libraries
###Code
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import re
import time
from tensorflow.python.compiler.tensorrt import trt_convert as trt
###Output
_____no_output_____
###Markdown
Preprocessing Importing the dataset into the project
###Code
lines = open('dataset/movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\n')
conversations = open('dataset/movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\n')
###Output
_____no_output_____
###Markdown
Create a dictionary that maps each line to its ID
###Code
id2line = {} #init dictionary
for line in lines:
_line = line.split(' +++$+++ ')
if len(_line) == 5:
id2line[_line[0]] = _line[4]
# JUST TO CHECK DATA IN GOOGLE COLAB
# retrieve key/value pairs
#els = list(id2line.items()) # explicitly convert to a list, in case it's Python 3.x
# get first inserted element
#print(els[0])
###Output
_____no_output_____
###Markdown
Create a list of all of the conversations
###Code
conversations_ids = []
for conversation in conversations[:-1]:
_conversation = conversation.split(" +++$+++ ")[-1][1:-1].replace("'","").replace(" ","")
conversations_ids.append(_conversation.split(","))
#print(conversations_ids[0])
#print(conversations_ids[0])
###Output
_____no_output_____
###Markdown
Getting the questions and the answers separately
###Code
questions = []
answers = []
for conversation in conversations_ids:
for i in range(len(conversation) - 1):
questions.append(id2line[conversation[i]])
answers.append(id2line[conversation[i+1]])
#print(questions[0])
#print(answers[0])
###Output
_____no_output_____
###Markdown
Define the text-cleaning function
###Code
def clean_text(text):
#put all the text to lowercase
text = text.lower()
#removing aposthrophies with re library
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "what is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
    text = re.sub(r"\'d", " would", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"[-()\"#/@;:<>{}+=~|.?,]", "", text)
return text
###Output
_____no_output_____
###Markdown
Cleaning questions and answers
###Code
clean_questions = []
clean_answers = []
for question in questions:
clean_questions.append(clean_text(question))
for answer in answers:
clean_answers.append(clean_text(answer))
###Output
_____no_output_____
###Markdown
Creating a dictionary that maps each word to its number of occurrences
###Code
word2count = {}
for question in clean_questions:
for word in question.split():
if word not in word2count:
word2count[word] = 1
else:
word2count[word] += 1
for answer in clean_answers:
for word in answer.split():
if word not in word2count:
word2count[word] = 1
else:
word2count[word] += 1
###Output
_____no_output_____
###Markdown
Creating two dictionaries that map the questions words and the answers words to a unique integer
###Code
threshold = 20
questionwords2int = {}
word_number = 0
for word, count in word2count.items():
if count > threshold:
questionwords2int[word] = word_number
word_number += 1
answerwords2int = {}
word_number = 0
for word, count in word2count.items():
if count > threshold:
answerwords2int[word] = word_number
word_number += 1
# print(answerwords2int.items())
###Output
_____no_output_____
###Markdown
Adding the last tokens to these two dictionaries
###Code
tokens = ['<PAD>', '<EOS>', '<OUT>', '<SOS>']
for token in tokens:
questionwords2int[token] = len(questionwords2int) + 1
for token in tokens:
answerwords2int[token] = len(answerwords2int) + 1
#print(answerwords2int.items())
###Output
_____no_output_____
###Markdown
Create an inverse dictionary of the answerwords2int dictionary
###Code
answerint2word = {w_i: w for w, w_i in answerwords2int.items()}
###Output
_____no_output_____
###Markdown
Adding the EOS token to the end of every answer
###Code
for i in range(len(clean_answers)):
clean_answers[i] += ' <EOS>'
###Output
_____no_output_____
###Markdown
Translating all the questions and the answers into integers and replacing all the words that were filtered out by the <OUT> token
###Code
questions_to_int = []
for question in clean_questions:
ints = []
for word in question.split():
if word not in questionwords2int:
ints.append(questionwords2int['<OUT>'])
else:
ints.append(questionwords2int[word])
questions_to_int.append(ints)
print(questions_to_int[0])
answers_to_int = []
for answer in clean_answers:
ints = []
for word in answer.split():
if word not in answerwords2int:
ints.append(answerwords2int['<OUT>'])
else:
ints.append(answerwords2int[word])
answers_to_int.append(ints)
###Output
[0, 1, 2, 3, 4, 8541, 8541, 5, 6, 8541, 7, 8, 9, 10, 8541, 11, 12, 13, 14, 15, 8541, 16]
###Markdown
Sorting questions and answers by the length of questions (this will speed up the training)
###Code
sorted_clean_questions = []
sorted_clean_answers = []
for length in range(1,25+1):
for i in enumerate(questions_to_int):
if len(i[1]) == length:
sorted_clean_questions.append(questions_to_int[i[0]])
sorted_clean_answers.append(answers_to_int[i[0]])
print(sorted_clean_questions[0])
print(sorted_clean_answers[0])
###Output
[47]
[15, 48, 25, 47, 18, 49, 50, 15, 51, 52, 45, 53, 8541, 54, 52, 55, 41, 56, 18, 57, 58, 59, 60, 61, 8540]
###Markdown
Building seq2seq model Creating placeholders for the inputs and the targets
###Code
def model_inputs():
inputs = tf.compat.v1.placeholder(tf.int32, [None, None], name = 'inputs')
targets = tf.compat.v1.placeholder(tf.int32, [None, None], name = 'targets')
lr = tf.compat.v1.placeholder(tf.float32, name = 'learning_rate')
keep_prob = tf.compat.v1.placeholder(tf.float32, name = 'keep_prob')
return inputs, targets, lr, keep_prob
###Output
_____no_output_____
###Markdown
Preprocessing the targets
###Code
# add the <SOS> token at the start of each row of the batch and drop the last column
def preprocess_targets(targets, word2int, batch_size):
    left_side = tf.fill([batch_size, 1], word2int['<SOS>'])
right_side = tf.strided_slice(targets, [0,0], [batch_size, -1],[1,1])
preprocessed_targets = tf.concat([left_side, right_side], 1)
return preprocessed_targets
###Output
_____no_output_____
###Markdown
Creating the Encoder RNN Layer
###Code
def encoder_rnn(rnn_inputs, rnn_size, num_layers, keep_prob, sequence_length):
#first create LSTM - object of basic lstm cell
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob = keep_prob)
encoder_cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout] * num_layers)
    encoder_output, encoder_state = tf.nn.bidirectional_dynamic_rnn(cell_fw = encoder_cell,
cell_bw = encoder_cell,
sequence_length = sequence_length,
inputs = rnn_inputs,
dtype = tf.float32) # this will build forward and backward RNNs
return encoder_state
###Output
_____no_output_____
###Markdown
Creating the Decoder of the training set
###Code
def decode_training_set(encoder_state, decoder_cell, decoder_embedded_input, sequence_length, decoding_scope, output_function, keep_prob, batch_size):
attention_state = tf.zeros([batch_size, 1, decoder_cell.output_size])
attention_keys, attention_values, attention_score_function, attention_construct_function = tf.contrib.seq2seq.prepare_attention(attention_state, attention_option = "bahdanau", num_units = decoder_cell.output_size)
training_decoder_function = tf.contrib.seq2seq.attention_decoder_fn_train(encoder_state[0],
attention_keys,
attention_values,
attention_score_function,
attention_construct_function,
name = "attn_dec_train")
    decoder_output, decoder_final_state, decoder_final_context_state = tf.contrib.seq2seq.dynamic_rnn_decoder(decoder_cell,
training_decoder_function,
decoder_embedded_input,
sequence_length,
scope = decoding_scope)
decoder_output_dropout = tf.nn.dropout(decoder_output, keep_prob)
return output_function(decoder_output_dropout)
###Output
_____no_output_____
###Markdown
Decoding the test/validation set
###Code
def decode_test_set(encoder_state, decoder_cell, decoder_embeddings_matrix, sos_id, eos_id, maximum_length, num_words, decoding_scope, output_function, keep_prob, batch_size):
attention_state = tf.zeros([batch_size, 1, decoder_cell.output_size])
attention_keys, attention_values, attention_score_function, attention_construct_function = tf.contrib.seq2seq.prepare_attention(attention_state, attention_option = "bahdanau", num_units = decoder_cell.output_size)
test_decoder_function = tf.contrib.seq2seq.attention_decoder_fn_inference(output_function,
encoder_state[0],
attention_keys,
attention_values,
attention_score_function,
attention_construct_function,
decoder_embeddings_matrix,
sos_id,
eos_id,
maximum_length,
num_words,
name = "attn_dec_inf")
    test_predictions, decoder_final_state, decoder_final_context_state = tf.contrib.seq2seq.dynamic_rnn_decoder(decoder_cell,
test_decoder_function,
scope = decoding_scope)
return test_predictions
###Output
_____no_output_____
###Markdown
Creating the Decoder RNN
###Code
def decoder_rnn(decoder_embedded_inputs, decoder_embeddings_matrix, encoder_state, num_words, sequence_length, rnn_size, num_layers, word2int, keep_prob, batch_size):
with tf.variable_scope("decoding") as decoding_scope:
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob = keep_prob)
decoder_cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout] * num_layers)
weights = tf.truncated_normal_initializer(stddev = 0.1)
biases = tf.zeros_initializer()
output_function = lambda x: tf.contrib.layers.fully_connected(x,
num_words,
None,
scope = decoding_scope,
weights_initializer = weights,
biases_initializer = biases)
        training_predictions = decode_training_set(encoder_state,
                                                   decoder_cell,
                                                   decoder_embedded_inputs,
                                                   sequence_length,
                                                   decoding_scope,
                                                   output_function,
                                                   keep_prob,
                                                   batch_size)
decoding_scope.reuse_variables()
test_predictions = decode_test_set(encoder_state,
decoder_cell,
decoder_embeddings_matrix,
word2int['<SOS>'],
word2int['<EOS>'],
sequence_length-1,
num_words,
decoding_scope,
output_function,
keep_prob,
batch_size)
return training_predictions, test_predictions
###Output
_____no_output_____
###Markdown
Building seq2seq
###Code
def seq2seq_model(inputs, targets, keep_prob, batch_size, sequence_length, answers_num_words, questions_num_words, encoder_embedding_size, decoder_embedding_size, rnn_size, num_layers, questionswords2int):
encoder_embedded_input = tf.contrib.layers.embed_sequence(inputs,
answers_num_words + 1,
encoder_embedding_size,
initializer = tf.random_uniform_initializer(0,1)
)
#ouput of encoder and input of decoder
encoder_state = encoder_rnn(encoder_embedded_input, rnn_size, num_layers, keep_prob, sequence_length)
preprocessed_targets = preprocess_targets(targets, questionswords2int, batch_size)
decoder_embeddings_matrix = tf.Variable(tf.random_uniform([questions_num_words + 1, decoder_embedding_size], 0, 1))
    decoder_embedded_inputs = tf.nn.embedding_lookup(decoder_embeddings_matrix, preprocessed_targets)
    training_predictions, test_predictions = decoder_rnn(decoder_embedded_inputs,
decoder_embeddings_matrix,
encoder_state,
questions_num_words,
sequence_length,
rnn_size,
num_layers,
                                                          questionswords2int,
keep_prob,
batch_size)
    return training_predictions, test_predictions
###Output
_____no_output_____
###Markdown
Setting the Hyperparameters
###Code
epochs = 100
batch_size = 64
rnn_size = 512
num_layers = 3
encoding_embedding_size = 512 #512 columns
decoding_embedding_size = 512 #512 columns
learning_rate = 0.01
learning_rate_decay = 0.9 # the factor by which the learning rate is reduced over the training iterations
min_learning_rate = 0.0001
keep_probability = 0.5
###Output
_____no_output_____
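###Markdown
A hedged aside (the training loop itself is not shown in this notebook): the decay hyperparameters above are typically applied by shrinking the learning rate after each epoch while never letting it fall below the floor, as sketched below.
###Code
# Minimal sketch of how learning_rate_decay and min_learning_rate are usually combined
def apply_learning_rate_decay(current_lr, decay=learning_rate_decay, floor=min_learning_rate):
    # multiply by the decay factor, but clamp at the minimum learning rate
    return max(current_lr * decay, floor)
# e.g. apply_learning_rate_decay(0.01) -> 0.009
###Output
_____no_output_____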
###Markdown
Defining a session (tensorflow)
###Code
tf.compat.v1.reset_default_graph()
session = tf.compat.v1.InteractiveSession()
###Output
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py:1751: UserWarning: An interactive session is already active. This can cause out-of-memory errors in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s).
warnings.warn('An interactive session is already active. This can '
###Markdown
Loading the model inputs
###Code
inputs, targets, lr, keep_prob = model_inputs()
###Output
_____no_output_____
###Markdown
Setting the sequence length
###Code
sequence_length = tf.compat.v1.placeholder_with_default(25, None, name = 'sequence_length')
###Output
_____no_output_____
###Markdown
Getting the shape of the inputs tensor
###Code
input_shape = tf.shape(inputs)
print(input_shape)
###Output
Tensor("Shape:0", shape=(2,), dtype=int32)
###Markdown
Getting the training and test predictions
###Code
training_predictions, test_predictions = seq2seq_model(tf.reverse(inputs, [-1]),
targets,
keep_prob,
batch_size,
sequence_length,
len(answerwords2int),
len(questionwords2int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
questionwords2int)
###Output
_____no_output_____ |
hierarchical clustering/hierarchical.ipynb | ###Markdown
Hierarchical Clustering. This is my kernel following the previous cluster analysis from scratch. You can consider seeing it first if you do not understand the concept of hierarchical clustering. This project is also a continuation of the k-means clustering algorithm. I will use the same dataset I used previously in K-Means. This means that the exploratory data analysis part will be skipped because there is no need to repeat the same things. Business value. The business value of this project is to categorize customers registered in this particular mall into similar categories based on their previous spending behaviour in this supermarket. The problem here is that the mall does not know what the groups are. The number of groups is also not known ahead of time. In k-means we had specified 3 clusters; in this case, however, we are going to leave the algorithm to do that on its own. Loading libraries.
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Loading Data. Importing the mall dataset with pandas.
###Code
df = pd.read_csv("/Users/admin/Downloads/PYTHON FOR DATA ANALYSIS/clustering/Mall_Customers.csv")
###Output
_____no_output_____
###Markdown
Quick look into the data
###Code
df.head()
df.info()
df.describe()
###Output
_____no_output_____
###Markdown
Feature Selection. In this problem, the main features of interest will be annual income and spending score.
###Code
X = df.iloc[:,[3,4]].values
###Output
_____no_output_____
###Markdown
Using a dendrogram to find the optimal number of clusters
###Code
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X,method="ward"))
plt.title("Dendrogram")
plt.xlabel("Customers")
plt.ylabel("Eucledian Distances")
plt.show()
###Output
_____no_output_____
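###Markdown
A hedged aside (not part of the original analysis): instead of reading the cut-off from the plot by eye, scipy can cut the same "ward" linkage tree at a chosen distance threshold; the threshold value below is only an assumption for illustration.
###Code
from scipy.cluster.hierarchy import linkage, fcluster
Z = linkage(X, method="ward")
labels_at_threshold = fcluster(Z, t=200, criterion="distance")  # t=200 is an assumed cut height
print(len(set(labels_at_threshold)), "clusters at this threshold")
###Output
_____no_output_____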
###Markdown
Based on the above dendrogram, we can see that the data can be classified into 5 clusters. This is similar to the number of clusters that was identified using the elbow method in K-means clustering! Sounds good. Fitting Hierarchical Clustering
###Code
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=5,affinity="euclidean",linkage='ward')
#the "ward" linkage is used to minimize the variance within the clusters.
y_hc = hc.fit_predict(X)
###Output
_____no_output_____
###Markdown
Visualizing the clusters
###Code
plt.figure(figsize=(10,5))
plt.scatter(X[y_hc==0,0],X[y_hc == 0,1],s=10.0,c="red",label="Careful")
plt.scatter(X[y_hc==1,0],X[y_hc == 1,1],s=10.0,c="green",label="Standard")
plt.scatter(X[y_hc==2,0],X[y_hc == 2,1],s=10.0,c="black",label="Target")
plt.scatter(X[y_hc==3,0],X[y_hc == 3,1],s=10.0,c="cyan",label="Careless")
plt.scatter(X[y_hc==4,0],X[y_hc == 4,1],s=10.0,c="blue",label="Sensible")
plt.legend(loc=1)
plt.title("Cluster analysis")
plt.ylabel("Spending score")
plt.xlabel("Income")
###Output
_____no_output_____ |
Pandas Trick 001 - Return 3 rows from the head method.ipynb | ###Markdown
Pandas Trick 1 - Change the default number of rows returned from the `head` method. The pandas DataFrame `head` method returns the first 5 rows by default. This is controlled by the parameter `n`. In this trick, we will use `partialmethod` from the `functools` standard library to set `n` to a different number. First, let's read in a sample DataFrame containing bike rides from the city of Chicago and call the `head` method with the defaults. Note that it returns 5 rows.
###Code
import pandas as pd
bikes = pd.read_csv('data/bikes.csv')
bikes.head()
###Output
_____no_output_____
###Markdown
The `functools` standard library comes packaged with `partialmethod` which allows you to set parameters of a particular method. You can set any number of parameters with it. Below, we reassign the DataFrame `head` method so that it returns 3 rows as a default instead of 5.
###Code
from functools import partialmethod
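# Hedged aside (not part of the original trick): to restore the 5-row default later,
# keep a reference first, e.g. original_head = pd.DataFrame.head (before the line below),
# then restore with pd.DataFrame.head = original_head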
pd.DataFrame.head = partialmethod(pd.DataFrame.head, n=3)
bikes.head()
###Output
_____no_output_____ |
01_ML_basics_with_Keras/03_Text classification with TensorFlow Hub.ipynb | ###Markdown
Transfer learning The tutorial demonstrates the basic application of transfer learning with TensorFlow Hub and Keras. This notebook uses tf.keras, a high-level API to build and train models in TensorFlow, and tensorflow_hub, a library for loading trained models from TFHub in a single line of code. For a more advanced text classification tutorial using tf.keras, see the MLCC Text Classification Guide.
###Code
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.list_physical_devices("GPU") else "NOT AVAILABLE")
# Split the training set into 60% and 40% to end up with 15,000 examples
# for training, 10,000 examples for validation and 25,000 examples for testing.
train_data, validation_data, test_data = tfds.load(
name="imdb_reviews",
split=('train[:60%]', 'train[60%:]', 'test'),
as_supervised=True)
###Output
2021-09-10 09:54:36.049031: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 47220 MB memory: -> device: 0, name: Quadro RTX 8000, pci bus id: 0000:67:00.0, compute capability: 7.5
2021-09-10 09:54:36.050538: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 46873 MB memory: -> device: 1, name: Quadro RTX 8000, pci bus id: 0000:68:00.0, compute capability: 7.5
###Markdown
Explore the data
###Code
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
train_examples_batch
train_labels_batch
###Output
_____no_output_____
###Markdown
Build the model
###Code
embedding = "https://tfhub.dev/google/nnlm-en-dim50/2"
hub_layer = hub.KerasLayer(embedding, input_shape=[], dtype=tf.string, trainable= True)
hub_layer(train_examples_batch[:3])
model = tf.keras.Sequential([hub_layer,
tf.keras.layers.Dense(16,activation='relu')
,tf.keras.layers.Dense(1)])
model.summary()
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(train_data.shuffle(10000).batch(512),
epochs=10,
validation_data=validation_data.batch(512),
verbose=1)
###Output
Epoch 1/10
30/30 [==============================] - 3s 71ms/step - loss: 0.6565 - accuracy: 0.5287 - val_loss: 0.6182 - val_accuracy: 0.5759
Epoch 2/10
30/30 [==============================] - 2s 57ms/step - loss: 0.5620 - accuracy: 0.6649 - val_loss: 0.5195 - val_accuracy: 0.7374
Epoch 3/10
30/30 [==============================] - 2s 57ms/step - loss: 0.4329 - accuracy: 0.8015 - val_loss: 0.4134 - val_accuracy: 0.8064
Epoch 4/10
30/30 [==============================] - 2s 58ms/step - loss: 0.3126 - accuracy: 0.8761 - val_loss: 0.3523 - val_accuracy: 0.8458
Epoch 5/10
30/30 [==============================] - 2s 56ms/step - loss: 0.2323 - accuracy: 0.9144 - val_loss: 0.3223 - val_accuracy: 0.8579
Epoch 6/10
30/30 [==============================] - 2s 59ms/step - loss: 0.1740 - accuracy: 0.9431 - val_loss: 0.3095 - val_accuracy: 0.8662
Epoch 7/10
30/30 [==============================] - 2s 59ms/step - loss: 0.1316 - accuracy: 0.9611 - val_loss: 0.3054 - val_accuracy: 0.8691
Epoch 8/10
30/30 [==============================] - 2s 58ms/step - loss: 0.0972 - accuracy: 0.9735 - val_loss: 0.3074 - val_accuracy: 0.8683
Epoch 9/10
30/30 [==============================] - 2s 58ms/step - loss: 0.0720 - accuracy: 0.9840 - val_loss: 0.3155 - val_accuracy: 0.8665
Epoch 10/10
30/30 [==============================] - 2s 58ms/step - loss: 0.0521 - accuracy: 0.9911 - val_loss: 0.3268 - val_accuracy: 0.8664
###Markdown
Evaluate the model
###Code
results = model.evaluate(test_data.batch(512), verbose=2)
for name, value in zip(model.metrics_names, results):
print("%s: %.3f" % (name, value))
history_dict = history.history
history_dict.keys()
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
import matplotlib.pyplot as plt
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
###Output
_____no_output_____ |
Semantic Textual Similarity.ipynb | ###Markdown
Finding Semantic Textual Similarity PROBLEM STATEMENT Given two paragraphs, quantify the degree of similarity between the two texts based on semantic similarity. Semantic Textual Similarity (STS) assesses the degree to which two sentences are semantically equivalent to each other. The STS task is motivated by the observation that accurately modelling the meaning similarity of sentences is a foundational language understanding problem relevant to numerous applications including machine translation (MT), summarization, generation, question-answering (QA), short answer grading, and semantic search. STS is the assessment of pairs of sentences according to their degree of semantic similarity. The task involves producing real-valued similarity scores for sentence pairs. Dataset Link: https://drive.google.com/file/d/1OSJR7wLfNunt1WPD03Kj63WAH6Ch1cFf/view?ts=5db2bda5 The data contains a pair of paragraphs. These text paragraphs are randomly sampled from a raw dataset. Each pair of sentences may or may not be semantically similar. The candidate is to predict a value between 0-1 indicating the degree of similarity between the pair of text paragraphs. 0 means highly similar, 1 means highly dissimilar. Import some library
###Code
import numpy as np
import pandas as pd
import re
from tqdm import tqdm
import collections
from sklearn.cluster import KMeans
from nltk.stem import WordNetLemmatizer # For Lemmetization of words
from nltk.corpus import stopwords # Load list of stopwords
from nltk import word_tokenize # Convert paragraph in tokens
import pickle
import sys
from gensim.models import word2vec # For represent words in vectors
import gensim
###Output
_____no_output_____
###Markdown
Read Data-Set
###Code
# Read given data-set using pandas
text_data = pd.read_csv("Text_Similarity_Dataset.csv")
print("Shape of text_data : ", text_data.shape)
text_data.head(3)
text_data.isnull().sum() # Check if text data have any null values
###Output
_____no_output_____
###Markdown
Preprocessing of text1 & text2 * Convert phrases like won't to will not using the function decontracted() below * Remove stopwords. * Remove any special symbols and lowercase all words * Lemmatize words using WordNetLemmatizer, defined in the function word_tokenizer below
###Code
def decontracted(phrase):
# specific
phrase = re.sub(r"won't", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
# Combining all the above stundents
preprocessed_text1 = []
# tqdm is for printing the status bar
for sentance in tqdm(text_data['text1'].values):
sent = decontracted(sentance)
sent = sent.replace('\\r', ' ')
sent = sent.replace('\\"', ' ')
sent = sent.replace('\\n', ' ')
sent = re.sub('[^A-Za-z0-9]+', ' ', sent)
sent = ' '.join(e for e in sent.split() if e not in stopwords.words('english'))
preprocessed_text1.append(sent.lower().strip())
# Merging preprocessed_text1 in text_data
text_data['text1'] = preprocessed_text1
text_data.head(3)
# Combining all the above stundents
from tqdm import tqdm
preprocessed_text2 = []
# tqdm is for printing the status bar
for sentance in tqdm(text_data['text2'].values):
sent = decontracted(sentance)
sent = sent.replace('\\r', ' ')
sent = sent.replace('\\"', ' ')
sent = sent.replace('\\n', ' ')
sent = re.sub('[^A-Za-z0-9]+', ' ', sent)
sent = ' '.join(e for e in sent.split() if e not in stopwords.words('english'))
preprocessed_text2.append(sent.lower().strip())
# Merging preprocessed_text2 in text_data
text_data['text2'] = preprocessed_text2
text_data.head(3)
def word_tokenizer(text):
#tokenizes and stems the text
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
tokens = [lemmatizer.lemmatize(t) for t in tokens]
return tokens
###Output
_____no_output_____
###Markdown
Proposed Approach Word embeddings: * Word embeddings are low dimensional vectors obtained by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. Pre-trained word embeddings are also available on the word2vec code.google page. * In this approach I am using Google News pre-trained vectors and compare the similarity between text1 & text2 using the n_similarity method in the gensim library, which simply computes the cosine similarity between the averaged word vectors of the two texts. * Consider it as an unsupervised problem.
###Code
# Load pre_trained Google News Vectors after download file
wordmodelfile="GoogleNews-vectors-negative300.bin.gz"
wordmodel= gensim.models.KeyedVectors.load_word2vec_format(wordmodelfile, binary=True)
# This code checks whether each word in text1 & text2 is present in the Google News vectors vocabulary.
# If not, it removes that word; the similarity score is then computed between the remaining text1 and text2 words.
similarity = [] # List for store similarity score
for ind in text_data.index:
s1 = text_data['text1'][ind]
s2 = text_data['text2'][ind]
if s1==s2:
similarity.append(0.0) # 0 means highly similar
else:
s1words = word_tokenizer(s1)
s2words = word_tokenizer(s2)
vocab = wordmodel.vocab #the vocabulary considered in the word embeddings
        if len(s1words) == 0 or len(s2words) == 0: # i.e. either tokenized text is empty
similarity.append(1.0)
else:
for word in s1words.copy(): #remove sentence words not found in the vocab
if (word not in vocab):
s1words.remove(word)
for word in s2words.copy(): #idem
if (word not in vocab):
s2words.remove(word)
similarity.append((1-wordmodel.n_similarity(s1words, s2words))) # as it is given 1 means highly dissimilar & 0 means highly similar
###Output
_____no_output_____
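###Markdown
A hedged aside (assuming the same gensim KeyedVectors model loaded above): n_similarity boils down to the cosine similarity between the mean word vectors of the two token lists, which the small sketch below reproduces manually.
###Code
def mean_vector_cosine(model, words_a, words_b):
    # average the raw word vectors of each list, then take the cosine of the two means
    a = np.mean([model[w] for w in words_a], axis=0)
    b = np.mean([model[w] for w in words_b], axis=0)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
# e.g. mean_vector_cosine(wordmodel, ['king', 'queen'], ['man', 'woman'])
# should closely match wordmodel.n_similarity(['king', 'queen'], ['man', 'woman'])
###Output
_____no_output_____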
###Markdown
Final Submission* Make Final DataFrame and save a CSV file of similarity scores with Unique_ID. (Columns : Unique_ID, Similarity_Score)
###Code
# Get Unique_ID and similarity
final_score = pd.DataFrame({'Unique_ID':text_data.Unique_ID,
'Similarity_score':similarity})
final_score.head(3)
# SAVE DF as CSV file
final_score.to_csv('final_score.csv',index=False)
###Output
_____no_output_____ |
Debebe_U2BW_Project.ipynb | ###Markdown
###Code
!pip install eli5
!pip install xgboost
!pip install eli5
!pip install xgboost
!pip install category_encoders
!pip install shap
!pip install scikit-learn==0.22.1
###Output
Collecting eli5
  Downloading https://files.pythonhosted.org/packages/97/2f/c85c7d8f8548e460829971785347e14e45fa5c6617da374711dec8cb38cc/eli5-0.10.1-py2.py3-none-any.whl (105kB)
     |████████████████████████████████| 112kB 2.8MB/s
Requirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.18.5)
Requirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (20.2.0)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.2)
Requirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.7)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.15.0)
Requirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.2.post1)
Requirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.16.0)
Installing collected packages: eli5
Successfully installed eli5-0.10.1
Requirement already satisfied: xgboost in /usr/local/lib/python3.6/dist-packages (0.90)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.18.5)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.4.1)
Requirement already satisfied: eli5 in /usr/local/lib/python3.6/dist-packages (0.10.1)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.2)
Requirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.7)
Requirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.18.5)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)
Requirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.15.0)
Requirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.2.post1)
Requirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (20.2.0)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.16.0)
Requirement already satisfied: xgboost in /usr/local/lib/python3.6/dist-packages (0.90)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.4.1)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.18.5)
Collecting category_encoders
  Downloading https://files.pythonhosted.org/packages/44/57/fcef41c248701ee62e8325026b90c432adea35555cbc870aff9cfba23727/category_encoders-2.2.2-py2.py3-none-any.whl (80kB)
     |████████████████████████████████| 81kB 2.2MB/s
Requirement already satisfied: pandas>=0.21.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.1.2)
Requirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.22.2.post1)
Requirement already satisfied: numpy>=1.14.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.18.5)
Requirement already satisfied: patsy>=0.5.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.5.1)
Requirement already satisfied: scipy>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.4.1)
Requirement already satisfied: statsmodels>=0.9.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.10.2)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2.8.1)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2018.9)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.20.0->category_encoders) (0.16.0)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from patsy>=0.5.1->category_encoders) (1.15.0)
Installing collected packages: category-encoders
Successfully installed category-encoders-2.2.2
Collecting shap
  Downloading https://files.pythonhosted.org/packages/d2/17/37ee6c79cafbd9bb7423b54e55ea90beec66aa7638664d607bcc28de0bae/shap-0.36.0.tar.gz (319kB)
     |████████████████████████████████| 327kB 2.8MB/s
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from shap) (1.18.5)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from shap) (1.4.1)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from shap) (0.22.2.post1)
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from shap) (1.1.2)
Requirement already satisfied: tqdm>4.25.0 in /usr/local/lib/python3.6/dist-packages (from shap) (4.41.1)
Collecting slicer
Downloading https://files.pythonhosted.org/packages/46/cf/f37ac7f61214ed044b0df91252ab19376de5587926c5b572f060eb7bf257/slicer-0.0.4-py3-none-any.whl
Requirement already satisfied: numba in /usr/local/lib/python3.6/dist-packages (from shap) (0.48.0)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->shap) (0.16.0)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->shap) (2018.9)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas->shap) (2.8.1)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from numba->shap) (50.3.0)
Requirement already satisfied: llvmlite<0.32.0,>=0.31.0dev0 in /usr/local/lib/python3.6/dist-packages (from numba->shap) (0.31.0)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.7.3->pandas->shap) (1.15.0)
Building wheels for collected packages: shap
Building wheel for shap (setup.py) ... done
Created wheel for shap: filename=shap-0.36.0-cp36-cp36m-linux_x86_64.whl size=456451 sha256=08c338587e9dfc8b353bedcb43aa3512a6e36939a465cc09ea70d9ecba5a1b7c
Stored in directory: /root/.cache/pip/wheels/fb/15/e1/8f61106790da27e0765aaa6e664550ca2c50ea339099e799f4
Successfully built shap
Installing collected packages: slicer, shap
Successfully installed shap-0.36.0 slicer-0.0.4
Collecting scikit-learn==0.22.1
  Downloading https://files.pythonhosted.org/packages/d1/48/e9fa9e252abcd1447eff6f9257636af31758a6e46fd5ce5d3c879f6907cb/scikit_learn-0.22.1-cp36-cp36m-manylinux1_x86_64.whl (7.0MB)
     |████████████████████████████████| 7.1MB 2.2MB/s
Requirement already satisfied: scipy>=0.17.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn==0.22.1) (1.4.1)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn==0.22.1) (0.16.0)
Requirement already satisfied: numpy>=1.11.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn==0.22.1) (1.18.5)
Installing collected packages: scikit-learn
Found existing installation: scikit-learn 0.22.2.post1
Uninstalling scikit-learn-0.22.2.post1:
Successfully uninstalled scikit-learn-0.22.2.post1
Successfully installed scikit-learn-0.22.1
###Markdown
Import of Libraries needed
###Code
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from category_encoders import OrdinalEncoder
from xgboost import XGBClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import classification_report, plot_confusion_matrix, plot_roc_curve, accuracy_score
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
###Output
_____no_output_____
###Markdown
Import Datasets
###Code
census = pd.read_csv('https://raw.githubusercontent.com/VPDeb/DS-Unit-2-Applied-Modeling/master/Build%20Week%20Project/census.csv')
print(census.shape)
###Output
(48842, 15)
###Markdown
Begin EDA
###Code
#checking for null values and column types; interesting to see no 'missing' values, so I'll dive a little further.
census.info()
#Aha missing values are disguised as '?'. Lets fix that.
census['workclass'].value_counts()
#Found 3 Object Columns with '?' for missing values. We will fill these with the top value of each column.
census.isin(['?']).sum()
#Time to make the 'missing' values into NaN so we can work with them
census.replace({'?': np.NaN}, inplace=True)
#No more '?'
census.workclass.value_counts()
# They are now registered as NaN. These will be replaced with the top value_counts in each column
census.isnull().sum()
census.head()
#Printing Top Values to Fill NaNs
print('Top Value:',census['native-country'].describe())
print('Top Value:',census['occupation'].describe())
print('Top Value:',census['workclass'].describe())
#filling NaN values
census['workclass'].replace({np.NaN : 'Private'},inplace=True)
census['occupation'].replace({np.NaN : 'Prof-specialty'}, inplace=True)
census['native-country'].replace({np.NaN : 'United-States'},inplace=True)
#Sanity check to assure NaNs have been fixed with working values.
census.isnull().sum()
#checking for high cardinality in the dataset as well as seeing what to do with the features. Looks like 'fnlwgt' has a very high cardinality and isn't useful for the model
census.astype(object).nunique()
###Output
_____no_output_____
###Markdown
Wrangle?? Working on the wrangle function. Not sure how to get these three def/if/else functions wrapped into one working function inside of a wrangle function; I need additional time to work on getting this into a true wrangle function, so for now I will go step by step (a hedged sketch of one way to wrap them is shown after the feature-engineering cell below). Create New Features
###Code
#Create a New Feature that changes the income column into a 1 if they make more than 50K a year and 0 if they make 50K or less. New Feature called 'incomeover50K'.
def over50K(census):
if census['income'] == '>50K':
val = 1
else:
val = 0
return val
census['incomeover50K'] = census.apply(over50K, axis=1)
#Create a New Feature that changes the hours worked per week column into a 1 if they worked more than 40 hrs a week and 0 if they worked 40 or less. New Feature called 'over40hrs'.
def over40(census):
if census['hours-per-week'] >40:
val = 1
else:
val = 0
return val
census['over40hrs'] = census.apply(over40, axis=1)
#Create a New Feature that changes the sex column into a 1 if they were Female and 0 if they were Male. New Feature called 'gender-F/1-M/0'. This is new Target column.
def gender(census):
if census['sex'] == 'Female':
val = 1
else:
val = 0
return val
census['gender-F/1-M/0'] = census.apply(gender, axis=1)
#checking to see new features were successful. They are all there.
census.head()
###Output
_____no_output_____
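###Markdown
A hedged sketch of the wrangle function mentioned above (not the notebook's final code): the three row-wise apply() helpers can be collapsed into one function using vectorized comparisons, which is also considerably faster on the ~48k rows here.
###Code
def wrangle(df):
    # vectorized equivalents of over50K, over40, and gender
    df = df.copy()
    df['incomeover50K'] = (df['income'] == '>50K').astype(int)
    df['over40hrs'] = (df['hours-per-week'] > 40).astype(int)
    df['gender-F/1-M/0'] = (df['sex'] == 'Female').astype(int)
    return df
# census = wrangle(census)  # would replace the three apply() calls above
###Output
_____no_output_____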
###Markdown
Drop columns Feature 'fnlwgt': I feel this column is not necessary as well as being very high cardinality, so it's being dropped. Due to the new features created we drop 'income', 'hours-per-week', and 'sex'. 'capital-gain' and 'capital-loss' give us no added value so they are being dropped. Lastly I will drop 'relationship', as this column was found to be quite leaky.
###Code
# Time to drop columns we don't need anylonger. Feature'fnlwgt' is high card and Unnecessary while 'sex' would now become a leaky feature and income and hours per week are now redundant
census = census.drop(columns=['fnlwgt','income','hours-per-week','sex','capital-gain','capital-loss','relationship'])
census
###Output
_____no_output_____
###Markdown
Splitting the Data
###Code
#Split data randomly with a 60/20/20 split
train, val, test = np.split(census.sample(frac=1), [int(.6*len(census)), int(.8*len(census))])
print('Training Set:',train.head(1))
print('Validation Set:',val.head(1))
print('Test Set',test.head(1))
#Split the data into X and y for training the model and making predictions
target = 'gender-F/1-M/0'
y_train = train[target]
X_train = train.drop(target,axis=1)
y_val = val[target]
X_val = val.drop(target,axis=1)
y_test = test[target]
X_test = test.drop(target,axis=1)
###Output
_____no_output_____
###Markdown
Establishing the Baseline
###Code
#First I will check that the target feature is between 50-70%. It's almost too far off but still within the parameters to continue.
y_train.value_counts(normalize=True)
y_train.value_counts()
print('Baseline Accuracy:', y_train.value_counts(normalize=True).max())
###Output
Baseline Accuracy: 0.6659955638969459
###Markdown
Building the Model(s) First I wanted to check out XGBoost. I really like its performance as well as its 'low maintenance'. This ends up being the best model.
###Code
#Starting with a pipeline. Using OrdinalEncoder for the object columns, we do not need an Imputer since they were all filled with top values, and I am working with XGBClassifier.
modelxgb = make_pipeline(
OrdinalEncoder(),
XGBClassifier(n_jobs=-1)
)
modelxgb.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Checking Accuracy Scores with Train and Val sets. Not Bad!
###Code
print('Training accuracy:', modelxgb.score(X_train, y_train))
print('Validation accuracy:', modelxgb.score(X_val, y_val))
###Output
Training accuracy: 0.7943695615082751
Validation accuracy: 0.786036036036036
###Markdown
Next I wanted to use the Cross Validation Score to see how it would do on sampling. Results look good.
###Code
scores = cross_val_score(modelxgb, X_train, y_train, cv=20)
scores
scores = cross_val_score(modelxgb, X_val, y_val, cv=20)
scores
###Output
/usr/local/lib/python3.6/dist-packages/category_encoders/utils.py:21: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead
elif pd.api.types.is_categorical(cols):
###Markdown
XGBoost Model Prediction Check I can work with these results!
###Code
#make the prediction
y_pred = modelxgb.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
###Output
Accuracy: 79.19%
###Markdown
Random Forest Classifier with Randomized Search CV Next I made a Random Forest Classifier; this is my second favorite to work with. I like that you can play with the parameters and train the model accordingly. I'm alright with waiting for the answers here. The results were pretty close to XGBoost as well.
###Code
pipeline = make_pipeline(
OrdinalEncoder(),
RandomForestClassifier(random_state=42)
)
params = {
'randomforestclassifier__n_estimators': range(50,500,50),
'randomforestclassifier__max_depth': range(5,101,5),
'randomforestclassifier__max_samples': np.arange(0.2, 0.7, 0.2)
}
model = RandomizedSearchCV(
pipeline,
param_distributions=params,
cv=5,
verbose=1,
n_iter=5
)
model.fit(X_train,y_train)
###Output
Fitting 5 folds for each of 5 candidates, totalling 25 fits
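###Markdown
A small hedged aside (standard RandomizedSearchCV attributes, not shown in the original notebook): after the search finishes, the winning hyperparameters and their cross-validated score can be inspected directly.
###Code
print(model.best_params_)
print('Best CV accuracy:', model.best_score_)
###Output
_____no_output_____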
###Markdown
I wanted to see how the Cross Validation worked on this one too. This one takes a bit to come back.
###Code
scores = cross_val_score(model, X_train, y_train, cv=5)
scores
###Output
Fitting 5 folds for each of 5 candidates, totalling 25 fits
###Markdown
Accuracy Scores for the Random Forest finished model
###Code
print('Training accuracy:', model.score(X_train, y_train))
print('Validation accuracy:', model.score(X_val, y_val))
###Output
Training accuracy: 0.8531991127793892
Validation accuracy: 0.7836814086814087
###Markdown
Prediction Test for Random Forest. Just wanted to check on this.
###Code
#make the prediction
y_pred = model.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
###Output
Accuracy: 79.05%
###Markdown
K-Fold testing I just wanted to test K-Fold CV on XGBoost and see what happened.
###Code
# k-fold cross validation evaluation of xgboost model
from numpy import loadtxt
import xgboost
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
# load data
# split data into X and y
# CV model
model_w_kf = modelxgb
kfold = KFold(n_splits=3)
results = cross_val_score(modelxgb, X_train, y_train, cv=kfold)
print("Accuracy: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
###Output
/usr/local/lib/python3.6/dist-packages/category_encoders/utils.py:21: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead
elif pd.api.types.is_categorical(cols):
###Markdown
Logistic Regression Next was the Logistic Regression model (a linear model). It did not perform as well as the previous models, but I also didn't play with it for long. I feel the XGBoost and Random Forest Classifier were my best options to work with for this dataset. I began with 10 iterations and received accuracies of 68% and 68%. Then I made the iterations 50 and the accuracy went up to 73%, while going to 100 gave no change.
###Code
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
log_model = make_pipeline(
OrdinalEncoder(),
LogisticRegression(max_iter=50)
)
log_model.fit(X_train, y_train)
print('Training accuracy:', log_model.score(X_train, y_train))
print('Validation accuracy:', log_model.score(X_val, y_val))
###Output
Training accuracy: 0.734959904453165
Validation accuracy: 0.7337223587223587
###Markdown
SVC I had read about the Support Vector Classifier and wanted to see how it performed.
###Code
from sklearn.svm import SVC
svc_model = make_pipeline(
OrdinalEncoder(),
SVC()
)
svc_model.fit(X_train, y_train)
print('Training accuracy:', svc_model.score(X_train, y_train))
print('Validation accuracy:', svc_model.score(X_val, y_val))
###Output
Training accuracy: 0.7398054939430131
Validation accuracy: 0.7332104832104832
###Markdown
Gradient Booster I also wanted to see how the Gradient Boosting Classifier would perform.
###Code
from sklearn.ensemble import GradientBoostingClassifier
model_skgb = make_pipeline(
OrdinalEncoder(),
GradientBoostingClassifier(random_state=42)
)
model_skgb.fit(X_train, y_train);
print('Training accuracy:', model_skgb.score(X_train, y_train))
print('Validation accuracy:', model_skgb.score(X_val, y_val))
# make predictions for test data
y_pred = model_skgb.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
import matplotlib.pyplot as plt
importances = modelxgb.named_steps['xgbclassifier'].feature_importances_
feat_imp = pd.Series(importances, index=X_train.columns).sort_values()
feat_imp.tail(10).plot(kind='barh')
plt.xlabel('Gini importance')
plt.ylabel('Feature')
plt.title('Feature importance for model_skgb');
# Using sklearn
from sklearn.inspection import permutation_importance
perm_imp = permutation_importance(modelxgb, X_val, y_val, n_jobs=10, random_state=42)
# Put results into DataFrame
data = {'importances_mean' : perm_imp['importances_mean'],
'importances_std' : perm_imp['importances_std']}
df = pd.DataFrame(data, index=X_val.columns)
df.sort_values('importances_mean', ascending=True, inplace=True)
# Make plot
df['importances_mean'].tail(10).plot(kind='barh')
plt.xlabel('Importance (change in accuracy)')
plt.ylabel('Feature')
plt.title('Permutation importance for model_xgb');
perm_imp = permutation_importance(modelxgb, X_test, y_test, n_jobs=10, random_state=42)
data = {'importances_mean' : perm_imp['importances_mean'],
'importances_std' : perm_imp['importances_std']}
permutation_importances = pd.DataFrame(data, index=X_test.columns)
permutation_importances.sort_values('importances_mean', ascending=True, inplace=True)
permutation_importances
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(12,5))
plot_roc_curve(model, X_test, y_test, ax=ax1)
plot_roc_curve(modelxgb, X_test, y_test, ax=ax2)
ax1.plot([(0,0), (1,1)], color='grey', linestyle='--')
ax2.plot([(0,0), (1,1)], color='grey', linestyle='--')
ax1.set_title('Random Forest')
ax2.set_title('XG Boost')
plt.show()
%matplotlib inline
import seaborn as sns
sns.distplot(y_train);
###Output
/usr/local/lib/python3.6/dist-packages/seaborn/distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
Breaking the Pipeline In order to use with shap plots I needed to break the pipeline down to steps.
###Code
#XGBoost model made without pipeline so shap graphing would not be an issue.
import category_encoders as ce
ore = ce.OrdinalEncoder()
XTO_train = ore.fit_transform(X_train)
XTO_val = ore.transform(X_val)
modelxgb2 = XGBClassifier()
modelxgb2.fit(XTO_train,y_train)
print('Training accuracy:', modelxgb2.score(XTO_train, y_train))
print('Validation accuracy:', modelxgb2.score(XTO_val, y_val))
import shap
row2 = X_test
shap_values = shap.TreeExplainer(modelxgb2).shap_values(XTO_train)
shap.summary_plot(shap_values, XTO_train, plot_type="bar")
###Output
_____no_output_____
###Markdown
I'm still working on completely understanding what the plot below is doing, but it also isn't best used with my binary classification test set.
###Code
import shap
row = XTO_val.iloc[[750]]
explainer = shap.TreeExplainer(modelxgb2)
shap_values = explainer.shap_values(row)
shap.initjs()
shap.force_plot(
base_value=explainer.expected_value,
shap_values=shap_values,
features=row)
###Output
_____no_output_____
###Markdown
Here I chose to do a pull of the row used above and used the model to make the prediction. I then pulled the row from the y_val set to compare the prediction and it worked on that row.
###Code
row
model.predict(row)
row_check = y_val.iloc[[750]]
row_check
!pip install pdpbox
import pdpbox.pdp as pdp
from pdpbox.pdp import pdp_isolate, pdp_plot
feature = 'incomeover50K'
isolate = pdp_isolate(
model=model,
dataset=XTO_val, # <-- use validation data
model_features=XTO_val.columns,
feature=feature
)
pdp_plot(isolate, feature_name=feature);
from pdpbox.pdp import pdp_interact, pdp_interact_plot
features = ['native-country', 'occupation']
interact = pdp_interact(
model=modelxgb2,
dataset=XTO_val,
model_features=XTO_val.columns,
features=features
)
pdp_interact_plot(interact, plot_type='contour', feature_names=features);
###Output
_____no_output_____ |
Resnet34_with_scheduler.ipynb | ###Markdown
Requirements
###Code
!ls
!nvidia-smi
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
Scripts
###Code
%%writefile init.sh
pip install soundfile catalyst -q
pip install torchlibrosa -q
mkdir data/ data/raw/
unzip -q data/raw/audio_files.zip -d data/
unzip -q data/raw/AdditionalUtterances.zip -d data/
unzip -q data/raw/nlp_keywords_29Oct2020.zip -d data/
%%writefile init.py
import os, sys, gc, glob
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('-prefix', type=str, default='./data/here/', help="postprocess folder")
parser.add_argument('-data', type=str, default='data/raw/', help="where to find the zipfiles")
def preprocessing(args):
def what(splits, ns=-3):
return '_'.join([ splits[ns], splits[-1][:-4] ]).lower()
additionnal = glob.glob("data/latest_keywords/*/*.wav")
additionnal += glob.glob("data/nlp_keywords/*/*.wav")
os.makedirs(args.prefix, exist_ok=True)
add = pd.DataFrame({'fn': additionnal})
add['bname'] = add['fn'].apply(lambda x: what(x.split('/')))
add['type'] = 'add'
add['target'] = add['fn'].apply(lambda x: x.split('/')[-2])
add.to_csv(args.prefix + 'AddTrain.csv', index=False)
train = pd.read_csv(args.data + 'Train.csv')
train['bname'] = train['fn'].apply(lambda x: what(x.split('/'), -2))
train['type'] = 'base'
train['fn'] = 'data/' + train['fn']
train.rename(columns = {'label': 'target'}, inplace=True)
train.to_csv(args.prefix + 'BaseTrain.csv', index=False)
train = pd.concat([train, add], axis=0)
train.to_csv(args.prefix + 'Train.csv', index=False)
subs = pd.read_csv(args.data + 'SampleSubmission.csv')
subs['fn'] = 'data/' + subs['fn']
subs['bname'] = subs['fn'].apply(lambda x: what(x.split('/'), -2))
cols = subs.columns.tolist()
subs = subs[[cols[0]] + [cols[-1]] + cols[1:-1]]
subs.to_csv(args.prefix + 'SampleSubmission.csv', index=False)
if __name__ == '__main__':
args = parser.parse_args()
preprocessing(args)
###Output
Writing init.py
###Markdown
Data Download
###Code
!chmod +x init.sh
!./init.sh
!python init.py
###Output
_____no_output_____
###Markdown
Imports
###Code
import warnings
warnings.simplefilter('ignore')
import os
import gc
import sys
import h5py
import cv2
import glob
import math
import random
import librosa
import zipfile
import numpy as np
import pandas as pd
from librosa import display as libdisplay
from tqdm.notebook import tqdm
import torchlibrosa
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from torchvision import models
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import log_loss
from catalyst.contrib.nn.criterion import FocalLossMultiClass
import torch
import torch.nn as nn
import torch.nn.functional as F
from keras.utils import to_categorical
import IPython.display as ipd
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
Envs
###Code
path = 'data/'
seed = 1999
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.makedirs('MODELS/', exist_ok=True)
# #Placeholder for the training and test spectrogram images
# #It is going to store the spectrograms we will shortly generate.
# os.makedirs('Imgs/Train/', exist_ok=True)
# os.makedirs('Imgs/Test/', exist_ok=True)
###Output
_____no_output_____
###Markdown
Utilities Basic functions
###Code
from joblib import Parallel, delayed
import multiprocessing
import time
def pad_or_truncate(x, audio_length):
"""Pad all audio to specific length."""
if len(x) <= audio_length:
return np.concatenate((x, np.zeros(audio_length - len(x))), axis=0)
return x[:audio_length]
def load_hdf5(hdf5_path):
hf = h5py.File(hdf5_path, 'r')
audio_name = hf['audio_name'][:].tolist()
waveform = hf['waveform']
target = hf['target'][:].tolist()
return audio_name, waveform, target, hf
def load_npy(npy_path):
return np.load(npy_path)
def pack_waveforms_to_npy(npy_path, df, sr=44100, secs=3):
"""Pack waveform and target of several audio clips to a single hdf5 file.
This can speed up loading and training.
"""
def __parallel(df, w, n):
row = df.loc[n, ['fn', 'bname', 'label']].values
audio_path, audio_name, target = row
if os.path.isfile(audio_path):
(audio, _) = librosa.core.load(audio_path, sr=sr, mono=True)
audio = pad_or_truncate(audio, clip_samples)
w[n] = audio
else:
print('{} File does not exist! {}'.format(n, audio_path))
# Arguments & parameters
clip_samples = sr*secs
audios_num = len(df)
# Pack all waveforms into a single npy array
total_time = time.time()
wavs = np.empty((audios_num, clip_samples), dtype=np.float32)
# Use the threading backend so the in-place writes to `wavs` made by the workers
# are visible here (the default process-based backend would not share memory).
_ = Parallel(n_jobs=-1, prefer="threads")( delayed(__parallel)(df, wavs, n) for n in tqdm(range(audios_num)) )
np.save(npy_path, wavs)
print('Write to {}'.format(npy_path))
print('Pack npy time: {:.3f}'.format(time.time() - total_time))
return wavs
###Output
_____no_output_____
###Markdown
Blocks functions
###Code
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class AttBlock(nn.Module):
def __init__(self, in_features: int, out_features: int, activation="linear", temperature=1.0):
super().__init__()
self.activation = activation
self.temperature = temperature
self.att = nn.Conv1d(
in_channels=in_features,
out_channels=out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True)
self.cla = nn.Conv1d(
in_channels=in_features,
out_channels=out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True)
self.bn_att = nn.BatchNorm1d(out_features)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
init_bn(self.bn_att)
def forward(self, x):
# x: (n_samples, n_in, n_time)
norm_att = torch.softmax(torch.tanh(self.att(x)), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
def get_model(config):
return PANNsResnetAtt(**config)
###Output
_____no_output_____
###Markdown
ResNet
###Code
class PANNsResnetAtt(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax,
classes_num, arch='resnet34', fc=512, apply_aug=True, top_db=None, **args):
super(PANNsResnetAtt, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
self.interpolate_ratio = 32 # Downsampled ratio
self.apply_aug = apply_aug
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(
n_fft=window_size,
hop_length=hop_size,
win_length=window_size,
window=window,
center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(
sr=sample_rate,
n_fft=window_size,
n_mels=mel_bins,
fmin=fmin,
fmax=fmax,
ref=ref,
amin=amin,
top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(
time_drop_width=64,
time_stripes_num=2,
freq_drop_width=8,
freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(mel_bins)
att_size = 1024
self.fc1 = nn.Linear(fc, att_size, bias=True)
self.att_block = AttBlock(att_size, classes_num, activation='linear')
resnet = getattr(models, arch)(pretrained=True, progress=False)
self.resnet_features = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool,
resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
)
# del self.resnet_features.avgpool
# del self.resnet_features.fc
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
def cnn_feature_extractor(self, x):
x = self.resnet_features(x)
return x
def preprocess(self, input, mixup_lambda=None):
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training and self.apply_aug:
x = self.spec_augmenter(x)
# Mixup on spectrogram (`do_mixup` is not defined in this notebook; it comes from the
# upstream PANNs code and is only needed when a mixup_lambda is actually supplied).
if self.training and self.apply_aug and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
return x, frames_num
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
x, frames_num = self.preprocess(input, mixup_lambda=mixup_lambda)
if mixup_lambda is not None:
# Mixup halves the effective batch size; b and c are only used by the
# commented-out framewise-output code below, so define them here to avoid a NameError.
b, c = x.shape[0], x.shape[1]
b = (b * c) // 2
c = 1
# Output shape (batch size, channels, time, frequency)
x = x.expand(x.shape[0], 3, x.shape[2], x.shape[3])
x = self.cnn_feature_extractor(x)
# Aggregate in frequency axis
x = torch.mean(x, dim=3)
x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = x.transpose(1, 2)
x = F.relu_(self.fc1(x))
x = x.transpose(1, 2)
x = F.dropout(x, p=0.5, training=self.training)
(clipwise_output, norm_att, segmentwise_output) = self.att_block(x)
# segmentwise_output = segmentwise_output.transpose(1, 2)
# # Get framewise output
# framewise_output = interpolate(segmentwise_output,
# self.interpolate_ratio)
# framewise_output = pad_framewise_output(framewise_output, frames_num)
# frame_shape = framewise_output.shape
# clip_shape = clipwise_output.shape
# output_dict = {
# 'framewise_output': framewise_output.reshape(b, c, frame_shape[1],frame_shape[2]),
# 'clipwise_output': clipwise_output.reshape(b, c, clip_shape[1]),
# }
return clipwise_output
###Output
_____no_output_____
###Markdown
Dataset
###Code
class AudioDataset(torch.utils.data.Dataset):
def __init__(self, df, task='train', **kwargs):
super(AudioDataset, self).__init__()
self.df = df
self.task = task
self.c = len(words)
self.classes = words
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
row = self.df.loc[idx]
iidx, label = row["index"], 0
if self.task=='train':
label = row.label
waveform = waveforms[iidx]
else:
waveform = waveforms_[iidx]
return {
'wav': torch.tensor( waveform, dtype=torch.float ),
'target': torch.tensor( label, dtype=torch.long )
}
###Output
_____no_output_____
###Markdown
Training functions
###Code
def training_fn(dataloader, model, opt, criterion, scheduler=None):
avg_loss = 0
avg_acc = 0
size = len(dataloader)
model.train()
for i, data in enumerate(dataloader):
x,y = data['wav'].to(device), data['target'].to(device)
opt.zero_grad()
pred = model(x)
loss = criterion(pred, y)
avg_loss += loss.item()
pred = pred.detach().cpu()
ys = y.detach().cpu()
avg_acc += (ys == pred.argmax(1)).float().mean().item()
loss.backward()
opt.step()
if scheduler:
scheduler.step()
print('\r[Training][{}/{}] Loss: {:.5f} - Acc : {:.5f}'.format(
i+1, size, avg_loss/(i+1), avg_acc/(i+1) ), end='')
print()
def evaluate(dataloader, model, criterion):
avg_loss = 0
avg_acc = 0
size = len(dataloader)
model.eval()
with torch.no_grad():
for i, data in enumerate(dataloader):
x,y = data['wav'].to(device), data['target'].to(device)
pred = model(x)
avg_loss += criterion(pred, y).item()
pred = pred.detach().cpu()
ys = y.detach().cpu()
avg_acc += (ys == pred.argmax(1)).float().mean().item()
print('\r[Evaluation][{}/{}] Loss: {:.5f} - Acc : {:.5f}'.format(
i+1, size, avg_loss/(i+1), avg_acc/(i+1) ), end='')
print()
avg_loss /= size
avg_acc /= size
return avg_loss
def predict(df, bs=2):
test_ds = AudioDataset(df, task='test')
testloader = torch.utils.data.DataLoader(test_ds, bs, shuffle=False)
predictions_labels = []
predictions_proba = []
out = None
with torch.no_grad():
for data in tqdm(testloader):
x = data['wav'].to(device)
for i in range(n_folds):
if i == 0: out = F.softmax( MODELS[i](x), 1 )
else: out += F.softmax( MODELS[i](x), 1 )
out /= n_folds
out_labels = out.argmax(1).cpu().detach().numpy()
out_probas = out.cpu().detach().numpy()
predictions_labels += out_labels.tolist()
predictions_proba += out_probas.tolist()
return predictions_labels ,predictions_proba
def run_fold(fold, config, bs=16, eval_bs=8, lr=1e-4, path='MODELS/'):
with torch.cuda.device(device):
torch.cuda.empty_cache()
best_logloss = np.inf
fold_train = train[train.fold != fold].reset_index(drop=False)
fold_val = train[train.fold == fold].reset_index(drop=False)
train_ds = AudioDataset(fold_train)
val_ds = AudioDataset(fold_val)
trainloader = torch.utils.data.DataLoader(train_ds, batch_size=bs, shuffle=True)
validloader = torch.utils.data.DataLoader(val_ds, batch_size=eval_bs, shuffle=False)
model = get_model(config)
criterion = torch.nn.CrossEntropyLoss()
opt = torch.optim.AdamW(model.parameters(), lr=lr)
scheduler = None
if config["schedule"]:
scheduler = torch.optim.lr_scheduler.OneCycleLR(
opt, max_lr=1e-3, div_factor=4, steps_per_epoch=len(trainloader), epochs=epochs
)
model.to(device)
loader = tqdm(range(epochs), desc=f'Fold {fold}')
for epoch in loader:
print(f"[Epoch {epoch}]")
training_fn(trainloader, model, opt, criterion, scheduler)
avg_logloss = evaluate(validloader, model, criterion)
if avg_logloss < best_logloss:
best_logloss = avg_logloss
torch.save(model.state_dict(), f'{path}model_state_dict_{fold}.bin')
return best_logloss
###Output
_____no_output_____
###Markdown
Loading the CSVs' files
###Code
train = pd.read_csv(path+'here/Train.csv')
train.head()
sub = pd.read_csv(path+'here/SampleSubmission.csv')
sub.head(1)
words = sub.columns[2:]
label = np.linspace(0, len(words)-1, len(words), dtype=np.int16)
mapper = dict(zip(words, label))
train['label'] = train['target'].map(mapper).astype(int)
train.head()
###Output
_____no_output_____
###Markdown
Save wavs as npy
###Code
num_cores = multiprocessing.cpu_count()
sr = 44100
sec = 3
npy_path = f'drive/My Drive/Zindi/GIZ/train_sr={sr}_sec={sec}.npy'
test_npy_path = f'drive/My Drive/Zindi/GIZ/test_sr={sr}_sec={sec}.npy'
if not os.path.exists(npy_path):
waveforms = pack_waveforms_to_npy(npy_path, train, sr=sr, secs=sec)
if not os.path.exists(test_npy_path):
test = sub[['fn', 'bname']].copy()  # copy to avoid pandas SettingWithCopy warnings
test['label'] = 0
waveforms_ = pack_waveforms_to_npy(test_npy_path, test, sr=sr, secs=sec)
###Output
_____no_output_____
###Markdown
Load wavs
###Code
%%time
waveforms = load_npy(npy_path)
waveforms_ = load_npy(test_npy_path)
gc.collect()
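# (Illustrative sanity check) The cached arrays should line up with the CSVs.
print(waveforms.shape, len(train))
print(waveforms_.shape, len(sub))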
###Output
_____no_output_____
###Markdown
Training
###Code
n_folds = 10
def make_fold(n_folds, shuffle=True):
train['fold'] = -1
indexes = train[train['type']=='base'].index
fold = StratifiedKFold(n_splits = n_folds, random_state=seed, shuffle=shuffle)
for i, (tr, vr) in enumerate(fold.split(indexes, train.loc[indexes, 'label'])):
train.loc[indexes[vr], 'fold'] = i
make_fold(n_folds, shuffle=True)
train.fold.nunique()
epochs = 50
device = 'cuda:0'
lr = 1e-4
classes_num = 193
batch_size = 16
config = {
"sample_rate": sr,
"window_size": 1024,
"hop_size": 320,
"mel_bins": 64,
"fmin": 50,
"fmax": 14000,
"classes_num": classes_num,
'arch': 'resnet34',
'fc': 512,
"schedule": True,
}
gc.collect()
%%time
avg_logloss = 0
for fold in range(n_folds):
_fold_logloss = run_fold(fold, config, bs=batch_size, eval_bs=batch_size, lr=lr)
avg_logloss += _fold_logloss
print()
print("Avg LogLoss: ", avg_logloss/n_folds)
print()
###Output
_____no_output_____
###Markdown
Loading models
###Code
%%time
MODELS = []
for i in range(n_folds):
MODELS.append( get_model(config) )
MODELS[i].load_state_dict(torch.load(f'MODELS/model_state_dict_{i}.bin'))
MODELS[i].to(device)
MODELS[i].eval()
###Output
CPU times: user 9.92 s, sys: 2.13 s, total: 12.1 s
Wall time: 33.3 s
###Markdown
Prediction
###Code
predictions_labels, predictions_proba = predict(sub.reset_index(), bs=2)
###Output
_____no_output_____
###Markdown
Making a submission
###Code
submission = pd.DataFrame()
submission['fn'] = sub['fn'].apply(lambda x: '/'.join( x.split('/')[1:] ))
for i, label in enumerate(words):
submission[label] = 0.
for (label, i) in mapper.items():
submission.loc[:,label] = np.array(predictions_proba)[:,i]
submission.head()
csv_file = 'resnet34_with_scheduler.csv'
submission.to_csv(csv_file, index=False)
###Output
_____no_output_____ |
05_branch.ipynb | ###Markdown
Branch Wizardry > Summary Instant branching and merging are the most lethal of Git's killer features. *Problem*: External factors inevitably necessitate context switching. A severe bug manifests in the released version without warning. The deadline for a certain feature is moved closer. A developer whose help you need for a key section of the project is about to leave. In all cases, you must abruptly drop what you are doing and focus on a completely different task. Interrupting your train of thought can be detrimental to your productivity, and the more cumbersome it is to switch contexts, the greater the loss. With centralized version control we must download a fresh working copy from the central server. Distributed systems fare better, as we can clone the desired version locally. But cloning still entails copying the whole working directory as well as the entire history up to the given point. Even though Git reduces the cost of this with file sharing and hard links, the project files themselves must be recreated in their entirety in the new working directory. *Solution*: Git has a better tool for these situations that is much faster and more space-efficient than cloning: *git branch*. With this magic word, the files in your directory suddenly shapeshift from one version to another. This transformation can do more than merely go back or forward in history. Your files can morph from the last release to the experimental version to the current development version to your friend's version and so on. The Boss Key Ever played one of those games where at the push of a button (``the boss key''), the screen would instantly display a spreadsheet or something? So if the boss walked in the office while you were playing the game you could quickly hide it away? In some directory:
###Code
! echo "I'm smarter than my boss" > myfile.txt
! git init
! git add .
! git commit -m "Initial commit"
###Output
_____no_output_____
###Markdown
We have created a Git repository that tracks one text file containing a certain message. Now type:
###Code
! git checkout -b boss # nothing seems to change after this
! echo "My boss is smarter than me" > myfile.txt
! git commit -a -m "Another commit"
###Output
_____no_output_____
###Markdown
It looks like we've just overwritten our file and committed it. But it's an illusion. Type:
###Code
! git checkout master # switch to original version of the file
###Output
_____no_output_____
###Markdown
and hey presto! The text file is restored. And if the boss decides to snoop around this directory, type:
###Code
! git checkout boss # switch to version suitable for boss' eyes
###Output
_____no_output_____
###Markdown
You can switch between the two versions of the file as much as you like, and commit to each independently. Dirty Work [[branch]]Say you're working on some feature, and for some reason, you need to go back three versions and temporarily put in a few print statements to see how something works. Then:
###Code
! git commit -a
! git checkout HEAD~3
###Output
_____no_output_____
###Markdown
Now you can add ugly temporary code all over the place. You can even commit these changes. When you're done,
###Code
! git checkout master
###Output
_____no_output_____
###Markdown
to return to your original work. Observe that any uncommitted changes are carried over. What if you wanted to save the temporary changes after all? Easy:
###Code
! git checkout -b dirty
###Output
_____no_output_____
###Markdown
and commit before switching back to the master branch. Whenever you want to return to the dirty changes, simply type:
###Code
! git checkout dirty
###Output
_____no_output_____
###Markdown
We touched upon this command in an earlier chapter, when discussing loading old states. At last we can tell the whole story: the files change to the requested state, but we must leave the master branch. Any commits made from now on take your files down a different road, which can be named later. In other words, after checking out an old state, Git automatically puts you in a new, unnamed branch, which can be named and saved with *git checkout -b*. Quick Fixes You're in the middle of something when you are told to drop everything and fix a newly discovered bug in commit `1b6d...`:
###Code
! git commit -a
! git checkout -b fixes 1b6d
###Output
_____no_output_____
###Markdown
Then once you've fixed the bug:
###Code
! git commit -a -m "Bug fixed"
! git checkout master
###Output
_____no_output_____
###Markdown
and resume work on your original task. You can even 'merge' in the freshly baked bugfix:
###Code
! git merge fixes
###Output
_____no_output_____
###Markdown
Merging With some version control systems, creating branches is easy but merging them back together is tough. With Git, merging is so trivial that you might be unaware of it happening. We actually encountered merging long ago. The *pull* command in fact 'fetches' commits and then merges them into your current branch. If you have no local changes, then the merge is a 'fast forward', a degenerate case akin to fetching the latest version in a centralized version control system. But if you do have local changes, Git will automatically merge, and report any conflicts. Ordinarily, a commit has exactly one 'parent commit', namely, the previous commit. Merging branches together produces a commit with at least two parents. This begs the question: what commit does `HEAD~10` really refer to? A commit could have multiple parents, so which one do we follow? It turns out this notation chooses the first parent every time. This is desirable because the current branch becomes the first parent during a merge; frequently you're only concerned with the changes you made in the current branch, as opposed to changes merged in from other branches. You can refer to a specific parent with a caret. For example, to show the logs from the second parent:
###Code
! git log HEAD^2
###Output
_____no_output_____
###Markdown
You may omit the number for the first parent. For example, to show the differences with the first parent:
###Code
! git diff HEAD^
###Output
_____no_output_____
###Markdown
You can combine this notation with other types. For example:
###Code
! git checkout 1b6d^^2~10 -b ancient
###Output
_____no_output_____
###Markdown
starts a new branch ``ancient'' representing the state 10 commits back from the second parent of the first parent of the commit starting with 1b6d. Uninterrupted Workflow Often in hardware projects, the second step of a plan must await the completion of the first step. A car undergoing repairs might sit idly in a garage until a particular part arrives from the factory. A prototype might wait for a chip to be fabricated before construction can continue. Software projects can be similar. The second part of a new feature may have to wait until the first part has been released and tested. Some projects require your code to be reviewed before accepting it, so you might wait until the first part is approved before starting the second part. Thanks to painless branching and merging, we can bend the rules and work on Part II before Part I is officially ready. Suppose you have committed Part I and sent it for review. Let's say you're in the `master` branch. Then branch off:
###Code
! git checkout -b part2
###Output
_____no_output_____
###Markdown
Next, work on Part II, committing your changes along the way. To err is human, and often you'll want to go back and fix something in Part I. If you're lucky, or very good, you can skip these lines.
###Code
! git checkout master # Go back to Part I.
! fix_problem
! git commit -a # Commit the fixes.
! git checkout part2 # Go back to Part II.
! git merge master # Merge in those fixes.
###Output
_____no_output_____
###Markdown
Eventually, Part I is approved:
###Code
! git checkout master # Go back to Part I.
! submit files # Release to the world!
! git merge part2 # Merge in Part II.
! git branch -d part2 # Delete "part2" branch.
###Output
_____no_output_____
###Markdown
Now you're in the `master` branch again, with Part II in the working directory. It's easy to extend this trick for any number of parts. It's also easy to branch off retroactively: suppose you belatedly realize you should have created a branch 7 commits ago. Then type:
###Code
! git branch -m master part2 # Rename "master" branch to "part2".
! git branch master HEAD~7 # Create new "master", 7 commits upstream.
###Output
_____no_output_____
###Markdown
The `master` branch now contains just Part I, and the `part2` branch contains the rest. We are in the latter branch; we created `master` without switching to it, because we want to continue work on `part2`. This is unusual. Until now, we've been switching to branches immediately after creation, as in:
###Code
! git checkout HEAD~7 -b master # Create a branch, and switch to it.
###Output
_____no_output_____
###Markdown
Reorganizing a Medley Perhaps you like to work on all aspects of a project in the same branch. You want to keep works-in-progress to yourself and want others to see your commits only when they have been neatly organized. Start a couple of branches:
###Code
! git branch sanitized # Create a branch for sanitized commits.
! git checkout -b medley # Create and switch to a branch to work in.
###Output
_____no_output_____
###Markdown
Next, work on anything: fix bugs, add features, add temporary code, and so forth, committing often along the way. Then:
###Code
! git checkout sanitized
! git cherry-pick medley^^
###Output
_____no_output_____
###Markdown
applies the grandparent of the head commit of the ``medley'' branch to the ``sanitized'' branch. With appropriate cherry-picks you can construct a branch that contains only permanent code, and has related commits grouped together. Managing Branches List all branches by typing:
###Code
! git branch
###Output
_____no_output_____
###Markdown
By default, you start in a branch named ``master''. Some advocate leaving the ``master'' branch untouched and creating new branches for your own edits. The *-d* and *-m* options allow you to delete and move (rename) branches. See *git help branch*. The ``master'' branch is a useful custom. Others may assume that your repository has a branch with this name, and that it contains the official version of your project. Although you can rename or obliterate the ``master'' branch, you might as well respect this convention. Temporary Branches After a while you may realize you are creating short-lived branches frequently for similar reasons: every other branch merely serves to save the current state so you can briefly hop back to an older state to fix a high-priority bug or something. It's analogous to changing the TV channel temporarily to see what else is on. But instead of pushing a couple of buttons, you have to create, check out, merge, and delete temporary branches. Luckily, Git has a shortcut that is as convenient as a TV remote control:
###Code
! git stash
###Output
_____no_output_____
###Markdown
This saves the current state in a temporary location (a 'stash') and restores the previous state. Your working directory appears exactly as it was before you started editing, and you can fix bugs, pull in upstream changes, and so on. When you want to go back to the stashed state, type:
###Code
! git stash apply # You may need to resolve some conflicts.
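# Aside (illustrative, not from the original text): `git stash list` shows the
# saved stashes, and `git stash pop` applies the newest one and then drops it.
! git stash list
! git stash pop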
###Output
_____no_output_____ |
notebooks/ganite_pytorch_train_evaluation.ipynb | ###Markdown
GANITE (PyTorch): Train and Evaluation This notebook presents the solution for training and evaluating the __GANITE__ algorithm (__PyTorch__ version) over the [Twins](https://bitbucket.org/mvdschaar/mlforhealthlabpub/src/master/data/twins/) dataset. The implementation of GANITE is adapted in the local `ite` library. For the Unified API version, check [this notebook](https://github.com/bcebere/ite-api/blob/main/notebooks/unified_api_train_evaluation.ipynb). GANITE Estimating Individualized Treatment Effects (__ITE__) is the task of approximating whether a given treatment influences or determines an outcome ([read more](https://www.vanderschaar-lab.com/individualized-treatment-effect-inference/)). [__GANITE__](https://openreview.net/pdf?id=ByKWUeWA-) (Generative Adversarial Nets for inference of Individualized Treatment Effects) is a framework for inferring the ITE using GANs. The implementation demonstrated in this notebook is [here](https://github.com/bcebere/ite-api/tree/main/src/ite/algs/ganite_torch) and is adapted from [this implementation](https://bitbucket.org/mvdschaar/mlforhealthlabpub/src/master/alg/ganite/). Setup First, make sure that all the dependencies are installed in the current environment: `pip install -r requirements.txt` followed by `pip install .`. Next, we import all the dependencies necessary for the task.
###Code
# Double check that we are using the correct interpreter.
import sys
print(sys.executable)
# Import depends
import ite.algs.ganite_torch.model as alg
import ite.datasets as ds
import ite.utils.numpy as utils
from matplotlib import pyplot as plt
import torch
import pandas as pd
###Output
/home/bcebere/anaconda3/envs/cambridge/bin/python
###Markdown
Load the Dataset The example is done using the [Twins](https://bitbucket.org/mvdschaar/mlforhealthlabpub/src/master/data/twins/) dataset. Next, we load the dataset, process the data, and sample a training set and a test set. The logic is implemented [here](https://github.com/bcebere/ite-api/tree/main/src/ite/datasets), and it is adapted from the original [GANITE pre-processing implementation](https://bitbucket.org/mvdschaar/mlforhealthlabpub/src/master/alg/ganite/data_preprocessing_ganite.py).
###Code
train_ratio = 0.8
dataset = ds.load("twins", train_ratio)
[Train_X, Train_T, Train_Y, Opt_Train_Y, Test_X, Test_Y] = dataset
pd.DataFrame(data=Train_X[:5])
###Output
_____no_output_____
###Markdown
Load the model Next, we define the model. The constructor supports the following parameters: - `dim`: The number of features in X. - `dim_outcome`: The number of potential outcomes. - `dim_hidden`: hyperparameter for tuning the size of the hidden layer. - `depth`: hyperparameter for the number of hidden layers in the generator and inference blocks. - `num_iterations`: hyperparameter for the number of training epochs. - `alpha`: hyperparameter used for the Generator block loss. - `beta`: hyperparameter used for the ITE block loss. - `num_discr_iterations`: number of iterations executed by the discriminator. - `minibatch_size`: the size of the dataset batches. The hyperparameters used in this notebook are computed using the [hyperparameter tuning notebook](https://github.com/bcebere/ite-api/blob/main/notebooks/hyperparam_tuning.ipynb).
###Code
dim = len(Train_X[0])
dim_outcome = Test_Y.shape[1]
model = alg.GaniteTorch(
dim,
dim_outcome,
dim_hidden=30,
num_iterations=3000,
alpha=1,
beta=10,
minibatch_size=256,
num_discr_iterations=6,
depth=5,
)
assert model is not None
###Output
_____no_output_____
###Markdown
Train
###Code
metrics = model.train(*dataset)
###Output
100%|โโโโโโโโโโ| 3000/3000 [01:20<00:00, 37.07it/s]
100%|โโโโโโโโโโ| 3000/3000 [00:12<00:00, 239.58it/s]
###Markdown
Plot train metrics
###Code
metrics.print()
metrics.plot(plt, thresholds = [0.2, 0.25, 0.3, 0.35])
###Output
Counterfactual Block:
- Discriminator loss: 0.693 +/- 0.000
- Generator loss: -0.671 +/- 0.036
ITE Block:
- ITE loss: 0.982 +/- 0.293
ITE Block in-sample metrics:
- sqrt_PEHE: 0.295 +/- 0.000
- ATE: 0.013 +/- 0.002
- MSE: 0.090 +/- 0.024
ITE Block out-sample metrics:
- sqrt_PEHE: 0.279 +/- 0.000
- ATE: 0.018 +/- 0.001
- MSE: 0.089 +/- 0.024
###Markdown
Predict
###Code
hat_y = model.predict(Test_X)
utils.sqrt_PEHE(hat_y.to_numpy(), Test_Y)
###Output
_____no_output_____
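###Markdown
 For reference, sqrt PEHE (Precision in Estimation of Heterogeneous Effect) measures how closely the predicted treatment effects match the true ones. The sketch below illustrates the usual definition; it assumes the predictions and `Test_Y` hold the two potential outcomes column-wise, and the local `ite.utils.numpy` implementation may differ in details.
###Code
# Illustrative sketch of sqrt PEHE, assuming column 0 = control outcome and column 1 = treated outcome.
import numpy as np
y_hat = hat_y.to_numpy()
true_effect = Test_Y[:, 1] - Test_Y[:, 0]
pred_effect = y_hat[:, 1] - y_hat[:, 0]
np.sqrt(np.mean((true_effect - pred_effect) ** 2))
###Output
_____no_output_____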
###Markdown
Test We can run inferences and get metrics directly
###Code
test_metrics = model.test(Test_X, Test_Y)
test_metrics.print()
###Output
sqrt_PHE = 0.278
ATE = 0.017
MSE = 0.075
Top 5 worst mistakes(indices) = [1656 1613 64 1911 398]
|
community/en/A_Guide_to_Training_Models_Using_tf_keras_and_tf_estimator.ipynb | ###Markdown
A Guide to Training Models Using ```tf.keras``` and ```tf.estimator``` In TensorFlow, we can train models using both ```tf.keras``` and ```tf.estimator```. In this guide, we will examine training methods for both of them, as well as how to convert ```tf.keras``` models into ```tf.estimator``` models. Lastly, we will compare and contrast the advantages/disadvantages of both methods. Setting up First, let's set up our TensorFlow environment. Importing
###Code
import warnings
warnings.filterwarnings('ignore')
import numpy as np
np.random.seed(123) # for reproducibility
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, MaxPool2D, Conv2D, Dense, Reshape, Dropout
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import mnist
###Output
_____no_output_____
###Markdown
Loading data
###Code
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
Y_train = to_categorical(y_train, 10)
Y_test = to_categorical(y_test, 10)
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
###Markdown
Training with ```tf.keras``` Our first scenario is training a model for this dataset with ```tf.keras```. First, we'll define the model architecture, and then we will compile and train the model. Let's get started! The model architecture we will be using is based on the tutorial found [here](https://www.tutorialspoint.com/tensorflow/tensorflow_keras.htm). I previously modified this in another GCI task to support TensorFlow 2.x and additionally modified it again, so it should work well as an example Keras model for this guide. Defining model architecture
###Code
keras_model = Sequential()
keras_model.add(Conv2D(32, (3, 3), activation = 'relu', input_shape = (28,28,1))) # 32 filters with a 3x3 kernel (a positional "3, 3" would be read as strides in tf.keras)
keras_model.add(Conv2D(32, (3, 3), activation = 'relu'))
keras_model.add(MaxPool2D(pool_size = (2,2)))
keras_model.add(Dropout(0.25))
keras_model.add(Flatten())
keras_model.add(Dense(128, activation = 'relu'))
keras_model.add(Dropout(0.5))
keras_model.add(Dense(10, activation = 'softmax'))
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
###Markdown
Compiling model
###Code
keras_model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
###Output
_____no_output_____
###Markdown
Fitting model
###Code
keras_model.fit(X_train, Y_train, batch_size = 32, epochs = 10, verbose = 1)
###Output
Train on 60000 samples
Epoch 1/10
60000/60000 [==============================] - 9s 158us/sample - loss: 0.8972 - acc: 0.7119
Epoch 2/10
60000/60000 [==============================] - 9s 153us/sample - loss: 0.5651 - acc: 0.8287
Epoch 3/10
60000/60000 [==============================] - 9s 152us/sample - loss: 0.4948 - acc: 0.8509
Epoch 4/10
60000/60000 [==============================] - 9s 151us/sample - loss: 0.4540 - acc: 0.8632
Epoch 5/10
60000/60000 [==============================] - 9s 151us/sample - loss: 0.4269 - acc: 0.8724
Epoch 6/10
60000/60000 [==============================] - 9s 146us/sample - loss: 0.4090 - acc: 0.8777
Epoch 7/10
60000/60000 [==============================] - 9s 152us/sample - loss: 0.3980 - acc: 0.8810
Epoch 8/10
60000/60000 [==============================] - 9s 151us/sample - loss: 0.3886 - acc: 0.8837
Epoch 9/10
60000/60000 [==============================] - 9s 152us/sample - loss: 0.3779 - acc: 0.8874
Epoch 10/10
60000/60000 [==============================] - 9s 149us/sample - loss: 0.3713 - acc: 0.8881
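###Markdown
 Evaluating the model As a quick sanity check (an illustrative sketch, not part of the original run), we can score the trained Keras model on the test split prepared earlier:
###Code
# Sketch: evaluate the trained Keras model on the preprocessed test data.
test_loss, test_acc = keras_model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', test_loss)
print('Test accuracy:', test_acc)
###Output
_____no_output_____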
###Markdown
Training with ```tf.estimator``` Next, we'll take a look at how to train a model for this dataset with ```tf.estimator```. We can take advantage of the premade estimators (specifically ```DNNClassifier```) and tweak one to the needs of our specific model. Note that ```DNNClassifier``` builds a fully connected network, so it cannot reproduce the convolutional structure exactly; we mirror the layer widths used before for a rough comparison. We will also be using the same test/train splits used before, as well as the same batch/epoch sizes. The number of steps can be calculated as (total number of images)/(batch size) * (number of epochs) = 60000/32 * 10 = 18750. Reload data
###Code
(X_train, y_train), (X_test, y_test) = mnist.load_data()
###Output
_____no_output_____
###Markdown
Defining model architecture
###Code
estimator_model = tf.estimator.DNNClassifier(
feature_columns=[tf.feature_column.numeric_column("x", shape=[28, 28])], #feature that we are specifying
hidden_units=[32, 32, 128, 10], #layer widths loosely mirroring the Keras model (fully connected here)
optimizer=tf.train.AdamOptimizer(),
n_classes=10,
dropout=0.25,
)
###Output
_____no_output_____
###Markdown
Defining training inputs
###Code
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': X_train},
y=y_train.astype('int32'),
num_epochs=10,
batch_size=32,
shuffle=True,
)
###Output
_____no_output_____
###Markdown
Training model
###Code
estimator_model.train(input_fn=train_input_fn, steps=18750)
###Output
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /tmp/tmp2b8x8ooy/model.ckpt-18750
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/training/saver.py:1069: get_checkpoint_mtimes (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.
Instructions for updating:
Use standard file utilities to get mtimes.
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Saving checkpoints for 18750 into /tmp/tmp2b8x8ooy/model.ckpt.
INFO:tensorflow:loss = 34.545532, step = 18751
INFO:tensorflow:global_step/sec: 274.076
INFO:tensorflow:loss = 36.36592, step = 18851 (0.366 sec)
WARNING:tensorflow:It seems that global step (tf.train.get_global_step) has not been increased. Current value (could be stable): 18853 vs previous value: 18853. You could increase the global step by passing tf.train.get_global_step() to Optimizer.apply_gradients or Optimizer.minimize.
INFO:tensorflow:global_step/sec: 357.32
INFO:tensorflow:loss = 27.184296, step = 18951 (0.280 sec)
INFO:tensorflow:global_step/sec: 350.188
INFO:tensorflow:loss = 26.193417, step = 19051 (0.285 sec)
INFO:tensorflow:global_step/sec: 392.717
INFO:tensorflow:loss = 45.261253, step = 19151 (0.257 sec)
INFO:tensorflow:global_step/sec: 362.822
INFO:tensorflow:loss = 34.116203, step = 19251 (0.274 sec)
INFO:tensorflow:global_step/sec: 377.175
INFO:tensorflow:loss = 30.473282, step = 19351 (0.267 sec)
INFO:tensorflow:global_step/sec: 377.754
INFO:tensorflow:loss = 24.974178, step = 19451 (0.262 sec)
INFO:tensorflow:global_step/sec: 342.173
INFO:tensorflow:loss = 44.794807, step = 19551 (0.295 sec)
INFO:tensorflow:global_step/sec: 356.229
INFO:tensorflow:loss = 33.134678, step = 19651 (0.281 sec)
INFO:tensorflow:global_step/sec: 390.361
INFO:tensorflow:loss = 26.22519, step = 19751 (0.257 sec)
INFO:tensorflow:global_step/sec: 367.261
INFO:tensorflow:loss = 31.334255, step = 19851 (0.269 sec)
INFO:tensorflow:global_step/sec: 375.517
INFO:tensorflow:loss = 16.325682, step = 19951 (0.267 sec)
INFO:tensorflow:global_step/sec: 385.892
INFO:tensorflow:loss = 33.443302, step = 20051 (0.259 sec)
INFO:tensorflow:global_step/sec: 364.749
INFO:tensorflow:loss = 22.575424, step = 20151 (0.274 sec)
INFO:tensorflow:global_step/sec: 377.906
INFO:tensorflow:loss = 42.258247, step = 20251 (0.265 sec)
INFO:tensorflow:global_step/sec: 331.661
INFO:tensorflow:loss = 30.58318, step = 20351 (0.302 sec)
INFO:tensorflow:global_step/sec: 384.816
INFO:tensorflow:loss = 24.376911, step = 20451 (0.260 sec)
INFO:tensorflow:global_step/sec: 358.829
INFO:tensorflow:loss = 30.067444, step = 20551 (0.279 sec)
INFO:tensorflow:global_step/sec: 366.708
INFO:tensorflow:loss = 22.273834, step = 20651 (0.272 sec)
INFO:tensorflow:global_step/sec: 375.349
INFO:tensorflow:loss = 42.661114, step = 20751 (0.267 sec)
INFO:tensorflow:global_step/sec: 378.376
INFO:tensorflow:loss = 28.87573, step = 20851 (0.267 sec)
INFO:tensorflow:global_step/sec: 372.336
INFO:tensorflow:loss = 20.934738, step = 20951 (0.266 sec)
INFO:tensorflow:global_step/sec: 336.525
INFO:tensorflow:loss = 28.98251, step = 21051 (0.297 sec)
INFO:tensorflow:global_step/sec: 358.998
INFO:tensorflow:loss = 39.779484, step = 21151 (0.279 sec)
INFO:tensorflow:global_step/sec: 359.085
INFO:tensorflow:loss = 30.009598, step = 21251 (0.279 sec)
WARNING:tensorflow:It seems that global step (tf.train.get_global_step) has not been increased. Current value (could be stable): 21312 vs previous value: 21312. You could increase the global step by passing tf.train.get_global_step() to Optimizer.apply_gradients or Optimizer.minimize.
INFO:tensorflow:global_step/sec: 324.675
INFO:tensorflow:loss = 23.9089, step = 21351 (0.309 sec)
INFO:tensorflow:global_step/sec: 379.573
INFO:tensorflow:loss = 29.586802, step = 21451 (0.262 sec)
INFO:tensorflow:global_step/sec: 386.232
INFO:tensorflow:loss = 26.502434, step = 21551 (0.262 sec)
INFO:tensorflow:global_step/sec: 368.42
INFO:tensorflow:loss = 33.148033, step = 21651 (0.270 sec)
INFO:tensorflow:global_step/sec: 333.566
INFO:tensorflow:loss = 33.997128, step = 21751 (0.299 sec)
INFO:tensorflow:global_step/sec: 356.12
INFO:tensorflow:loss = 35.566998, step = 21851 (0.280 sec)
INFO:tensorflow:global_step/sec: 379.197
INFO:tensorflow:loss = 29.739769, step = 21951 (0.264 sec)
INFO:tensorflow:global_step/sec: 365.64
INFO:tensorflow:loss = 28.741016, step = 22051 (0.276 sec)
INFO:tensorflow:global_step/sec: 357.331
INFO:tensorflow:loss = 24.947342, step = 22151 (0.280 sec)
INFO:tensorflow:global_step/sec: 357.79
INFO:tensorflow:loss = 31.443031, step = 22251 (0.277 sec)
INFO:tensorflow:global_step/sec: 367.959
INFO:tensorflow:loss = 32.031696, step = 22351 (0.273 sec)
INFO:tensorflow:global_step/sec: 367.677
INFO:tensorflow:loss = 21.824158, step = 22451 (0.271 sec)
INFO:tensorflow:global_step/sec: 372.81
INFO:tensorflow:loss = 30.078144, step = 22551 (0.271 sec)
INFO:tensorflow:global_step/sec: 357.796
INFO:tensorflow:loss = 29.711182, step = 22651 (0.276 sec)
INFO:tensorflow:global_step/sec: 339.181
INFO:tensorflow:loss = 30.581041, step = 22751 (0.295 sec)
INFO:tensorflow:global_step/sec: 342.683
INFO:tensorflow:loss = 23.282162, step = 22851 (0.294 sec)
INFO:tensorflow:global_step/sec: 369.6
INFO:tensorflow:loss = 34.82706, step = 22951 (0.271 sec)
INFO:tensorflow:global_step/sec: 352.078
INFO:tensorflow:loss = 32.043034, step = 23051 (0.282 sec)
INFO:tensorflow:global_step/sec: 360.114
INFO:tensorflow:loss = 25.365305, step = 23151 (0.278 sec)
INFO:tensorflow:global_step/sec: 370.462
INFO:tensorflow:loss = 22.883247, step = 23251 (0.270 sec)
INFO:tensorflow:global_step/sec: 331.028
INFO:tensorflow:loss = 28.205101, step = 23351 (0.303 sec)
INFO:tensorflow:global_step/sec: 333.246
INFO:tensorflow:loss = 28.205967, step = 23451 (0.302 sec)
INFO:tensorflow:global_step/sec: 379.297
INFO:tensorflow:loss = 32.466232, step = 23551 (0.263 sec)
INFO:tensorflow:global_step/sec: 356.931
INFO:tensorflow:loss = 20.641346, step = 23651 (0.278 sec)
INFO:tensorflow:global_step/sec: 363.432
INFO:tensorflow:loss = 33.126244, step = 23751 (0.275 sec)
INFO:tensorflow:global_step/sec: 392.867
INFO:tensorflow:loss = 24.59236, step = 23851 (0.259 sec)
INFO:tensorflow:global_step/sec: 356.975
INFO:tensorflow:loss = 46.124416, step = 23951 (0.276 sec)
INFO:tensorflow:global_step/sec: 340.678
INFO:tensorflow:loss = 44.33403, step = 24051 (0.293 sec)
INFO:tensorflow:global_step/sec: 361.536
INFO:tensorflow:loss = 30.566408, step = 24151 (0.278 sec)
INFO:tensorflow:global_step/sec: 338.341
INFO:tensorflow:loss = 28.726452, step = 24251 (0.295 sec)
INFO:tensorflow:global_step/sec: 357.551
INFO:tensorflow:loss = 27.842205, step = 24351 (0.279 sec)
INFO:tensorflow:global_step/sec: 337.031
INFO:tensorflow:loss = 32.505165, step = 24451 (0.297 sec)
INFO:tensorflow:global_step/sec: 385.238
INFO:tensorflow:loss = 35.12821, step = 24551 (0.262 sec)
INFO:tensorflow:global_step/sec: 382.552
INFO:tensorflow:loss = 29.590168, step = 24651 (0.259 sec)
INFO:tensorflow:global_step/sec: 359.97
INFO:tensorflow:loss = 28.597479, step = 24751 (0.280 sec)
INFO:tensorflow:global_step/sec: 355.139
INFO:tensorflow:loss = 33.054565, step = 24851 (0.279 sec)
INFO:tensorflow:global_step/sec: 380.548
INFO:tensorflow:loss = 22.732683, step = 24951 (0.263 sec)
INFO:tensorflow:global_step/sec: 347.031
INFO:tensorflow:loss = 35.066505, step = 25051 (0.291 sec)
INFO:tensorflow:global_step/sec: 307.196
INFO:tensorflow:loss = 30.2038, step = 25151 (0.323 sec)
INFO:tensorflow:global_step/sec: 360.526
INFO:tensorflow:loss = 28.448912, step = 25251 (0.277 sec)
INFO:tensorflow:global_step/sec: 353.463
INFO:tensorflow:loss = 36.736492, step = 25351 (0.283 sec)
INFO:tensorflow:global_step/sec: 343.929
INFO:tensorflow:loss = 28.905075, step = 25451 (0.291 sec)
INFO:tensorflow:global_step/sec: 349.476
INFO:tensorflow:loss = 39.079185, step = 25551 (0.286 sec)
INFO:tensorflow:global_step/sec: 356.226
INFO:tensorflow:loss = 20.493605, step = 25651 (0.280 sec)
INFO:tensorflow:global_step/sec: 358.908
INFO:tensorflow:loss = 24.033596, step = 25751 (0.279 sec)
INFO:tensorflow:global_step/sec: 361.377
INFO:tensorflow:loss = 32.44416, step = 25851 (0.277 sec)
INFO:tensorflow:global_step/sec: 367.764
INFO:tensorflow:loss = 29.573887, step = 25951 (0.272 sec)
INFO:tensorflow:global_step/sec: 353.577
INFO:tensorflow:loss = 25.570103, step = 26051 (0.286 sec)
INFO:tensorflow:global_step/sec: 363.933
INFO:tensorflow:loss = 25.418285, step = 26151 (0.272 sec)
INFO:tensorflow:global_step/sec: 352.714
INFO:tensorflow:loss = 25.39981, step = 26251 (0.283 sec)
INFO:tensorflow:global_step/sec: 352.496
INFO:tensorflow:loss = 35.787186, step = 26351 (0.284 sec)
INFO:tensorflow:global_step/sec: 341.906
INFO:tensorflow:loss = 31.000824, step = 26451 (0.295 sec)
INFO:tensorflow:global_step/sec: 368.014
INFO:tensorflow:loss = 34.740723, step = 26551 (0.271 sec)
INFO:tensorflow:global_step/sec: 360.799
INFO:tensorflow:loss = 23.603878, step = 26651 (0.278 sec)
INFO:tensorflow:global_step/sec: 388.282
INFO:tensorflow:loss = 29.348906, step = 26751 (0.256 sec)
INFO:tensorflow:global_step/sec: 390.05
INFO:tensorflow:loss = 18.619919, step = 26851 (0.255 sec)
INFO:tensorflow:global_step/sec: 366.252
INFO:tensorflow:loss = 28.764408, step = 26951 (0.273 sec)
INFO:tensorflow:global_step/sec: 366.853
INFO:tensorflow:loss = 36.327354, step = 27051 (0.276 sec)
INFO:tensorflow:global_step/sec: 385.743
INFO:tensorflow:loss = 31.547184, step = 27151 (0.256 sec)
INFO:tensorflow:global_step/sec: 391.79
INFO:tensorflow:loss = 20.443386, step = 27251 (0.258 sec)
INFO:tensorflow:global_step/sec: 354.903
INFO:tensorflow:loss = 39.186333, step = 27351 (0.280 sec)
INFO:tensorflow:global_step/sec: 347.165
INFO:tensorflow:loss = 36.819054, step = 27451 (0.288 sec)
INFO:tensorflow:global_step/sec: 354.084
INFO:tensorflow:loss = 23.075378, step = 27551 (0.282 sec)
INFO:tensorflow:global_step/sec: 369.992
INFO:tensorflow:loss = 29.486294, step = 27651 (0.271 sec)
INFO:tensorflow:global_step/sec: 354.573
INFO:tensorflow:loss = 26.701433, step = 27751 (0.281 sec)
INFO:tensorflow:global_step/sec: 372.916
INFO:tensorflow:loss = 76.3383, step = 27851 (0.269 sec)
INFO:tensorflow:global_step/sec: 364.427
INFO:tensorflow:loss = 23.440224, step = 27951 (0.275 sec)
INFO:tensorflow:global_step/sec: 354.064
INFO:tensorflow:loss = 20.93472, step = 28051 (0.282 sec)
INFO:tensorflow:global_step/sec: 372.908
INFO:tensorflow:loss = 27.907692, step = 28151 (0.271 sec)
INFO:tensorflow:global_step/sec: 375.56
INFO:tensorflow:loss = 36.77633, step = 28251 (0.263 sec)
INFO:tensorflow:global_step/sec: 351.392
INFO:tensorflow:loss = 29.933287, step = 28351 (0.285 sec)
INFO:tensorflow:global_step/sec: 354.136
INFO:tensorflow:loss = 34.30904, step = 28451 (0.282 sec)
INFO:tensorflow:global_step/sec: 378.876
INFO:tensorflow:loss = 32.34471, step = 28551 (0.267 sec)
INFO:tensorflow:global_step/sec: 368.005
INFO:tensorflow:loss = 34.253304, step = 28651 (0.268 sec)
INFO:tensorflow:global_step/sec: 350.063
INFO:tensorflow:loss = 29.121384, step = 28751 (0.286 sec)
INFO:tensorflow:global_step/sec: 331.47
INFO:tensorflow:loss = 45.072323, step = 28851 (0.304 sec)
INFO:tensorflow:global_step/sec: 371.134
INFO:tensorflow:loss = 24.53209, step = 28951 (0.268 sec)
INFO:tensorflow:global_step/sec: 341.836
INFO:tensorflow:loss = 37.567387, step = 29051 (0.291 sec)
INFO:tensorflow:global_step/sec: 326.918
INFO:tensorflow:loss = 21.330051, step = 29151 (0.306 sec)
INFO:tensorflow:global_step/sec: 353.015
INFO:tensorflow:loss = 31.070032, step = 29251 (0.283 sec)
INFO:tensorflow:global_step/sec: 341.108
INFO:tensorflow:loss = 26.279488, step = 29351 (0.297 sec)
INFO:tensorflow:global_step/sec: 346.638
INFO:tensorflow:loss = 27.554222, step = 29451 (0.285 sec)
INFO:tensorflow:global_step/sec: 353.974
INFO:tensorflow:loss = 36.878952, step = 29551 (0.283 sec)
INFO:tensorflow:global_step/sec: 308.263
INFO:tensorflow:loss = 26.443739, step = 29651 (0.324 sec)
INFO:tensorflow:global_step/sec: 344.878
INFO:tensorflow:loss = 19.660355, step = 29751 (0.290 sec)
INFO:tensorflow:global_step/sec: 310.854
INFO:tensorflow:loss = 35.42279, step = 29851 (0.322 sec)
INFO:tensorflow:global_step/sec: 356.512
INFO:tensorflow:loss = 19.684841, step = 29951 (0.281 sec)
INFO:tensorflow:global_step/sec: 335.564
INFO:tensorflow:loss = 31.338058, step = 30051 (0.298 sec)
INFO:tensorflow:global_step/sec: 357.163
INFO:tensorflow:loss = 35.750977, step = 30151 (0.284 sec)
INFO:tensorflow:global_step/sec: 334.747
INFO:tensorflow:loss = 20.251564, step = 30251 (0.295 sec)
INFO:tensorflow:global_step/sec: 334.311
INFO:tensorflow:loss = 31.402658, step = 30351 (0.300 sec)
INFO:tensorflow:global_step/sec: 349.365
INFO:tensorflow:loss = 16.776428, step = 30451 (0.286 sec)
INFO:tensorflow:global_step/sec: 353.205
INFO:tensorflow:loss = 31.354696, step = 30551 (0.283 sec)
INFO:tensorflow:global_step/sec: 338.887
INFO:tensorflow:loss = 43.503326, step = 30651 (0.295 sec)
INFO:tensorflow:global_step/sec: 319.213
INFO:tensorflow:loss = 28.007732, step = 30751 (0.314 sec)
INFO:tensorflow:global_step/sec: 329.675
INFO:tensorflow:loss = 24.168598, step = 30851 (0.305 sec)
INFO:tensorflow:global_step/sec: 355.957
INFO:tensorflow:loss = 30.947357, step = 30951 (0.279 sec)
INFO:tensorflow:global_step/sec: 380.178
INFO:tensorflow:loss = 25.1144, step = 31051 (0.263 sec)
INFO:tensorflow:global_step/sec: 373.859
INFO:tensorflow:loss = 23.713402, step = 31151 (0.269 sec)
INFO:tensorflow:global_step/sec: 360.018
INFO:tensorflow:loss = 21.41994, step = 31251 (0.276 sec)
INFO:tensorflow:global_step/sec: 367.147
INFO:tensorflow:loss = 29.591076, step = 31351 (0.272 sec)
INFO:tensorflow:global_step/sec: 362.419
INFO:tensorflow:loss = 30.606663, step = 31451 (0.276 sec)
INFO:tensorflow:global_step/sec: 352.382
INFO:tensorflow:loss = 30.279882, step = 31551 (0.284 sec)
INFO:tensorflow:global_step/sec: 342.067
INFO:tensorflow:loss = 21.496399, step = 31651 (0.292 sec)
INFO:tensorflow:global_step/sec: 358.704
INFO:tensorflow:loss = 16.197205, step = 31751 (0.279 sec)
INFO:tensorflow:global_step/sec: 363.487
INFO:tensorflow:loss = 25.158396, step = 31851 (0.276 sec)
INFO:tensorflow:global_step/sec: 360.163
INFO:tensorflow:loss = 35.937443, step = 31951 (0.277 sec)
INFO:tensorflow:global_step/sec: 337.35
INFO:tensorflow:loss = 35.07252, step = 32051 (0.299 sec)
INFO:tensorflow:global_step/sec: 338.086
INFO:tensorflow:loss = 29.837425, step = 32151 (0.294 sec)
INFO:tensorflow:global_step/sec: 366.197
INFO:tensorflow:loss = 39.362694, step = 32251 (0.274 sec)
INFO:tensorflow:global_step/sec: 358.728
INFO:tensorflow:loss = 27.178505, step = 32351 (0.277 sec)
INFO:tensorflow:global_step/sec: 364.375
INFO:tensorflow:loss = 35.356243, step = 32451 (0.275 sec)
INFO:tensorflow:global_step/sec: 349.177
INFO:tensorflow:loss = 30.378487, step = 32551 (0.286 sec)
INFO:tensorflow:global_step/sec: 358.608
INFO:tensorflow:loss = 31.869942, step = 32651 (0.280 sec)
INFO:tensorflow:global_step/sec: 376.02
INFO:tensorflow:loss = 28.799706, step = 32751 (0.266 sec)
INFO:tensorflow:global_step/sec: 366.619
INFO:tensorflow:loss = 30.492258, step = 32851 (0.271 sec)
INFO:tensorflow:global_step/sec: 341.324
INFO:tensorflow:loss = 41.74387, step = 32951 (0.294 sec)
INFO:tensorflow:global_step/sec: 380.502
INFO:tensorflow:loss = 33.064053, step = 33051 (0.263 sec)
INFO:tensorflow:global_step/sec: 331.166
INFO:tensorflow:loss = 25.378086, step = 33151 (0.301 sec)
INFO:tensorflow:global_step/sec: 369.591
INFO:tensorflow:loss = 38.7846, step = 33251 (0.274 sec)
INFO:tensorflow:global_step/sec: 359.244
INFO:tensorflow:loss = 39.79016, step = 33351 (0.275 sec)
INFO:tensorflow:global_step/sec: 386.933
INFO:tensorflow:loss = 34.191597, step = 33451 (0.258 sec)
INFO:tensorflow:global_step/sec: 351.822
INFO:tensorflow:loss = 27.918594, step = 33551 (0.284 sec)
INFO:tensorflow:global_step/sec: 371.109
INFO:tensorflow:loss = 26.432547, step = 33651 (0.273 sec)
INFO:tensorflow:global_step/sec: 360.037
INFO:tensorflow:loss = 24.122852, step = 33751 (0.275 sec)
INFO:tensorflow:global_step/sec: 349.257
INFO:tensorflow:loss = 46.8686, step = 33851 (0.286 sec)
INFO:tensorflow:global_step/sec: 365.319
INFO:tensorflow:loss = 24.293032, step = 33951 (0.276 sec)
INFO:tensorflow:global_step/sec: 373.017
INFO:tensorflow:loss = 23.540813, step = 34051 (0.268 sec)
INFO:tensorflow:global_step/sec: 337.191
INFO:tensorflow:loss = 28.020004, step = 34151 (0.297 sec)
INFO:tensorflow:global_step/sec: 352.802
INFO:tensorflow:loss = 16.167042, step = 34251 (0.281 sec)
INFO:tensorflow:global_step/sec: 348.456
INFO:tensorflow:loss = 33.748867, step = 34351 (0.287 sec)
INFO:tensorflow:global_step/sec: 369.144
INFO:tensorflow:loss = 27.72809, step = 34451 (0.271 sec)
INFO:tensorflow:global_step/sec: 362.19
INFO:tensorflow:loss = 46.633133, step = 34551 (0.276 sec)
INFO:tensorflow:global_step/sec: 364.246
INFO:tensorflow:loss = 26.052158, step = 34651 (0.275 sec)
INFO:tensorflow:global_step/sec: 350.454
INFO:tensorflow:loss = 23.241621, step = 34751 (0.285 sec)
INFO:tensorflow:global_step/sec: 378.577
INFO:tensorflow:loss = 38.118256, step = 34851 (0.264 sec)
INFO:tensorflow:global_step/sec: 329.964
INFO:tensorflow:loss = 27.368176, step = 34951 (0.303 sec)
INFO:tensorflow:global_step/sec: 367.447
INFO:tensorflow:loss = 27.651104, step = 35051 (0.272 sec)
INFO:tensorflow:global_step/sec: 359.523
INFO:tensorflow:loss = 34.896538, step = 35151 (0.279 sec)
INFO:tensorflow:global_step/sec: 375.929
INFO:tensorflow:loss = 20.26496, step = 35251 (0.265 sec)
INFO:tensorflow:global_step/sec: 326.993
INFO:tensorflow:loss = 33.577034, step = 35351 (0.307 sec)
INFO:tensorflow:global_step/sec: 355.503
INFO:tensorflow:loss = 22.365345, step = 35451 (0.281 sec)
INFO:tensorflow:global_step/sec: 357.121
INFO:tensorflow:loss = 30.745968, step = 35551 (0.279 sec)
INFO:tensorflow:global_step/sec: 360.309
INFO:tensorflow:loss = 25.206995, step = 35651 (0.281 sec)
INFO:tensorflow:global_step/sec: 365.518
INFO:tensorflow:loss = 36.831566, step = 35751 (0.271 sec)
INFO:tensorflow:global_step/sec: 369.374
INFO:tensorflow:loss = 21.710377, step = 35851 (0.273 sec)
INFO:tensorflow:global_step/sec: 373.344
INFO:tensorflow:loss = 32.91131, step = 35951 (0.265 sec)
INFO:tensorflow:global_step/sec: 364.278
INFO:tensorflow:loss = 25.128143, step = 36051 (0.274 sec)
INFO:tensorflow:global_step/sec: 374.304
INFO:tensorflow:loss = 32.65368, step = 36151 (0.268 sec)
INFO:tensorflow:global_step/sec: 357.773
INFO:tensorflow:loss = 31.729706, step = 36251 (0.279 sec)
INFO:tensorflow:global_step/sec: 345.494
INFO:tensorflow:loss = 34.536457, step = 36351 (0.292 sec)
INFO:tensorflow:global_step/sec: 354.348
INFO:tensorflow:loss = 30.685406, step = 36451 (0.282 sec)
INFO:tensorflow:global_step/sec: 369.201
INFO:tensorflow:loss = 34.486115, step = 36551 (0.271 sec)
INFO:tensorflow:global_step/sec: 354.237
INFO:tensorflow:loss = 29.600397, step = 36651 (0.279 sec)
INFO:tensorflow:global_step/sec: 341.844
INFO:tensorflow:loss = 34.32223, step = 36751 (0.293 sec)
INFO:tensorflow:global_step/sec: 374.266
INFO:tensorflow:loss = 24.338718, step = 36851 (0.267 sec)
INFO:tensorflow:global_step/sec: 366.529
INFO:tensorflow:loss = 22.090214, step = 36951 (0.275 sec)
INFO:tensorflow:global_step/sec: 365.98
INFO:tensorflow:loss = 20.613789, step = 37051 (0.271 sec)
INFO:tensorflow:global_step/sec: 376.839
INFO:tensorflow:loss = 24.526241, step = 37151 (0.265 sec)
INFO:tensorflow:global_step/sec: 378.378
INFO:tensorflow:loss = 19.778381, step = 37251 (0.267 sec)
INFO:tensorflow:global_step/sec: 352.328
INFO:tensorflow:loss = 31.784592, step = 37351 (0.281 sec)
INFO:tensorflow:global_step/sec: 345.436
INFO:tensorflow:loss = 28.827927, step = 37451 (0.290 sec)
INFO:tensorflow:Saving checkpoints for 37500 into /tmp/tmp2b8x8ooy/model.ckpt.
INFO:tensorflow:Loss for final step: 47.465935.
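###Markdown
 Evaluating the estimator As a quick sanity check (an illustrative sketch, not part of the original run), we could score the trained ```DNNClassifier``` on the test images, reusing the same input-function helper used for training:
###Code
# Sketch: evaluate the trained estimator on the raw test split loaded above.
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': X_test},
y=y_test.astype('int32'),
num_epochs=1,
shuffle=False,
)
print(estimator_model.evaluate(input_fn=eval_input_fn))
###Output
_____no_output_____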
###Markdown
Converting ```tf.keras``` model to ```tf.estimator``` model We can convert a ```tf.keras``` model to a ```tf.estimator``` model by using ```tf.keras.estimator.model_to_estimator()``` as demonstrated below. We will pass the ```keras_model``` previously compiled as an argument to this function.
###Code
converted_model = tf.keras.estimator.model_to_estimator(keras_model)
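# Illustrative sketch (not run here): training the converted estimator.
# Assumptions: the feature key must match the Keras model's input layer name,
# so we read it from keras_model.input_names rather than hard-coding it, and the
# raw images must be reshaped/scaled the way the Keras model was originally trained.
input_name = keras_model.input_names[0]
X_train_keras = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255
converted_train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={input_name: X_train_keras},
y=Y_train, # one-hot labels, matching the categorical_crossentropy loss
num_epochs=1,
batch_size=32,
shuffle=True,
)
converted_model.train(input_fn=converted_train_input_fn, steps=100)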
###Output
_____no_output_____ |