path (string, lengths 7-265) | concatenated_notebook (string, lengths 46-17M)
---|---|
Model backlog/EfficientNet/EfficientNetB4/5-Fold/283 - EfficientNetB4-Reg-Img256 Old Pretrain Fold5.ipynb | ###Markdown
Dependencies
###Code
import os
import sys
import cv2
import shutil
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import multiprocessing as mp
import matplotlib.pyplot as plt
from tensorflow import set_random_seed
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from keras import backend as K
from keras.models import Model
from keras.utils import to_categorical
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback, LearningRateScheduler, ModelCheckpoint
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
set_random_seed(seed)
seed = 0
seed_everything(seed)
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import *
###Output
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
Using TensorFlow backend.
###Markdown
Load data
###Code
fold_set = pd.read_csv('../input/aptos-data-split/5-fold.csv')
X_train = fold_set[fold_set['fold_4'] == 'train']
X_val = fold_set[fold_set['fold_4'] == 'validation']
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
# Preprocess data
X_train["id_code"] = X_train["id_code"].apply(lambda x: x + ".png")
X_val["id_code"] = X_val["id_code"].apply(lambda x: x + ".png")
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
print('Number of train samples: ', X_train.shape[0])
print('Number of validation samples: ', X_val.shape[0])
print('Number of test samples: ', test.shape[0])
display(X_train.head())
###Output
Number of train samples: 2931
Number of validation samples: 731
Number of test samples: 1928
###Markdown
Model parameters
###Code
# Model parameters
model_path = '../working/effNetB4_img256_noBen_fold5.h5'
FACTOR = 4
BATCH_SIZE = 8 * FACTOR
EPOCHS = 10
WARMUP_EPOCHS = 5
LEARNING_RATE = 1e-4 * FACTOR
WARMUP_LEARNING_RATE = 1e-3 * FACTOR
HEIGHT = 256
WIDTH = 256
CHANNELS = 3
TTA_STEPS = 1
ES_PATIENCE = 5
RLROP_PATIENCE = 3
LR_WARMUP_EPOCHS = 3
STEP_SIZE = len(X_train) // BATCH_SIZE
TOTAL_STEPS = EPOCHS * STEP_SIZE
WARMUP_STEPS = LR_WARMUP_EPOCHS * STEP_SIZE
###Output
_____no_output_____
###Markdown
Pre-procecess images
###Code
new_data_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'
# Making sure directories don't exist
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)
def crop_image(img, tol=7):
if img.ndim ==2:
mask = img>tol
return img[np.ix_(mask.any(1),mask.any(0))]
elif img.ndim==3:
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
mask = gray_img>tol
check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]
if (check_shape == 0): # image is too dark so that we crop out everything,
return img # return original image
else:
img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))]
img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))]
img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))]
img = np.stack([img1,img2,img3],axis=-1)
return img
def circle_crop(img):
img = crop_image(img)
height, width, depth = img.shape
largest_side = np.max((height, width))
img = cv2.resize(img, (largest_side, largest_side))
height, width, depth = img.shape
x = width//2
y = height//2
r = np.amin((x, y))
circle_img = np.zeros((height, width), np.uint8)
cv2.circle(circle_img, (x, y), int(r), 1, thickness=-1)
img = cv2.bitwise_and(img, img, mask=circle_img)
img = crop_image(img)
return img
def preprocess_image(image_id, base_path, save_path, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
image = cv2.imread(base_path + image_id)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = circle_crop(image)
image = cv2.resize(image, (HEIGHT, WIDTH))
# image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0,0), sigmaX), -4 , 128)
cv2.imwrite(save_path + image_id, image)
def preprocess_data(df, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
df = df.reset_index()
for i in range(df.shape[0]):
item = df.iloc[i]
image_id = item['id_code']
item_set = item['fold_4']
if item_set == 'train':
preprocess_image(image_id, new_data_base_path, train_dest_path)
if item_set == 'validation':
preprocess_image(image_id, new_data_base_path, validation_dest_path)
def preprocess_test(df, base_path=test_base_path, save_path=test_dest_path, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
df = df.reset_index()
for i in range(df.shape[0]):
image_id = df.iloc[i]['id_code']
preprocess_image(image_id, base_path, save_path)
n_cpu = mp.cpu_count()
train_n_cnt = X_train.shape[0] // n_cpu
val_n_cnt = X_val.shape[0] // n_cpu
test_n_cnt = test.shape[0] // n_cpu
# Pre-process old data train set
pool = mp.Pool(n_cpu)
dfs = [X_train.iloc[train_n_cnt*i:train_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = X_train.iloc[train_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_data, [x_df for x_df in dfs])
pool.close()
# Pre-process validation set
pool = mp.Pool(n_cpu)
dfs = [X_val.iloc[val_n_cnt*i:val_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = X_val.iloc[val_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_data, [x_df for x_df in dfs])
pool.close()
# Pre-process test set
pool = mp.Pool(n_cpu)
dfs = [test.iloc[test_n_cnt*i:test_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = test.iloc[test_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_test, [x_df for x_df in dfs])
pool.close()
###Output
_____no_output_____
###Markdown
Data generator
###Code
datagen=ImageDataGenerator(rescale=1./255,
rotation_range=360,
horizontal_flip=True,
vertical_flip=True)
train_generator=datagen.flow_from_dataframe(
dataframe=X_train,
directory=train_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="raw",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
valid_generator=datagen.flow_from_dataframe(
dataframe=X_val,
directory=validation_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="raw",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
test_generator=datagen.flow_from_dataframe(
dataframe=test,
directory=test_dest_path,
x_col="id_code",
batch_size=1,
class_mode=None,
shuffle=False,
target_size=(HEIGHT, WIDTH),
seed=seed)
def classify(x):
if x < 0.5:
return 0
elif x < 1.5:
return 1
elif x < 2.5:
return 2
elif x < 3.5:
return 3
return 4
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
def plot_confusion_matrix(train, validation, labels=labels):
train_labels, train_preds = train
validation_labels, validation_preds = validation
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
train_cnf_matrix = confusion_matrix(train_labels, train_preds)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8),ax=ax2).set_title('Validation')
plt.show()
def plot_metrics(history, figsize=(20, 14)):
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=figsize)
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()
def apply_tta(model, generator, steps=10):
step_size = generator.n//generator.batch_size
preds_tta = []
for i in range(steps):
generator.reset()
preds = model.predict_generator(generator, steps=step_size)
preds_tta.append(preds)
return np.mean(preds_tta, axis=0)
def evaluate_model(train, validation):
train_labels, train_preds = train
validation_labels, validation_preds = validation
print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(np.append(train_preds, validation_preds), np.append(train_labels, validation_labels), weights='quadratic'))
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0):
"""
Cosine decay schedule with warm up period.
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
:param global_step {int}: global step.
:param learning_rate_base {float}: base learning rate.
:param total_steps {int}: total number of training steps.
:param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
:param warmup_steps {int}: number of warmup steps. (default: {0}).
:param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
:param global_step {int}: global step.
:Returns : a float representing learning rate.
:Raises ValueError: if warmup_learning_rate is larger than learning_rate_base, or if warmup_steps is larger than total_steps.
"""
if total_steps < warmup_steps:
raise ValueError('total_steps must be larger or equal to warmup_steps.')
learning_rate = 0.5 * learning_rate_base * (1 + np.cos(
np.pi *
(global_step - warmup_steps - hold_base_rate_steps
) / float(total_steps - warmup_steps - hold_base_rate_steps)))
if hold_base_rate_steps > 0:
learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
learning_rate, learning_rate_base)
if warmup_steps > 0:
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger or equal to warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * global_step + warmup_learning_rate
learning_rate = np.where(global_step < warmup_steps, warmup_rate,
learning_rate)
return np.where(global_step > total_steps, 0.0, learning_rate)
class WarmUpCosineDecayScheduler(Callback):
"""Cosine decay with warmup learning rate scheduler"""
def __init__(self,
learning_rate_base,
total_steps,
global_step_init=0,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0,
verbose=0):
"""
Constructor for cosine decay with warmup learning rate scheduler.
:param learning_rate_base {float}: base learning rate.
:param total_steps {int}: total number of training steps.
:param global_step_init {int}: initial global step, e.g. from previous checkpoint.
:param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
:param warmup_steps {int}: number of warmup steps. (default: {0}).
:param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
:param verbose {int}: quiet, 1: update messages. (default: {0}).
"""
super(WarmUpCosineDecayScheduler, self).__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.global_step = global_step_init
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.hold_base_rate_steps = hold_base_rate_steps
self.verbose = verbose
self.learning_rates = []
def on_batch_end(self, batch, logs=None):
self.global_step = self.global_step + 1
lr = K.get_value(self.model.optimizer.lr)
self.learning_rates.append(lr)
def on_batch_begin(self, batch, logs=None):
lr = cosine_decay_with_warmup(global_step=self.global_step,
learning_rate_base=self.learning_rate_base,
total_steps=self.total_steps,
warmup_learning_rate=self.warmup_learning_rate,
warmup_steps=self.warmup_steps,
hold_base_rate_steps=self.hold_base_rate_steps)
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nBatch %02d: setting learning rate to %s.' % (self.global_step + 1, lr))
class RAdam(optimizers.Optimizer):
"""RAdam optimizer.
# Arguments
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
weight_decay: float >= 0. Weight decay for each param.
amsgrad: boolean. Whether to apply the AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
Beyond".
# References
- [Adam - A Method for Stochastic Optimization](https://arxiv.org/abs/1412.6980v8)
- [On the Convergence of Adam and Beyond](https://openreview.net/forum?id=ryQu7f-RZ)
- [On The Variance Of The Adaptive Learning Rate And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf)
"""
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
epsilon=None, decay=0., weight_decay=0., amsgrad=False, **kwargs):
super(RAdam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
self.weight_decay = K.variable(weight_decay, name='weight_decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.initial_weight_decay = weight_decay
self.amsgrad = amsgrad
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='m_' + str(i)) for (i, p) in enumerate(params)]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='v_' + str(i)) for (i, p) in enumerate(params)]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='vhat_' + str(i)) for (i, p) in enumerate(params)]
else:
vhats = [K.zeros(1, name='vhat_' + str(i)) for i in range(len(params))]
self.weights = [self.iterations] + ms + vs + vhats
beta_1_t = K.pow(self.beta_1, t)
beta_2_t = K.pow(self.beta_2, t)
sma_inf = 2.0 / (1.0 - self.beta_2) - 1.0
sma_t = sma_inf - 2.0 * t * beta_2_t / (1.0 - beta_2_t)
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
m_corr_t = m_t / (1.0 - beta_1_t)
if self.amsgrad:
vhat_t = K.maximum(vhat, v_t)
v_corr_t = K.sqrt(vhat_t / (1.0 - beta_2_t) + self.epsilon)
self.updates.append(K.update(vhat, vhat_t))
else:
v_corr_t = K.sqrt(v_t / (1.0 - beta_2_t) + self.epsilon)
r_t = K.sqrt((sma_t - 4.0) / (sma_inf - 4.0) *
(sma_t - 2.0) / (sma_inf - 2.0) *
sma_inf / sma_t)
p_t = K.switch(sma_t > 5, r_t * m_corr_t / v_corr_t, m_corr_t)
if self.initial_weight_decay > 0:
p_t += self.weight_decay * p
p_t = p - lr * p_t
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'weight_decay': float(K.get_value(self.weight_decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad,
}
base_config = super(RAdam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
###Output
_____no_output_____
###Markdown
Model
###Code
def create_model(input_shape):
input_tensor = Input(shape=input_shape)
base_model = EfficientNetB4(weights=None,
include_top=False,
input_tensor=input_tensor)
# base_model.load_weights('../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
final_output = Dense(1, activation='linear', name='final_output')(x)
model = Model(input_tensor, final_output)
model.load_weights('../input/aptos-pretrain-olddata-effnetb4/effNetB4_img224_noBen_oldData.h5')
return model
###Output
_____no_output_____
###Markdown
Train top layers
###Code
model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS))
for layer in model.layers:
layer.trainable = False
for i in range(-2, 0):
model.layers[i].trainable = True
metric_list = ["accuracy"]
optimizer = RAdam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
verbose=2).history
###Output
Epoch 1/5
- 63s - loss: 0.4938 - acc: 0.6271 - val_loss: 0.5748 - val_acc: 0.5597
Epoch 2/5
- 51s - loss: 0.4479 - acc: 0.6615 - val_loss: 0.4141 - val_acc: 0.6824
Epoch 3/5
- 51s - loss: 0.4343 - acc: 0.6650 - val_loss: 0.4668 - val_acc: 0.6023
Epoch 4/5
- 50s - loss: 0.4369 - acc: 0.6508 - val_loss: 0.4036 - val_acc: 0.6867
Epoch 5/5
- 51s - loss: 0.4324 - acc: 0.6678 - val_loss: 0.4718 - val_acc: 0.6052
###Markdown
Fine-tune the model
###Code
for layer in model.layers:
layer.trainable = True
checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
cosine_lr = WarmUpCosineDecayScheduler(learning_rate_base=LEARNING_RATE,
total_steps=TOTAL_STEPS,
warmup_learning_rate=0.0,
warmup_steps=WARMUP_STEPS,
hold_base_rate_steps=(2 * STEP_SIZE))
callback_list = [checkpoint, es, cosine_lr]
optimizer = RAdam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=EPOCHS,
callbacks=callback_list,
verbose=2).history
fig, ax = plt.subplots(1, 1, sharex='col', figsize=(20, 4))
ax.plot(cosine_lr.learning_rates)
ax.set_title('Fine-tune learning rates')
plt.xlabel('Steps')
plt.ylabel('Learning rate')
sns.despine()
plt.show()
###Output
_____no_output_____
###Markdown
Model loss graph
###Code
plot_metrics(history)
# Create an empty dataframe to keep the predictions and labels
df_preds = pd.DataFrame(columns=['label', 'pred', 'set'])
train_generator.reset()
valid_generator.reset()
# Add train predictions and labels
for i in range(STEP_SIZE_TRAIN + 1):
im, lbl = next(train_generator)
preds = model.predict(im, batch_size=train_generator.batch_size)
for index in range(len(preds)):
df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'train']
# Add validation predictions and labels
for i in range(STEP_SIZE_VALID + 1):
im, lbl = next(valid_generator)
preds = model.predict(im, batch_size=valid_generator.batch_size)
for index in range(len(preds)):
df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'validation']
df_preds['label'] = df_preds['label'].astype('int')
# Classify predictions
df_preds['predictions'] = df_preds['pred'].apply(lambda x: classify(x))
train_preds = df_preds[df_preds['set'] == 'train']
validation_preds = df_preds[df_preds['set'] == 'validation']
###Output
_____no_output_____
###Markdown
Model Evaluation Confusion Matrix Original thresholds
###Code
plot_confusion_matrix((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
###Output
_____no_output_____
###Markdown
Quadratic Weighted Kappa
###Code
evaluate_model((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
###Output
Train Cohen Kappa score: 0.965
Validation Cohen Kappa score: 0.898
Complete set Cohen Kappa score: 0.952
###Markdown
Apply model to test set and output predictions
###Code
preds = apply_tta(model, test_generator, TTA_STEPS)
predictions = [classify(x) for x in preds]
results = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions})
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
# Cleaning created directories
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
###Output
_____no_output_____
###Markdown
Predictions class distribution
###Code
fig = plt.subplots(sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d").set_title('Test')
sns.despine()
plt.show()
results.to_csv('submission.csv', index=False)
display(results.head())
###Output
_____no_output_____ |
nbs/course2020/vision/07_Siamese.ipynb | ###Markdown
Lesson 7 - Siamese Lesson Video:
###Code
#hide_input
from IPython.lib.display import YouTubeVideo
from datetime import timedelta
start = int(timedelta(minutes=16, seconds=44).total_seconds())
YouTubeVideo('0IQYJNkAI3k', start=start)
#hide
#Run once per session
!pip install fastai wwf -q --upgrade
#hide_input
from wwf.utils import state_versions
state_versions(['fastai', 'fastcore', 'wwf'])
###Output
_____no_output_____
###Markdown
This notebook goes through how to build a Siamese dataset from scratch. What is a Siamese Problem?Identifying if two images belong to the same class:  Common use cases:* Person identification* Small classification sample size Why Siamese?Let's think of an example problem:> I own 3 dogs and I want to differentiate between the three of them from a photo, but I only have 5 images of each animal.By our normal standards, we would say that is *far* too little data for us to work with. But in this case, we now have **120** training samples (not including augmentation). Our example will use the `PETS` dataset. We won't be training, but if you're dealing with this problem, you should have all the tools you need by now Installing the library and *starting* to build the Dataset
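As a rough illustration of the counting argument (the exact total depends on how pairs are sampled), pairing images and labelling each pair as same/different turns 15 raw images into on the order of a hundred or more training examples. A minimal sketch, using hypothetical dog/photo indices for illustration only:

```python
from itertools import combinations

# 15 hypothetical images: (dog_id, photo_index)
images = [(dog, i) for dog in range(3) for i in range(5)]

pairs = list(combinations(images, 2))            # every unordered image pair
same = [p for p in pairs if p[0][0] == p[1][0]]  # pairs showing the same dog
# len(pairs) == 105 labelled pairs: 30 "same" and 75 "different"
```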
###Code
from fastai.vision.all import *
src = untar_data(URLs.PETS)/'images'
###Output
_____no_output_____
###Markdown
We'll grab all the file names:
###Code
items = get_image_files(src)
###Output
_____no_output_____
###Markdown
And now we can start preparing our dataset. We will be doing everything at the *lowest* level possible today. First let's make a transform that will open some image from a filename and resize it.
###Code
def resized_image(fn:Path, sz=128):
"Opens an image from `fn` and resizes it to `sz`"
x = Image.open(fn).convert('RGB').resize((sz,sz))
return tensor(array(x)).permute(2,0,1).float()/255.
###Output
_____no_output_____
###Markdown
Now let's get two random images (that we know are different)
###Code
img1 = resized_image(items[0], 448)
img2 = resized_image(items[1], 448)
###Output
_____no_output_____
###Markdown
Now we need some way of viewing our image, along with a title. Let's make a `TitledImage` class:
###Code
class TitledImage(Tuple):
def show(self, ctx=None, **kwargs): show_titled_image(self, ctx=ctx, **kwargs)
TitledImage(img1, 'Test').show()
###Output
_____no_output_____
###Markdown
Now let's make something similar for a pair of images (our `Siamese`)
###Code
class SiameseImage(Tuple):
def show(self, ctx=None, **kwargs):
im1, im2, is_same = self
return show_image(torch.cat([im1,im2], dim=2), title=is_same, ctx=ctx, **kwargs)
###Output
_____no_output_____
###Markdown
Let's look at two examples (which look *remarkably*) similar to that image earlier:
###Code
SiameseImage(img1, img1, True).show(figsize=(7,7));
SiameseImage(img1, img2, False).show(figsize=(7,7));
###Output
_____no_output_____
###Markdown
SiamesePairNow we need some transform to generate our `Siamese` dataset. We'll want it to take in a list of items and labels:
###Code
class SiamesePair(Transform):
"A transform to generate Siamese data"
def __init__(self, items, labels):
self.items, self.labels, self.assoc = items,labels,self
sortlbl = sorted(enumerate(labels), key=itemgetter(1))
self.clsmap = {k:L(v).itemgot(0) for k,v in itertools.groupby(sortlbl, key=itemgetter(1))}
self.idxs = range_of(self.items)
def encodes(self,i):
"x: tuple of `i`th image and a random image from same or different class; y: True if same class"
othercls = self.clsmap[self.labels[i]] if random.random()>0.5 else self.idxs
otherit = random.choice(othercls)
same = tensor([self.labels[otherit]==self.labels[i]]).int()
return SiameseImage(self.items[i], self.items[otherit], same)
###Output
_____no_output_____
###Markdown
We are going to want some labels to be used, so let's grab some:
###Code
labeller = RegexLabeller(pat = r'/([^/]+)_\d+.jpg$')
labels = items.map(labeller)
labels[:5], len(labels)
###Output
_____no_output_____
###Markdown
Now we can build our `SiamesePair` transform
###Code
sp = SiamesePair(items, labels)
###Output
_____no_output_____
###Markdown
Let's look at a few bits
###Code
sp.clsmap
sp.labels
###Output
_____no_output_____
###Markdown
Now finally, we can build our `Pipeline` Bringing it to a DataLoaderFirst we'll want to make a `Transform` out of that `resized_image` function we had
###Code
OpenAndResize = Transform(resized_image)
###Output
_____no_output_____
###Markdown
And now that we have all the pieces together, let's build a `Pipeline`:
###Code
pipe = Pipeline([sp, OpenAndResize])
###Output
_____no_output_____
###Markdown
And take a look at its first set:
###Code
x,y,z = pipe(0)
x.shape, y.shape, z
###Output
_____no_output_____
###Markdown
To turn anything into a `DataLoader`, we want it to first be a `TfmdList`. We can accomplish this by passing in a list of indices and a `Pipeline` to run through:
###Code
tls = TfmdLists(range_of(items), pipe)
###Output
_____no_output_____
###Markdown
And now make our `Dataloaders`
###Code
dls = tls.dataloaders(bs=16, after_batch=[Normalize.from_stats(*imagenet_stats)])
###Output
_____no_output_____
###Markdown
And we can look at a batch!
###Code
batch = dls.one_batch()
###Output
_____no_output_____
###Markdown
Now, I did not get the `show` function working, so let's take a look at the batch the "simple" way
###Code
a,b,c = batch[0][0], batch[1][0], batch[2][0]
a.shape, b.shape, c
from torchvision import transforms
im1 = transforms.ToPILImage()(batch[0][0]).convert("RGB")
im2 = transforms.ToPILImage()(batch[1][0]).convert("RGB")
display(im1, im2)
###Output
_____no_output_____ |
lab6_exercises_ANSWERS.ipynb | ###Markdown
Programming Bootcamp 2016 Lesson 6 Exercises -- ANSWERS--- ** Earning points (optional) **- Enter your name below.- Email your `.ipynb` file to me ([email protected]) **before 9:00 am on 9/27**. - You do not need to complete all the problems to get points. - I will give partial credit for effort when possible.- At the end of the course, everyone who gets at least 90% of the total points will get a prize (bootcamp mug!). **Name**: --- 1. Guess the output: scope practice (2pts)Refer to the code below to answer the following questions:
###Code
def fancy_calc(a, b, c):
x1 = basic_calc(a,b)
x2 = basic_calc(b,c)
x3 = basic_calc(c,a)
z = x1 * x2 * x3
return z
def basic_calc(x, y):
result = x + y
return result
x = 1
y = 2
z = 3
result = fancy_calc(x, y, z)
###Output
_____no_output_____
###Markdown
**(A)** List the line numbers of the code above in the order that they will be **executed**. If a line will be executed more than once, list it each time. **NOTE**: Select the cell above and hit "L" to activate line numbering! Answer:```1213141512891023891034891045615``` **(B)** Guess the output if you were to run each of the following pieces of code immediately after running the code above. Then run the code to see if you're right. (Remember to run the code above first)
###Code
print x
print z
print x1
print result
###Output
60
###Markdown
--- 2. Data structure woes (2pt)**(A) Passing a data structure to a function.** Guess the output of the following lines of code if you were to run them immediately following the code block below. Then run the code yourself to see if you're right.
###Code
# run this first!
def getMax(someList):
someList.sort()
x = someList[-1]
return x
scores = [9, 5, 7, 1, 8]
maxScore = getMax(scores)
print maxScore
print someList
print scores
###Output
[1, 5, 7, 8, 9]
###Markdown
> Why does scores get sorted? > When you pass a data structure as a parameter to a function, it's not a **copy** of the data structure that gets passed (as what happens with regular variables). What gets passed is a **direct reference** to the data structure itself. > The reason this is done is because data structures are typically expected to be fairly large, and copying/re-assigning the whole thing can be both time- and memory-consuming. So doing things this way is more efficient. It can also surprise you, though, if you're not aware it's happening. If you would like to learn more about this, look up "Pass by reference vs pass by value". **(B) Copying data structures.** Guess the output of the following code if you were to run them immediately following the code block below. Then run the code yourself to see if you're right.
###Code
# run this first!
list1 = [1, 2, 3, 4]
list2 = list1
list2[0] = "HELLO"
print list2
print list1
###Output
['HELLO', 2, 3, 4]
###Markdown
> Yes, that's right--even when you try to make a new copy of a list, it's actually just a reference to the same list! This is called aliasing. The same thing will happen with a dictionary. This can really trip you up if you don't know it's happening. So what if we want to make a truly separate copy? Here's a way for lists:
###Code
# for lists
list1 = [1, 2, 3, 4]
list2 = list(list1) #make a true copy of the list
list2[0] = "HELLO"
print list2
print list1
###Output
['HELLO', 2, 3, 4]
[1, 2, 3, 4]
###Markdown
And here's a way for dictionaries:
###Code
# for dictionaries
dict1 = {'A':1, 'B':2, 'C':3}
dict2 = dict1.copy() #make a true copy of the dict
dict2['A'] = 99
print dict2
print dict1
###Output
{'A': 99, 'C': 3, 'B': 2}
{'A': 1, 'C': 3, 'B': 2}
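A related sketch for part (A) above (an addition for illustration, not part of the original answer key): if you want `getMax`-style behavior without the side effect, sort a copy inside the function instead of the caller's list.

```python
def getMaxSafe(someList):
    sortedCopy = sorted(someList)   # sorted() builds a new list; the caller's list is untouched
    return sortedCopy[-1]           # (or simply: return max(someList))

scores = [9, 5, 7, 1, 8]
maxScore = getMaxSafe(scores)       # scores is still [9, 5, 7, 1, 8]
```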
###Markdown
--- 3. Writing custom functions (8pts)Complete the following. For some of these problems, you can use your code from previous labs as a starting point. (If you didn't finish those problems, feel free to use the code from the answer sheet, just make sure you understand how they work! Optionally, for extra practice you can try re-writing them using some of the new things we've learned since then.) **(A)** (1pt) Create a function called "gc" that takes a single sequence as a parameter and returns the GC content of the sequence (as a 2 decimal place float).
###Code
def gc(seq):
gcCount = seq.count("C") + seq.count("G")
gcFrac = float(gcCount) / len(seq)
return round(gcFrac,2)
###Output
_____no_output_____
###Markdown
**(B)** (1pt) Create a function called "reverse_compl" that takes a single sequence as a parameter and returns the reverse complement.
###Code
def reverse_compl(seq):
complements = {'A':'T', 'C':'G', 'G':'C', 'T':'A'}
compl = ""
for char in seq:
compl = complements[char] + compl
return compl
###Output
_____no_output_____
###Markdown
**(C)** (1pt) Create a function called "read_fasta" that takes a file name as a parameter (which is assumed to be in fasta format), puts each fasta entry into a dictionary (using the header line as a key and the sequence as a value), and then returns the dictionary.
###Code
def read_fasta(fileName):
ins = open(fileName, 'r')
seqDict = {}
activeID = ""
for line in ins:
line = line.rstrip('\r\n')
if line[0] == ">":
activeID = line[1:]
if activeID in seqDict:
print ">>> Warning: repeat id:", activeID, "-- overwriting previous ID."
seqDict[activeID] = ""
else:
seqDict[activeID] += line
ins.close()
return seqDict
###Output
_____no_output_____
###Markdown
**(D)** (2pts) Create a function called "rand_seq" that takes an integer length as a parameter, and then returns a random DNA sequence of that length. *Hint: make a list of the possible nucleotides*
###Code
def rand_seq(length):
import random
nts = ['A','C','G','T']
seq = ""
for i in range(length):
seq += random.choice(nts)
return seq
###Output
_____no_output_____
###Markdown
**(E)** (2pts) Create a function called "shuffle_nt" that takes a single sequence as a parameter and returns a string that is a shuffled version of the sequence (i.e. the same nucleotides, but in a random order). *Hint: Look for Python functions that will make this easier. For example, the `random` module has some functions for shuffling. There may also be some built-in string functions that are useful. However, you can also do this just using things we've learned.*
###Code
def shuffle_nt(seq):
import random
strList = list(seq)
random.shuffle(strList)
shuffSeq = "".join(strList)
return shuffSeq
###Output
_____no_output_____
###Markdown
**(F)** (1pt) Run the code below to show that all of your functions work. Try to fix any that have problems.
###Code
##### testing gc
gcCont = gc("ATGGGCCCAATGG")
if type(gcCont) != float:
print ">> Problem with gc: answer is not a float, it is a %s." % type(gcCont)
elif gcCont != 0.62:
print ">> Problem with gc: incorrect answer (should be 0.62; your code gave", gcCont, ")"
else:
print "gc: Passed."
##### testing reverse_compl
revCompl = reverse_compl("GGGGTCGATGCAAATTCAAA")
if type(revCompl) != str:
print ">> Problem with reverse_compl: answer is not a string, it is a %s." % type(revCompl)
elif revCompl != "TTTGAATTTGCATCGACCCC":
print ">> Problem with reverse_compl: answer (%s) does not match expected (%s)" % (revCompl, "TTTGAATTTGCATCGACCCC")
else:
print "reverse_compl: Passed."
##### testing read_fasta
try:
ins = open("horrible.fasta", 'r')
except IOError:
print ">> Can not test read_fasta because horrible.fasta is missing. Please add it to the directory with this notebook."
else:
seqDict = read_fasta("horrible.fasta")
if type(seqDict) != dict:
print ">> Problem with read_fasta: answer is not a dictionary, it is a %s." % type(seqDict)
elif len(seqDict) != 22:
print ">> Problem with read_fasta: # of keys in dictionary (%s) does not match expected (%s)" % (len(seqDict), 22)
else:
print "read_fasta: Passed."
##### testing rand_seq
randSeq1 = rand_seq(23)
randSeq2 = rand_seq(23)
if type(randSeq1) != str:
print ">> Problem with rand_seq: answer is not a string, it is a %s." % type(randSeq1)
elif len(randSeq1) != 23:
print ">> Problem with rand_seq: answer length (%s) does not match expected (%s)." % (len(randSeq1), 23)
elif randSeq1 == randSeq2:
print ">> Problem with rand_seq: generated the same sequence twice (%s) -- are you sure this is random?" % randSeq1
else:
print "rand_seq: Passed."
##### testing shuffle_nt
shuffSeq = shuffle_nt("AAAAAAGTTTCCC")
if type(shuffSeq) != str:
print ">> Problem with shuffle_nt: answer is not a string, it is a %s." % type(shuffSeq)
elif len(shuffSeq) != 13:
print ">> Problem with shuffle_nt: answer length (%s) does not match expected (%s)." % (len(shuffSeq), 12)
elif shuffSeq == "AAAAAAGTTTCCC":
print ">> Problem with shuffle_nt: answer is exactly the same as the input. Are you sure this is shuffling?"
elif shuffSeq.count('A') != 6:
print ">> Problem with shuffle_nt: answer doesn't contain the same # of each nt as the input."
else:
print "shuff_seq: Passed."
###Output
gc: Passed.
reverse_compl: Passed.
read_fasta: Passed.
rand_seq: Passed.
shuff_seq: Passed.
###Markdown
--- 4. Using your functions (5pts)Use the **functions you created above** to complete the following. **(A)** (1pt) Create 20 random nucleotide sequences of length 50 and print them to the screen.
###Code
for i in range(20):
print rand_seq(50)
###Output
AGGATTGGTATTTACAATCCAGGGATATATTACATGTGCTCGACCCCGGA
GCAGCGGACGAACAGCTTGGCCCTCAATCGCACGGAGCCATAAACCCATC
TTGTGCGCACTCGCAGGGCCTCAATCTGCTTCGGTCCTGCAATCCTCCTG
TTCAGCGTGGTGAGGGGGGTGACTGTTAGCCAGCCGGGTACAGTGGGGAG
GGGAACTATGCATCTAGGCCCCGTTGTACGTACAACCTCGGCTAAGCTCC
ATTCAACAAGCGTAATGCCACAATCAATTAGTTTATCGATGGCCTAAGCT
TCGCCGGGTTACGAGACGGGCTCCGTGGTAGAGGGGCGCCACCTTGATGG
CGCGTGTATCTAATCCCAGAAACGGATGCCCCTCTCGTACCCGCCCCACA
CGGTGGGGCGAAGCGAGATCCCACTTCATTAATGTGCCCCTTACTCGATG
GCGTCGGAAGATCACAAACGTGTGCATAAAGCCCCAAGAAGCCACTAGCT
CCTACATTAAGACATTCAGCAATAATATTCTTTCTTGTGGGTAGTACGGA
AGTTGTGCTTGACGGGGTATGTACATGGCGTAATAAAGACCGTAACGACA
AAATGGCACCTAGACTTGCCGACGCTTGCCAGTTTATTTAGTTTGCGAAC
GTGGGTTGCGCCAGACACTGAGTGTTGAGTCGGCAGGCGTGATCAAATTA
TAAGTCTCAGGGAGGACCCATCCATTTCATGCTGTAAATATCGAACAGTC
TAGATGGGCAAGGTGCTTCGGTACAACCTCTCGCTTCATTCATGCCCTAC
TCATCGATCAATACCCTATACACTGCCAGCCGGAAGCGAGGAGAGATATG
AACATGGTCTATCTACGGCCCTAGACAAAGACCCGAGACTTTTGATCGCC
TAGGGAATGCTGTATATCCACAATAGTGGGATCTCAGCTTACACATGCGG
TCTTCCGCTCGTCTGTAACTCCACAATTCTGTGTCATAAAGTGCCCGAAG
###Markdown
**(B)** (1pt) Read in `horrible.fasta` into a dictionary. For each sequence, print its reverse complement to the screen.
###Code
seqDict = read_fasta("horrible.fasta")
for seqID in seqDict:
print reverse_compl(seqDict[seqID])
###Output
AACCTCCTGGGGAGGTGGTGGCGGCTCTTGCAGATGTGGAACCAGCAGAGGTTGTGCTTACAGCTGGGCCTGTGGTGCTGCCAGCTGTTTCAGCCGGTGT
CTGATCACTGAGCTGAAACTAAACGTTTTAGGTGGAAAAAAAGCGTCCGAAGGCACCGTGAAATGATTAAGGAACTAAAGAGCTTCTCGCCATGTGAGATCATGTCCTGTTCTCGCCAACATCACAAGATGTCCCCAGACACGCCGCGCCCCCAGCGCGCCGCCCCACACTGCCGGCCCGGAGCGAGGAAAGGGTAGGCGCTGCGCGG
TAGGTGAAAATTCCTTCTGCTGGTTCCCAGAGATACCTAGGAAGACTCTGGGGAACCCTTGGCTAATTATCCCAGGAAAACTGCTGCCTCGGCTGAAACTGGAAGCTCATGGTGGACCCCAAGATATCTTATCTTTGGGACACTTAAAAAAAAAAAGCTATTTTATTCCAATTAAGCCAGTCTTTTGAGAGACACCTAGAAAGAAAGGGCTTCTAAAACATGAACATGAGCTCTGATGTTAGCAACCCAACTTCCACTCCAAAATTACTGAAATATTTATGGGTAAAATTAACTCATAAAAACCTTCTTCT
ACCCCTAAGGAACGTCCCTCGCGTCGGTTTGAGGAGGAAGGCGCACTTCTCTTGATGACCGTTGG
GGTAAGCACAGGATCCAAGAAACAGAGATTACACACAGGAGAGAGGCCAAGCAAAGCTCTGTGATGAAAGGTATGAAGTATGCCCACGGAGCAGCCAGCTGAGACTGGAACAAGAGGATGTAGCACTCCATGCAGGAAAATTCCATGGAATCTAGCACTTTGGGACATCCAGGTGGGCG
AGCAATACTTTCACTGCTGCCAGCCCGAG
GTATCACCTTCAATTTCTTAAGAGCCATTCTTCT
ATTTTCTGAGCTTCTTCTCTCGCAAGGTCTTGTTCATTTGGCAATACTGATATTTGATCTTTGTACACA
GTACCTTCTCGGAAGGCCAGAGTCAATTGTACCACCACAGATCCTGGCCTGAACTTAATATTGGAGAGGCCCAGAAAACCCCCTT
CAAAGCACACAGAGATTCTGTCAGGTGCTGAGACACCACAGCCTTCTCAATTTTGTCCTTAAGGGCTTTATCTTTCATCCAATTGAGCAGAGGCTCAAATTCTTTCTCAACTGCTTCATGACTCTCCTTAGTTTTCTCACTTTTATCAAACTTCATTCCTTCCTTGACAACATTCTGGAACCTCTTCCCATCAAATTTG
GGGCCCGGGACCCGGGTGGGGGGGACCGCCGAGAGGCCCAGCGCAGCGA
GCTTTGGAAACTGGAATGAGGATCACCAACAGGATCCTCATTTTACACAGGAGTTATGAGAGTTACATCCTCTAGCAGAGATGCTTGGTCATTACCTGTGGTACATGAGATTACCGAGCTAAAAGGGAAAAAAAACGATCTTAATGTTCTCCCATGAACTCAACTTAAGCTTTTTATGGAGGCACTGAGGCCATGCAGCTCCTTTTCCAAAAGACACAGATAAAAGCCAAATAAGGTAGAGGACTTTGGAAATTTTCTCTGAAAAGTTAAATTCCACATAATAGTAAGA
TTTTAATCTTCTTCCTTCCCGTCGACTGTCTTTCTTTAAAGCAACTGCAATTTCTTCCCTTACTTCCTCACTGTCTGTTGCTATAATTTGCCCATTGTGAACCATCTGTGAATTCTGTCTTAGGTATTCCATGAATCCATTCACATCTTCATTTAAGTACTCTTTTTTCTTTTTGTTCTTTTTATGTTTTGCTTGGGGTGCATCATTTTTGAGGGATAGCCTATTGGCTTCAAGTTGTTTACGCTTTGGTAGGTTTTGGCTTGTTCCCTCAAAGGATCCCTTCTTCATGTCCTCCCATGATGTTGCAGGCAAGGGTCTCTTGTTATATGTGGTACTAACTCGGGCCCACCTGGTCATAATTTCATCAGTGGTACCGCGCACGAATCCCCCAGAGCAGCCGAGTTGGCGAGCCGGGGAAGACCGCCCTCCTGCGGTATTGGAGACCGGAAGCACATAGTG
TCAATGTTTTCTTCTTTAATCACAGATGATGTACAGACACCAGCATAATTTGCTGATGTAATTTCCTTATCCAAGG
CTTCATATATATTTAATTTTCTCTTTGCTTCACTACTGCAAGGTAGGTGTTTATTATCTCCTTTTACAGATGTGGAAACTTAGGCTCAGAGGTGAAGTAACTTGCACAAGTTTCTACAGCTAGAATTTGAACCAGGTCTGACCCCCGAATTGTGCTCGTCCATAAAGGCCAGCATTTGCCAAATTATGGCACACAGTACCACCAGTGGTACGTGACTTCTTTGGTTGAAAACAGACAAATTTATTTTGTTTTGATAGTTATGTCTTTTAATATGTATTAGAAGAATACATAATTAGCACACATCAAACCTGTGATTTCACAGATATCACTACTTGGGATGAAAATGATATAGGATAACAATGTTAGACCTCAG
AAGATTTCCAGAGTGG
CCATGGTTAGTTAAATTCCCTAGAGATGTAGCCGTGACTCTCCCAATACCTGAAGTGTGCCTCCCCTGACTCTGTGGCATCCTCTGGAAGAGATCATGGTTGTATTCATAATATCTGTAATCTTCTTGTGCACGATCTCCAAGTGGCCGCCTTCTCTGTCCATCAAAAAAGTTATCTGAGAAGAAGTATCGGGAGCCAGAGTCTCCATTCTCAACAGCAAAGTTAACTTCTGTCAAAAATGACTGTGATGAGCCACACTCTCGAGGGACATCTGCTAGGCTCCTGACAAGGTAAGAAGGGGCAGACAGTCTGTGGCTTTCTCTTCTCATTACTTCATGAGGTGTCCTTTGAATTGCAGTTCTCAGGAAACTCTGGTTTCTTGAAACTACACCATCTCCAGAAGCTGAGAAAGCAGTAGCACTTGAATCTGGAAGACAGAGGTCAGTCC
CCTTTCCGGGACTGGTTT
AAATTGACTTCTGCCATAATAAAATC
TGAACAGCTGCTGTGTAGCCCATACTGTGAAAAGTAAAACATCACCCCAGTTCTCGGTACACACAGAGCTCATGCTCCAGCGGGCTGAGCCT
GCTTAAGCCTAGGAGTTTGAGACCAGCCTGGGCAACACAGCAAGACCCCATCTCTACCAAAAAAAAAAAAAAATTAAAGAGTCCTATAGAGAATTCTTATACTCCAATGTGAAGACAACATTGGAAAGGGCCAAGTTTCTCATGCCCTCCAACTAAGAAACCCCTAATAAAAAATGAAGTGACACTTGAACAGGACTTAAGGATTCTACAGTTGGTCTTTGGCAGCAGTATGTTTTAGGAAATGTAATGCGGCGGGTGGGGCGGTGACTTAGCCAGTTATGCTTTTAAATGGAACTGCAATAATAAAAGTGATACTAGTGCAGAAAGTATCTGTATTAGAATTCTAGAGTAAGTCAAGAGCTCACATTCATTAAAATAATGACACAACTCCACGGGGGTGGGGAGAACAGCAGTAAAGCAACCACATACTATACTATTAGACTGGCAACATTGAGACTGAAAATATCCATGAGGAGAATACTGACATCTTA
GCATGGTTGGCCTGAAGGTATTAGTGCGCAGGAGATGATTCAAACTTCCATGGGTCCCATTATTAGGAGCTGGCTTCAATCCCAGGAGATCACACATAACATTGTAAAGTTCAATGTTTTCAAATGGAGGCACTTTAGTCTTGTACTTAAATGTTGAGCCATAACCTACAAAAACAGTCTGCATGCTGTTGACCTTGTTATCAAATCCGTGGTCTCCCTGGAAAAAGCATTTTCCTGATGG
###Markdown
**(C)** (3pts) Read in horrible.fasta into a dictionary. For each sequence, find the length and the gc content. Print the results to the screen in the following format:```SeqID Len GC... ... ...```That is, print the header shown above (separating each column's title by a tab (`\t`)), followed by the corresponding info about each sequence on a separate line. The "columns" should be separated by tabs. Remember that you can do this printing as you loop through the dictionary... that way you don't have to store the length and gc content.(In general, this is the sort of formatting you should use when printing data files!)
###Code
seqDict = read_fasta("horrible.fasta")
print "SeqID\tLen\tGC"
for seqID in seqDict:
seq = seqDict[seqID]
seqLen = len(seq)
seqGC = gc(seq)
print seqID + "\t" + str(seqLen) + "\t" + str(seqGC)
###Output
SeqID Len GC
varlen2_uc007xie.1_4456 100 0.61
varlen2_uc010mlp.1_79 208 0.57
varlen2_uc009bxt.1_1728 311 0.4
varlen2_uc009div.2_242 65 0.58
varlen2_uc003its.2_2976 179 0.5
varlen2_uc003nvg.4_2466 29 0.55
varlen2_uc029ygd.1_73 34 0.35
varlen2_uc007kxx.1_2963 69 0.36
varlen2_uc009wph.3_423 85 0.51
varlen2_uc010osx.2_1007 199 0.41
varlen2_uc001agr.3_7 49 0.84
varlen2_uc001pmn.3_3476 289 0.39
varlen2_uc003khi.3_3 459 0.45
varlen2_uc021qfk.1>2_1472 76 0.34
varlen2_uc011moe.2_5914 373 0.36
varlen2_uc003hyy.2_273 16 0.44
varlen2_uc007nte.2_374 448 0.46
varlen2_uc007fws.1_377 18 0.56
varlen2_uc003pij.1_129 26 0.27
varlen2_uc002wkt.1_1569 92 0.52
varlen2_uc010suq.2_3895 491 0.4
varlen2_uc003yos.2_1634 241 0.42
###Markdown
--- Bonus question: K-mer generation (+2 bonus points)This question is optional, but if you complete it, I'll give you two bonus points. You won't lose points if you skip it.Create a function called `get_kmers` that takes a single integer parameter, `k`, and returns a list of all possible k-mers of A/T/G/C. For example, if the supplied `k` was 2, you would generate all possible 2-mers, i.e. [AA, AT, AG, AC, TA, TT, TG, TC, GA, GT, GG, GC, CA, CT, CG, CC]. Notes:- This function must be *generic*, in the sense that it can take *any* integer value of `k` and produce the corresponding set of k-mers.- As there are $4^k$ possible k-mers for a given k, stick to smaller values of k for testing!!- I have not really taught you any particularly obvious way to solve this problem, so feel free to get creative in your solution!*There are many ways to do this, and plenty of examples online. Since the purpose of this question is to practice problem solving, don't directly look up "k-mer generation"... try to figure it out yourself. You're free to look up more generic things, though.*
###Code
# Method 1
# Generic kmer generation for any k and any alphabet (default is DNA nt)
# Pretty fast
def get_kmers1(k, letters=['A','C','G','T']):
kmers = []
choices = len(letters)
finalNum = choices ** k
# initialize to blank strings
for i in range(finalNum):
kmers.append("")
# imagining the kmers lined up vertically, generate one "column" at a time
for i in range(k):
consecReps = choices ** (k - (i + 1)) #number of times to consecutively repeat each letter
patternReps = choices ** i #number of times to repeat pattern of letters
# create the current column of letters
index = 0
for j in range(patternReps):
for m in range(choices):
for n in range(consecReps):
kmers[index] += letters[m]
index += 1
return kmers
get_kmers1(3)
# Method 2
# Generate numbers, discard any that aren't 1/2/3/4's, convert to letters.
# Super slow~
def get_kmers2(k):
discard = ["0", "5", "6", "7", "8", "9"]
convert = {"1": "A", "2": "T", "3": "G", "4": "C"}
min = int("1" * k)
max = int("4" * k)
kmers = []
tmp = []
for num in range(min, (max + 1)): # generate numerical kmers
good = True
for digit in str(num):
if digit in discard:
good = False
break
if good == True:
tmp.append(num)
for num in tmp: # convert numerical kmers to ATGC
result = ""
for digit in str(num):
result += convert[digit]
kmers.append(result)
return kmers
# Method 3 (by Nate)
# A recursive solution. Fast!
# (A recursive function is a function that calls itself)
def get_kmers3(k):
nt = ['A', 'T', 'G', 'C']
k_mers = []
if k == 1:
return nt
else:
for i in get_kmers3(k - 1):
for j in nt:
k_mers.append(i + j)
return k_mers
# Method 4 (by Nate)
# Fast
def get_kmers4(k):
nt = ['A', 'T', 'G', 'C']
k_mers = []
total_kmers = len(nt)**k
# make a list of size k with all zeroes.
# this keeps track of which base we need at each position
pointers = []
for p in range(k):
pointers.append(0)
for k in range(total_kmers):
# use the pointers to generate the next k-mer
k_mer = ""
for p in pointers:
k_mer += nt[p]
k_mers.append(k_mer)
# get the pointers ready for the next k-mer by updating them left to right
pointersUpdated = False
i = 0
while not pointersUpdated and i < len(pointers):
if pointers[i] < len(nt) - 1:
pointers[i] += 1
pointersUpdated = True
else:
pointers[i] = 0
i += 1
return k_mers
# Method 5 (by Justin Becker, bootcamp 2013)
# Fast!
def get_kmers5(k): #function requires int as an argument
kmers = [""]
for i in range(k): #after each loop, kmers will store the complete set of i-mers
currentNumSeqs = len(kmers)
for j in range(currentNumSeqs): #each loop takes one i-mer and converts it to 4 (i+1)=mers
currentSeq = kmers[j]
kmers.append(currentSeq + 'C')
kmers.append(currentSeq + 'T')
kmers.append(currentSeq + 'G')
kmers[j] += 'A'
return kmers
# Method 6 (by Nick)
# Convert to base-4
def get_kmers6(k):
bases = ['a', 'g', 'c', 't']
kmers = []
for i in range(4**k):
digits = to_base4(i, k)
mystr = ""
for baseidx in digits:
mystr += bases[baseidx]
kmers.append(mystr)
return kmers
# convert num to a k-digit base-4 int
def to_base4(num, k):
digits = []
while k > 0:
digits.append(num/4**(k-1))
num %= 4**(k-1)
k -= 1
return digits
# Below: more from Nate
import random
import time
alphabet = ['A', 'C', 'G', 'T']
## Modulus based
def k_mer_mod(k):
k_mers = []
for i in range(4**k):
k_mer = ''
for j in range(k):
k_mer = alphabet[(i/4**j) % 4]+ k_mer
k_mers.append(k_mer)
return k_mers
## maybe the range operator slows things down by making a big tuple
def k_mer_mod_1(k):
k_mers = []
total = 4**k
i = 0
while i < total:
k_mer = ''
for j in range(k):
k_mer = alphabet[(i/4**j) % 4]+ k_mer
k_mers.append(k_mer)
i += 1
return k_mers
## Does initializing the list of k_mers help?
def k_mer_mod_2(k):
k_mers = [''] * 4**k
for i in range(4**k):
k_mer = ''
for j in range(k):
k_mer = alphabet[(i/4**j) % 4] + k_mer
k_mers[i] = k_mer
return k_mers
## What's faster? element assignment or hashing?
def k_mer_mod_set(k):
k_mers = set()
for i in range(4**k):
k_mer = ''
for j in range(k):
k_mer = alphabet[(i/4**j) % 4] + k_mer
k_mers.add(k_mer)
return list(k_mers)
## does creating the string up front help?
#def k_mer_mod_3(k):
#n k_mers = []
# k_mer = "N" * k
# for i in range(4**k):
# for j in range(k):
# k_mer[j] = alphabet[(i/4**j) % 4]
# k_mers.append(k_mer)
# return k_mers
# Nope! Strings are immutable, dummy!
# maybe we can do something tricky with string substitution
def k_mer_mod_ssub(k):
template = "\%s" * k
k_mers = []
for i in range(4**k):
k_mer = []
for j in range(k):
k_mer.append(alphabet[(i/4**j) % 4])
k_mers.append(template % k_mer)
return k_mers
# what about using a list?
def k_mer_mod_4(k):
k_mers = [''] * 4**k
k_mer = [''] * k
for i in range(4**k):
for j in range(k):
k_mer[j] = alphabet[(i/4**j) % 4]
k_mers[i] = "".join(k_mer)
return k_mers
## recursive version
def k_mer_recursive(k):
if k == 0:
return ['']
else:
k_mers = []
for k_mer in k_mer_recursive(k-1):
for n in alphabet:
k_mers.append("%s%s" % (k_mer, n))
return k_mers
## That works, but what I wanted to be like, really obnoxious about it
def k_mer_recursive_2(k):
if k == 0:
return ['']
else:
k_mers = []
[[k_mers.append("%s%s" % (k_mer, n)) for n in alphabet] for k_mer in k_mer_recursive_2(k-1)]
return k_mers
# using list instead of strings to store the k_mers
def k_mer_recursive_3(k, j = False):
if k == 0:
return [[]]
else:
k_mers = []
[[k_mers.append((k_mer + [n])) if j else k_mers.append("".join(k_mer + [n])) for n in alphabet] for k_mer in k_mer_recursive_3(k-1, True)]
return k_mers
## stochastic (I have a good feeling about this one!)
def k_mer_s(k):
s = set()
i = 0
while i < 4**k:
k_mer = ''
for j in range(k):
k_mer = k_mer + random.choice(alphabet)
if k_mer not in s:
s.add(k_mer)
i += 1
return list(s)
## I sure hope this works because now we're pretty much cheating
import array
def k_mer_mod_array(k):
k_mers = []
k_mer = array.array('c', ['N'] * k)
for i in range(4**k):
for j in range(k):
k_mer[j] = alphabet[(i/4**j) % 4]
k_mers.append("".join(k_mer))
return k_mers
## That could have gone better.
###Output
_____no_output_____
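For comparison, a minimal sketch of yet another approach (not one of the methods above), using the standard library's `itertools.product` to enumerate all k-mers directly:

```python
import itertools

def get_kmers_product(k, letters='ACGT'):
    # itertools.product yields every length-k tuple of letters, in sorted order
    return ["".join(p) for p in itertools.product(letters, repeat=k)]

# get_kmers_product(2) -> ['AA', 'AC', 'AG', 'AT', 'CA', ..., 'TT']  (16 k-mers)
```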
###Markdown
------ Extra problems (0pts) **(A)** Create a function that counts the number of occurrences of each nt in a specified string. Your function should accept a nucleotide string as a parameter, and should return a dictionary with the counts of each nucleotide (where the nt is the key and the count is the value).
###Code
def nt_counts(seq):
counts = {}
for nt in seq:
if nt not in counts:
counts[nt] = 1
else:
counts[nt] += 1
return counts
nt_counts("AAAAATTTTTTTGGGGC")
###Output
_____no_output_____
###Markdown
**(B)** Create a function that generates a random nt sequence of a specified length with specified nt frequencies. Your function should accept as parameters: - a length - a dictionary of nt frequencies and should return the generated string. You'll need to figure out a way to use the supplied frequencies to generate the sequence. An example of the nt freq dictionary could be: {'A':0.60, 'G':0.10, 'C':0.25, 'T':0.05}
###Code
def generate_nucleotide(length, freqs):
import random
seq = ""
samplingStr = ""
# maybe not the best way to do this, but fun:
# create a list with the indicated freq of nt
for nt in freqs:
occurPer1000 = int(1000*freqs[nt])
samplingStr += nt*occurPer1000
samplingList = list(samplingStr)
# sample from the list
for i in range(length):
newChar = random.choice(samplingList)
seq += newChar
return seq
generate_nucleotide(100, {'A':0.60, 'G':0.10, 'C':0.25, 'T':0.05})
# let's check if it's really working
n = 10000
testSeq = generate_nucleotide(n, {'A':0.60, 'G':0.10, 'C':0.25, 'T':0.05})
obsCounts = nt_counts(testSeq)
for nt in obsCounts:
print nt, float(obsCounts[nt]) / n
###Output
A 0.5941
C 0.2568
T 0.0457
G 0.1034
|
05_training_with_imputed_mf_data.ipynb | ###Markdown
Loan Prediction 05 - Training and Validation of Models with MissForest Imputed Dataset
###Code
import math
import sys
sys.path.append('utils')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from joblib import dump, load
plt.style.use('seaborn')
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier
import metrics_utils
import model_utils
df_import = pd.read_csv('dataset/train_rf_imputed.csv')
df_import
columns_x = df_import.drop(columns=['Loan_Status']).columns
column_y = ['Loan_Status']
X_train, X_validation, y_train, y_validation = train_test_split(
df_import[columns_x], df_import[column_y], test_size=0.20, random_state=42)
scaler = MinMaxScaler()
scaler.fit(df_import[columns_x])
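# note: the scaler is fit on the full dataset (training + validation rows); the
# more conservative choice would be to fit on X_train only, so that validation
# statistics cannot leak into the transform.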
dump(scaler, 'saves/minmax_scaler_miss_forest_imputation.bin', compress=True)
X_train_norm = pd.DataFrame(data=scaler.transform(X_train),columns=X_train.columns)
X_validation_norm = pd.DataFrame(data=scaler.transform(X_validation),columns=X_train.columns)
X_train_norm.describe()
print(X_train_norm.shape)
X_validation_norm.describe()
print(X_validation_norm.shape)
###Output
(114, 13)
###Markdown
Logistic Regression Classifier
###Code
logistic_regression_params = {
'C' : [1,10,100,1000],
'penalty' : ['l1', 'l2', 'elasticnet', 'none'],
'solver' : ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
}
best_lr,best_lr_params,best_lr_score = model_utils.find_best_classification_model_with_cross_validation(
LogisticRegression(random_state=0, class_weight = 'balanced'),
logistic_regression_params,
X_train_norm.values,
y_train.values.ravel(),
metric = 'f1')
df_result = model_utils.predict(best_lr,X_validation_norm,y_validation);
metrics_utils.evalute_model_performance(best_lr, 'Logistic Regression',X_validation_norm,y_validation,df_result)
dump(best_lr, 'saves/logistic_regression_miss_forest_imputation.bin', compress=True)
###Output
_____no_output_____
###Markdown
Ridge Regression
###Code
ridge_regression_params = {
'alpha' : [1,10,100],
'solver' : ['auto', 'svd', 'lsqr', 'sag', 'cholesky','saga','sparse_cg']
}
best_ridge,best_ridge_params,best_ridge_score = model_utils.find_best_classification_model_with_cross_validation(
RidgeClassifier(random_state=0, class_weight = 'balanced'),
ridge_regression_params,
X_train_norm.values,
y_train.values.ravel(),
metric = 'f1')
df_result = model_utils.predict(best_ridge,X_validation_norm,y_validation);
metrics_utils.evalute_model_performance(best_ridge, 'Ridge Regression',X_validation_norm,y_validation,df_result)
dump(best_ridge, 'saves/ridge_regression_miss_forest_imputation.bin', compress=True)
###Output
_____no_output_____
###Markdown
Random Forest Classifier
###Code
random_forest_params = {
'n_estimators' : [50,100,150,200],
'min_samples_split': [2,3,4,5],
'max_depth':[5,8,10,13,15],
'criterion':['gini','entropy'],
'oob_score':[True]
}
best_random_forest,best_random_forest_params,best_random_forest_score = model_utils.find_best_classification_model_with_cross_validation(
RandomForestClassifier(random_state=0, class_weight = 'balanced'),
random_forest_params,
X_train_norm.values,
y_train.values.ravel(),
metric = 'f1')
df_result = model_utils.predict(best_random_forest,X_validation_norm,y_validation)
metrics_utils.evalute_model_performance(model = best_random_forest, model_name = 'Random Forest', X = X_validation_norm, y = y_validation, df_result = df_result)
dump(best_random_forest, 'saves/random_forest_miss_forest_imputation.bin', compress=True)
###Output
_____no_output_____
###Markdown
Gradient Boosting
###Code
gboost_params = {
'loss':['deviance', 'exponential'],
'learning_rate':[0.01,0.1],
'n_estimators' : [50,100,150],
'min_samples_split': [2,3,4,5],
'max_depth':[2,3,5,8]
}
best_gboost,best_gboost_params,best_gboost_score = model_utils.find_best_classification_model_with_cross_validation(
GradientBoostingClassifier(random_state=0),
gboost_params,
X_train_norm.values,
y_train.values.ravel(),
metric = 'f1')
df_result = model_utils.predict(best_gboost,X_validation_norm,y_validation)
metrics_utils.evalute_model_performance(model = best_gboost, model_name = 'Gradient Boosting', X = X_validation_norm, y = y_validation, df_result = df_result)
dump(best_gboost, 'saves/gradient_boosting_miss_forest_imputation.bin', compress=True)
###Output
_____no_output_____
###Markdown
Extreme Gradient Boosting
###Code
xgb_params = {'objective':['binary:logistic'],
'learning_rate': [0.1,0.3,0.5],
'gamma':[0,1],
'max_depth': [3,4,6,10],
'subsample': [0.5, 1],
'n_estimators': [50,100,150],
'missing':[-999]}
best_xgb, best_xgb_params, best_xgb_score = model_utils.find_best_classification_model_with_cross_validation(
XGBClassifier(seed=0),
xgb_params,
X_train_norm.values,
y_train.values.ravel(),
metric = 'f1')
df_result = model_utils.predict(best_xgb,X_validation_norm,y_validation)
metrics_utils.evalute_model_performance(model = best_xgb, model_name = 'Extreme Gradient Boosting', X = X_validation_norm, y = y_validation, df_result = df_result)
dump(best_xgb, 'saves/extreme_gradient_boosting_miss_forest_imputation.bin', compress=True)
###Output
_____no_output_____ |
notebooks/archive/calc_w.ipynb | ###Markdown
An attempt to calculate vertical velocity using continuity
###Code
import xarray as xr
import numpy as np
from verticalvelocity_xr import calc_w_continuity as calc_w
from matplotlib import pyplot as plt
%matplotlib inline
# Specify the location of the file
rootdir = '/archive/oar.gfdl.cmip6/ESM4/DECK/ESM4_piControl_D/gfdl.ncrc4-intel16-prod-openmp/pp/'
datadir = 'ocean_annual_rho2/av/annual_5yr/'
filename = 'ocean_annual_rho2.0866-0870.ann.nc'
ds = xr.open_dataset(rootdir+datadir+filename)
w = calc_w(u=ds.umo,v=ds.vmo,z=ds.rho2_i,wrapx=True,wrapy=False)
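# Rough idea behind the continuity approach (an assumption about what
# calc_w_continuity does internally): with no flow through the bottom boundary,
# the vertical transport at each interface is the horizontal mass convergence
# accumulated below it, w(z) = -integral of (d(umo)/dx + d(vmo)/dy) dz'.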
w.sum(dim=['xh','yh']).plot()
im = w.isel(z_i=10).plot()
im.set_clim([-0.5E8,0.5E8])
###Output
_____no_output_____ |
#MentalHealthBIllKenya.ipynb | ###Markdown
The State of Mental Health in Kenya An analysis of Kenyans' expressions on Mental Health Bill [@gyleodhis](https://www.twitter.com/gyleodhis)  Authorizing an application to access Twitter account data
###Code
import twitter
CONSUMER_KEY = '' #Intentionally removed
CONSUMER_SECRET = '' #Intentionally removed
OAUTH_TOKEN = '' #Intentionally removed
OAUTH_TOKEN_SECRET = '' #Intentionally removed
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
print(twitter_api) # This confirms connection to twitter api
###Output
<twitter.api.Twitter object at 0x7fa35c3d0048>
###Markdown
Retrieving trends First we need to identify the trending topics in two major cities in Kenya and then try to find a relationship between these tweets and the recently passed MENTAL HEALTH BILL.
###Code
# The Yahoo! Where On Earth ID for the entire world is 1.
# See https://dev.twitter.com/docs/api/1.1/get/trends/place and
# http://developer.yahoo.com/geo/geoplanet/
Nairobi_ID = 1528488
Mombasa_ID = 1528335
# Prefix ID with the underscore for query string parameterization.
# Without the underscore, the twitter package appends the ID value
# to the URL itself as a special case keyword argument.
nairobi_trends = twitter_api.trends.place(_id=Nairobi_ID)
mombasa_trends = twitter_api.trends.place(_id=Mombasa_ID)
#print(nairobi_trends)
print()
#print(mombasa_trends)
###Output
###Markdown
Topics Trending in the city of Nairobi
###Code
for trend in nairobi_trends[0]['trends']:
print(trend['name'])
###Output
#Turkanadrought
#mightiestprophetinnairobi
Kindly
Ruto
Shame
Baringo
#WeCannotIgnore
#TuesdayThoughts
#AMLiveNTV
DCI Kinoti
Cuba
Ekeza Sacco
Linus Kaikai
Kenyans
kenyatta university
M-Pesa
James Oduor
Maseno University
Patrick Hinga
Pastor Ng'ang'a
Wanja
The E Review
#AdelleAndShaffieOnKISS
#BeyondPressConfrences
#JeffAndHamoOnHot
#Brekko
#SpencerBuyingJustice
#AlexNaJalas
#NyakundiStrong
#MainaAndKingangi
#GMITM
#BarakaZaQwetu
#Breakfast984
#BillyNaTricky
#BarakaZaMilele
#mondaymotivation
#NyakundiTheLiar
#SautiYaMayouths
#PrisonDiaries
#presspass
#mondaythoughts
#Sirkal
#MondayReport
#totalcafcc
#Messi
#Home4MauMauHeroes
#KTNMorningExpress
#DayBreak
#NTVTonight
#helpachildreach5
###Markdown
Topics Trending in the City of Mombasa
###Code
for trend in mombasa_trends[0]['trends']:
print(trend['name'])
###Output
#AdelleAndShaffieOnKISS
Turkana
#TheScoreKE
#WeCannotIgnore
#TuesdayThoughts
#AMLiveNTV
DCI Kinoti
Cuba
Ekeza Sacco
Linus Kaikai
Kenyans
kenyatta university
M-Pesa
James Oduor
Maseno University
Patrick Hinga
Pastor Ng'ang'a
Wanja
The E Review
Nanok
James Ng'ang'a
everton
#BeyondPressConfrences
#JeffAndHamoOnHot
#Brekko
#SpencerBuyingJustice
#AlexNaJalas
#NyakundiStrong
#MainaAndKingangi
#GMITM
#BarakaZaQwetu
#Breakfast984
#BillyNaTricky
#BarakaZaMilele
#mondaymotivation
#NyakundiTheLiar
#mightiestprophetinnairobi
#SautiYaMayouths
#PrisonDiaries
#presspass
#mondaythoughts
#Sirkal
#MondayReport
#totalcafcc
#Messi
#Home4MauMauHeroes
#KTNMorningExpress
#DayBreak
#NTVTonight
#helpachildreach5
###Markdown
Common Trends in relation to Mental Health. SautiYaMayouths (The voice of the youths) presspass MondayReport mondaymotivation Kenyatta University (increased suicides in the university) TheScoreKE WeCannotIgnore (In relation to Kenyatta University) TuesdayThoughts MentalHealthBillKe analysis After several unfortunate incidents related to mental health throughout the country, a bill was tabled in parliament to revisit the Mental Health Act and make it more protective of the most vulnerable: women and youth. The analysis below shows how Kenyans reacted to it on Twitter. First let us look at the top 10 tweets with the most favorites and those retweeted most.
###Code
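# note: `statuses` is populated by the #MentalHealthBillKe search cell further
# down in this notebook, so that cell needs to run before this one.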
for i in range(10):
print()
print(statuses[i]['text'])
print('Favorites: ', statuses[i]['favorite_count'])
print('Retweets: ', statuses[i]['retweet_count'])
###Output
RT @_fels1: One man can impregnate 365+ women in 1 yr. 1 woman can only be impregnated by 1 man in 1 yr. Why dont we focus more on male con…
Favorites: 0
Retweets: 57
RT @Gachee: If we continue to overlook mental health The economy as a whole will be affected. Mental illness will afflict more people, unem…
Favorites: 0
Retweets: 14
RT @_fels1: One man can impregnate 365+ women in 1 yr. 1 woman can only be impregnated by 1 man in 1 yr. Why dont we focus more on male con…
Favorites: 0
Retweets: 57
#socialanxiety #me #yes #mentalhealthbillke https://t.co/3zs14UYatp
Favorites: 1
Retweets: 0
RT @FaithArimba: #MentalHealthBillKE Young people in Kenyan social setting today tend to be depressed when they lack jobs; when they feel l…
Favorites: 0
Retweets: 38
RT @enock_kiptanui: #MentalHealthBillKE
In the event you run into someone you know has mental illness,do you whisper to him because you th…
Favorites: 0
Retweets: 41
RT @MzalendoWatch: I was diagnosed with severe depression and bipolar in January. It's only my sister who believed and therefore support in…
Favorites: 0
Retweets: 16
RT @SylviaKasanga: Very interesting submissions on the #MentalHealthBillKe https://t.co/jXfJu1PBsq
Favorites: 0
Retweets: 11
RT @Gachee: If we continue to overlook mental health The economy as a whole will be affected. Mental illness will afflict more people, unem…
Favorites: 0
Retweets: 14
RT @InspectorDhola: Visual reminders of disorganisation can have negative effects on our physical and mental health. Well said @LibbySander…
Favorites: 0
Retweets: 1
###Markdown
Let us pull down one of the tweets
###Code
import json
# Set this variable to a trending topic,
# or anything else for that matter. The example query below
# was a trending topic when this content was being developed
# and is used throughout the remainder of this chapter.
q = '#MentalHealthBillKe'
count = 1000
# Import unquote to prevent url encoding errors in next_results
from urllib.parse import unquote
# See https://dev.twitter.com/rest/reference/get/search/tweets
search_results = twitter_api.search.tweets(q=q, count=count)
statuses = search_results['statuses']
# Iterate through 5 more batches of results by following the cursor
for _ in range(5):
print('Length of statuses', len(statuses))
try:
next_results = search_results['search_metadata']['next_results']
except KeyError as e: # No more results when next_results doesn't exist
break
# Create a dictionary from next_results, which has the following form:
# ?max_id=847960489447628799&q=%23RIPSelena&count=100&include_entities=1
kwargs = dict([ kv.split('=') for kv in unquote(next_results[1:]).split("&") ])
search_results = twitter_api.search.tweets(**kwargs)
statuses += search_results['statuses']
# Show one sample search result by slicing the list...
print(json.dumps(statuses[0], indent=1))
###Output
Length of statuses 100
Length of statuses 200
Length of statuses 289
Length of statuses 376
Length of statuses 468
{
"is_quote_status": false,
"metadata": {
"result_type": "recent",
"iso_language_code": "en"
},
"in_reply_to_status_id": null,
"contributors": null,
"created_at": "Mon Mar 18 23:24:18 +0000 2019",
"lang": "en",
"in_reply_to_status_id_str": null,
"favorite_count": 0,
"id": 1107784823463165952,
"in_reply_to_user_id_str": null,
"coordinates": null,
"place": null,
"truncated": false,
"retweeted": false,
"in_reply_to_user_id": null,
"retweet_count": 57,
"id_str": "1107784823463165952",
"geo": null,
"text": "RT @_fels1: One man can impregnate 365+ women in 1 yr. 1 woman can only be impregnated by 1 man in 1 yr. Why dont we focus more on male con\u2026",
"retweeted_status": {
"is_quote_status": false,
"metadata": {
"result_type": "recent",
"iso_language_code": "en"
},
"in_reply_to_status_id": null,
"contributors": null,
"created_at": "Tue Mar 12 08:54:52 +0000 2019",
"lang": "en",
"in_reply_to_status_id_str": null,
"favorite_count": 137,
"id": 1105391696047673344,
"in_reply_to_user_id_str": null,
"coordinates": null,
"place": null,
"truncated": true,
"retweeted": false,
"in_reply_to_user_id": null,
"retweet_count": 57,
"id_str": "1105391696047673344",
"geo": null,
"text": "One man can impregnate 365+ women in 1 yr. 1 woman can only be impregnated by 1 man in 1 yr. Why dont we focus more\u2026 https://t.co/3kwsb369f8",
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"entities": {
"urls": [
{
"display_url": "twitter.com/i/web/status/1\u2026",
"expanded_url": "https://twitter.com/i/web/status/1105391696047673344",
"url": "https://t.co/3kwsb369f8",
"indices": [
117,
140
]
}
],
"user_mentions": [],
"hashtags": [],
"symbols": []
},
"user": {
"profile_image_url": "http://pbs.twimg.com/profile_images/1107618815444176897/tQq2RqaX_normal.jpg",
"profile_background_image_url_https": null,
"entities": {
"description": {
"urls": []
}
},
"default_profile": true,
"profile_sidebar_border_color": "C0DEED",
"profile_background_image_url": null,
"created_at": "Sat Jan 14 16:40:42 +0000 2017",
"protected": false,
"url": null,
"is_translator": false,
"lang": "en",
"translator_type": "none",
"has_extended_profile": true,
"id": 820309675245760512,
"statuses_count": 9409,
"notifications": false,
"default_profile_image": false,
"geo_enabled": true,
"profile_text_color": "333333",
"profile_background_tile": false,
"verified": false,
"is_translation_enabled": false,
"profile_banner_url": "https://pbs.twimg.com/profile_banners/820309675245760512/1552839383",
"time_zone": null,
"contributors_enabled": false,
"screen_name": "_fels1",
"location": "Kenya",
"description": "Chelsea Fan|| Raila is a LIAR|| Faith is a form of madness....",
"profile_link_color": "1DA1F2",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/1107618815444176897/tQq2RqaX_normal.jpg",
"utc_offset": null,
"favourites_count": 19336,
"profile_use_background_image": true,
"profile_background_color": "F5F8FA",
"profile_sidebar_fill_color": "DDEEF6",
"followers_count": 15653,
"listed_count": 5,
"follow_request_sent": false,
"name": "Wuod Japuonj\ud83c\uddf0\ud83c\uddea",
"friends_count": 13710,
"id_str": "820309675245760512",
"following": false
},
"favorited": false,
"in_reply_to_screen_name": null
},
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"entities": {
"urls": [],
"user_mentions": [
{
"screen_name": "_fels1",
"id_str": "820309675245760512",
"id": 820309675245760512,
"name": "Wuod Japuonj\ud83c\uddf0\ud83c\uddea",
"indices": [
3,
10
]
}
],
"hashtags": [],
"symbols": []
},
"user": {
"profile_image_url": "http://pbs.twimg.com/profile_images/1103766261966745603/YffrAQr6_normal.jpg",
"profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme1/bg.png",
"entities": {
"description": {
"urls": []
}
},
"default_profile": true,
"profile_sidebar_border_color": "C0DEED",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme1/bg.png",
"created_at": "Mon Oct 12 14:07:52 +0000 2015",
"protected": false,
"url": null,
"is_translator": false,
"lang": "en",
"translator_type": "none",
"has_extended_profile": true,
"id": 3937753342,
"statuses_count": 45697,
"notifications": false,
"geo_enabled": false,
"default_profile_image": false,
"profile_background_tile": false,
"verified": false,
"is_translation_enabled": false,
"profile_text_color": "333333",
"time_zone": null,
"contributors_enabled": false,
"screen_name": "MbeniaJP",
"location": "",
"description": "",
"profile_link_color": "1DA1F2",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/1103766261966745603/YffrAQr6_normal.jpg",
"utc_offset": null,
"favourites_count": 104332,
"profile_use_background_image": true,
"profile_background_color": "C0DEED",
"profile_sidebar_fill_color": "DDEEF6",
"followers_count": 3614,
"listed_count": 19,
"follow_request_sent": false,
"name": "JayP.O",
"friends_count": 3114,
"id_str": "3937753342",
"following": false
},
"favorited": false,
"in_reply_to_screen_name": null
}
###Markdown
Extracting text, screen names, and hashtags from tweets
###Code
status_texts = [ status['text']
for status in statuses ]
screen_names = [ user_mention['screen_name']
for status in statuses
for user_mention in status['entities']['user_mentions'] ]
hashtags = [ hashtag['text']
for status in statuses
for hashtag in status['entities']['hashtags'] ]
# Compute a collection of all words from all tweets
words = [ w
for t in status_texts
for w in t.split() ]
# Explore the first 5 items for each...
print(json.dumps(status_texts[0:5], indent=1))
print(json.dumps(screen_names[0:5], indent=1) )
print(json.dumps(hashtags[0:5], indent=1))
print(json.dumps(words[0:5], indent=1))
###Output
[
"RT @_fels1: One man can impregnate 365+ women in 1 yr. 1 woman can only be impregnated by 1 man in 1 yr. Why dont we focus more on male con\u2026",
"RT @Gachee: If we continue to overlook mental health The economy as a whole will be affected. Mental illness will afflict more people, unem\u2026",
"RT @_fels1: One man can impregnate 365+ women in 1 yr. 1 woman can only be impregnated by 1 man in 1 yr. Why dont we focus more on male con\u2026",
"#socialanxiety #me #yes #mentalhealthbillke https://t.co/3zs14UYatp",
"RT @FaithArimba: #MentalHealthBillKE Young people in Kenyan social setting today tend to be depressed when they lack jobs; when they feel l\u2026"
]
[
"_fels1",
"Gachee",
"_fels1",
"FaithArimba",
"enock_kiptanui"
]
[
"socialanxiety",
"me",
"yes",
"mentalhealthbillke",
"MentalHealthBillKE"
]
[
"RT",
"@_fels1:",
"One",
"man",
"can"
]
###Markdown
Creating a basic frequency distribution from the words in tweets
###Code
from collections import Counter
for item in [words, screen_names, hashtags]:
c = Counter(item)
print(c.most_common()[:10]) # top 10
print()
###Output
[('RT', 451), ('the', 309), ('to', 262), ('#MentalHealthBillKE', 249), ('in', 203), ('of', 188), ('and', 182), ('1', 181), ('be', 151), ('a', 143)]
[('MzalendoWatch', 59), ('enock_kiptanui', 55), ('Dmarigiri_', 54), ('_fels1', 44), ('SylviaKasanga', 43), ('Gachee', 28), ('FaithArimba', 28), ('ChiromoLMC', 20), ('EvyonK', 19), ('Nichonasri1', 18)]
[('MentalHealthBillKE', 250), ('MentalHealthBillKe', 24), ('ProtectingTheRightsOfTheMentallySick', 5), ('TuesdayThoughts', 3), ('ProtectingTheRightsOfMentallySick', 3), ('WeShallOvercome', 3), ('BeUnstoppable', 3), ('GameOfPhonesKE', 3), ('Kenya', 2), ('AfricaNow19', 2)]
###Markdown
Tabulating our results
###Code
from prettytable import PrettyTable
for label, data in (('Word', words),
('Screen Name', screen_names),
('Hashtag', hashtags)):
pt = PrettyTable(field_names=[label, 'Count'])
c = Counter(data)
[ pt.add_row(kv) for kv in c.most_common()[:10] ]
pt.align[label], pt.align['Count'] = 'l', 'r' # Set column alignment
print(pt)
###Output
+---------------------+-------+
| Word | Count |
+---------------------+-------+
| RT | 451 |
| the | 309 |
| to | 262 |
| #MentalHealthBillKE | 249 |
| in | 203 |
| of | 188 |
| and | 182 |
| 1 | 181 |
| be | 151 |
| a | 143 |
+---------------------+-------+
+----------------+-------+
| Screen Name | Count |
+----------------+-------+
| MzalendoWatch | 59 |
| enock_kiptanui | 55 |
| Dmarigiri_ | 54 |
| _fels1 | 44 |
| SylviaKasanga | 43 |
| Gachee | 28 |
| FaithArimba | 28 |
| ChiromoLMC | 20 |
| EvyonK | 19 |
| Nichonasri1 | 18 |
+----------------+-------+
+--------------------------------------+-------+
| Hashtag | Count |
+--------------------------------------+-------+
| MentalHealthBillKE | 250 |
| MentalHealthBillKe | 24 |
| ProtectingTheRightsOfTheMentallySick | 5 |
| TuesdayThoughts | 3 |
| ProtectingTheRightsOfMentallySick | 3 |
| WeShallOvercome | 3 |
| BeUnstoppable | 3 |
| GameOfPhonesKE | 3 |
| Kenya | 2 |
| AfricaNow19 | 2 |
+--------------------------------------+-------+
###Markdown
Calculating lexical diversity for tweets How do the actual words used relate to mental health? Let us find out
###Code
# A function for computing lexical diversity
def lexical_diversity(tokens):
return len(set(tokens))/len(tokens)
# A function for computing the average number of words per tweet
def average_words(statuses):
total_words = sum([ len(s.split()) for s in statuses ])
return total_words/len(statuses)
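# Quick intuition check: an all-unique token list scores 1.0, while heavy
# repetition pushes the score toward 0, e.g.
# lexical_diversity(['mental', 'mental', 'health']) ~= 0.67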
print(lexical_diversity(words))
print(lexical_diversity(hashtags))
###Output
0.11945857558139535
0.0732484076433121
###Markdown
Both "words" and "hashtags" have lexical diversity values of less than 0.5. This means the same terms are repeated heavily, i.e. the conversation is strongly centered on mental health. Finding the most popular retweets
###Code
retweets = [
# Store out a tuple of these three values ...
(status['retweet_count'],
status['retweeted_status']['user']['screen_name'],
status['retweeted_status']['id'],
status['text'])
# ... for each status ...
for status in statuses
# ... so long as the status meets this condition.
if 'retweeted_status' in status.keys()
]
# Slice off the first 5 from the sorted results and display each item in the tuple
pt = PrettyTable(field_names=['Count', 'Screen Name', 'Tweet ID', 'Text'])
[ pt.add_row(row) for row in sorted(retweets, reverse=True)[:50] ]
pt.max_width['Text'] = 50
pt.align= 'l'
print(pt)
###Output
+-------+----------------+---------------------+----------------------------------------------------+
| Count | Screen Name | Tweet ID | Text |
+-------+----------------+---------------------+----------------------------------------------------+
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 57 | _fels1 | 1105391696047673344 | RT @_fels1: One man can impregnate 365+ women in 1 |
| | | | yr. 1 woman can only be impregnated by 1 man in 1 |
| | | | yr. Why dont we focus more on male con… |
| 41 | enock_kiptanui | 1105370472181624833 | RT @enock_kiptanui: #MentalHealthBillKE |
| | | | |
| | | | In the event you run into someone you know has |
| | | | mental illness,do you whisper to him because you |
| | | | th… |
| 41 | enock_kiptanui | 1105370472181624833 | RT @enock_kiptanui: #MentalHealthBillKE |
| | | | |
| | | | In the event you run into someone you know has |
| | | | mental illness,do you whisper to him because you |
| | | | th… |
| 41 | enock_kiptanui | 1105370472181624833 | RT @enock_kiptanui: #MentalHealthBillKE |
| | | | |
| | | | In the event you run into someone you know has |
| | | | mental illness,do you whisper to him because you |
| | | | th… |
| 41 | enock_kiptanui | 1105370472181624833 | RT @enock_kiptanui: #MentalHealthBillKE |
| | | | |
| | | | In the event you run into someone you know has |
| | | | mental illness,do you whisper to him because you |
| | | | th… |
| 41 | enock_kiptanui | 1105370472181624833 | RT @enock_kiptanui: #MentalHealthBillKE |
| | | | |
| | | | In the event you run into someone you know has |
| | | | mental illness,do you whisper to him because you |
| | | | th… |
| 41 | enock_kiptanui | 1105370472181624833 | RT @enock_kiptanui: #MentalHealthBillKE |
| | | | |
| | | | In the event you run into someone you know has |
| | | | mental illness,do you whisper to him because you |
| | | | th… |
+-------+----------------+---------------------+----------------------------------------------------+
###Markdown
Irresponsible men impregnating women is seen as the number one cause of mental health problems amongst women, as they are left to take care of the babies all by themselves. Plotting frequencies of words
###Code
import matplotlib.pyplot as plt
%matplotlib inline
word_counts = sorted(Counter(words).values(), reverse=True)
plt.loglog(word_counts)
plt.ylabel("Freq")
plt.xlabel("Word Rank")
###Output
_____no_output_____
###Markdown
Generating histograms of words, screen names, and hashtags
###Code
for label, data in (('Words', words),
('Screen Names', screen_names),
('Hashtags', hashtags)):
# Build a frequency map for each set of data
# and plot the values
c = Counter(data)
plt.hist(list(c.values()))
# Add a title and y-label ...
plt.title(label)
    plt.ylabel("Number of items in bin")
    plt.xlabel("Bins (number of occurrences)")
# ... and display as a new figure
plt.figure()
###Output
_____no_output_____
###Markdown
Generating a histogram of retweet counts
###Code
# Using underscores while unpacking values in
# a tuple is idiomatic for discarding them
counts = [count for count, _, _, _ in retweets]
plt.hist(counts)
plt.title('Retweets')
plt.xlabel('Bins (number of times retweeted)')
plt.ylabel('Number of tweets in bin')
###Output
_____no_output_____
###Markdown
Sentiment Analysis
###Code
# pip install nltk
import nltk
#nltk.download('vader_lexicon')
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer
twitter_stream = twitter.TwitterStream(auth=auth)
iterator = twitter_stream.statuses.sample()
tweets = []
for tweet in iterator:
try:
if tweet['lang'] == 'en':
tweets.append(tweet)
except:
pass
if len(tweets) == 100:
break
analyzer = SentimentIntensityAnalyzer()
analyzer.polarity_scores('Mental Health')
analyzer.polarity_scores('In the event you run into someone you know has mental illness,do you whisper to him because you.')
analyzer.polarity_scores('1 woman can only be impregnated by 1 man in 1 year.Why dont we focus more on male con..')
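# VADER's 'compound' score is normalised to [-1, 1]; the usual convention from
# the VADER documentation is compound >= 0.05 -> positive, <= -0.05 -> negative,
# and anything in between -> neutral.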
###Output
_____no_output_____
###Markdown
From the above three samples it is clear that Kenyans were very neutral in their discussions, as the tweets score a neutral compound polarity.
###Code
scores = np.zeros(len(tweets))
for i, t in enumerate(tweets):
# Extract the text portion of the tweet
text = t['text']
# Measure the polarity of the tweet
polarity = analyzer.polarity_scores(text)
# Store the normalized, weighted composite score
scores[i] = polarity['compound']
most_positive = np.argmax(scores)
most_negative = np.argmin(scores)
###Output
_____no_output_____
###Markdown
Let's Find out what the most negative tweet is
###Code
print('{0:6.3f} : "{1}"'.format(scores[most_negative], tweets[most_negative]['text']))
###Output
-0.862 : "RT @icrbthomas: Those who have suffered under this disease can confirm that cancer is not good, let alone very good, and that a god who say…"
|
deeplearning.ai-tensorflow-developer-certificate/1-of-4-intro-to-tf/Week_2/Course_1_Part_4_Lesson_2_Notebook.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Beyond Hello World, A Computer Vision ExampleIn the previous exercise you saw how to create a neural network that figured out the problem you were trying to solve. This gave an explicit example of learned behavior. Of course, in that instance, it was a bit of overkill because it would have been easier to write the function Y=2x-1 directly, instead of bothering with using Machine Learning to learn the relationship between X and Y for a fixed set of values, and extending that for all values.But what about a scenario where writing rules like that is much more difficult -- for example a computer vision problem? Let's take a look at a scenario where we can recognize different items of clothing, trained from a dataset containing 10 different types. Start CodingLet's start with our import of TensorFlow
###Code
import tensorflow as tf
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Fashion MNIST data is available directly in the tf.keras datasets API. You load it like this:
###Code
mnist = tf.keras.datasets.fashion_mnist
###Output
_____no_output_____
###Markdown
Calling load_data on this object will give you two sets of two lists, these will be the training and testing values for the graphics that contain the clothing items and their labels.
###Code
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
###Output
_____no_output_____
###Markdown
What does these values look like? Let's print a training image, and a training label to see...Experiment with different indices in the array. For example, also take a look at index 42...that's a a different boot than the one at index 0
###Code
import numpy as np
np.set_printoptions(linewidth=200)
import matplotlib.pyplot as plt
plt.imshow(training_images[0])
print(training_labels[0])
print(training_images[0])
###Output
_____no_output_____
###Markdown
You'll notice that all of the values in the number are between 0 and 255. If we are training a neural network, for various reasons it's easier if we treat all values as between 0 and 1, a process called '**normalizing**'...and fortunately in Python it's easy to normalize a list like this without looping. You do it like this:
###Code
training_images = training_images / 255.0
test_images = test_images / 255.0
###Output
_____no_output_____
###Markdown
Now you might be wondering why there are 2 sets...training and testing -- remember we spoke about this in the intro? The idea is to have 1 set of data for training, and then another set of data...that the model hasn't yet seen...to see how good it would be at classifying values. After all, when you're done, you're going to want to try it out with data that it hadn't previously seen! Let's now design the model. There's quite a few new concepts here, but don't worry, you'll get the hang of them.
###Code
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
###Output
_____no_output_____
###Markdown
**Sequential**: That defines a SEQUENCE of layers in the neural network. **Flatten**: Remember earlier where our images were a square, when you printed them out? Flatten just takes that square and turns it into a 1 dimensional set. **Dense**: Adds a layer of neurons. Each layer of neurons needs an **activation function** to tell them what to do. There's lots of options, but just use these for now. **Relu** effectively means "If X>0 return X, else return 0" -- so what it does is it only passes values 0 or greater to the next layer in the network. **Softmax** takes a set of values, and effectively picks the biggest one, so, for example, if the output of the last layer looks like [0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05], it saves you from fishing through it looking for the biggest value, and turns it into [0,0,0,0,1,0,0,0,0] -- The goal is to save a lot of coding! The next thing to do, now the model is defined, is to actually build it. You do this by compiling it with an optimizer and loss function as before -- and then you train it by calling **model.fit**, asking it to fit your training data to your training labels -- i.e. have it figure out the relationship between the training data and its actual labels, so in future if you have data that looks like the training data, then it can make a prediction for what that data would look like.
###Code
model.compile(optimizer = tf.optimizers.Adam(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
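# Tiny numeric illustration of the two activations described above (a sketch
# with made-up scores, separate from the training itself): relu zeroes out
# negatives, while softmax turns raw scores into probabilities that sum to 1,
# with the largest score dominating, which is why it reads as "picking" a class.
import numpy as np
scores = np.array([-2.0, 0.5, 3.0])
print(tf.nn.relu(scores).numpy())     # -> [0.  0.5 3. ]
print(tf.nn.softmax(scores).numpy())  # -> approx [0.006 0.075 0.918]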
###Output
_____no_output_____
###Markdown
Once it's done training -- you should see an accuracy value at the end of the final epoch. It might look something like 0.9098. This tells you that your neural network is about 91% accurate in classifying the training data. I.E., it figured out a pattern match between the image and the labels that worked 91% of the time. Not great, but not bad considering it was only trained for 5 epochs and done quite quickly.But how would it work with unseen data? That's why we have the test images. We can call model.evaluate, and pass in the two sets, and it will report back the loss for each. Let's give it a try:
###Code
model.evaluate(test_images, test_labels)
###Output
_____no_output_____
###Markdown
For me, that returned an accuracy of about .8838, which means it was about 88% accurate. As expected it probably would not do as well with *unseen* data as it did with data it was trained on! As you go through this course, you'll look at ways to improve this. To explore further, try the below exercises: Exploration Exercises Exercise 1: For this first exercise run the below code: It creates a set of classifications for each of the test images, and then prints the first entry in the classifications. The output, after you run it, is a list of numbers. Why do you think this is, and what do those numbers represent?
###Code
classifications = model.predict(test_images)
print(classifications[0])
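# The index of the largest probability is the predicted class; for this test
# item it should normally match test_labels[0] (9, the ankle boot).
print(np.argmax(classifications[0]))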
###Output
_____no_output_____
###Markdown
Hint: try running print(test_labels[0]) -- and you'll get a 9. Does that help you understand why this list looks the way it does?
###Code
print(test_labels[0])
###Output
_____no_output_____
###Markdown
What does this list represent? 1. It's 10 random meaningless values 2. It's the first 10 classifications that the computer made 3. It's the probability that this item is each of the 10 classes Answer: The correct answer is (3) The output of the model is a list of 10 numbers. These numbers are a probability that the value being classified is the corresponding value (https://github.com/zalandoresearch/fashion-mnistlabels), i.e. the first value in the list is the probability that the image is of a '0' (T-shirt/top), the next is a '1' (Trouser) etc. Notice that they are all VERY LOW probabilities. For the 9 (Ankle boot), the probability was in the 90's, i.e. the neural network is telling us that it's almost certainly a 9. How do you know that this list tells you that the item is an ankle boot? 1. There's not enough information to answer that question 2. The 10th element on the list is the biggest, and the ankle boot is labelled 9 3. The ankle boot is label 9, and there are 0->9 elements in the list Answer The correct answer is (2). Both the list and the labels are 0 based, so the ankle boot having label 9 means that it is the 10th of the 10 classes. The list having the 10th element being the highest value means that the Neural Network has predicted that the item it is classifying is most likely an ankle boot
###Code
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
###Output
_____no_output_____
###Markdown
Question 1. Increase to 1024 Neurons -- What's the impact? 1. Training takes longer, but is more accurate 2. Training takes longer, but no impact on accuracy 3. Training takes the same time, but is more accurate Answer The correct answer is (1): by adding more Neurons we have to do more calculations, slowing down the process, but in this case they have a good impact -- we do get more accurate. That doesn't mean it's always a case of 'more is better', you can hit the law of diminishing returns very quickly! Exercise 3: What would happen if you remove the Flatten() layer. Why do you think that's the case? You get an error about the shape of the data. It may seem vague right now, but it reinforces the rule of thumb that the first layer in your network should be the same shape as your data. Right now our data is 28x28 images, and 28 layers of 28 neurons would be infeasible, so it makes more sense to 'flatten' that 28,28 into a 784x1. Instead of writing all the code to handle that ourselves, we add the Flatten() layer at the beginning, and when the arrays are loaded into the model later, they'll automatically be flattened for us.
###Code
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([#tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
###Output
_____no_output_____
###Markdown
Exercise 4: Consider the final (output) layers. Why are there 10 of them? What would happen if you had a different amount than 10? For example, try training the network with 5. You get an error as soon as it finds an unexpected value. Another rule of thumb -- the number of neurons in the last layer should match the number of classes you are classifying for. In this case it's the digits 0-9, so there are 10 of them, hence you should have 10 neurons in your final layer.
###Code
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation=tf.nn.relu),
tf.keras.layers.Dense(5, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
###Output
_____no_output_____
###Markdown
Exercise 5: Consider the effects of additional layers in the network. What will happen if you add another layer between the one with 512 and the final layer with 10. Ans: There isn't a significant impact -- because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary.
###Code
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(256, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
###Output
_____no_output_____
###Markdown
Exercise 6: Consider the impact of training for more or fewer epochs. Why do you think that would be the case? Try 15 epochs -- you'll probably get a model with a much better loss than the one with 5. Try 30 epochs -- you might see the loss value stops decreasing, and sometimes increases. This is a side effect of something called 'overfitting' which you can learn about [somewhere] and it's something you need to keep an eye out for when training neural networks. There's no point in wasting your time training if you aren't improving your loss, right! :)
###Code
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=30)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[34])
print(test_labels[34])
###Output
_____no_output_____
###Markdown
Exercise 7: Before you trained, you normalized the data, going from values that were 0-255 to values that were 0-1. What would be the impact of removing that? Here's the complete code to give it a try. Why do you think you get different results?
###Code
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images=training_images/255.0
test_images=test_images/255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
###Output
_____no_output_____
###Markdown
Exercise 8: Earlier when you trained for extra epochs you had an issue where your loss might change. It might have taken a bit of time for you to wait for the training to do that, and you might have thought 'wouldn't it be nice if I could stop the training when I reach a desired value?' -- i.e. 95% accuracy might be enough for you, and if you reach that after 3 epochs, why sit around waiting for it to finish a lot more epochs....So how would you fix that? Like any other program...you have callbacks! Let's see them in action...
###Code
import tensorflow as tf
print(tf.__version__)
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('loss')<0.4):
print("\nReached 60% accuracy so cancelling training!")
self.model.stop_training = True
callbacks = myCallback()
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images=training_images/255.0
test_images=test_images/255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])
###Output
_____no_output_____ |
NLP_week2.ipynb | ###Markdown
###Code
import nltk
nltk.download('all')
from nltk.corpus import stopwords
stopwords.words('english')
entries=nltk.corpus.cmudict.entries()
len(entries)
entries[50000:5000000] # Contains extra information for pronunciation
from nltk.corpus import wordnet as wn
wn.synsets('motorcar')
wn.synset('car.n.01').lemma_names()
import nltk
from nltk.stem import PorterStemmer
PS=PorterStemmer()
PS.stem('Happiness')
import nltk
from nltk.stem import LancasterStemmer
LS=LancasterStemmer()
LS.stem('Happiness')
import nltk
from nltk.stem import SnowballStemmer
SS=SnowballStemmer('french')
SS.stem('manges')
stemmer=PS
example="A quick brown fox jumps over a lazy dog"
example=[stemmer.stem(token) for token in example.split()]
print(" ".join(example))
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
print(lemmatizer.lemmatize('cacti'))
print(lemmatizer.lemmatize('better',pos='a')) # adjective
print(lemmatizer.lemmatize('as',pos='v')) # forms of be
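# Unlike the stemmers above, the WordNet lemmatizer maps words to real
# dictionary forms and relies on the POS tag to pick them, e.g.
# 'cacti' -> 'cactus', 'better' (pos='a') -> 'good', 'is' (pos='v') -> 'be'.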
! pip install jieba
import jieba
seg=jieba.cut("拉梅什·萨尚",cut_all=True)
' '.join(seg)
###Output
Building prefix dict from the default dictionary ...
Dumping model to file cache /tmp/jieba.cache
Loading model cost 0.937 seconds.
Prefix dict has been built successfully.
|
ML-extremes-mcs/sample_dlworkflow.ipynb | ###Markdown
Sample workflow for training ML model **Notebook Author:** Maria J. Molina, _National Center for Atmospheric Research, Boulder, CO._ Import relevant libraries and modules First let's see if a GPU is available in the notebook session.
###Code
import tensorflow as tf
print("Is GPU available?", tf.test.is_gpu_available()) # True/False
print("Is GPU with CUDA available?", tf.test.is_gpu_available(cuda_only=True))
# if GPU is available, tf and keras will automatically train on GPU. All set! :)
# this is another way to check devices available with greater detail:
# from tensorflow.python.client import device_lib
# print(device_lib.list_local_devices())
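# A non-deprecated check (the warning in the output below points the same way):
# print(tf.config.list_physical_devices('GPU'))  # empty list means no GPU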
###Output
WARNING:tensorflow:From <ipython-input-1-0f0433ab9b75>:2: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.config.list_physical_devices('GPU')` instead.
Is GPU available? False
Is GPU with CUDA available? False
###Markdown
Now, load standard/useful python libraries for visualization and testing along the way.
###Code
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
And now these are the modules and items needed for training the MCS feature detector.
###Code
import dataloader
from config import main_path_era
from id_selector import IDSelector
import dlfront_style
###Output
_____no_output_____
###Markdown
Generate list of IDs for training Let's select IDs for training by choosing from the available options in IDSelector. Select from the respective options (e.g., train only with MCSs in masks, certain months, etc).
###Code
class_ids = IDSelector(main_path = main_path_era,
start_year = 2004,
end_year = 2019,
month_only=[4], year_only=[2004], mcs_only=True,
percent_train=0.7, percent_validate=0.1,)
# here we generate the list of IDs by loading from a presaved dictionary
IDlist = class_ids.generate_IDarray(dict_freq='3H')
# here we shuffle and split the IDs into a testing and training set
# train_IDs, test_IDs = class_ids.generate_traintest_split(IDlist, seed=0)
train_IDs, valid_IDs, test_IDs = class_ids.generate_traintest_split(IDlist, seed=0)
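# With percent_train=0.7 and percent_validate=0.1 above, the remaining ~20% of
# IDs presumably ends up in the held-out test split.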
print(f"Training set contains ",len(train_IDs)," total training MCSs.")
###Output
Training set contains 113 total training MCSs.
###Markdown
Initiate Keras DataGenerator class object with select variables and pre-generated data IDs Create a list of variables to use for training from ERA5 that correspond to CESM for later. Then, instantiate the keras/tf data generator.
###Code
variables = ["cp","u850", "v850", "q850"]
training_generator = dataloader.DataGenerator(list_IDs = train_IDs,
path_dataID = f"{main_path_era}/dl_files/3H/",
variable = variables,
h_num = None,
height = None,
batch_size = 16, dim = (121, 321),
n_channels = len(variables),
n_classes = 2, shuffle = False,)
###Output
_____no_output_____
###Markdown
Quick test of the keras dataloader as a sanity check. Let's visualize the input variables and output class(es).
###Code
a, b = training_generator.__getitem__(0)
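# Given the generator settings above, a (inputs) is expected to have shape
# (batch_size, 121, 321, n_channels) and b (mask labels) shape (batch_size, 121, 321, n_classes)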
cs = plt.pcolormesh(a[0,:,:,0]); plt.colorbar(cs); plt.title(variables[0]); plt.show()
cs = plt.pcolormesh(a[0,:,:,1]); plt.colorbar(cs); plt.title(variables[1]); plt.show()
cs = plt.pcolormesh(a[0,:,:,2]); plt.colorbar(cs); plt.title(variables[2]); plt.show()
cs = plt.pcolormesh(a[0,:,:,3]); plt.colorbar(cs); plt.title(variables[3]); plt.show()
cs = plt.pcolormesh(b[0,:,:,0]); plt.colorbar(cs); plt.show()
cs = plt.pcolormesh(b[0,:,:,1]); plt.colorbar(cs); plt.show()
###Output
_____no_output_____
###Markdown
Now, let's build the machine learning model with Keras and train! Instantiate the ML model class with the desired class options and compile the model.
###Code
#mlmodel = dlfront_style.DLFrontStyle(variable=variables, learning_rate=0.01, scheduler=1, epochs=30, batch_norm=True, spatial_drop=0.3)
mlmodel = dlfront_style.DLFrontStyle(variable=variables, dim=(121, 321), learning_rate=0.01, epochs=30, batch_norm=True, spatial_drop=0.3)
the_model = mlmodel.compile_model()
variables = ["cp","u850", "v850", "q850"]
validation_generator = dataloader.DataGenerator(list_IDs = valid_IDs,
path_dataID = f"{main_path_era}/dl_files/3H/",
variable = variables,
h_num = None, height = None,
batch_size = 16, dim = (121, 321),
n_channels = len(variables),
n_classes = 2, shuffle = False,)
mlmodel.train_model(the_model, training_generator, validation=validation_generator)
variables = ["cp","u850", "v850", "q850"]
testing_generator = dataloader.DataGenerator(list_IDs = test_IDs,
path_dataID = f"{main_path_era}/dl_files/3H/",
variable = variables,
h_num = None, height = None,
batch_size = 32, dim = (121, 321),
n_channels = len(variables),
n_classes = 2, shuffle = False,)
y=the_model.predict(x=a[:,:,:,:])
plt.pcolormesh(y[11,:,:,0]); plt.show()
a, b = testing_generator.__getitem__(0)
cs = plt.pcolormesh(a[11,:,:,0]); plt.colorbar(cs); plt.title(variables[0]); plt.show()
cs = plt.pcolormesh(a[11,:,:,1]); plt.colorbar(cs); plt.title(variables[1]); plt.show()
cs = plt.pcolormesh(a[11,:,:,2]); plt.colorbar(cs); plt.title(variables[2]); plt.show()
cs = plt.pcolormesh(a[11,:,:,3]); plt.colorbar(cs); plt.title(variables[3]); plt.show()
cs = plt.pcolormesh(b[11,:,:,0]); plt.colorbar(cs); plt.show()
cs = plt.pcolormesh(b[11,:,:,1]); plt.colorbar(cs); plt.show()
mlmodel = dlfront_style.DLFrontStyle(variable=variables, learning_rate=0.01, scheduler=1, epochs=30, batch_norm=True, spatial_drop=0.3)
the_model = mlmodel.compile_model()
###Output
Model: "dlfront_style"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 105, 161, 5)] 0
_________________________________________________________________
conv2d (Conv2D) (None, 105, 161, 80) 10080
_________________________________________________________________
batch_normalization (BatchNo (None, 105, 161, 80) 320
_________________________________________________________________
spatial_dropout2d (SpatialDr (None, 105, 161, 80) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 105, 161, 80) 160080
_________________________________________________________________
batch_normalization_1 (Batch (None, 105, 161, 80) 320
_________________________________________________________________
spatial_dropout2d_1 (Spatial (None, 105, 161, 80) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 105, 161, 80) 160080
_________________________________________________________________
batch_normalization_2 (Batch (None, 105, 161, 80) 320
_________________________________________________________________
spatial_dropout2d_2 (Spatial (None, 105, 161, 80) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 105, 161, 2) 4002
=================================================================
Total params: 335,202
Trainable params: 334,722
Non-trainable params: 480
_________________________________________________________________
None
###Markdown
Now, start training! Should go quickly if a GPU is available.
###Code
mlmodel.train_model(the_model, training_generator)
variables = ["2d","10u","10v", "sp", "2t"]
testing_generator = dataloader.DataGenerator(list_IDs = test_IDs,
path_dataID = f"{main_path_era}/dl_files/3H/",
variable = variables,
ens_num = "era5",
h_num = None,
height = None,
batch_size = 32, dim = (105, 161),
n_channels = len(variables),
n_classes = 2, shuffle = False,
stats_path = main_path_era,
norm = 'zscore')
a, b = testing_generator.__getitem__(0)
cs = plt.imshow(a[0,:,:,0]); plt.colorbar(cs); plt.title(variables[0]); plt.show()
cs = plt.imshow(a[0,:,:,1]); plt.colorbar(cs); plt.title(variables[1]); plt.show()
cs = plt.imshow(a[0,:,:,2]); plt.colorbar(cs); plt.title(variables[2]); plt.show()
cs = plt.imshow(a[0,:,:,3]); plt.colorbar(cs); plt.title(variables[3]); plt.show()
cs = plt.imshow(a[0,:,:,4]); plt.colorbar(cs); plt.title(variables[4]); plt.show()
cs = plt.imshow(b[7,:,:,0]); plt.colorbar(cs); plt.show()
cs = plt.imshow(b[0,:,:,1]); plt.colorbar(cs); plt.show()
a[0,:,:,:].shape
y=the_model.predict(x=a[:,:,:,:])
plt.imshow(y[7,:,:,0], vmin=0, vmax=1);
mlmodel = dlfront_style.DLFrontStyle(variable=variables, learning_rate=0.01, scheduler=1, epochs=30, batch_norm=True, spatial_drop=0.3,
output_shape=1, output_activation='sigmoid', loss_function='mean_squared_error')
the_model = mlmodel.compile_model()
mlmodel.train_model(the_model, training_generator)
###Output
0.009999999776482582
Epoch 1/30
2/2 [==============================] - 1s 599ms/step - loss: 0.3430 - accuracy: 0.7444 - mean_squared_error: 0.1599 - mean_absolute_error: 0.2660
0.009999999776482582
Epoch 2/30
2/2 [==============================] - 1s 552ms/step - loss: 0.2992 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.009999999776482582
Epoch 3/30
2/2 [==============================] - 1s 555ms/step - loss: 0.3722 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.009999999776482582
Epoch 4/30
2/2 [==============================] - 1s 547ms/step - loss: 0.4229 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.009999999776482582
Epoch 5/30
2/2 [==============================] - 1s 539ms/step - loss: 0.4502 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.009999999776482582
Epoch 6/30
2/2 [==============================] - 1s 550ms/step - loss: 0.4577 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.009999999776482582
Epoch 7/30
2/2 [==============================] - 1s 548ms/step - loss: 0.4501 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.009999999776482582
Epoch 8/30
2/2 [==============================] - 1s 568ms/step - loss: 0.4318 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.009999999776482582
Epoch 9/30
2/2 [==============================] - 1s 561ms/step - loss: 0.4071 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.009048373438417912
Epoch 10/30
2/2 [==============================] - 1s 551ms/step - loss: 0.3807 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.008187306113541126
Epoch 11/30
2/2 [==============================] - 1s 549ms/step - loss: 0.3547 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.0074081807397305965
Epoch 12/30
2/2 [==============================] - 1s 544ms/step - loss: 0.3301 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.00670319888740778
Epoch 13/30
2/2 [==============================] - 1s 546ms/step - loss: 0.3075 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.006065304856747389
Epoch 14/30
2/2 [==============================] - 1s 545ms/step - loss: 0.2872 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.005488114431500435
Epoch 15/30
2/2 [==============================] - 1s 555ms/step - loss: 0.2691 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.004965851083397865
Epoch 16/30
2/2 [==============================] - 1s 556ms/step - loss: 0.2532 - accuracy: 0.9592 - mean_squared_error: 0.0408 - mean_absolute_error: 0.0408
0.004493287764489651
Epoch 17/30
|
Jupyter/BMRS20_mos_their_cuts.ipynb | ###Markdown
An analysis of the dataset presented in [this technical comment](https://arxiv.org/abs/2004.06601), but *without* our quality cuts applied. In the previous notebook, `BMRS20_mos_our_cuts.ipynb`, we analyzed the subset of the [BMRS](https://arxiv.org/abs/2004.06601) dataset that passed our quality cuts as defined in [Dessert et al. _Science_ 2020](https://science.sciencemag.org/content/367/6485/1465) (DRS20). We found no evidence for the 3.5 keV line and ruled out the relevant region of parameter space even with our conservative analysis. In this notebook, we repeat this analysis on the entire 17 Ms BMRS dataset: all of the data, not just the subset that passes the quality cuts. If you use the data in this example in a publication, please cite Dessert et al. _Science_ 2020. **Please direct any questions to [email protected].**
###Code
# Import required modules
%matplotlib inline
%load_ext autoreload
%autoreload 2
import sys,os
import numpy as np
from scipy.stats import chi2 as chi2_scipy
from scipy.optimize import dual_annealing
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from matplotlib import rc
from matplotlib import rcParams
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = True
###Output
_____no_output_____
###Markdown
**NB**: In this notebook, we minimize with `scipy` so that it is easy to run for the interested reader. For scientific analysis, we recommend [Minuit](https://iminuit.readthedocs.io/en/latest/) as a minimizer. In our paper, we used Minuit. Define signal line energy: by default we will look for an anomalous line at 3.48 keV, as defined by the EUXL parameter below, denoting the energy of the unidentified X-ray line. Lines at different energies can be searched for by changing this parameter accordingly (for example to 3.55 keV as in the first notebook). We start with 3.48 keV as this is the fiducial line energy in BMRS.
###Code
EUXL = 3.48 # [keV]
###Output
_____no_output_____
###Markdown
**NB:** changing EUXL will of course vary the results below, and values in the surrounding discussion will not necessarily be reflective. Load in the data and models. First we will load in the data products that we will use in the analysis. These include the stacked MOS data, associated energy bins, and uncertainties. We will use data from two regions of interest (ROI): - **Signal Region (SR)**: 20-35 degrees from the Galactic Center, this was the fiducial ROI in BMRS (DRS20 instead used 5-45); - **Background Region (BR)**: 60-90 degrees from the Galactic Center, a useful region for studying background as it contains less dark matter. We also load the appropriately averaged D-factors for these two regions (ROIs) for our fiducial NFW profile, along with the respective exposure times.
###Code
## Signal Region (20-35 degrees)
data = np.load("../data/data_mos_boyarsky_ROI_their_cuts.npy") # [cts/s/keV]
data_yerrs = np.load("../data/data_yerrs_mos_boyarsky_ROI_their_cuts.npy") # [cts/s/keV]
QPB = np.load("../data/QPB_mos_boyarsky_ROI_their_cuts.npy") # [cts/s/keV]
# Exposure time
Exp = 16.55e6 # [s]
# D-factor averaged over the signal ROI
D_signal = 4.46e28 # [keV/cm^2]
## Background Region (60-90 degrees)
# Data and associated errors
data_bkg = np.load("../data/data_mos_bkg_ROI.npy") # [cts/s/keV]
data_yerrs_bkg = np.load("../data/data_yerrs_mos_bkg_ROI.npy") # [cts/s/keV]
# Exposure time
Exp_bkg = 67.64e6 # [s]
# D-factor averaged over the background ROI
D_bkg = 1.91e28 # [keV/cm^2]
## Energy binning appropriate for both the signal and background
Energies=np.load("../data/mos_energies.npy") # [keV]
###Output
_____no_output_____
###Markdown
Load in the models: next we use the models that will be used in fitting the above data. There is a sequence of models corresponding to physical line fluxes at the energies specified by `Es_line`. That is, `mod_UXL` gives the detector counts as a function of energy after forward modeling a physical line at EUXL keV with a flux of 1 cts/cm$^2$/s/sr.
###Code
# Load the forward-modeled lines and energies
mods = np.load("../data/mos_mods.npy")
Es_line = np.load("../data/mos_mods_line_energies.npy")
# Load the detector response
det_res = np.load("../data/mos_det_res.npy")
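# Select the precomputed forward-modeled line template whose energy is closest to EUXL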
arg_UXL = np.argmin((Es_line-EUXL)**2)
mod_UXL = mods[arg_UXL]
print "The energy of our "+str(EUXL)+" keV line example will be: "+str(Es_line[arg_UXL])+" keV"
# How to go from flux to sin^2(2\theta)
def return_sin_theta_lim(E_line,flux,D_factor):
"""
D_factor [keV/cm^2]
flux [cts/cm^2/s/sr]
E_line [keV] (dark matter mass is twice this value)
returns: associated sin^2(2theta)
"""
DMmass = 2.*E_line
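    # Invert the sterile neutrino decay rate (Gamma proportional to sin^2(2theta)*DMmass^5)
    # to turn a line flux into a mixing angle; 1.361e-22 presumably collects the physical constants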
res = (4.*np.pi*DMmass/D_factor)/1.361e-22*(1/DMmass)**5*flux
return res
###Output
The energy of our 3.48 keV line example will be: 3.4824707846410687 keV
###Markdown
Visualize the data. Below we show the data in the signal region, where the dashed vertical line denotes the location of a putative signal line. Note that the BMRS dataset has a flux 50% larger than when restricted to the set that passes our quality cuts, highlighting the importance of implementing these cuts. In addition, this extra extended emission (above the irreducible detector and cosmic backgrounds) may have complicated energy dependence that cannot be described by a simple background model such as a power law. Finally, these backgrounds are stacked, further increasing the likelihood that the background may systematically deviate from a simple model.
###Code
fig = plt.figure(figsize=(10,8))
plt.errorbar(Energies,data,yerr=data_yerrs,xerr=(Energies[1]-Energies[0])/2.,
color="black",label="data",marker="o", fmt='none',capsize=4)
plt.axvline(EUXL,color="black",linestyle="dashed")
plt.xlim(EUXL-0.25,EUXL+0.25)
plt.ylim(0.125,0.15)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"SR Flux [cts/s/keV]",fontsize=22)
plt.show()
###Output
/sw/lsa/centos7/python-anaconda2/2019.03/lib/python2.7/site-packages/matplotlib/font_manager.py:1331: UserWarning: findfont: Font family [u'serif'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
###Markdown
Statistical analysis. Now, let's perform a rigorous statistical analysis, using the profile likelihood. As we operate in the large-counts limit for the stacked data, we can perform a simple $\chi^2$ analysis rather than a full joint likelihood analysis as used by default in Dessert et al. 2020.
###Code
## Define the functions we will use
class chi2:
""" A set offunctions for calculation the chisq associated with different hypotheses
"""
def __init__(self,ens,dat,err,null_mod,sig_template):
self._ens = ens
self._dat = dat
self._err = err
self._null_mod = null_mod
self._sig_template = sig_template
self._A_sig = 0.0
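    # chi^2 for the signal + background model: x[0] is the signal normalization, x[1:] are the background parameters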
def chi2(self,x):
null_mod = self._null_mod(self._ens,x[1:])
sig_mod = self._sig_template*x[0]
return np.sum((self._dat - null_mod - sig_mod)**2/self._err**2)
def chi2_null(self,x):
null_mod = self._null_mod(self._ens,x)
return np.sum((self._dat - null_mod)**2/self._err**2)
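    # chi^2 with the signal amplitude held fixed at self._A_sig (used in the profile-likelihood scan)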
def chi2_fixed_signal(self,x):
null_mod = self._null_mod(self._ens,x)
sig_mod = self._sig_template*self._A_sig
return np.sum((self._dat - null_mod - sig_mod)**2/self._err**2)
def fix_signal_strength(self,A_sig):
self._A_sig = A_sig
###Output
_____no_output_____
###Markdown
Fit within $E_{\rm UXL} \pm 0.25$ keV. First, we will fit the models from $[E_{\rm UXL}-0.25,\,E_{\rm UXL}+0.25]$ keV. Later in this notebook, we broaden this range to 3.0 to 4.0 keV. For the default $E_{\rm UXL} = 3.48$ keV, this corresponds to $3.23~{\rm keV} < E < 3.73~{\rm keV}$. To begin with then, let's reduce the dataset to this restricted range.
###Code
whs_reduced = np.where((Energies >= EUXL-0.25) & (Energies <= EUXL+0.25))[0]
Energies_reduced = Energies[whs_reduced]
data_reduced = data[whs_reduced]
data_yerrs_reduced = data_yerrs[whs_reduced]
data_bkg_reduced = data_bkg[whs_reduced]
data_yerrs_bkg_reduced = data_yerrs_bkg[whs_reduced]
mod_UXL_reduced = mod_UXL[whs_reduced]
###Output
_____no_output_____
###Markdown
Next, let's fit this data with the background-only hypothesis and consider the quality of fit. The background model: here we model the continuum background as a quadratic. In addition, we add degrees of freedom associated with possible background lines at 3.3 keV and 3.7 keV.
###Code
arg_3p3 = np.argmin((Es_line-3.32)**2)
mod_3p3 = mods[arg_3p3]
arg_3p7 = np.argmin((Es_line-3.68)**2)
mod_3p7 = mods[arg_3p7]
def mod_poly_two_lines(ens,x):
"An extended background model to include two additional lines"
A, B, C, S1, S2 = x
return A+B*ens + C*ens**2 + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_poly_two_lines,mod_UXL_reduced)
mn_null_line = minimize(chi2_instance.chi2_null,np.array([0.50053349, -0.18701816, 0.02353692, 0.06814053,
0.01880195]),method='Nelder-Mead')
mn_line = minimize(chi2_instance.chi2,np.array([1.e-2,mn_null_line.x[0],mn_null_line.x[1],mn_null_line.x[2],mn_null_line.x[3],mn_null_line.x[4]]),method='Nelder-Mead',options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
print "The Delta chi^2 between signal and null model is:", mn_null_line.fun - mn_line.fun
print "The chi^2/DOF of the null-model fit is:", mn_null_line.fun/(len(whs_reduced)-5.)
print "Expected 68% containment for the chi^2/DOF:", np.array(chi2_scipy.interval(0.68,len(whs_reduced)-5.))/float(len(whs_reduced)-5.)
print "Expected 95% containment for the chi^2/DOF:", np.array(chi2_scipy.interval(0.95,len(whs_reduced)-5.))/float(len(whs_reduced)-5.)
###Output
The Delta chi^2 between signal and null model is: 1.0568783470870926
The chi^2/DOF of the null-model fit is: 0.8595653580677374
Expected 68% containment for the chi^2/DOF: [0.85614219 1.14370943]
Expected 95% containment for the chi^2/DOF: [0.73605123 1.30376807]
###Markdown
The inclusion of additional lines has not changed our conclusion. The null model is still a good fit to the data, although we find a very mild preference for nonzero signal, $\Delta \chi^2 \sim 1$. Here we plot the best-fit signal and background models.
###Code
fig = plt.figure(figsize=(10,8))
plt.errorbar(Energies,data,yerr=data_yerrs,xerr=(Energies[1]-Energies[0])/2.,
color="black",label="data",marker="o", fmt='none',capsize=4)
plt.plot(Energies_reduced,mod_poly_two_lines(Energies_reduced,mn_null_line.x),'k-',label =r"Null model")
plt.plot(Energies_reduced,mod_poly_two_lines(Energies_reduced,mn_line.x[1:])+mn_line.x[0]*mod_UXL_reduced,
'r-',label =r"Signal model")
plt.axvline(EUXL,color="black",linestyle="dashed")
plt.xlim(EUXL-0.25,EUXL+0.25)
plt.ylim(0.125,0.15)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"SR Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
###Output
_____no_output_____
###Markdown
Finally let's compute the associated limit via profile likelihood.
###Code
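# Profile-likelihood scan: step the signal normalization up from its best-fit value,
# re-minimizing the background (nuisance) parameters at each step; the 95% one-sided
# upper limit is where the chi^2 rises by 2.71 above its minimum value.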
A_sig_array = np.linspace(mn_line.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn_line.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead',
options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
###Output
The 95% upper limit on the signal flux is 0.03494126304163253 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 3.082609562443865e-11
###Markdown
Recall that this same analysis on the clean dataset in the previous notebook found a limit of $\sin^2(2\theta) = 2.38 \times 10^{-11}$, but despite the increased exposure time the limit here is worse, partially due to the increased background rate. Nevertheless, even this limit fairly safely excludes the 3.5 keV line. Power-law background model: now let's try a power law for the continuum background model (along with the two lines), as done in BMRS. Given that the stacked data is the sum of power laws, we would not expect the stacked data to be a power law itself, although we find it to be a reasonable description.
###Code
def mod_power_two_lines(ens,x):
"An extended background model to include two additional lines"
A, n, S1, S2 = x
return A*ens**n + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_power_two_lines,mod_UXL_reduced)
mn_null_line = minimize(chi2_instance.chi2_null,np.array([0.30859773, -0.66268936, 0.06355456, 0.03587628]),method='Nelder-Mead')
mn_line = minimize(chi2_instance.chi2,np.array([1.e-2,mn_null_line.x[0],mn_null_line.x[1],mn_null_line.x[2],mn_null_line.x[3]]),method='Nelder-Mead',options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
print "The Delta chi^2 between signal and null model is:", mn_null_line.fun - mn_line.fun
print "The chi^2/DOF of the null-model fit is:", mn_null_line.fun/(len(whs_reduced)-4.)
fig = plt.figure(figsize=(10,8))
plt.errorbar(Energies,data,yerr=data_yerrs,xerr=(Energies[1]-Energies[0])/2.,
color="black",label="data",marker="o", fmt='none',capsize=4)
plt.plot(Energies_reduced,mod_power_two_lines(Energies_reduced,mn_null_line.x),'k-',label =r"Null model")
plt.plot(Energies_reduced,mod_power_two_lines(Energies_reduced,mn_line.x[1:])+mn_line.x[0]*mod_UXL_reduced,
'r-',label =r"Signal model")
plt.axvline(EUXL,color="black",linestyle="dashed")
plt.xlim(EUXL-0.25,EUXL+0.25)
plt.ylim(0.125,0.15)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"SR Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
A_sig_array = np.linspace(mn_line.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn_line.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead',
options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
###Output
The 95% upper limit on the signal flux is 0.027480758948516815 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 2.4244243894955322e-11
###Markdown
The power law continuum background does not substantively change the results, although any mild preference for a line has decreased to $\Delta \chi^2 \sim 0.6$. For reference, on the clean dataset, we found $\sin^2(2\theta) = 1.82 \times 10^{-11}$. Note this is the same procedure as in BMRS's test color-coded red in their Fig. 1 and Tab. 1, and performed on the same dataset. In that analysis, they find marginal 1.3$\sigma$ evidence for a line, although here we are unable to reproduce the result with the same significance. Departing from $[E_{\rm UXL}-0.25,\,E_{\rm UXL}+0.25]$ keV: we now fit the same dataset over the 3-4 keV range. Note that going to a wider energy range can open the analysis up to a variety of systematic issues associated with deviations from the background model around the energy of interest. This is exactly why in our fiducial analysis we stuck to the narrow energy range. In this case, the inclusion of data with high backgrounds can exacerbate these issues. Our procedure is as follows. Firstly, we update the dataset. Then we define a new background model incorporating these additional lines. Finally we repeat our default $\chi^2$ fit procedure. Note that we continue to use a power law continuum background model here. As such, the following analysis is a reproduction of the BMRS magenta color-coded analysis. In that magenta analysis, they claim a 4.0$\sigma$ detection of a line at 3.48 keV. Let us see what we obtain.
###Code
whs_reduced = np.where((Energies >= 3.0) & (Energies <= 4.0))[0]
Energies_reduced = Energies[whs_reduced]
data_reduced = data[whs_reduced]
data_yerrs_reduced = data_yerrs[whs_reduced]
data_bkg_reduced = data_bkg[whs_reduced]
data_yerrs_bkg_reduced = data_yerrs_bkg[whs_reduced]
mod_UXL_reduced = mod_UXL[whs_reduced]
arg_3p1 = np.argmin((Es_line-3.12)**2)
mod_3p1 = mods[arg_3p1]
arg_3p9 = np.argmin((Es_line-3.90)**2)
mod_3p9 = mods[arg_3p9]
arg_3p7 = np.argmin((Es_line-3.68)**2)
mod_3p7 = mods[arg_3p7]
arg_3p3 = np.argmin((Es_line-3.32)**2)
mod_3p3 = mods[arg_3p3]
def mod_power_four_lines(ens,x):
A, n,S1, S2, S3, S4 = x
return A*ens**n + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]+ S3*mod_3p1[whs_reduced] + S4*mod_3p9[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_power_four_lines,mod_UXL_reduced)
x0 = np.array([0.33315606 ,-0.72351094, 0.0494905 , 0.04189487, 0.14450233,
0.06215284])
bounds = np.array([[1e-6,5],[-3,0],[0,0.5],[0,0.5],[0,0.5],[0,0.5]])
mn_null = dual_annealing(chi2_instance.chi2_null,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
boundss = np.array([[-0.5,0.5],[1e-6,5],[-3,0],[0,0.1],[0,0.1],[0,0.1],[0,0.2]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5]])
mn = dual_annealing(chi2_instance.chi2,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
print "Best fit background parameters:", mn_null.x
print "Best fit signal+background parameters:", mn.x
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "The chi^2/DOF of the null-model fit is:", mn_null.fun/(len(whs_reduced)-6.)
print "NB: the best-fit signal strength in this case is:", mn.x[0], "cts/cm$^2$/s/sr"
###Output
Best fit background parameters: [ 0.33325016 -0.72372844 0.0503624 0.04251432 0.14438536 0.06221247]
Best fit signal+background parameters: [ 0.00886512 0.33312208 -0.72409931 0.05170324 0.04417607 0.14438536
0.06497322]
The Delta chi^2 between signal and null model is: 2.6285143425577644
The chi^2/DOF of the null-model fit is: 0.9174177924724561
NB: the best-fit signal strength in this case is: 0.008865118945648565 cts/cm$^2$/s/sr
###Markdown
Now we find modest evidence for the line with $\Delta \chi^2 \sim 2.6$, corresponding to $\sim 1.6 \sigma$. Note that on our cleaner dataset we found no evidence for the line, and in the analysis in the narrower energy range we also found no evidence. Note that the best-fit signal strength is much smaller than would be expected for the 3.5 keV line. There is no reason to expect that the background models employed here are reasonable physical descriptions of the data at the level of the signal, given the lack of quality cuts and the stacking procedure. In fact, if one compares the plots of the data over the 3-4 keV range between the datasets with and without the quality cuts, the additional lines are prominent in the data without the quality cuts. This highlights that the full BMRS dataset includes significant reducible background that could easily systematically differ from the models used in this notebook and in BMRS. Let's have a look at the best-fit signal and background models in this case. There are subtle differences between the two, but no clear excess is appearing at 3.48 keV. We also look at the downward fluctuation interpretation of the Chandra blank sky detection, as in previous notebooks.
###Code
flux_ill = 4.8e-11 / return_sin_theta_lim(EUXL,1.,D_signal)
print "Flux [cts/cm^2/s/sr] and sin^(2\theta) for illustration: ", flux_ill, return_sin_theta_lim(EUXL,flux_ill,D_signal)
chi2_instance.fix_signal_strength(flux_ill)
mn_f = dual_annealing(chi2_instance.chi2_fixed_signal,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=500)
print "Delta chi^2 between fixed signal and null:", mn_null.fun-mn_f.fun
###Output
Flux [cts/cm^2/s/sr] and sin^2(2theta) for illustration:  0.0544078188309 4.8e-11
Delta chi^2 between fixed signal and null: -25.166893035737758
###Markdown
Let's have a look at the best-fit signal and background models in this case. There are subtle differences between the two, but no clear excess is appearing at 3.48 keV. Again, we emphasize that while we are averaging the data in the plot, we did not average in the analysis.
###Code
def avg_data(data,n):
return np.mean(data.reshape(-1, n), axis=1)
fig = plt.figure(figsize=(10,8))
plt.errorbar(avg_data(Energies,6),avg_data(data,6),yerr=np.sqrt(6*avg_data(data_yerrs**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4)
plt.plot(Energies_reduced,mod_power_four_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null P.L. model")
plt.plot(Energies_reduced,mod_power_four_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
plt.plot(Energies_reduced,mod_power_four_lines(Energies_reduced,mn_f.x)+chi2_instance._A_sig*mod_UXL_reduced,
'r--',label =r"$\sin^2(2\theta) = 4.8 \times 10^{-11}$")
plt.xlim(3,4)
plt.ylim(0.12,0.16)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
###Output
_____no_output_____
###Markdown
Finally, we compute the limit in this case using the by now familiar procedure.
###Code
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead')
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
###Output
The 95% upper limit on the signal flux is 0.0279782960012058 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 2.46831840885201e-11
###Markdown
Although this analysis found mild evidence for a feature at 3.48 keV, the signal strength is incompatible with previous detections. The limits from the analysis strongly rule out the 3.5 keV line. Note that when run on the clean data the limit we obtain with this procedure is $\sin^2(2\theta) = 1.34 \times 10^{-11}$. Now with a polynomial background: here we repeat the earlier analysis but with a polynomial background model, as used in the stacked analysis in DRS20 Supplementary Material Sec. 2.9.
###Code
whs_reduced = np.where((Energies >= 3.0) & (Energies <= 4.0))[0]
Energies_reduced = Energies[whs_reduced]
data_reduced = data[whs_reduced]
data_yerrs_reduced = data_yerrs[whs_reduced]
data_bkg_reduced = data_bkg[whs_reduced]
data_yerrs_bkg_reduced = data_yerrs_bkg[whs_reduced]
mod_UXL_reduced = mod_UXL[whs_reduced]
arg_3p1 = np.argmin((Es_line-3.12)**2) #3.12 #should really be 3.128
mod_3p1 = mods[arg_3p1]
arg_3p9 = np.argmin((Es_line-3.90)**2)
mod_3p9 = mods[arg_3p9]
arg_3p7 = np.argmin((Es_line-3.68)**2)
mod_3p7 = mods[arg_3p7]
arg_3p3 = np.argmin((Es_line-3.32)**2)
mod_3p3 = mods[arg_3p3]
def mod_poly_four_lines(ens,x):
A, B, C,S1, S2, S3, S4 = x
return A+B*ens + C*ens**2 + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]+ S3*mod_3p1[whs_reduced] + S4*mod_3p9[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_poly_four_lines,mod_UXL_reduced)
x0 = np.array([ 0.30869963, -0.0713862, 0.00615966, 0.05397736, 0.05030442,
0.15154157 , 0.07303334 ])
bounds = np.array([[-1,1],[-0.5,0.5],[-0.1,0.1],[0,0.4],[0,0.4],[0,0.4],[0,0.4]])
mn_null = dual_annealing(chi2_instance.chi2_null,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=3000)
boundss = np.array([[-0.5,0.5],[-1,1],[-0.5,0.5],[-0.1,0.1],[0,0.4],[0,0.4],[0,0.4],[0,0.4]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5],mn_null.x[6]])
mn = dual_annealing(chi2_instance.chi2,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=3000)
print "Best fit background parameters:", mn_null.x
print "Best fit signal+background parameters:", mn.x
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "The chi^2/DOF of the null-model fit is:", mn_null.fun/(len(whs_reduced)-7.)
print "NB: the best-fit signal strength in this case is:", mn.x[0], "cts/cm$^2$/s/sr"
fig = plt.figure(figsize=(10,8))
plt.errorbar(avg_data(Energies,6),avg_data(data,6),yerr=np.sqrt(6*avg_data(data_yerrs**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4)
plt.plot(Energies_reduced,mod_poly_four_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null P.L. model")
plt.plot(Energies_reduced,mod_poly_four_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
plt.xlim(3,4)
plt.ylim(0.12,0.16)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead',
options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
###Output
The 95% upper limit on the signal flux is 0.037384105228592736 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 3.2981234857983934e-11
###Markdown
This change to the background continuum model does not change any conclusions. The 3.5 keV line is ruled out by these limits. Note that when we analyze the clean data the limit we obtain with this procedure is $\sin^2(2\theta) = 2.45 \times 10^{-11}$. Subtract background: now we subtract off the data taken far away from the Galactic Center. We use a folded power law under the assumption that the residual flux in the signal region should be astrophysical.
###Code
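# Forward-fold a power law through the detector response matrix (det_res), so the
# astrophysical model can be compared with the background-subtracted data in detector-counts space.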
def folded_PL(A,n):
mod_F = np.matmul(det_res,A*Energies**n)
return mod_F
def mod_folded_power_four_lines(ens,x):
A, n,S1, S2, S3, S4 = x
return folded_PL(A,n)[whs_reduced] + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]+ S3*mod_3p1[whs_reduced] + S4*mod_3p9[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced- data_bkg[whs_reduced],np.sqrt(data_yerrs_reduced**2+data_yerrs_bkg_reduced**2),mod_folded_power_four_lines,mod_UXL_reduced)
x0 = np.array([0.20973079, -0.93929346, 0.0378921, 0.02026992, 0.11586201, 0.04131473])
bounds = np.array([[0.0,0.5],[-2,0],[0,0.1],[0,0.2],[0,0.2],[0,0.2]])
mn_null = dual_annealing(chi2_instance.chi2_null,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
boundss = np.array([[-0.5,0.5],[0.0,0.5],[-2,0],[0,0.1],[0,0.2],[0,0.2],[0,0.2]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5]])
mn = dual_annealing(chi2_instance.chi2,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
print "Best fit background parameters:", mn_null.x
print "Best fit signal+background parameters:", mn.x
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "The chi^2/DOF of the null-model fit is:", mn_null.fun/(len(whs_reduced)-6.)
print "NB: the best-fit signal strength in this case is:", mn.x[0], "cts/cm$^2$/s/sr or \n\
sin^2(2theta) =",return_sin_theta_lim(EUXL,mn.x[0],D_signal-D_bkg)
fig = plt.figure(figsize=(10,8))
plt.errorbar(avg_data(Energies,6),avg_data(data-data_bkg,6),yerr=np.sqrt(6*avg_data(data_yerrs**2+data_yerrs_bkg**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4) #label="data"
plt.plot(Energies_reduced,mod_folded_power_four_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null model")
plt.plot(Energies_reduced,mod_folded_power_four_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
plt.xlim(3,4)
plt.ylim(0.045,0.075)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"SR Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
###Output
_____no_output_____
###Markdown
Note that the null model is generally underpredicting the data between 3.4 and 3.6 keV, and correspondingly a line at 3.45 keV is mildly preferred with TS ~ 5.6.
###Code
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead')
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal-D_bkg)
###Output
The 95% upper limit on the signal flux is 0.03156123531128337 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 4.8699958727995207e-11
###Markdown
Despite the mild evidence for a feature (ignoring the possibility for the moment that it is likely due to background mismodeling), the analysis still place strong tension on the conventional 3.5 keV line parameters, indicating that even if this feature was real it is not naively consistent with the original detection. That said, the background mismodeling in the vicinity of the line points to a more likely explanation as a local systematic deviation from the simplistic background models employed here. Note that when analyzing only the dataset passing our quality cuts, we see no such feature nor background mismodeling, and we obtain a much stronger limit of $\sin^2(2\theta) = 2.48 \times 10^{-11}$, in mild tension with the best-fit point here of $\sin^2(2\theta) = 2.86 \times 10^{-11}$. Include the Quiescent Particle Background (QPB)Now we will do a joint likelihood including the QPB data. The QPB data is complicated because the data is correlated from observation to observation. Thus, summing the data leads to correlated uncertainties. Thus, we will estimate the uncertainties on the QPB data in a data-driven way by fixing the normalization of the $\chi^2$ function such that the powerlaw gives the expected $\chi^2/{\rm DOF}$. We note that this is just an approximation, which is not necessary within the context of the full joint likelihood framework.
###Code
# We are going to fix a powerlaw to the QPB data and then renormalize the chi^2 function
def PL(A,n,ens):
return A*ens**n
def chi2_QPB_UN(x):
A,n = x
mod = PL(A,n,Energies_reduced)
return np.sum((mod-QPB[whs_reduced])**2)
mn_QPB = minimize(chi2_QPB_UN,[0.084,-0.20],method="Nelder-Mead")
bf_QPB=mn_QPB.x
chi2_not_reduced = chi2_QPB_UN(bf_QPB)
# The function below has the expected normalization
chi2_QPB = lambda x: chi2_QPB_UN(x)/chi2_not_reduced*((len(QPB[whs_reduced])-2.))
fig = plt.figure(figsize=(10,8))
plt.scatter(Energies_reduced,QPB[whs_reduced],marker="o",color="black")
plt.plot(Energies_reduced,PL(bf_QPB[0],bf_QPB[1],Energies_reduced),'r-',label="best-fit P.L.")
plt.xlim(3,4)
plt.ylim(0.075,0.09)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"QPB [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.show()
def mod_2power_four_lines(ens,x):
AQPB, nQPB,A, n,S1, S2, S3, S4 = x
return PL(AQPB,nQPB,ens)+ folded_PL(A,n)[whs_reduced] + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]+ S3*mod_3p1[whs_reduced] + S4*mod_3p9[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_2power_four_lines,mod_UXL_reduced)
x0 = np.array([0.11129247, -0.25195735 , 0.40423702 ,-1.50156748 ,0.06552511,
0.04919298 , 0.14941789 ,0.06836176])
bounds = np.array([[0.75*bf_QPB[0],1.25*bf_QPB[0]],[-1,0],[0.0001,2.0],[-3,0],[0,0.5],[0,0.5],[0,0.5],[0,0.5]])
# Below is the joint likelihood for the null model
def joint_chi2(x):
return chi2_QPB(x[:2])+chi2_instance.chi2_null(x)
mn_null = dual_annealing(joint_chi2,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
# Below is the joint likelihood for the signal model
def joint_chi2_sig(x):
return chi2_QPB(x[1:3])+chi2_instance.chi2(x)
boundss = np.array([[-0.5,0.5],[0.75*bf_QPB[0],1.25*bf_QPB[0]],[-1,0],[0.0001,2.0],[-3,0],[0,0.5],[0,0.5],[0,0.5],[0,0.5]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5],mn_null.x[6],mn_null.x[7]])
mn = dual_annealing(joint_chi2_sig,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
print "Best fit background parameters:", mn_null.x
print "Best fit signal+background parameters:", mn.x
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "NB: the best-fit signal strength in this case is:", mn.x[0], "cts/cm$^2$/s/sr or \n\
sin^2(2theta) =",return_sin_theta_lim(EUXL,mn.x[0],D_signal-D_bkg)
fig = plt.figure(figsize=(10,8))
plt.errorbar(avg_data(Energies,6),avg_data(data,6),yerr=np.sqrt(6*avg_data(data_yerrs**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4) #label="data"
plt.plot(Energies_reduced,mod_2power_four_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
x0 = np.array([bf_QPB[0],bf_QPB[1], 0.064218, -0.4306988 , 0.02542355 , 0.01451921 , 0.09027154, 0.03331636])
plt.plot(Energies_reduced,mod_2power_four_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null P.L. model")
plt.xlim(3,4)
plt.ylim(0.12,0.16)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead')
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
###Output
The 95% upper limit on the signal flux is 0.04332893986686534 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 3.822592337461016e-11
###Markdown
In this analysis we find a large feature at 3.48 keV with TS $\sim 10$. As in the previous section, let's for a moment assume this feature is physical. We find a best-fit $\sin^2(2\theta) = 3.64 \times 10^{-11}$ and a 95% limit $\sin^2(2\theta) = 3.82 \times 10^{-11}$. This is immediately inconsistent with an interpretation as the 3.5 keV line. More strikingly, the same analysis on the cleaned data in the previous notebook found a 95% limit of $\sin^2(2\theta) = 1.70 \times 10^{-11}$, ruling out this detection and highlighting the importance of clean data. Further, we caution against a naive interpretation of TS $\sim 10$ as a 3$\sigma$ anomaly: 3.48 keV is not the central value preferred by all UXL analyses, so the fact that a line is preferred at this energy carries with it an additional degree of freedom in terms of the central line energy. As we have seen, the lack of quality cuts on the data selection means that observations with extended emission have crept into the analysis. As compared to the reduced dataset with quality cuts, the flux is higher, and there are additional energy-dependent features in the data that complicate the analysis. In addition, observations with different backgrounds have been added together. As such, there is no reason to expect these simple background models to reasonably describe the data at the level required to resolve weak signals. In this notebook, we have shown that not only can the addition of these high-background observations introduce artificial features into the data that can resemble an emission line, but they actually decrease the sensitivity to the signal. This is precisely why we implemented the quality cuts in our fiducial analysis. Previous analysis with a narrower energy range: before finishing, let's repeat the above analysis in the narrower energy range.
###Code
whs_reduced = np.where((Energies >= EUXL-0.25) & (Energies <= EUXL+0.25))[0]
Energies_reduced = Energies[whs_reduced]
data_reduced = data[whs_reduced]
data_yerrs_reduced = data_yerrs[whs_reduced]
data_bkg_reduced = data_bkg[whs_reduced]
data_yerrs_bkg_reduced = data_yerrs_bkg[whs_reduced]
mod_UXL_reduced = mod_UXL[whs_reduced]
# We are going to fix a powerlaw to the QPB data and then renormalize the chi^2 function
def PL(A,n,ens):
return A*ens**n
def chi2_QPB_UN(x):
A,n = x
mod = PL(A,n,Energies_reduced)
return np.sum((mod-QPB[whs_reduced])**2)
mn_QPB = minimize(chi2_QPB_UN,[0.084,-0.20],method="Nelder-Mead")
bf_QPB=mn_QPB.x
chi2_not_reduced = chi2_QPB_UN(bf_QPB)
# The function below has the expected normalization
chi2_QPB = lambda x: chi2_QPB_UN(x)/chi2_not_reduced*((len(QPB[whs_reduced])-2.))
fig = plt.figure(figsize=(10,8))
plt.scatter(Energies_reduced,QPB[whs_reduced],marker="o",color="black")
plt.plot(Energies_reduced,PL(bf_QPB[0],bf_QPB[1],Energies_reduced),'r-',label="best-fit P.L.")
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"QPB [cts/s/keV]",fontsize=22)
plt.xlim(EUXL-0.25,EUXL+0.25)
plt.ylim(0.075,0.095)
plt.legend(fontsize=22)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.show()
def mod_2power_two_lines(ens,x):
AQPB, nQPB,A, n,S1, S2 = x
return PL(AQPB,nQPB,ens)+ folded_PL(A,n)[whs_reduced] + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_2power_two_lines,mod_UXL_reduced)
x0 = np.array([0.11129247, -0.25195735 , 0.40423702 ,-1.50156748 ,0.06552511,
0.04919298 ])
bounds = np.array([[0.75*bf_QPB[0],1.25*bf_QPB[0]],[-1,0],[0.0001,2.0],[-3,0],[0,0.5],[0,0.5]])
# Below is the joint likelihood for the null model
def joint_chi2(x):
return chi2_QPB(x[:2])+chi2_instance.chi2_null(x)
mn_null = dual_annealing(joint_chi2,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
# Below is the joint likelihood for the signal model
def joint_chi2_sig(x):
return chi2_QPB(x[1:3])+chi2_instance.chi2(x)
boundss = np.array([[-0.5,0.5],[0.75*bf_QPB[0],1.25*bf_QPB[0]],[-1,0],[0.0001,2.0],[-3,0],[0,0.5],[0,0.5]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5]])
mn = dual_annealing(joint_chi2_sig,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
print "Best fit background parameters:", mn_null.x
print "Best fit signal+background parameters:", mn.x
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "NB: the best-fit signal strength in this case is:", mn.x[0], "cts/cm$^2$/s/sr"
fig = plt.figure(figsize=(10,8))
plt.errorbar(avg_data(Energies,6),avg_data(data,6),yerr=np.sqrt(6*avg_data(data_yerrs**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4) #label="data"
plt.plot(Energies_reduced,mod_2power_two_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
x0 = np.array([bf_QPB[0],bf_QPB[1], 0.064218, -0.4306988 , 0.02542355 , 0.01451921 , 0.09027154, 0.03331636])
plt.plot(Energies_reduced,mod_2power_two_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null P.L. model")
plt.xlim(EUXL-0.25,EUXL+0.25)
plt.ylim(0.13,0.15)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead')
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
###Output
The 95% upper limit on the signal flux is 0.030215176915257585 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 2.665661890325141e-11
|
_notebooks/2020-08-11-part2.ipynb | ###Markdown
Distinguish Your Own Digits (DYOD) You are going to write a classifier that distinguishes between the number 3 and number 8.
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
From the command line run `pip install mnist`. This is a library that will help you bring down the mnist dataset. If you run this from a notebook, you need to put `!pip install mnist` in a cell by itself.
###Code
!pip install mnist
###Output
Collecting mnist
###Markdown
Preparing the Data
###Code
import mnist
train_images = mnist.train_images()
train_labels = mnist.train_labels()
train_images.shape, train_labels.shape
test_images = mnist.test_images()
test_labels = mnist.test_labels()
test_images.shape, test_labels.shape
image_index = 7776 # You may select anything up to 60,000
print(train_labels[image_index])
plt.imshow(train_images[image_index], cmap='Greys')
###Output
2
###Markdown
Filter data to get 3 and 8 out
###Code
train_filter = np.where((train_labels == 3 ) | (train_labels == 8))
test_filter = np.where((test_labels == 3) | (test_labels == 8))
X_train, y_train = train_images[train_filter], train_labels[train_filter]
X_test, y_test = test_images[test_filter], test_labels[test_filter]
###Output
_____no_output_____
###Markdown
We normalize the pixel values to the 0 to 1 range
###Code
X_train = X_train/255.
X_test = X_test/255.
###Output
_____no_output_____
###Markdown
And set up the labels as 1 (when the digit is 3) and 0 (when the digit is 8)
###Code
y_train = 1*(y_train==3)
y_test = 1*(y_test==3)
X_train.shape, X_test.shape
###Output
_____no_output_____
###Markdown
We reshape the data to flatten the image pixels into a set of features or co-variates:
###Code
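# Flatten each 28x28 image into a 784-dimensional feature vector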
X_train = X_train.reshape(X_train.shape[0], -1)
X_test = X_test.reshape(X_test.shape[0], -1)
X_train.shape, X_test.shape
# Importing functions from 'Kudzu'
from kudzu.model import Model
from kudzu.train import Learner
from kudzu.optim import GD
from kudzu.data import Data, Sampler,Dataloader
from kudzu.callbacks import AccCallback
from kudzu.callbacks import ClfCallback
from kudzu.loss import MSE
from kudzu.layer import Sigmoid,Relu
from kudzu.layer import Affine
###Output
_____no_output_____
###Markdown
Let us create a `Config` class, to store important parameters. This class essentially plays the role of a dictionary.
###Code
class Config:
pass
config = Config()
config.lr = 0.001
config.num_epochs = 250
config.bs = 50
###Output
_____no_output_____
###Markdown
Running models with the training data. Details about the network layers: - A first affine layer has 784 inputs and does 100 affine transforms. These are followed by a Relu. - A second affine layer has 100 inputs from the 100 activations of the past layer, and does 100 affine transforms. These are followed by a Relu. - A third affine layer has 100 activations and does 2 affine transformations to create an embedding for visualization. There is no non-linearity here. - A final "logistic regression" has an affine transform from 2 inputs to 1 output, which is squeezed through a sigmoid.
###Code
data = Data(X_train, y_train.reshape(-1,1))
sampler = Sampler(data, config.bs, shuffle=True)
dl = Dataloader(data, sampler)
opt = GD(config.lr)
loss = MSE()
training_data_x = X_train
testing_data_x = X_test
training_data_y = y_train.reshape(-1,1)
testing_data_y = y_test.reshape(-1,1)
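# Network: 784 inputs -> 100 (Relu) -> 100 (Relu) -> 2 (linear embedding) -> 1 (sigmoid output)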
layers = [Affine("first", 784, 100), Relu("first"), Affine("second", 100, 100), Relu("second"), Affine("third", 100, 2), Affine("last", 2, 1), Sigmoid("last")]
model_nn = Model(layers)
model_lr = Model([Affine("logits", 784, 1), Sigmoid("sigmoid")])
nn_learner = Learner(loss, model_nn, opt, config.num_epochs)
acc_nn = ClfCallback(nn_learner, config.bs, training_data_x , testing_data_x, training_data_y, testing_data_y)
nn_learner.set_callbacks([acc_nn])
lr_learner = Learner(loss, model_lr, opt, config.num_epochs)
acc_lr = ClfCallback(lr_learner, config.bs, training_data_x , testing_data_x, training_data_y, testing_data_y)
lr_learner.set_callbacks([acc_lr])
nn_learner.train_loop(dl)
lr_learner.train_loop(dl)
#comparing the results of NN and LR
plt.figure(figsize=(15,10))
# Neural Network plots
plt.plot(acc_nn.accuracies, 'r-', label = "Training Accuracies - NN")
plt.plot(acc_nn.test_accuracies, 'g-', label = "Testing Accuracies - NN")
# Logistic Regression plots
plt.plot(acc_lr.accuracies, 'k-', label = "Training Accuracies - LR")
plt.plot(acc_lr.test_accuracies, 'b-', label = "Testing Accuracies - LR")
plt.legend()
###Output
_____no_output_____
###Markdown
Plotting the outputs of the two-dimensional embedding layer of the NN (the model up to, but excluding, the final affine + sigmoid).
###Code
new_model = Model(layers[:-2])
testing_plot = new_model(testing_data_x)
# Plotting the scatter plot of points and color coding by class
plt.figure(figsize=(8,7))
plt.scatter(testing_plot[:,0], testing_plot[:,1], alpha = 0.1, c = y_test.ravel());
plt.title('Outputs')
###Output
_____no_output_____
###Markdown
Probability contours
###Code
model_prob = Model(layers[-2:])
#creating the x and y ranges according to the above generated plot.
x_range = np.linspace(-4, 1, 100)
y_range = np.linspace(-6, 6, 100)
x_grid, y_grid = np.meshgrid(x_range, y_range) # x_grid and y_grig are of size 100 X 100
# converting x_grid and y_grid to continuous arrays
x_gridflat = np.ravel(x_grid)
y_gridflat = np.ravel(y_grid)
# The last layer of the current model takes two columns as input. Hence transpose of np.vstack() is required.
X = np.vstack((x_gridflat, y_gridflat)).T
prob_contour = model_prob(X).reshape(100,100)
plt.figure(figsize=(10,9))
plt.scatter(testing_plot[:,0], testing_plot[:,1], alpha = 0.1, c = y_test.ravel())
contours = plt.contour(x_grid,y_grid,prob_contour)
plt.title('Probability Contours')
plt.clabel(contours, inline = True );
###Output
_____no_output_____ |
8960_Candida_Practical_4.ipynb | ###Markdown
**Name : Candida Ruth Noronha****Class : SE COMPS B****Roll No. : 8960****Title : Python Experiment 4** **Aim : Implement different data structures in Python.**1. Write a program to sort a queue in python without using extra space**Code :**
###Code
import queue
def minIndex(q, sortedIndex):
min_index = -1
min_val = 999999999999
n = q.qsize()
for i in range(n):
curr = q.queue[0]
q.get()
if (curr <= min_val and i <= sortedIndex):
min_index = i
min_val = curr
q.put(curr)
return min_index
def insertMinToRear(q, min_index):
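    # Rotate the queue once, skipping the element at min_index and appending it at the rear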
min_val = None
n = q.qsize()
for i in range(n):
curr = q.queue[0]
q.get()
if (i != min_index):
q.put(curr)
else:
min_val = curr
q.put(min_val)
def sortQueue(q):
for i in range(1, q.qsize() + 1):
min_index = minIndex(q, q.qsize() - i)
insertMinToRear(q, min_index)
if __name__ == '__main__':
q = queue.Queue()
n = int(input("Enter the number of elements: "))
for i in range(n):
q.put(int(input("Enter the element you want to insert: ")))
sortQueue(q)
print('\nQueue after sorting: ')
while (q.empty() == False):
print(q.queue[0], end = " ")
q.get()
###Output
Enter the number of elements: 5
Enter the element you want to insert: 2
Enter the element you want to insert: 3
Enter the element you want to insert: 1
Enter the element you want to insert: 5
Enter the element you want to insert: 4
Queue after sorting:
1 2 3 4 5
###Markdown
2. Write a python program to Implement shopping cart using linked-list. The user should be able to add item, remove item, display all the items and calculate total amount of the cart. Each Item details contains (Item name, quantity, price).
###Code
class Node:
def __init__(self,name=None,quantity=None,price=None):
self.name = name
self.quantity = quantity
self.price = price
self.next = None
class LinkedList:
def AtEnd(self, name, quantity,price):
NewNode = Node(name,quantity,price)
if self.headNode is None:
self.headNode = NewNode
else:
last = self.headNode
while(last.next):
last = last.next
last.next = NewNode
print("Product added !")
return
def listprint(self):
printval = self.headNode
print('===================================================================')
print('\t\t\tSHOPPING CART')
print('===================================================================')
while printval is not None:
print ("Name of the product: ",printval.name)
print("Quantity: ",printval.quantity)
print("Price: ",printval.price)
print("==================================================================")
printval = printval.next
return
def RemoveNode(self, name):
HeadVal = self.headNode
result = 0
if(HeadVal is not None):
if(HeadVal.name == name):
self.headNode = HeadVal.next
HeadVal = None
result = 1
return result
while(HeadVal is not None):
if(HeadVal.name == name):
break
prev = HeadVal
HeadVal = HeadVal.next
if (HeadVal == None):
return result
prev.next = HeadVal.next
HeadVal = None
result = 1
return result
def sumofcart(self):
val = self.headNode
if val is None:
return 0
sum = float(0)
while val is not None:
sum += float(val.price)*val.quantity
val = val.next
return sum
def __init__(self):
self.headNode = None
list = LinkedList()
while True:
print("\n1. Add an item to the cart")
print("2. Delete an item from the cart")
print("3. Display Items in the cart")
print("4. Total amount")
print("5. Exit")
choice = int(input("\nEnter the choice: "))
if(choice == 1):
name = input("\nEnter product name: ")
price = input("Enter product price per item(in Rs): ")
quantity = float(input("Enter quantity(in units): "))
list.AtEnd(name, quantity, price)
elif(choice == 2):
name = input("Enter the product name you want to remove: ")
if(list.headNode is None):
print("Cart is empty")
else:
output = int(list.RemoveNode(name))
if(output == 1):
print("Product deleted !")
else:
print("Product not found")
elif(choice == 3):
if(list.headNode is None):
print("Cart is empty")
else:
list.listprint()
elif(choice == 4):
if(list.headNode is None):
print("Cart is empty")
else:
sum = float(list.sumofcart())
print("\nTotal Amount: ",sum)
elif(choice == 5):
print("Exiting the application")
break
else:
print("Invalid input")
###Output
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 1
Enter product name: onion
Enter product price per item(in Rs): 30
Enter quantity(in units): 3
Product added !
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 1
Enter product name: potato
Enter product price per item(in Rs): 35
Enter quantity(in units): 3
Product added !
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 1
Enter product name: cabbage
Enter product price per item(in Rs): 20
Enter quantity(in units): 2
Product added !
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 1
Enter product name: peas
Enter product price per item(in Rs): 45
Enter quantity(in units): 4
Product added !
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 1
Enter product name: mint
Enter product price per item(in Rs): 10
Enter quantity(in units): 3
Product added !
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 3
===================================================================
SHOPPING CART
===================================================================
Name of the product: onion
Quantity: 3.0
Price: 30
==================================================================
Name of the product: potato
Quantity: 3.0
Price: 35
==================================================================
Name of the product: cabbage
Quantity: 2.0
Price: 20
==================================================================
Name of the product: peas
Quantity: 4.0
Price: 45
==================================================================
Name of the product: mint
Quantity: 3.0
Price: 10
==================================================================
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 2
Enter the product name you want to remove: peas
Product deleted !
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 3
===================================================================
SHOPPING CART
===================================================================
Name of the product: onion
Quantity: 3.0
Price: 30
==================================================================
Name of the product: potato
Quantity: 3.0
Price: 35
==================================================================
Name of the product: cabbage
Quantity: 2.0
Price: 20
==================================================================
Name of the product: mint
Quantity: 3.0
Price: 10
==================================================================
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 4
Total Amount: 265.0
1. Add an item to the cart
2. Delete an item from the cart
3. Display Items in the cart
4. Total amount
5. Exit
Enter the choice: 5
Exiting the application
###Markdown
Post Labs:

1. Insert a node before a given node in a doubly linked list.
###Code
class Node:
def __init__(self, x):
self.data = x
self.prev = None
self.next = None
def push(head_ref, new_data):
new_node = Node(new_data)
new_node.data = new_data
new_node.next = head_ref
new_node.prev = None
if (head_ref != None):
head_ref.prev = new_node
head_ref = new_node
return head_ref
def insertBefore(head_ref, next_node, new_data):
if (next_node == None):
print("the given next node cannot be NULL")
return
new_node = Node(new_data)
new_node.prev = next_node.prev
next_node.prev = new_node
new_node.next = next_node
if (new_node.prev != None):
new_node.prev.next = new_node
else:
head_ref = new_node
return head_ref
def printList(node):
last = None
print("Traversal in forward direction ")
while (node != None):
print(node.data, end=" ")
last = node
node = node.next
print("\nTraversal in reverse direction ")
while (last != None):
print(last.data, end=" ")
last = last.prev
if __name__ == '__main__':
head = None
head = push(head, 6)
head = push(head, 2)
head = push(head, 4)
head = insertBefore(head, head.next, 3)
print("Created Doubly Linked List is: ")
printList(head)
###Output
Created Doubly Linked List is:
Traversal in forward direction
4 3 2 6
Traversal in reverse direction
6 2 3 4
###Markdown
Q2. Write a python program to implement a linked list using collections.deque().
###Code
import collections
linked_list = collections.deque()
while True:
print("\n1. Add an element to the Linked List")
print("2. Add an element at a given loc in the Linked List")
print("3. Delete an element from the Linked List")
print("4. Display the Linked List")
print("5. Exit")
choice = int (input("\nEnter the choice : "))
if (choice == 1):
ele = int(input("Enter the element : "))
linked_list.append(ele)
print("{} is added to the Linked List".format(ele))
elif (choice == 2):
ele = int(input("Enter the element : "))
loc = int(input("Enter the location : "))
linked_list.insert(loc,ele)
print("{} is added to the Linked List at location {}".format(ele, loc))
elif (choice == 3):
ele = int(input("\nEnter the element you want to delete : "))
linked_list.remove(ele)
print("{} is deleted from the Linked List".format(ele))
elif (choice == 4):
print("\nElements in the Linked List are : ")
print(linked_list)
elif (choice == 5):
print("Exiting !")
break
else:
print("Invalid Input")
###Output
1. Add an element to the Linked List
2. Add an element at a given loc in the Linked List
3. Delete an element from the Linked List
4. Display the Linked List
5. Exit
Enter the choice : 1
Enter the element : 10
10 is added to the Linked List
1. Add an element to the Linked List
2. Add an element at a given loc in the Linked List
3. Delete an element from the Linked List
4. Display the Linked List
5. Exit
Enter the choice : 1
Enter the element : 30
30 is added to the Linked List
1. Add an element to the Linked List
2. Add an element at a given loc in the Linked List
3. Delete an element from the Linked List
4. Display the Linked List
5. Exit
Enter the choice : 1
Enter the element : 40
40 is added to the Linked List
1. Add an element to the Linked List
2. Add an element at a given loc in the Linked List
3. Delete an element from the Linked List
4. Display the Linked List
5. Exit
Enter the choice : 2
Enter the element : 20
Enter the location : 1
20 is added to the Linked List at location 1
1. Add an element to the Linked List
2. Add an element at a given loc in the Linked List
3. Delete an element from the Linked List
4. Display the Linked List
5. Exit
Enter the choice : 4
Elements in the Linked List are :
deque([10, 20, 30, 40])
1. Add an element to the Linked List
2. Add an element at a given loc in the Linked List
3. Delete an element from the Linked List
4. Display the Linked List
5. Exit
Enter the choice : 3
Enter the element you want to delete : 30
30 is deleted from the Linked List
1. Add an element to the Linked List
2. Add an element at a given loc in the Linked List
3. Delete an element from the Linked List
4. Display the Linked List
5. Exit
Enter the choice : 4
Elements in the Linked List are :
deque([10, 20, 40])
1. Add an element to the Linked List
2. Add an element at a given loc in the Linked List
3. Delete an element from the Linked List
4. Display the Linked List
5. Exit
Enter the choice : 5
Exiting !
|
2020WinterIPS-Tech/liuchengbi/.ipynb_checkpoints/first-checkpoint.ipynb | ###Markdown
Very strict indentation requirements
###Code
#Indentation is a fundamental skill
A = 10  # define A so the example below runs
if A == 10:  # note the colon
    print('A==10')
#Indentation matters
content = input('Please type something and press Enter: ')
print(content)
numberInput = input("Enter your age: ")
if int(numberInput) > 18:
    print('Adult')
else:
    print("child")
print("Monday")
print('sunday')
#print('123', end=",")  # end="," keeps output on one line, separated by commas
###Output
_____no_output_____
###Markdown
--------- The classic star game -------
###Code
print("*")
print("**")
print("***")
print("*")
print("**")
print("***")
print("*****")
print("*****")
print("*****")
print("*****")
#Loops and conditionals
#Operators: ** power, // integer division, % remainder (modulo)
#Bitwise operations work on binary: & |  (see the course slides)
#and or not
#Complex numbers are supported: a+bj
#int ,float, complex
#Remember functions such as abs(x); import the math package before using math functions
import math
abs(-10)        # abs() is a builtin; the math module has no math.abs()
math.ceil(4.1)
#Random numbers: import the random package
import random
random.random()
random.random()
random.random()*10
random.uniform(3,5)
math.e
#\n is a newline; remember %-formatting: %d integer, %f float, %s string
#String functions: about forty to memorise
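# A small added illustration of the %-formatting mentioned above:
print("age: %d, height: %.2f, name: %s" % (18, 1.75, "Tom"))
print("line one\nline two")   # \n starts a new line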
###Output
_____no_output_____
###Markdown
Loops
###Code
#if and for loops
print("*****")
print("*****")
print("*****")
print("*****")
for i in range(20):
print("*")
for i in range(20):
print("*",end='')
###Output
********************
###Markdown
Homework ------------------------
###Code
for i in range(5):
print("*"*(i+1))
for i in range(5):
print("*"*(i+(i+1)))
for i in range(5):
print(" "*(4-i),end="")
print("*"*(i+1))
for i in range(4):
print(" "*(4-(i+1)),end="")
print("*"*(i+(i+1)))
for i in range(4):
print(" "*(4-(i+1)),end="")
print("*"*(i+(i+1)))
for i in range(4):
print("*"*(4-(i+1)),end="")
print(" "*(i+(i+1)))
for i in range(5):
print("*"*(6-(i+i+1)))
print(" "*(i+1),end="")
for i in range(4):
print(" "*(4-(i+1)),end="")
print("*"*(i+(i+1)))
for i in range(5):
print(" "*(i+1),end="")
print("*"*(6-(i+i+1)))
###Output
*
***
*****
*******
*****
***
*
|
04_ingest/archive/10_Redshift_ML.ipynb | ###Markdown
Query Both Athena And Redshift With `Redshift Spectrum`

We can leverage our previously created table in Amazon Athena with its metadata and schema information stored in the AWS Glue Data Catalog to access our data in S3 through Redshift Spectrum. All we need to do is create an external schema in Redshift, point it to our AWS Glue Data Catalog, and point Redshift to the database we’ve created.
###Code
import boto3
import sagemaker
# Get region
session = boto3.session.Session()
region_name = session.region_name
# Get SageMaker session & default S3 bucket
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()
###Output
_____no_output_____
###Markdown
Connect to Redshift
###Code
redshift = boto3.client('redshift')
secretsmanager = boto3.client('secretsmanager')
###Output
_____no_output_____
###Markdown
Setup Redshift Connection Via SQLAlchemy

The Python SQL Toolkit and Object Relational Mapper (https://pypi.org/project/SQLAlchemy/)
###Code
!pip install -q SQLAlchemy==1.3.13
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pandas as pd
###Output
_____no_output_____
###Markdown
Get Redshift Credentials
###Code
import json
secret = secretsmanager.get_secret_value(SecretId='dsoaws_redshift_login')
cred = json.loads(secret['SecretString'])
master_user_name = cred[0]['username']
master_user_pw = cred[1]['password']
master_user_name
###Output
_____no_output_____
###Markdown
Redshift Configuration Parameters
###Code
redshift_cluster_identifier = 'dsoaws'
database_name_redshift = 'dsoaws'
database_name_athena = 'dsoaws'
redshift_port = '5439'
schema_redshift = 'redshift'
schema_athena = 'athena'
table_name_tsv = 'amazon_reviews_tsv'
###Output
_____no_output_____
###Markdown
Please Wait for Cluster Status `Available`
###Code
import time
response = redshift.describe_clusters(ClusterIdentifier=redshift_cluster_identifier)
cluster_status = response['Clusters'][0]['ClusterStatus']
print(cluster_status)
while cluster_status != 'available':
time.sleep(10)
response = redshift.describe_clusters(ClusterIdentifier=redshift_cluster_identifier)
cluster_status = response['Clusters'][0]['ClusterStatus']
print(cluster_status)
###Output
_____no_output_____
###Markdown
Get Redshift Endpoint Address & IAM Role
###Code
redshift_endpoint_address = response['Clusters'][0]['Endpoint']['Address']
iam_role = response['Clusters'][0]['IamRoles'][0]['IamRoleArn']
print('Redshift endpoint: {}'.format(redshift_endpoint_address))
print('IAM Role: {}'.format(iam_role))
###Output
_____no_output_____
###Markdown
Connect to Redshift Database Engine
###Code
engine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(master_user_name, master_user_pw, redshift_endpoint_address, redshift_port, database_name_redshift))
###Output
_____no_output_____
###Markdown
Configure Session
###Code
session = sessionmaker()
session.configure(bind=engine)
s = session()
###Output
_____no_output_____
###Markdown
Redshift Spectrum

Amazon Redshift Spectrum directly queries data in S3, using the same SQL syntax as Amazon Redshift. You can also run queries that span both the frequently accessed data stored locally in Amazon Redshift and your full datasets stored cost-effectively in S3.

To use Redshift Spectrum, your cluster needs authorization to access the data catalog in Amazon Athena and your data files in Amazon S3. You provide that authorization by referencing an AWS Identity and Access Management (IAM) role that is attached to your cluster. To use this capability from your Amazon SageMaker notebook:

* Register your Athena database `dsoaws` with Redshift Spectrum
* Query your data in Amazon S3

Query Redshift

Let's query results across Athena and Redshift tables using just Redshift. This feature is called Redshift Spectrum. We will use a `UNION ALL` for this. Similarly, if we need to delete data, we would drop the tables using `UNION ALL`.

Use `UNION ALL` across 2 tables (2015, 2014) in our `redshift` schema.
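For reference, registering the Glue/Athena database as an external schema usually looks something like the sketch below. This notebook assumes the `athena` schema has already been created, so the statement is shown for completeness only and is not executed here:

```python
# Hedged sketch: create an external schema in Redshift that points at the Glue Data Catalog.
statement = """
CREATE EXTERNAL SCHEMA IF NOT EXISTS athena
FROM DATA CATALOG
DATABASE '{}'
IAM_ROLE '{}'
REGION '{}'
""".format(database_name_athena, iam_role, region_name)
# s.execute(statement)
```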
###Code
statement = """
SELECT review_body, star_rating
FROM redshift.amazon_reviews_tsv_2015
UNION ALL
SELECT review_body, star_rating
FROM redshift.amazon_reviews_tsv_2014
"""
print(statement)
df = pd.read_sql_query(statement, engine)
df.head(20)
###Output
_____no_output_____
###Markdown
Run a Query on the Original Data in S3 using the `athena` Schema
###Code
statement = """
SELECT CAST(DATE_PART_YEAR(TO_DATE(review_date, 'YYYY-MM-DD')) AS INTEGER) AS year, product_category, COUNT(star_rating) AS count_star_rating
FROM athena.amazon_reviews_tsv
WHERE year = 2015 OR year = 2014
GROUP BY athena.amazon_reviews_tsv.product_category, year
ORDER BY product_category ASC, year DESC
"""
print(statement)
df = pd.read_sql_query(statement, engine)
df.head(20)
###Output
_____no_output_____
###Markdown
Create Model

https://aws.amazon.com/blogs/aws/amazon-redshift-ml-is-now-generally-available-use-sql-to-create-machine-learning-models-and-make-predictions-from-your-data/

`CREATE MODEL` SQL API

https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_MODEL.html#r_byom_create_model
###Code
# statement = """
# SELECT review_body, star_rating
# FROM (
# SELECT review_body, star_rating
# FROM redshift.amazon_reviews_tsv_2015
# )
# """
# # TODO: Union with 2014
# #UNION ALL
# #SELECT review_body, star_rating
# # FROM redshift.amazon_reviews_tsv_2014
# The AWS account id is needed for the IAM role ARN and the default S3 bucket name below
account_id = boto3.client('sts').get_caller_identity()['Account']

statement = """
CREATE MODEL predict_star_rating
FROM (
SELECT review_body, star_rating
FROM redshift.amazon_reviews_tsv_2015
)
TARGET star_rating
FUNCTION predict_star_rating
IAM_ROLE 'arn:aws:iam::{}:role/DSOAWS_Redshift'
SETTINGS (
S3_BUCKET 'sagemaker-us-east-1-{}'
)
""".format(account_id, account_id)
s.execute(statement)
###Output
_____no_output_____
###Markdown
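Once training completes, Redshift ML registers the model as a SQL function that can be called directly in queries. A hedged sketch of what a prediction query might look like, assuming the `predict_star_rating` function created above has finished training:

```python
# Sketch only: call the model function on the review text and compare with the actual rating.
statement = """
SELECT review_body,
       star_rating,
       predict_star_rating(review_body) AS predicted_star_rating
FROM redshift.amazon_reviews_tsv_2014
LIMIT 10
"""
# df_predictions = pd.read_sql_query(statement, engine)
# df_predictions.head(10)
```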
Now Query Across Both Redshift and Athena in a single query

Use `UNION ALL` across 2 Redshift tables (2015, 2014) and the rest from Athena/S3 (2013-1995)
###Code
statement = """
SELECT review_body, star_rating
FROM redshift.amazon_reviews_tsv_2015
UNION ALL
SELECT review_body, star_rating
FROM redshift.amazon_reviews_tsv_2014
UNION ALL
SELECT review_body, star_rating
FROM athena.amazon_reviews_tsv
WHERE CAST(DATE_PART_YEAR(TO_DATE(review_date, 'YYYY-MM-DD')) AS INTEGER) <= 2013
"""
print(statement)
# statement = """
# SELECT year, product_category, COUNT(star_rating) AS count_star_rating
# FROM redshift.amazon_reviews_tsv_2015
# GROUP BY redshift.amazon_reviews_tsv_2015.product_category, year
# UNION ALL
# SELECT year, product_category, COUNT(star_rating) AS count_star_rating
# FROM redshift.amazon_reviews_tsv_2014
# GROUP BY redshift.amazon_reviews_tsv_2014.product_category, year
# ORDER BY product_category ASC, year DESC
# """
# print(statement)
df = pd.read_sql_query(statement, engine)
df.head(20)
###Output
_____no_output_____
###Markdown
Use `EXPLAIN` to Verify that Both Redshift and S3 are Part of the Same Query
###Code
statement = """
EXPLAIN
SELECT year, product_category, COUNT(star_rating) AS count_star_rating
FROM redshift.amazon_reviews_tsv_2015
GROUP BY redshift.amazon_reviews_tsv_2015.product_category, year
UNION ALL
SELECT year, product_category, COUNT(star_rating) AS count_star_rating
FROM redshift.amazon_reviews_tsv_2014
GROUP BY redshift.amazon_reviews_tsv_2014.product_category, year
UNION ALL
SELECT CAST(DATE_PART_YEAR(TO_DATE(review_date, 'YYYY-MM-DD')) AS INTEGER) AS year, product_category, COUNT(star_rating) AS count_star_rating
FROM athena.amazon_reviews_tsv
WHERE year <= 2013
GROUP BY athena.amazon_reviews_tsv.product_category, year
ORDER BY product_category ASC, year DESC
"""
print(statement)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', 1024)
df = pd.read_sql_query(statement, engine)
df.head(100)
###Output
_____no_output_____
###Markdown
Expected Output

```
QUERYPLAN
XN Merge  (cost=1000177373551.14..1000177373584.69 rows=13420 width=1040)
  Merge Key: product_category, year
  ->  XN Network  (cost=1000177373551.14..1000177373584.69 rows=13420 width=1040)
        Send to leader
        ->  XN Sort  (cost=1000177373551.14..1000177373584.69 rows=13420 width=1040)
              Sort Key: product_category, year
              ->  XN Append  (cost=733371.52..177372631.06 rows=13420 width=1040)
                    ->  XN Subquery Scan *SELECT* 1  (cost=733371.52..733372.06 rows=43 width=22)
                          ->  XN HashAggregate  (cost=733371.52..733371.63 rows=43 width=22)
                                ->  XN Seq Scan on amazon_reviews_tsv_2015  (cost=0.00..419069.44 rows=41906944 width=22)
                    ->  XN Subquery Scan *SELECT* 2  (cost=772258.45..772258.98 rows=43 width=23)
                          ->  XN HashAggregate  (cost=772258.45..772258.55 rows=43 width=23)
                                ->  XN Seq Scan on amazon_reviews_tsv_2014  (cost=0.00..441290.54 rows=44129054 width=23)
                    ->  XN Subquery Scan *SELECT* 3  (cost=175866766.67..175867000.02 rows=13334 width=1040)
                          ->  XN HashAggregate  (cost=175866766.67..175866866.68 rows=13334 width=1040)
                                ->  XN S3 Query Scan amazon_reviews_tsv  (cost=175000000.00..175766766.67 rows=13333334 width=1040)
                                      Filter: (date_part_year(to_date((derived_col1)::text, 'YYYY-MM-DD'::text)) <= 2013)
                                      ->  S3 HashAggregate  (cost=175000000.00..175000100.00 rows=40000000 width=1036)
                                            ->  S3 Seq Scan athena.amazon_reviews_tsv location:s3://sagemaker-us-west-2-237178646982/amazon-reviews-pds/tsv format:TEXT  (cost=0.00..100000000.00 rows=10000000000 width=1036)
----- Tables missing statistics: amazon_reviews_tsv_2015, amazon_reviews_tsv_2014 -----
----- Update statistics by running the ANALYZE command on these tables -----
```

When to use Athena vs. Redshift?

Amazon Athena

Athena should be your preferred choice when running ad-hoc SQL queries on data that is stored in Amazon S3. It doesn’t require you to set up or manage any infrastructure resources, and you don’t need to move any data. It supports structured, unstructured, and semi-structured data. With Athena, you are defining a **“schema on read”** - you basically just log in, create a table and you are good to go.

Amazon Redshift

Redshift is targeted for modern data analytics on large sets of structured data. Here, you need to have a predefined **“schema on write”**. Unlike serverless Athena, Redshift requires you to create a cluster (compute and storage resources), ingest the data and build tables before you can start to query, but caters to performance and scale. So for any highly-relational data with a transactional nature (data gets updated), workloads which involve complex joins, and latency requirements to be sub-second, Redshift is the right choice.
###Code
%%javascript
try {
Jupyter.notebook.save_checkpoint();
Jupyter.notebook.session.delete();
}
catch(err) {
// NoOp
}
###Output
_____no_output_____ |
Python-Drills/02-Text_Wrap/Text_Wrap.ipynb | ###Markdown
Text Wrap

Write a function that will accept a string and a length N and print every N characters for the string on a separate line.

Input:
```
N = 4
test_string = "abcdefghijklmn"
```
Output:
```
abcd
efgh
ijkl
mn
```
###Code
test_string = "abcdefghijklmn"
###Output
_____no_output_____
###Markdown
YOUR CODE HERE
###Code
n = 4

# Slice the string into chunks of n characters and print each chunk on its own line
for chunk in [test_string[i:i+n] for i in range(0, len(test_string), n)]:
    print(chunk)
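# An equivalent added approach using the standard library:
import textwrap
print("\n".join(textwrap.wrap(test_string, n)))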
###Output
_____no_output_____ |
20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData/20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData_localised.ipynb | ###Markdown
Go down for licence and other metadata about this presentation \newpage Preamble

Licence

Unless stated otherwise all content is released under a [CC0]+BY licence. I'd appreciate it if you reference this but it is not necessary.

\newpage Using Ipython for presentations

A short video showing how to use Ipython for presentations
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('F4rFuIb1Ie4')
## PDF output using pandoc
import os
### Export this notebook as markdown
commandLineSyntax = 'ipython nbconvert --to markdown 20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData.ipynb'
print (commandLineSyntax)
os.system(commandLineSyntax)
### Export this notebook and the document header as PDF using Pandoc
commandLineSyntax = 'pandoc -f markdown -t latex -N -V geometry:margin=1in DocumentHeader.md 20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData.md --filter pandoc-citeproc --latex-engine=xelatex --toc -o interim.pdf '
os.system(commandLineSyntax)
### Remove cruft from the pdf
commandLineSyntax = 'pdftk interim.pdf cat 1-5 18-end output 20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData.pdf'
os.system(commandLineSyntax)
### Remove the interim pdf
commandLineSyntax = 'rm interim.pdf'
os.system(commandLineSyntax)
###Output
ipython nbconvert --to markdown 20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData.ipynb
###Markdown
The environment

In order to replicate my environment you need to know what I have installed!

Set up watermark

This describes the versions of software used during the creation. Please note that critical libraries can also be watermarked as follows:

```python
%watermark -v -m -p numpy,scipy
```
###Code
%install_ext https://raw.githubusercontent.com/rasbt/python_reference/master/ipython_magic/watermark.py
%load_ext watermark
%watermark -a "Anthony Beck" -d -v -m -g
#List of installed conda packages
!conda list
#List of installed pip packages
!pip list
###Output
abstract-rendering (0.5.1)
accelerate (2.0.0)
affine (1.2.0)
alabaster (0.7.6)
anaconda-client (1.2.1)
argcomplete (1.0.0)
astropy (1.1.1)
Babel (2.1.1)
basemap (1.0.7)
beautifulsoup4 (4.4.1)
bitarray (0.8.1)
blaze (0.9.0)
bokeh (0.11.0)
boto (2.38.0)
Bottleneck (1.0.0)
cffi (1.2.1)
click (4.1)
click-plugins (1.0.2)
cligj (0.2.0)
clyent (1.2.0)
colorama (0.3.3)
colorlover (0.2.1)
conda (3.19.0)
conda-build (1.18.2)
conda-env (2.4.5)
configobj (5.0.6)
cryptography (0.9.3)
cufflinks (0.7.1)
cycler (0.9.0)
Cython (0.23.4)
cytoolz (0.7.4)
datashape (0.5.0)
decorator (4.0.6)
descartes (1.0.1)
docutils (0.12)
dynd (9b63882)
et-xmlfile (1.0.1)
fastcache (1.0.2)
Fiona (1.6.0)
Flask (0.10.1)
funcsigs (0.4)
GDAL (2.0.0)
greenlet (0.4.9)
h5py (2.5.0)
html5lib (0.9999999)
idna (2.0)
iopro (1.7.2)
ipykernel (4.2.2)
ipython (4.0.2)
ipython-genutils (0.1.0)
ipywidgets (4.1.1)
itsdangerous (0.24)
jdcal (1.2)
jedi (0.9.0)
Jinja2 (2.8)
jsonschema (2.4.0)
jupyter (1.0.0)
jupyter-client (4.1.1)
jupyter-console (4.1.0)
jupyter-core (4.0.6)
llvmlite (0.8.0)
lxml (3.5.0)
MarkupSafe (0.23)
matplotlib (1.5.1)
mistune (0.7.1)
mock (1.3.0)
multipledispatch (0.4.8)
nbconvert (4.1.0)
nbformat (4.0.1)
networkx (1.10)
nltk (3.1)
nose (1.3.7)
notebook (4.1.0)
numba (0.22.1)
numbapro (0.22.1)
numexpr (2.4.4)
numpy (1.10.2)
odo (0.4.0)
openpyxl (2.3.2)
OWSLib (0.10.3)
pandas (0.17.1)
path.py (0.0.0)
patsy (0.4.0)
pbr (1.3.0)
pep8 (1.6.2)
pexpect (3.3)
pickleshare (0.5)
Pillow (3.1.0)
pip (8.0.2)
plotly (1.9.5)
ply (3.8)
psutil (3.3.0)
ptyprocess (0.5)
py (1.4.30)
pyasn1 (0.1.9)
pycosat (0.6.1)
pycparser (2.14)
pycrypto (2.6.1)
pycurl (7.19.5.1)
pyepsg (0.2.0)
pyflakes (1.0.0)
Pygments (2.0.2)
pyodbc (3.0.10)
pyOpenSSL (0.15.1)
pyparsing (2.0.3)
pyproj (1.9.4)
pyshp (1.2.3)
pytest (2.8.1)
python-dateutil (2.4.2)
pytz (2015.7)
PyYAML (3.11)
pyzmq (15.2.0)
qtconsole (4.1.1)
rasterio (0.25.0)
redis (2.10.3)
requests (2.9.1)
rope-py3k (0.9.4.post1)
scikit-image (0.11.3)
scikit-learn (0.17)
scipy (0.16.1)
seaborn (0.6.0)
setuptools (19.2)
Shapely (1.5.11)
simplegeneric (0.8.1)
simplejson (3.8.1)
six (1.10.0)
snowballstemmer (1.2.0)
snuggs (1.3.1)
sockjs-tornado (1.0.1)
Sphinx (1.3.1)
sphinx-rtd-theme (0.1.7)
spyder (2.3.8)
SQLAlchemy (1.0.11)
statsmodels (0.6.1)
sympy (0.7.6.1)
tables (3.2.2)
terminado (0.5)
Theano (0.7.0)
toolz (0.7.4)
tornado (4.3)
traitlets (4.1.0)
ujson (1.33)
unicodecsv (0.14.1)
Werkzeug (0.11.3)
wheel (0.26.0)
xlrd (0.9.4)
XlsxWriter (0.8.2)
xlwt (1.0.0)
###Markdown
Running dynamic presentations

You need to install the [RISE Ipython Library](https://github.com/damianavila/RISE) from [Damián Avila](https://github.com/damianavila) for dynamic presentations.

To convert and run this as a static presentation run the following command:
###Code
# Notes don't show in a python3 environment
!ipython nbconvert 20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData.ipynb --to slides --post serve
###Output
_____no_output_____
###Markdown
To close this instance press *control 'c'* in the *ipython notebook* terminal console.

Static presentations allow the presenter to see *speakers notes* (use the 's' key).

If running dynamically run the scripts below.

Pre load some useful libraries
###Code
#Future proof python 2
from __future__ import print_function #For python3 print syntax
from __future__ import division
# def
import IPython.core.display
# A function to collect user input - ipynb_input(varname='username', prompt='What is your username')
def ipynb_input(varname, prompt=''):
"""Prompt user for input and assign string val to given variable name."""
js_code = ("""
var value = prompt("{prompt}","");
var py_code = "{varname} = '" + value + "'";
IPython.notebook.kernel.execute(py_code);
""").format(prompt=prompt, varname=varname)
return IPython.core.display.Javascript(js_code)
# inline
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
\newpage About me * Honorary Research Fellow, University of Nottingham: [orcid](http://orcid.org/0000-0002-2991-811X)* Director, Geolytics Limited - A spatial data analytics consultancy About this presentation* [Available on GitHub](https://github.com/AntArch/Presentations_Github/tree/master/20151008_OpenGeo_Reuse_under_licence) - https://github.com/AntArch/Presentations_Github/* [Fully referenced PDF](https://github.com/AntArch/Presentations_Github/blob/master/20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData/20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData.pdf) \newpage Contribution to GIScience learning outcomesThis presentation contributes to the following learning outcomes for this course.1. Knowledge and Understanding: * Appreciate the importance of standards for Geographic Information and the role of the Open Geospatial Consortium. * Understand the term 'interoperability'. * Appreciate the different models for database design. * Understand the basis of Linked Data. * Find UK government open data and understand some of the complexities in the use of this data. * Appreciate the data issues involved in managing large distributed databases, Location-Based Services and the emergence of real-time data gathering through the 'Sensor-Web'. * Understand the different models for creating international Spatial Data Infrastructures.1. Intellectual Skills: * Evaluate the role of standards and professional bodies in GIS. * Articulate the meaning and importance of interoperability, semantics and ontologies. * Assess the technical and organisational issues which come into play when attempting to design large distributed geographic databases aimed at supporting 'real-world' problems. A potted history of mapping In the beginning was the geoword and the word was ***cartography***  \newpage  * Cartography was king. * Static representations of spatial knowledge with the cartographer deciding what to represent. * Hence, maps are domain specific knowledge repositories for spatial data \newpage And then there was data .........  \newpage  Restrictive data \newpage  Disconnected data with different: * Standards* Quality* Databases* Semantics \newpage Why is this an issue?Over to you...... * Decision Making * certainty * uncertainty* Co-ordination* Policy formation* Efficiencies* Best Practice \newpage [INSPIRE](http://inspire.ec.europa.eu/)
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('xew6qI-6wNk')
###Output
_____no_output_____
###Markdown
\newpage INSPIRE principles* Data should be collected only once and kept where it can be maintained most effectively* It should be possible to combine seamless spatial information from different sources across Europe and share it with many users and applications* It should be possible for information collected at one level/scale to be shared with all levels/scales; detailed for thorough investigations, general for strategic purposes* Geoinformation needed for good governance at all levels should be readily and transparently available* Easy to find what geoinformation is available, how it can be used to meet a particular need, and under which conditions it can be acquired and used \newpage  Making data interoperable and open \newpage Interoperability> is a property of a product or system, whose interfaces are completely understood, to work with other products or systems, present or future, without any restricted access or implementation.@wikipedia_interoperability_2016 \newpage Technical interoperability - levelling the field \newpage Syntactic Heterogeneity> the difference in data format. The same **logical model** can be represented in a range of different **physical models** (for example ESRI shape file or Geography Mark-up Language (GML)).This mismatch between underlying data models implies that the same information could be represented differently in different organisations. The most profound difference is in the storage paradigm: * relational, * object orientated or* hybrids.@beck_uk_2008, @bishr_overcoming_1998 \newpage Semantic Heterogeneity> Semantic heterogeneity refers to differences in naming conventions and conceptual groupings.This can be subdivided into **naming** and **cognitive** heterogeneities. * Naming (synonym) mismatch arises when semantically identical data items are named differently. * Cognitive (homonym) mismatch arises when semantically different data items are named identically. * Cognitive semantics can be subtle, reflecting the domain of discourse.@beck_uk_2008, @bishr_overcoming_1998 \newpage Schematic Heterogeneity> refers to the differences in data model between organisations modelling the same concepts. This reflects each organisation’s abstracted view of their business and physical assets. Hence, different hierarchical and classification concepts are adopted by each organisation to refer to identical or similar real world objects.@beck_uk_2008, @bishr_overcoming_1998 \newpage The role of the OGC (a geospatial standards body)* To serve as a global forum for the development, promotion and harmonization of *open and freely available* **geospatial standards*** To achieve the full societal, economic and scientific benefits of integrating electronic location resources into commercial and institutional processes worldwide.  \newpage The role of the OGC (a geospatial standards body)OGC’s Open Standards are:* Freely and publicly available* Non discriminatory* No license fees* Vendor neutral* Data neutral* Adopted in a formal, member based consensus processOGC’s Open Standards are submitted to other industry and National Standards Development Organisations in the vertical area and to global organisations like ISO for standard branding. 
\newpage OGC Technologies* The OGC publish standards that have been agreed by OGC members* Current standards can be found at: [http://www.opengeospatial.org/standards](http://www.opengeospatial.org/standards)* These are implementation standards * written for a more technical audience and detail the interface structure between software components* Predicated on abstract specifications * the conceptual foundation for most OGC standards development activities * [http://www.opengeospatial.org/specs/?page=abstract](http://www.opengeospatial.org/specs/?page=abstract) \newpage The main OGC standards* [WMS – Web Map Service](http://www.opengeospatial.org/standards/wms) * Provides rendered images of maps * Current version: 1.3* [WFS – Web Feature Service](http://www.opengeospatial.org/standards/wfs) * Provides vector data on demand * Current version: 2.0* [WCS – Web Coverage Service](http://www.opengeospatial.org/standards/wcs) * Provides raster data (e.g. satellite data) on demand * Current version: 2.0* [GML – The Geography Markup Language](http://www.opengeospatial.org/standards/gml) * Used as an interoperable standard for transmitting geographic data (2D, 3D, topology, etc.) * Versions 2.1.x and 3.2.1 are most relevant \newpage Other OGC standards
###Code
from IPython.display import IFrame
IFrame('http://www.opengeospatial.org/standards', width=1000, height=700)
###Output
_____no_output_____
###Markdown
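As a concrete taste of the OGC service standards listed above, the OWSLib package (it appears in the pip listing earlier) can talk to a WMS endpoint in a few lines. This is a hedged sketch: the endpoint URL is only an example and may change, so treat it as illustrative rather than guaranteed to work. Equivalent wrappers exist for WFS and WCS (`owslib.wfs`, `owslib.wcs`).

```python
# Sketch only: connect to a WMS endpoint with OWSLib and list some of its layers.
# The URL below is an example/assumption - substitute any WMS you have access to.
from owslib.wms import WebMapService

wms = WebMapService('http://ows.mundialis.de/services/service?', version='1.1.1')
print(wms.identification.title)      # service title reported by the server
print(list(wms.contents)[:5])        # first few layer names offered by the service
```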
\newpage Interoperability in action \newpage What did technical interoperability facilitate From Map to Model The changing paradigm of map creation from cartography to data driven visualization \newpage  \newpage The world was a happy place.......Our data was interoperable! \newpage Then ....... along came **open data** \newpage The Open landscape integrates **formal** and **informal** data \newpage  \newpage Background - originally a grass roots (community) movement.. Open access to knowledge gained significant momementum with the increased uptake of the World Wide Web. This is particularly seen in initiatives like [Wikipedia](https://en.wikipedia.org) (established in 2001) and [Open Knowledge](https://en.wikipedia.org/wiki/Open_Knowledge) (was the Open Knowledge Foundation: established in 2004). Within the Geo community [Open Street Map ](https://en.wikipedia.org/wiki/OpenStreetMap) (also established in 2004) and the [Open Source Geospatial Foundation](https://en.wikipedia.org/wiki/Open_Source_Geospatial_Foundation) (OSGeo - established in 2006) are key initiatives that promote accessible data and software resources respectively.Critical to this is that these were **grass roots** (community) movements that have proven to be highly disruptive to incumbent data providers, practices and policies. \newpage Open in government The impact of these grass roots movements is seen in Open Data (dot) gov. Pioneered by leaders such as Tim Berners Lee and Nigel ShadboltThe Shakespeare review [-@shakespeare_shakespeare_2013] indicate that the amount of government Open Data, at least in the UK, is only going to grow.Open data has the potential to trigger a revolution in how governments think about providing services to citizens and how they measure their success: this produces societal impact.This will require an understanding of citizen needs, behaviours, and mental models, and how to use data to improve services. \newpage Valuing Open Data![McKinsey report valuing *open data* [@mckinsey_open_2013]](https://dl.dropboxusercontent.com/u/393477/ImageBank/Mckinsey_Value_of_OpenData.png) A [McKinsey Global Institute report examines the economic impact of Open Data](http://www.mckinsey.com/insights/business_technology/open_data_unlocking_innovation_and_performance_with_liquid_information) [@mckinsey_open_2013] and estimates that globally open data could be worth a minimum of $3 trillion annually. \newpage Open in academia> Open inquiry is at the heart of the scientific enterprise..... Science’s powerful capacity for self-correction comes from this openness to scrutiny and challenge.*Science as an open enterprise* [@royal_society_science_2012 p. 7].>Science is based on building on, reusing and openly criticising the published body of scientific knowledge.>For science to effectively function, and for society to reap the full benefits from scientific endeavours, it is crucial that science data be made open.The Panton Principles (@murray-rust_panton_2010) which underpin **Open Science**. The Royal Society’s report Science as an open enterprise [-@royal_society_science_2012] identifies how 21^st^ century communication technologies are changing the ways in which scientists conduct, and society engages with, science. The report recognises that ‘open’ enquiry is pivotal for the success of science, both in research and in society.The Panton Principles pre-cursed this call with a clarion call to the academic community to open their data and start to conduct **open science**.  
This goes beyond open access to publications (Open Access), to include access to data and other research outputs (Open Data), and the process by which data is turned into knowledge (Open Science). The next generation open data in academiaZenodo is a **DATA REPOSITORY** which offers:* accreditation * different licences * different exposure (private (closed), public (open) and embargoed (timestamped)) * DOIs * is free at the point of use * is likely to be around for a long time * supported by Horizon 2020 and delivered by CERN \newpage The underlying rationale of Open Data is: * unfettered access to large amounts of ‘raw’ data * enables patterns of re-use and knowledge creation that were previously impossible. * improves transparency and efficiency * encourages innovative service delivery* introduces a range of data-mining and visualisation challenges, * which require multi-disciplinary collaboration across domains * catalyst to research and industry* supports the generation of new products, services and markets* the prize for succeeding is improved knowledge-led policy and practice that transforms * communities, * practitioners, * science and * society \newpage Free and Open Source Software in in Geo
###Code
from IPython.display import IFrame
IFrame('http://www.osgeo.org/', width=1200, height=700)
###Output
_____no_output_____
###Markdown
\newpage So...... we have access to lots of data and software* Formal and Informal* Open and Proprietary Where are these new data products?Data, data everywhere - but where are the new derivatives and services? \newpage Interoperability[The Defense domain are a bit more explicit......](http://www.dau.mil/pubscats/atl%20docs/jan-feb/watson_jan-feb10.pdf)> As defined by DoD policy, interoperability is the ability of systems, units, or forces to provide data, information, material, and services to, and accept the same from, other systems, units, or forces; and to use the data, information, material, and services so exchanged to enable them to operate effectively together. IT and NSS interoperability includes both the technical exchange of information and the end-to-end operational effectiveness of that exchanged information as required for mission accomplishment. Interoperability is more than just information exchange; it includes systems, processes, procedures, organizations, and missions over the life cycle and must be balanced with information assurance.@watson_joint_2010 \newpage Non-technical interoperability issues?![Islands of incompatibility [@IncompatibilitiesAndLicenceClauses_en_beck_2016]](https://upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Incompatibilities_And_Licence_Clauses.svg/989px-Incompatibilities_And_Licence_Clauses.svg.png) \newpage \newpage  \newpage Non-technical interoperabilityIssues surrounding non-technical interoperability include: * Policy interoperabilty* Licence interoperability* Legal interoperability* Social interoperabilityWe will focus on licence interoperability \newpage Policy InteroperabilityThe relationship between:* Individuals* Organisations* CountriesPolicy determines what, who and how different content can be accessed. In addition to other elements the policy statements determine:* Authentication* Authorization* Audit See @innocenti_towards_2011 for more details \newpage Social (or human) InteroperabilitySocial interoperability is concerned about the environment and business and human processes.* Tools are used by people* The social dimension of operational use is underestimated (it's difficult)* People form complex inclusive and exclusive networks * These operate at many scales[US Department of Defence researchers have advocated](http://www.dtic.mil/ndia/2009systemengr/8854WednesdayTrack8Zavin.pdf) the development of Policy, Standards, and Operational Procedures for:* forming human networks* human to human communications* organization to organization communications* human system integration* information sharing across disparate domains: * DoD-Coalition-Interagency-intercommunity \newpage Legal Interoperability> Legal interoperability addresses the process of making legal rules cooperate across jurisdictions, on different subsidiary levels within a single state or between two or more states. (@weber_legal_2014, p. 
6)The [Research Data Alliance](https://rd-alliance.org/) state that [legal interoperability occurs among multiple datasets when](https://rd-alliance.org/group/rdacodata-legal-interoperability-ig/wiki/legal-principles-data.html):* use conditions are clearly and readily determinable for each of the datasets,* the legal use conditions imposed on each dataset allow creation and use of combined or derivative products, and* users may legally access and use each dataset without seeking authorization from data rights holders on a case-by-case basis, assuming that the accumulated conditions of use for each and all of the datasets are met.Legal interoperability also implies that the search for or tracking of licenses or other legal instruments and their compatibility with other legal conditions will occur in online environments. \newpage Licence InteroperabilityA specific form of legal interoperability \newpage Example of applying the semantic web to licence interoperability  There is a multitude of formal and informal data. \newpage What is a licence?[Wikipedia state:](https://en.wikipedia.org/wiki/License)> A license may be granted by a party ("licensor") to another party ("licensee") as an element of an agreement between those parties. > A shorthand definition of a license is "an authorization (by the licensor) to use the licensed material (by the licensee)."  Each of these data objects can be licenced in a different way. This shows some of the licences described by the RDFLicence ontology \newpage
###Code
### Export this notebook as markdown
commandLineSyntax = 'dot -Tpng FCA_ConceptAnalysis.dot > FCA_ConceptAnalysis.png'
commandLineSyntax = 'dot -Tsvg FCA_ConceptAnalysis.dot > FCA_ConceptAnalysis.svg'
print (commandLineSyntax)
os.system(commandLineSyntax)
###Output
_____no_output_____
###Markdown
  Concepts (derived from Formal Concept Analysis) surrounding licences \newpage Two lead organisations have developed legal frameworks for content licensing:* [Creative Commons (CC)](https://creativecommons.org/) and * [Open Data Commons (ODC)](http://opendatacommons.org/). Until the release of [CC version 4](https://wiki.creativecommons.org/4.0), published in November 2013, the CC licence did not cover data. Between them, CC and ODC licences can cover all forms of digital work.* **There are many other licence types*** Many are bespoke * Bespoke licences are difficult to manage * Many legacy datasets have bespoke licences  I'll describe CC in more detail \newpage Creative Commons Zero Creative Commons Zero (CC0) is essentially public domain which allows: * Reproduction* Distribution* Derivations \newpage Constraints on CC0The following clauses constrain CC0:* Permissions * ND – No derivatives: the licensee can not derive new content from the resource.* Requirements * BY – By attribution: the licensee must attribute the source. * SA – Share-alike: if the licensee adapts the resource, it must be released under the same licence.* Prohibitions * NC – Non commercial: the licensee must not use the work commercially without prior approval. CC license combinationsLicense|Reproduction|Distribution|Derivation|BY|SA|NC----|----|----|----|----|----|----CC0|X|X|X|||CC-BY-ND|X|X||X||CC-BY-NC-ND|X|X||X||XCC-BY|X|X|X|X||CC-BY-SA|X|X|X|X|X|CC-BY-NC|X|X|X|X||XCC-BY-NC-SA|X|X|X|X|X|XTable: [Creative Commons license combinations](https://docs.google.com/spreadsheets/d/17aT7Dj6QtE88XPS44oPQ7mVeSdY1YnZ1rlpjPvXNz0E/pub?single=true&gid=0&output=html) \newpage Why are licenses important?* They tell you what you can and can't do with 'stuff'* Very significant when multiple datasets are combined * It then becomes an issue of license compatibility \newpage Which is important when we mash up dataCertain licences when combined: * Are incompatible * Creating data islands* Inhibit commercial exploitation (NC)* Force the adoption of certain licences * If you want people to commercially exploit your stuff don't incorporate CC-BY-NC-SA data!* Stops the derivation of *new works* \newpage ![Islands of incompatibility [@IncompatibilitiesAndLicenceClauses_en_beck_2016]](https://upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Incompatibilities_And_Licence_Clauses.svg/989px-Incompatibilities_And_Licence_Clauses.svg.png) \newpage  A conceptual licence processing workflow. The licence processing service analyses the incoming licence metadata and determines if the data can be legally integrated and any resulting licence implications for the derived product. \newpage A rudimentry logic example```Data1 hasDerivedContentIn NewThing.Data1 hasLicence a cc-by-sa.What hasLicence a cc-by-sa? reason hereIf X hasDerivedContentIn Y and hasLicence Z then Y hasLicence Z. reason hereData2 hasDerivedContentIn NewThing.Data2 hasLicence a cc-by-nc-sa.What hasLicence a cc-by-nc-sa? reason hereNothing hasLicence a cc-by-nc-sa and hasLicence a cc-by-sa. reason here```And processing this within the Protege reasoning environment
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('jUzGF401vLc')
###Output
_____no_output_____
###Markdown
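For anyone who wants to experiment with the licence logic outside Protege, a toy licence-compatibility check can be sketched in a few lines of Python. The clause sets below are illustrative assumptions distilled from the CC clause table earlier; this is not a complete or authoritative licence calculus.

```python
# Toy sketch of a licence 'calculus': each licence is reduced to a set of clauses
# and a combination is checked for obviously incompatible share-alike terms.
LICENCE_CLAUSES = {            # illustrative subset only (assumption)
    'cc0':         set(),
    'cc-by':       {'BY'},
    'cc-by-sa':    {'BY', 'SA'},
    'cc-by-nc-sa': {'BY', 'NC', 'SA'},
}

def combine(licences):
    """Union the clauses of all input licences and flag share-alike conflicts."""
    clauses = set().union(*(LICENCE_CLAUSES[l] for l in licences))
    share_alike = sorted({l for l in licences if 'SA' in LICENCE_CLAUSES[l]})
    # Two different share-alike licences force incompatible relicensing terms.
    if len(share_alike) > 1:
        raise ValueError("Incompatible share-alike licences: %s" % share_alike)
    return clauses

print(combine(['cc-by', 'cc-by-sa']))        # {'BY', 'SA'} - the derivative must be BY-SA
try:
    combine(['cc-by-sa', 'cc-by-nc-sa'])
except ValueError as err:
    print(err)                               # the SA terms conflict, as in the Protege example
```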
\newpage Here's something I prepared earlier

A live presentation (for those who weren't at the event).....
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('tkRB5Rp1_W4')
###Output
_____no_output_____
###Markdown
\newpage A more robust logic* Would need to decouple licence incompatibility from licence name into licence clause (see table below)* Deal with all licence type* Provide recommendations based on desired derivative licence type* Link this through to the type of process in a workflow: * data derivation is, from a licence position, very different to contextual displayLicense|Reproduction|Distribution|Derivation|BY|SA|NC----|----|----|----|----|----|----CC0|X|X|X|||CC-BY-ND|X|X||X||CC-BY-NC-ND|X|X||X||XCC-BY|X|X|X|X||CC-BY-SA|X|X|X|X|X|CC-BY-NC|X|X|X|X||XCC-BY-NC-SA|X|X|X|X|X|XODC-PDDL|X|X|X|||ODC-BY|X|X|X|X||ODC-ODbL|X|X|X|X|X|OGL 2.0|X|X|X|X||OS OpenData|X|X|X|X|?|Table: [Creative Commons license combinations](https://docs.google.com/spreadsheets/d/17aT7Dj6QtE88XPS44oPQ7mVeSdY1YnZ1rlpjPvXNz0E/pub?single=true&gid=0&output=html) \newpage OGC and Licence interoperability* The geo business landscape is increasingly based on integrating heterogeneous data to develop new products* Licence heterogeneity is a barrier to data integration and interoperability* A licence calculus can help resolve and identify heterogenties leading to * legal compliance * confidence* Use of standards and collaboration with organisations is crucial * [Open Data Licensing ontology](https://github.com/theodi/open-data-licensing) * [The Open Data Institute](http://theodi.org/)* Failure to do this could lead to breaches in data licenses * and we all know where that puts us........  \newpage Linked data and the Semantic Web The web of Documents* a global filesystem* Designed for human consumption* Primary objects are documents* Expresses links between documents (or sub-parts of)* Degree of structure in objects is fairly low* Semantics of content and links is implicit The web of Linked Data* a global database* Designed for machines first, humans later* Primary objects are things (or descriptions of things)* Expresses links between things* Degree of structure in (descriptions of) things is high* Semantics of content and links explicit  \newpage Linked Data a way of publishing data on the Web that:* encourages reuse* reduces redundancy* maximises its (real and potential) inter-connectedness* enables network effects to add value to data Why publish Linked Data* Ease of discovery* Ease of consumption * standards-based data sharing* Reduced redundancy* Added value * build ecosystems around your data/content  \newpage Linked Data Basics [Four rules for Linked Data from Tim Berners Lee](https://www.w3.org/DesignIssues/LinkedData.html)1. Use URIs as names for things1. Use HTTP URIs so that people can look up those names.1. When someone looks up a URI, provide useful information, using the standards (RDF*, SPARQL)1. Include links to other URIs, so that they can discover more things. \newpage The [Resource Description Framework](https://en.wikipedia.org/wiki/Resource_Description_Framework) (RDF) data modelRDF stores data as *triples* in the following manner:This is a [graph model](https://en.wikipedia.org/wiki/Graph_(abstract_data_type) that consists of nodes (subject and object)) and edges (predicate). \newpage Data expressed as RDF \newpage Data expressed as RDF Linked Data \newpage [RDF notation](https://en.wikipedia.org/wiki/Resource_Description_Framework)RDF can be represented in different ways - each of which are interoperable. For example:* RDF/XML, * Notation-3 (N3), * Turtle (.ttl), * N-Triples, * RDFa,* RDF/JSONEach represent *subject, predicate, object* triples in different ways \newpage One step beyond.... 
Linked Open Data [Is your Linked Open Data 5 star](https://www.w3.org/DesignIssues/LinkedData.html)```★ Available on the web (whatever format) but with an open licence, to be Open Data★★ Available as machine-readable structured data (e.g. excel instead of image scan of a table)★★★ as (2) plus non-proprietary format (e.g. CSV instead of excel)★★★★ All the above plus, Use open standards from W3C (RDF and SPARQL) to identify things, so that people can point at your stuff★★★★★ All the above, plus: Link your data to other people’s data to provide context``` \newpage The Supporting Semantic Web Stack \newpage It's about re-use VocabulariesThe glue that joins concepts together. A concept shared is a link gained. By re-using concepts it makes it easier to understand what your data means and where and how it should be re-used.
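To make the subject-predicate-object model above concrete, here is a minimal sketch in plain Python (no RDF library is assumed; the URIs and facts are invented examples):

```python
# Each fact is a (subject, predicate, object) triple, exactly as in the RDF data model.
triples = [
    ("http://example.org/Leeds",     "isA",  "City"),
    ("http://example.org/Leeds",     "isIn", "http://example.org/Yorkshire"),
    ("http://example.org/Sheffield", "isIn", "http://example.org/Yorkshire"),
]

# A 'query' is then just pattern matching over the triples (None acts as a wildcard).
def match(s=None, p=None, o=None):
    return [t for t in triples
            if (s is None or t[0] == s)
            and (p is None or t[1] == p)
            and (o is None or t[2] == o)]

print(match(p="isIn", o="http://example.org/Yorkshire"))   # everything located in Yorkshire
```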
###Code
from IPython.display import IFrame
IFrame('http://lov.okfn.org/dataset/lov/', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage It's about re-use Ontology> An ontology is a shared formal explicit specialisation of a conceptualisation \newpage Ontology* The term originated from a philosophy * which deals with the nature and organization of reality* It tries to answer the questions: * What is being? * What are the features common to all beings? * How should things be classified? Ontology> An ontology is a shared formal explicit specialisation of a conceptualisationAfter Agarwal -(@agarwal_ontological_2005):* *conceptualisation* is identifying relevant abstracted concepts of a phenomena suited to a specific domain* *explicit* means that the concepts are explicitly defined* *formal* refers to the fact that the ontology should be machine-readable* *shared* refers to notion that on ontology captures consensual knowledge \newpage Ontology Example * A ‘Carnivore’ is a concept whose members are exactly those animals who eat only meat* A ‘Bear’ is a concept whose members are a kind of ‘Carnivore’* A ‘Cub’ is a concept whose members are exactly those ‘Bear’ whose age is less than one year* A Panda is a individual of a ‘Bear’We can use these concepts to infer new information from facts. For example: from the fact 'Ching Ching' is a newborn Panda we know:```'Ching Ching' is a Panda.'Ching Ching' is a newborn.``` We can infer:```'Ching Ching' is a Bear.'Ching Ching' is a Carnivore. ????'Ching Ching' eats only meat. ????```If we had other logic that told us that 'newborn' is the same as saying less than one year then we can also infer```'Ching Ching' is a Cub.```In an ontology/RDF you can say *Anything about Anything*. Whilst carnivore is a generally useful concept about *bears* it is not *specifically* useful when considering *pandas*. **The domain of application is clearly important.** \newpage [SPARQL](https://en.wikipedia.org/wiki/SPARQL) the SQL of the semantic webFind me the capital of all countries in Africa:```PREFIX abc: .SELECT ?capital ?countryWHERE { ?x abc:cityname ?capital ; abc:isCapitalOf ?y. ?y abc:countryname ?country ; abc:isInContinent abc:Africa.}``` There is a thing ('x') against which the following concepts exist:* 'abc:cityname' (the name of a city: stored in the variable 'capital') * 'abc:isCapitalOf' (the concept for which the city is capital: stored in the variable 'y') The 'concept for which the city is capital' (stored in variable 'y') must also have the following concepts:* 'abc:countryname' (the name of a country: stored in the variable 'country')* 'abc:isInContinent' abc:Africa (isInContinent of the the individual Africa') \newpage [GeoSPARQL](http://www.opengeospatial.org/projects/groups/geosparqlswg) the SQL of the spatial semantic webAn OGC standard```SELECT ?fWHERE { ?f my:hasPointGeometry ?fGeom .?fGeom ogc:asWKT ?fWKT .FILTER (ogcf:relate(?fWKT,“Polygon ((-83.5 34.0, -83.5 34.3, -83.1 34.3,-83.1 34.0, -83.5 34.0))”^^ogc:WKTLiteral,ogc:within))}```
###Code
from IPython.display import IFrame
IFrame('http://www.opengeospatial.org/projects/groups/geosparqlswg', width=1000, height=700)
###Output
_____no_output_____
###Markdown
Linked Data and Geo \newpage GeoSPARQL employs spatial calculus \newpage Querying Linked Data in the wild The Ordnance SurveyA URI for every place in the UK
###Code
from IPython.display import IFrame
IFrame('http://data.ordnancesurvey.co.uk/doc/50kGazetteer/177276', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage
###Code
from IPython.display import IFrame
IFrame('http://data.ordnancesurvey.co.uk/id/postcodeunit/NG72QL', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage
###Code
from IPython.display import IFrame
IFrame('http://data.ordnancesurvey.co.uk/', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage
###Code
from IPython.display import IFrame
IFrame('http://data.ordnancesurvey.co.uk/ontology/', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage
###Code
from IPython.display import IFrame
IFrame('http://data.ordnancesurvey.co.uk/datasets/code-point-open/explorer/sparql', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage Open Street Map
###Code
from IPython.display import IFrame
IFrame('http://linkedgeodata.org/About', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage
###Code
from IPython.display import IFrame
IFrame('http://browser.linkedgeodata.org/', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage Geonames
###Code
from IPython.display import IFrame
IFrame('http://www.geonames.org/ontology/documentation.html', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage
###Code
from IPython.display import IFrame
IFrame('http://www.geonames.org/maps/google_52.94_358.8.html', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage
###Code
from IPython.display import IFrame
IFrame('http://lov.okfn.org/dataset/lov/vocabs/gn', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage Geo Vocabularies
###Code
from IPython.display import IFrame
IFrame('http://lov.okfn.org/dataset/lov/vocabs/?q=geo+space+address+geonames+os+spatial', width=1000, height=700)
###Output
_____no_output_____
###Markdown
\newpage Conclusions* Technical interoperability is only one part of the problem* Open data will become increasingly important as governments and other groups release resources under clear licences * Licences are a barrier to re-use* Data shows its true value when combined with other data sources – linked data creates an opportunity* Usability: common data model and reference of common URIs (for example, postcodes) allows for easy data aggregation and integration.* Shift in focus from cartography and geometries to ‘things’ and the relationships between them.* Spatial no longer special – part of the bigger information world....* location is a very important information hub and provides a key underpinning reference framework which brings many datasets together and provides important context. \newpage Geo reasoning example (if time) Geo example: ```Leeds is a city.Yorkshire is a county.Sheffield is a city.Lancaster is a city.Lancashire is a county.Lancaster has a port.What is Leeds?Leeds isIn Yorkshire.Sheffield isIn Yorkshire.Lancaster isIn Lancashire.What isIn Yorkshire?If X isIn Y then Y contains X.What contains Leeds?Yorkshire borders Lancashire.If X borders Y then Y borders X.What borders Lancashire?Yorkshire isIn UnitedKingdom.Lancashire isIn UnitedKingdom.TransitivityIf X isIn Y and Y isIn Z then X isIn Z.If X contains Y and Y contains Z then X contains Z``` using proper isIn```Leeds is a city.Yorkshire is a county.Sheffield is a city.Lancaster is a city.Lancashire is a county.Lancaster has a port.What is Leeds?Leeds is spatiallyWithin Yorkshire.Sheffield is spatiallyWithin Yorkshire.Lancaster is spatiallyWithin Lancashire.What is spatiallyWithin Yorkshire?If X is spatiallyWithin Y then Y spatiallyContains X.What spatiallyContains Leeds?Yorkshire borders Lancashire.If X borders Y then Y borders X.What borders Lancashire?Yorkshire is spatiallyWithin UnitedKingdom.Lancashire is spatiallyWithin UnitedKingdom.TransitivityIf X is spatiallyWithin Y and Y is spatiallyWithin Z then X is spatiallyWithin Z.If X spatiallyContains Y and Y spatiallyContains Z then X spatiallyContains ZWhat is spatiallyWithin UnitedKingdom?``` Adding more......```Pudsey is spatiallyWithin Leeds.Kirkstall is spatiallyWithin Leeds.Meanwood is spatiallyWithin Leeds.Roundhay is spatiallyWithin Leeds.Scarcroft is spatiallyWithin Leeds.``` and more```UnitedKingdom isPartOf Europe.UnitedKingdom is a country.If X isPartOf Y and X spatiallyContains Z then Z isPartOf Y.What isPartOf Europe?```
###Code
and more
```
If X spatiallyContains Y and X is a city then Y is a place and Y is a cityPart.
Every city is a place.
What is a place.
```
and more
```
UK isPartOf Europe.
UK is sameAs UnitedKingdom.
If X has a port then X borders Water.
What borders Water?
```
###Output
_____no_output_____ |
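###Markdown
The transitivity rule above ("If X is spatiallyWithin Y and Y is spatiallyWithin Z then X is spatiallyWithin Z") can be checked with a few lines of plain Python. The sketch below is illustrative only; it computes the transitive closure of a handful of the spatiallyWithin facts listed above.
###Code
# Illustrative sketch: transitive closure of a few spatiallyWithin facts
within = {
    ('Leeds', 'Yorkshire'), ('Sheffield', 'Yorkshire'),
    ('Lancaster', 'Lancashire'),
    ('Yorkshire', 'UnitedKingdom'), ('Lancashire', 'UnitedKingdom'),
    ('Pudsey', 'Leeds'), ('Kirkstall', 'Leeds'), ('Roundhay', 'Leeds'),
}

def transitive_closure(pairs):
    closure = set(pairs)
    changed = True
    while changed:
        changed = False
        for a, b in list(closure):
            for c, d in list(closure):
                if b == c and (a, d) not in closure:
                    closure.add((a, d))
                    changed = True
    return closure

# Everything inferred to be spatiallyWithin the United Kingdom
print(sorted(a for a, b in transitive_closure(within) if b == 'UnitedKingdom'))
###Output
_____no_output_____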
Projects in Python with Scikit-Learn- XGBoost- Pandas- Statsmodels- etc./Black Friday purchase (Bagging & Stacked methods).ipynb | ###Markdown
Data description & Problem statement: The dataset here is a sample of the transactions made in a retail store. The store wants to better understand customer purchase behaviour across different products. Specifically, this is a regression problem where we are trying to predict the dependent variable (the amount of purchase) with the help of the information contained in the other variables. The data set has 550067 rows and 11 variables. Workflow:- Load the dataset, and define the required functions (e.g. for detecting the outliers)- Data Cleaning/Wrangling: Manipulate outliers, missing data or duplicate values, Encode categorical variables, etc. - Split data into training & test parts (utilize the training part for training & hyperparameter tuning of model, and test part for the final evaluation of model) Model Training:- Build the ensemble method (i.e. Bagging model and Stacked model) individually Model Evaluation: - Evaluate the Ensemble models with Cross-Validation technique, by calculating: - r2 (coefficient of determination) - Lift chart - RMSE
###Code
import sklearn
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
%matplotlib inline
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
df=pd.read_csv('train.csv')
# To Shuffle the data:
np.random.seed(42)
df=df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
df.info()
df.fillna(999, inplace=True)
# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df, name):
le = preprocessing.LabelEncoder()
df[name] = le.fit_transform(df[name])
return le.classes_
# for i in ['User_ID', 'Product_ID', 'Age', 'Occupation', 'City_Category', 'Stay_In_Current_City_Years']:
for i in ['User_ID', 'Product_ID', 'Age', 'Occupation', 'City_Category', 'Stay_In_Current_City_Years',
'Gender', 'Marital_Status', 'Product_Category_1', 'Product_Category_2', 'Product_Category_3' ]:
encode_text_index(df, i)
df.head(5)
X=df.drop(['Purchase'], axis=1)
y=np.log(df['Purchase'])
# We initially devide data into training & test folds: We do the Grid-Search only on training part
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, shuffle=True)
# Re-scaling & Polynomial Interactions:
from sklearn.preprocessing import StandardScaler, MinMaxScaler, PolynomialFeatures
#scalor_X=MinMaxScaler().fit(pd.DataFrame(X_train))
#X_train=scalor_X.transform(pd.DataFrame(X_train))
#X_test=scalor_X.transform(pd.DataFrame(X_test))
scaler_y=MinMaxScaler().fit(pd.DataFrame(y_train))
y_train=scaler_y.transform(pd.DataFrame(y_train))
y_test=scaler_y.transform(pd.DataFrame(y_test))
###Output
_____no_output_____
###Markdown
1) Bagging meta-estimator with XGBoost:
###Code
import xgboost
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score, KFold
from sklearn.ensemble import BaggingRegressor
model=XGBRegressor(gamma= 0, max_depth= 3, min_child_weight= 1)
bag=BaggingRegressor(model, n_estimators = 100, max_samples=0.9, max_features=0.9, random_state=42)
kfold=KFold(n_splits=4, shuffle=True, random_state=42)
scores=cross_val_score(bag, X_train, y_train, cv=kfold)
print(scores, "\n")
print("AUC Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
# Grid-Search for the best Bagging parameters:
from sklearn.model_selection import GridSearchCV
param={'max_samples':[0.5, 0.7, 0.8, 0.9, 1], 'max_features':[0.5, 0.7, 0.9, 1]}
kfold=KFold(n_splits=4, shuffle=True, random_state=42)
grid_search=GridSearchCV(BaggingRegressor(model, n_estimators = 50, random_state=42), param, cv=kfold, n_jobs=-1)
grid_search.fit(X_train, y_train)
# Grid-Search report:
G=pd.DataFrame(grid_search.cv_results_).sort_values("rank_test_score")
G.head(3)
print("Best parameters: ", grid_search.best_params_)
print("Best validation accuracy: %0.2f (+/- %0.2f)" % (np.round(grid_search.best_score_, decimals=2), np.round(G.loc[grid_search.best_index_,"std_test_score" ], decimals=2)))
print("Test score: ", np.round(grid_search.score(X_test, y_test),2))
# Plot the Lift Chart:
# Regression chart.
def chart_regression(pred,y,sort=True):
t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()})
if sort:
t.sort_values(by=['y'],inplace=True)
a = plt.plot(t['y'].tolist(),label='expected')
b = plt.plot(t['pred'].tolist(),label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
pred=grid_search.predict(X_test)
chart_regression(pred.flatten(), np.array(y_test), sort=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
pred_inv=scaler_y.inverse_transform(pd.DataFrame(pred))
y_test_inv=scaler_y.inverse_transform(y_test)
rmse = sqrt(mean_squared_error(np.e**y_test_inv, np.e**pred_inv))
print('Test rmse: ', rmse)
###Output
Test rmse: 2.76164627400196
###Markdown
2) Stacked Regressor with XGBoost:
###Code
import xgboost
from xgboost import XGBRegressor
from mlxtend.regressor import StackingRegressor
from sklearn.linear_model import Lasso, Ridge, ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score, KFold
reg_1=XGBRegressor(max_depth= 12, min_child_weight=10, subsample=0.7, n_estimators=100)
reg_2=XGBRegressor(max_depth= 8, min_child_weight=10, subsample=0.7, n_estimators=200)
reg_3=XGBRegressor(max_depth= 6, min_child_weight=10, subsample=0.7, n_estimators=300)
meta_reg=XGBRegressor(max_depth= 12, min_child_weight=5, subsample=0.7, n_estimators=200)
stack=StackingRegressor(regressors=[reg_1, reg_2, reg_3], meta_regressor= meta_reg, use_features_in_secondary=True)
scores=cross_val_score(stack, X_train, y_train)
print(scores, "\n")
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
stack.fit(X_train, y_train)
# Plot the Lift Chart:
# Regression chart.
def chart_regression(pred,y,sort=True):
t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()})
if sort:
t.sort_values(by=['y'],inplace=True)
a = plt.plot(t['y'].tolist(),label='expected')
b = plt.plot(t['pred'].tolist(),label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
pred=stack.predict(X_test)
chart_regression(pred.flatten(), np.array(y_test), sort=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
pred_inv=scaler_y.inverse_transform(pd.DataFrame(pred))
y_test_inv=scaler_y.inverse_transform(y_test)
rmse = sqrt(mean_squared_error(np.e**y_test_inv, np.e**pred_inv))
print('Test rmse: ', rmse)
# Grid-Search for the best model parameters:
from sklearn.model_selection import GridSearchCV
# The meta-regressor here is an XGBRegressor, so tune its parameters
# (a 'meta-ridge__alpha' grid would only apply to a Ridge meta-regressor):
param={'meta-xgbregressor__max_depth':[2, 3, 4, 5], 'meta-xgbregressor__min_child_weight':[1, 2, 3, 4],
       'meta-xgbregressor__gamma': [ 0, 0.01, 0.05, 0.1]}
kfold=KFold(n_splits=5, shuffle=True, random_state=42)
grid_search=GridSearchCV(StackingRegressor(regressors=[reg_1, reg_2, reg_3], meta_regressor= meta_reg), param, cv=kfold)
grid_search.fit(X_train, y_train)
# Grid-Search report:
G=pd.DataFrame(grid_search.cv_results_).sort_values("rank_test_score")
G.head(3)
print("Best parameters: ", grid_search.best_params_)
print("Best validation accuracy: %0.2f (+/- %0.2f)" % (np.round(grid_search.best_score_, decimals=2), np.round(G.loc[grid_search.best_index_,"std_test_score" ], decimals=2)))
print("Test score: ", np.round(grid_search.score(X_test, y_test),2))
# Plot the Lift Chart:
# Regression chart.
def chart_regression(pred,y,sort=True):
t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()})
if sort:
t.sort_values(by=['y'],inplace=True)
a = plt.plot(t['y'].tolist(),label='expected')
b = plt.plot(t['pred'].tolist(),label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
pred=grid_search.predict(X_test)
chart_regression(pred.flatten(), np.array(y_test), sort=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
pred_inv=scaler_y.inverse_transform(pd.DataFrame(pred))
y_test_inv=scaler_y.inverse_transform(y_test)
rmse = sqrt(mean_squared_error(np.e**y_test_inv, np.e**pred_inv))
print('Test rmse: ', rmse)
###Output
Test rmse: 2.640357548516481
|
ArtistGroups.ipynb | ###Markdown
Fix Merger IDs
###Code
# NOTE: pandas/uuid imports added for this fragment; helpers such as timestat,
# getFlatList, manDB, mam, df and dfNameData are assumed to be defined in
# earlier cells of this project's notebooks.
from pandas import concat, DataFrame, Series
from uuid import uuid4

def fixMergerIDs(df, mam):
dbMaxLen = {db: df[db].apply(lambda x: len(x) if x is not None else 0).max() for db in artistIDToCleanName}
mergedRows = concat([dbData[dbData.apply(lambda x: len(x) if x is not None else 0) == dbMaxLen[db]] for db,dbData in df.iteritems() if db in artistIDToCleanName]).index.drop_duplicates()
idxs = []
for idx,row in df.loc[mergedRows].iterrows():
mergeData = mam.getArtistDataByName(row["ArtistName"])
if mergeData is None:
print(row["ArtistName"])
idxs.append(idx)
continue
print(row["ArtistName"])
for db,dbMergeData in mergeData.items():
mergeID = dbMergeData["ID"]
currentID = row[db]
print("\t{0: <16}{1} --> {2}".format(db,currentID,mergeID))
df.loc[idx,db] = mergeID
#mme.saveData(manualEntries=df, local=False)
def isMerger(row):
return sum([mam.getArtistDataByMergerID(dbID) is not None for dbID in row.values]) > 0
ts = timestat("Find Merged Artist Data")
mergedArtists = df.apply(isMerger, axis=1)
mergedIDXs = df[mergedArtists].index
ts.stop()
dfNameData[dfNameData["ArtistName"] == "Alice Cooper"]
class artistGroup:
def __init__(self, key, debug=False):
self.key = key
self.debug = debug
############################################################################
# General And Diagnostic
############################################################################
self.groupType = None
self.terminal = True # Becomes False If adding an artistGroup To groups()
self.mmeID = None
############################################################################
# Database Matches
############################################################################
self.dbIDs = {}
############################################################################
# Artist Group Names
############################################################################
### Will likely be an ALL CAPS version of the assigned name
self.searchName = None
### My Choice of Group Name (very arbitrary. must be in stylized or latin names)
self.assignedName = None
### Stylized Names (any weird way group's name is written)
self.stylizedNames = []
### Latin Names (Ascii if possible, something readable in English)
self.latinNames = []
### Renames (Mapping between name and one of names in stylized or latin names)
self.dbRenames = {}
self.genRenames = {}
### A collection of other ArtistGroup items
self.groups = {}
################################################################################################################################
# General
################################################################################################################################
def show(self):
print("{0: <20}: {1}".format("Key", self.key))
print("{0: <20}: {1}".format("Assigned Name", self.assignedName))
print("{0: <20}: {1}".format("Search Name", self.searchName))
print("{0: <20}: {1}".format("DB Matches", self.dbIDs))
print("{0: <20}: {1}".format("DB Renames", self.dbRenames))
print("{0: <20}: {1}".format("General Renames", self.genRenames))
################################################################################################################################
# Getters and Setters
################################################################################################################################
def getKey(self):
return self.key
def setDBIDs(self, dbIDs):
self.dbIDs = dbIDs
def setAssignedName(self, assignedName):
self.assignedName = assignedName
self.searchName = assignedName.upper()
def setDBRenames(self, dbRenames):
self.dbRenames = dbRenames
def setGenRenames(self, genRenames):
self.genRenames = genRenames
def addGroup(self, ag):
if isinstance(ag, artistGroup):
            self.groups[ag.getKey()] = ag  # call the accessor so the dict key is the group's key string
def createArtistGroupData(row, idx, manDB, mergedArtists):
artistName = row["ArtistName"]
artistDBData = {idx: idxData for idx,idxData in row.iteritems() if isinstance(idxData,tuple)}
dbNames = {db: dbData[0] for db,dbData in artistDBData.items() if dbData[0] not in ["NotInDB", "NotDigit"]}
dbIDs = {db: dbData[1] for db,dbData in artistDBData.items()}
isMerged = {db: dbData[2] for db,dbData in artistDBData.items() if dbData[2] is True}
isMerged = isMerged if len(isMerged) > 0 else None
if len(dbNames) == 0:
print(idx,'\t',artistName)
ag = artistGroup(key=key)
ag.mmeID = idx
ag.terminal = not isMerged
ag.setAssignedName(artistName)
unMerged = mergedArtists.isin([artistName]).sum() == 0
if unMerged:
dbRenames = {db: {dbName: manDB.renamed(dbName)} for db,dbName in dbNames.items()}
dbRenames = {db: dbRename for db,dbRename in dbRenames.items() if list(dbRename.keys()) != list(dbRename.values())}
genRenames = {rename: artistName for rename in manInvData.get(artistName, {}) if {rename: artistName} not in dbRenames.values()}
else:
dbRenames = {}
genRenames = {}
ag.setDBRenames(dbRenames)
ag.setGenRenames(genRenames)
ag.setDBIDs(dbIDs)
return ag
indivAGS = {}
mergedAGS = {}
N = dfNameData.shape[0]
ts = timestat("Creating Artist Groups For {0} \'Artists\'".format(N))
mergedArtists = df.loc[mergedIDXs]["ArtistName"]
for i,(idx,row) in enumerate(dfNameData.iterrows()):
if (i+1) % 50000 == 0 or (i+1) == 10000:
ts.update(n=i+1,N=N)
key = str(uuid4())
data = createArtistGroupData(row, idx, manDB, mergedArtists)
if idx in mergedIDXs:
mergedAGS[key] = data
else:
indivAGS[key] = data
print("{0: <30}{1: >6}".format("All Artists", dfNameData.shape[0]))
print("{0: <30}{1: >6}".format("Individual Artists", len(indivAGS)))
print("{0: <30}{1: >6}".format("Merged Artists", len(mergedAGS)))
ts.stop()
print("{0: <30}{1: >6}".format("All Artists", dfNameData.shape[0]))
print("{0: <30}{1: >6}".format("Individual Artists", len(indivAGS)))
print("{0: <30}{1: >6}".format("Merged Artists", len(mergedAGS)))
ts = timestat("Split Renames By Known DB Renames")
manDBDataRemaining = manDBData
ags = {"Individual": indivAGS, "Merged": mergedAGS}
for agType,agData in ags.items():
dbRenameData = [item for item in getFlatList([ag.dbRenames.values() for key,ag in agData.items()]) if len(item) > 0]
dbRenameData = {k: v for item in dbRenameData for k,v in item.items()}
manDBDataTemp = DataFrame(manDBDataRemaining, columns=["PermReplace"]).join(Series(dbRenameData, name="dbRename"))
manDBDataRemaining = manDBDataTemp[manDBDataTemp["dbRename"].isna()]["PermReplace"]
manDBDataDBRename = manDBDataTemp[manDBDataTemp["dbRename"].notna()]["PermReplace"]
print("{0: <30}{1: >6}".format("Perm Renames", manDBDataTemp.shape[0]))
print("{0: <30}{1: >6}".format("Known DB Renames", manDBDataDBRename.shape[0]))
print("{0: <30}{1: >6}".format("Remaining Renames", manDBDataRemaining.shape[0]))
ts.stop()
ts = timestat("Split Renames By Known General Renames")
genRenameData = [ag.genRenames for key,ag in indivAGS.items() if len(ag.genRenames) > 0]
genRenameData = {k: v for item in genRenameData for k,v in item.items()}
manDBDataTemp = DataFrame(manDBDataRemaining, columns=["PermReplace"]).join(Series(genRenameData, name="genRename"))
manDBDataRemaining = manDBDataTemp[manDBDataTemp["genRename"].isna()]["PermReplace"]
manDBDataGenRename = manDBDataTemp[manDBDataTemp["genRename"].notna()]["PermReplace"]
print("{0: <30}{1: >6}".format("(Perm-DB) Renames", manDBDataTemp.shape[0]))
print("{0: <30}{1: >6}".format("Known Gen Renames", manDBDataGenRename.shape[0]))
print("{0: <30}{1: >6}".format("Remaining Renames", manDBDataRemaining.shape[0]))
ts.stop()
ts = timestat("Split Renames By Merged Renames")
manDBDataTemp = manDBDataRemaining
manDBDataMergeRename = manDBDataTemp[manDBDataTemp.isin(df.loc[mergedIDXs]["ArtistName"])]
manDBDataRemaining = manDBDataTemp[~manDBDataTemp.isin(df.loc[mergedIDXs]["ArtistName"])]
ts.stop()
print("{0: <30}{1: >6}".format("(Perm-DB-Merge) Renames", manDBDataTemp.shape[0]))
print("{0: <30}{1: >6}".format("Known Merge Renames", manDBDataMergeRename.shape[0]))
print("{0: <30}{1: >6}".format("Not Merge Renames", manDBDataRemaining.shape[0]))
manDBDataRemaining[manDBDataRemaining.isin(["Dave Matthews"])]
manDBDataMergeRename
###Output
_____no_output_____ |
notebooks/T6 - 1 - Distancias-Colab.ipynb | ###Markdown
We clone the repository to obtain the datasets
###Code
!git clone https://github.com/joanby/python-ml-course.git
###Output
_____no_output_____
###Markdown
We grant access to our Drive
###Code
from google.colab import drive
drive.mount('/content/drive')
# Test it
!ls '/content/drive/My Drive'
from google.colab import files # To handle files and, for example, export them to your browser
import glob # To handle files and, for example, export them to your browser
from google.colab import drive # Mount your Google Drive
###Output
_____no_output_____
###Markdown
Distances
###Code
from scipy.spatial import distance_matrix
import pandas as pd
data = pd.read_csv("/content/python-ml-course/datasets/movies/movies.csv", sep=";")
data
movies = data.columns.values.tolist()[1:]
movies
dd1 = distance_matrix(data[movies], data[movies], p=1)
dd2 = distance_matrix(data[movies], data[movies], p=2)
dd10 = distance_matrix(data[movies], data[movies], p=10)
def dm_to_df(dd, col_name):
import pandas as pd
return pd.DataFrame(dd, index=col_name, columns=col_name)
dm_to_df(dd1, data["user_id"])
dm_to_df(dd2, data["user_id"])
dm_to_df(dd10, data["user_id"])
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.scatter(xs = data["star_wars"], ys = data["lord_of_the_rings"], zs=data["harry_potter"])
###Output
_____no_output_____
###Markdown
Linkages
###Code
df = dm_to_df(dd1, data["user_id"])
df
Z=[]
df[11]=df[1]+df[10]
df.loc[11]=df.loc[1]+df.loc[10]
Z.append([1,10,0.7,2]) # id1, id2, distance, n_elements_in_cluster -> new cluster 11
df
for i in df.columns.values.tolist():
df.loc[11][i] = min(df.loc[1][i], df.loc[10][i])
df.loc[i][11] = min(df.loc[i][1], df.loc[i][10])
df
df = df.drop([1,10])
df = df.drop([1,10], axis=1)
df
x = 2
y = 7
n = 12
df[n]=df[x]+df[y]
df.loc[n]=df.loc[x]+df.loc[y]
Z.append([x,y,df.loc[x][y],2]) # id1, id2, distance, n_elements_in_cluster -> new cluster 12
for i in df.columns.values.tolist():
df.loc[n][i] = min(df.loc[x][i], df.loc[y][i])
df.loc[i][n] = min(df.loc[i][x], df.loc[i][y])
df = df.drop([x,y])
df = df.drop([x,y], axis=1)
df
x = 5
y = 8
n = 13
df[n]=df[x]+df[y]
df.loc[n]=df.loc[x]+df.loc[y]
Z.append([x,y,df.loc[x][y],2]) # id1, id2, distance, n_elements_in_cluster -> new cluster 13
for i in df.columns.values.tolist():
df.loc[n][i] = min(df.loc[x][i], df.loc[y][i])
df.loc[i][n] = min(df.loc[i][x], df.loc[i][y])
df = df.drop([x,y])
df = df.drop([x,y], axis=1)
df
x = 11
y = 13
n = 14
df[n]=df[x]+df[y]
df.loc[n]=df.loc[x]+df.loc[y]
Z.append([x,y,df.loc[x][y],2]) # id1, id2, distance, n_elements_in_cluster -> new cluster 14
for i in df.columns.values.tolist():
df.loc[n][i] = min(df.loc[x][i], df.loc[y][i])
df.loc[i][n] = min(df.loc[i][x], df.loc[i][y])
df = df.drop([x,y])
df = df.drop([x,y], axis=1)
df
x = 9
y = 12
z = 14
n = 15
df[n]=df[x]+df[y]
df.loc[n]=df.loc[x]+df.loc[y]
Z.append([x,y,df.loc[x][y],3]) # id1, id2, distance, n_elements_in_cluster -> new cluster 15
for i in df.columns.values.tolist():
df.loc[n][i] = min(df.loc[x][i], df.loc[y][i], df.loc[z][i])
df.loc[i][n] = min(df.loc[i][x], df.loc[i][y], df.loc[i][z])
df = df.drop([x,y,z])
df = df.drop([x,y,z], axis=1)
df
x = 4
y = 6
z = 15
n = 16
df[n]=df[x]+df[y]
df.loc[n]=df.loc[x]+df.loc[y]
Z.append([x,y,df.loc[x][y],3]) # id1, id2, distance, n_elements_in_cluster -> new cluster 16
for i in df.columns.values.tolist():
df.loc[n][i] = min(df.loc[x][i], df.loc[y][i], df.loc[z][i])
df.loc[i][n] = min(df.loc[i][x], df.loc[i][y], df.loc[i][z])
df = df.drop([x,y,z])
df = df.drop([x,y,z], axis=1)
df
x = 3
y = 16
n = 17
df[n]=df[x]+df[y]
df.loc[n]=df.loc[x]+df.loc[y]
Z.append([x,y,df.loc[x][y],2]) # id1, id2, distance, n_elements_in_cluster -> new cluster 17
for i in df.columns.values.tolist():
df.loc[n][i] = min(df.loc[x][i], df.loc[y][i])
df.loc[i][n] = min(df.loc[i][x], df.loc[i][y])
df = df.drop([x,y])
df = df.drop([x,y], axis=1)
df
Z
###Output
_____no_output_____
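###Markdown
The merge steps above all repeat the same single-linkage pattern: the new cluster's row and column are the element-wise minimum of the merged rows/columns, the merge is recorded in Z, and the old rows/columns are dropped. A small helper like the sketch below (illustrative only; `merge_clusters` is not part of the original notebook) captures that pattern.
###Code
def merge_clusters(df, Z, x, y, new_id, n_members):
    # Record the merge before dropping: distance between x and y, cluster size
    Z.append([x, y, df.loc[x][y], n_members])
    # Single linkage: distance from the new cluster to every other cluster is
    # the minimum of the members' distances
    df[new_id] = df[[x, y]].min(axis=1)
    df.loc[new_id] = df.loc[[x, y]].min(axis=0)
    df = df.drop([x, y])
    df = df.drop([x, y], axis=1)
    return df

# For example, the first merge of clusters 1 and 10 into cluster 11 would be:
# df = merge_clusters(df, Z, 1, 10, 11, 2)
###Output
_____no_output_____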
###Markdown
Hierarchical clustering
###Code
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
movies
data[movies]
Z = linkage(data[movies], "ward")
Z
plt.figure(figsize=(25,10))
plt.title("Dendrograma jerárquico para el Clustering")
plt.xlabel("ID de los usuarios de Netflix")
plt.ylabel("Distancia")
dendrogram(Z, leaf_rotation=90., leaf_font_size=10.0)
plt.show()
Z = linkage(data[movies], "average")
Z
plt.figure(figsize=(25,10))
plt.title("Dendrograma jerárquico para el Clustering")
plt.xlabel("ID de los usuarios de Netflix")
plt.ylabel("Distancia")
dendrogram(Z, leaf_rotation=90., leaf_font_size=10.0)
plt.show()
data[movies]
Z = linkage(data[movies], "complete")
Z
plt.figure(figsize=(25,10))
plt.title("Dendrograma jerárquico para el Clustering")
plt.xlabel("ID de los usuarios de Netflix")
plt.ylabel("Distancia")
dendrogram(Z, leaf_rotation=90., leaf_font_size=10.0)
plt.show()
Z = linkage(data[movies], method="single", metric="cosine")
Z
plt.figure(figsize=(25,10))
plt.title("Dendrograma jerárquico para el Clustering")
plt.xlabel("ID de los usuarios de Netflix")
plt.ylabel("Distancia")
dendrogram(Z, leaf_rotation=90., leaf_font_size=10.0)
plt.show()
###Output
_____no_output_____ |
Short_term-SVM.ipynb | ###Markdown
Importing LibrariesTo pull data from a CSV file, you must use the reader function to generate a reader object. NumPy is a package in Python used for Scientific Computing. NumPy package is used to perform different operations. Sklearn is a simple and efficient tool for data mining and data analysis built on numpy, scipy and matplotlib. Matplotlib is a plotting library for the Python programming language and its numerical mathematics extension NumPy.
###Code
import csv
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
The SVR module imported from sklearn.svm carries out Support Vector Regression under the Support Vector Machine sub-library of sklearn. There are three different implementations of Support Vector Regression: SVR, NuSVR and LinearSVR. LinearSVR provides a faster implementation than SVR but only considers linear kernels, while NuSVR implements a slightly different formulation than SVR and LinearSVR. Initialising the 'dates' and 'prices' listsA list is a data structure in Python that is a mutable, or changeable, ordered sequence of elements. Each element or value that is inside of a list is called an item.
###Code
dates = []
prices = []
###Output
_____no_output_____
###Markdown
Defining the get_data() functionStep 1: Read the CSV file Step 2: Skip the column names of the CSV file Step 3: Read each row Step 4: From each row, add element of 1st column to 'dates' list Step 5: From each row, add element of 2nd column to 'prices' list
###Code
def get_data(filename):
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
next(csvFileReader)
for row in csvFileReader:
dates.append(int(row[0].split('-')[0]))
prices.append(float(row[1]))
return
###Output
_____no_output_____
###Markdown
Defining the predict_price() functionStep 1: Convert the 'dates' list to a nx1 matrix. Step 2: Defining the SVR models. Making the radial basis function (rbf), linear and polynomial kernels for the SVR model. Step 3: Fitting the data points in the model. When this function is called, it will:Step 4: Scatter plot the initial data points in black. Step 5: Plot the best-fit line by the RBF kernel in red. Step 6: Plot the best-fit line by the linear kernel in green. Step 7: Plot the best-fit line by the polynomial kernel in blue. Step 8: Return as lines made by the: a. RBF kernel b. Linear kernel c. Polynomial kernel
###Code
def predict_price(dates, prices, x):
dates = np.reshape(dates,(len(dates), 1))
svr_rbf = SVR(kernel= 'rbf', C= 1e3, gamma= 0.1)
svr_lin = SVR(kernel= 'linear', C= 1e3)
svr_poly = SVR(kernel= 'poly', C= 1e3, degree= 2)
svr_rbf.fit(dates, prices)
svr_lin.fit(dates, prices)
svr_poly.fit(dates, prices)
plt.scatter(dates, prices, color= 'black', label= 'Data')
plt.plot(dates, svr_rbf.predict(dates), color= 'red', label= 'RBF model')
plt.plot(dates,svr_lin.predict(dates), color= 'green', label= 'Linear model')
plt.plot(dates,svr_poly.predict(dates), color= 'blue', label= 'Polynomial model')
plt.xlabel('Date')
plt.ylabel('Price')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
    x = np.array(x).reshape(-1, 1)  # scikit-learn expects a 2-D array for predict
    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]
###Output
_____no_output_____
###Markdown
Getting and reading the CSV fileActually calling the get_data() function.
###Code
get_data('/Users/rounakbose/Git Local/goog.csv')
print ("Dates- ", dates)
print ("Prices- ", prices)
###Output
Dates- [26, 25, 24, 23, 22, 19, 18, 17, 16, 12, 11, 10, 9, 8, 5, 4, 3, 2, 1]
Prices- [708.58, 700.01, 688.92, 701.45, 707.45, 695.03, 710.0, 699.0, 692.98, 690.26, 675.0, 686.86, 672.32, 667.85, 703.87, 722.81, 770.22, 784.5, 750.46]
###Markdown
Showing the results1. The predicted stock price for a new date, as calculated by the:a. RBF kernelb. Linear kernelc. Polynomial kernel2. The scatter plot.
###Code
predicted_price = predict_price(dates, prices, 29)
print ("\nThe stock open price for 29th Feb is:")
print ("RBF kernel: $", str(predicted_price[0]))
print ("Linear kernel: $", str(predicted_price[1]))
print ("Polynomial kernel: $", str(predicted_price[2]))
###Output
_____no_output_____ |
diabetes-detection.ipynb | ###Markdown
**Importing Data**
###Code
df = pd.read_csv('Diabities-210331-154610.csv')
# Snapshot of the dataframe
df.head()
df.info()
df.isnull().values.any()
# List the columns
df.columns
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
print(X.shape)
print(y.shape)
###Output
(768,)
###Markdown
**Data Split**
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
num_rows_train = len(y_train)
print('The number of entries in the train split are:', num_rows_train)
num_rows_test = len(y_test)
print('The number of entries in the test split are:', num_rows_test)
###Output
The number of entries in the train split are: 614
The number of entries in the test split are: 154
###Markdown
**Train the model(s)** RandomForestClassifier
###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc_random_forest = round(accuracy_score(y_pred, y_test) * 100, 2)
print("Accuracy:", acc_random_forest, "%")
###Output
Accuracy: 79.22 %
###Markdown
LogisticRegression
###Code
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(solver = 'lbfgs', max_iter = 1000)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc_log_reg = round(accuracy_score(y_pred, y_test) * 100, 2)
print("Accuracy:", acc_log_reg, "%")
###Output
Accuracy: 82.47 %
###Markdown
K-Nearest Neighbors Classifier
###Code
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
acc_knn = round(accuracy_score(y_pred, y_test) * 100, 2)
print("Accuracy:", acc_knn, "%")
###Output
Accuracy: 75.32 %
|
Doug/ThalwegSalinityVideo.ipynb | ###Markdown
Working Toward a Daily Updated Thalweg Salinity Contours Video
###Code
from importlib import reload
import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
from salishsea_tools import nc_tools
from salishsea_tools.nowcast import figures
%matplotlib inline
title_font = {
'fontname': 'Bitstream Vera Sans', 'size': '15', 'color': 'black',
'weight': 'medium'
}
axis_font = {'fontname': 'Bitstream Vera Sans', 'size': '13'}
grid_T_d = nc.Dataset('../../SalishSea/nowcast/24oct15/SalishSea_1d_20151024_20151024_grid_T.nc')
colours = {
'figure': {
'facecolor': '#2B3E50', # salishsea site Superhero theme background
},
'cbar': {
'label': 'white',
'tick labels': 'white',
},
}
# %load -n figures.thalweg_salinity
def thalweg_salinity(
grid_T_d,
thalweg_pts_file='../../../bathymetry/thalweg_working.txt',
salinity_levels = [26, 27, 28, 29, 30, 30.2, 30.4, 30.6, 30.8, 31, 32, 33, 34],
cmap='hsv',
colours=colours,
figsize=(20, 8),
):
thalweg_pts = np.loadtxt(thalweg_pts_file, delimiter=' ', dtype=int)
x, z = np.meshgrid(
np.arange(thalweg_pts.shape[0]), -grid_T_d.variables['deptht'][:])
salinity = grid_T_d.variables['vosaline']
masked_salinity = np.ma.masked_values(
salinity[:][0, :, thalweg_pts[:, 0], thalweg_pts[:, 1]], 0)
fig, ax = plt.subplots(1, 1, figsize=figsize)
fig.set_facecolor(colours['figure']['facecolor'])
mesh = ax.contourf(
x, z, masked_salinity.transpose(), salinity_levels, cmap=cmap, extend='both')
cbar = fig.colorbar(mesh, ax=ax)
cbar.set_ticks(salinity_levels)
cbar.set_label(
'Practical Salinity [psu]', color=colours['cbar']['label'], **axis_font)
cbar.ax.axes.tick_params(labelcolor=colours['cbar']['tick labels'])
timestamp = nc_tools.timestamp(grid_T_d, 0)
ax.set_title(
'Salinity field along thalweg: ' +
timestamp.format('DD-MMM-YYYY'),
**title_font)
ax.set_ylabel('Depth [m]', **axis_font)
ax.set_xlabel('Position along Thalweg', **axis_font)
# axis_colors(ax, 'white')
figures.axis_colors(ax, 'white')
ax.set_axis_bgcolor('burlywood')
########################
#add_bathy(x, thalweg_pts, ax)
########################
return fig, cbar
fig, cbar = thalweg_salinity(grid_T_d, thalweg_pts_file='../../tools/bathymetry/thalweg_working.txt', colours=colours)
cbar.ax.axes.tick_params()
pts = np.loadtxt('../../tools/bathymetry/thalweg_working.txt', delimiter=' ', dtype=int)
print(pts.shape, pts.size)
salinity = grid_T_d.variables['vosaline']
import matplotlib
matplotlib.figure.Figure.set_facecolor()
matplotlib.axes.Axes.contourf()
reload(figures)
fig = figures.thalweg_salinity(grid_T_d, thalweg_pts_file='../../tools/bathymetry/thalweg_working.txt')
figures.thalweg_salinity()
###Output
_____no_output_____ |
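###Markdown
The end goal in the title is a daily-updated video. Below is a rough sketch of how per-day figures could be stitched into a movie with matplotlib's ffmpeg writer; the results-file layout, output name, and the frame-drawing step are assumptions/placeholders, and ffmpeg must be installed.
###Code
import glob
import matplotlib.animation as animation

# Assumed layout: one daily-averaged grid_T results file per nowcast day
result_files = sorted(glob.glob('../../SalishSea/nowcast/*/SalishSea_1d_*_grid_T.nc'))

writer = animation.FFMpegWriter(fps=6)
anim_fig = plt.figure(figsize=(20, 8))
with writer.saving(anim_fig, 'thalweg_salinity.mp4', 100):
    for path in result_files:
        anim_fig.clf()
        day_grid_T = nc.Dataset(path)
        # ... redraw the thalweg salinity contours on anim_fig for this day ...
        writer.grab_frame()
###Output
_____no_output_____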
sample/optimal_control_sample.ipynb | ###Markdown
Solving a nonlinear optimal control problem numerically. References: - CasADi Docs [8. Optimal control with CasADi](https://web.casadi.org/docs/a-simple-test-problem)- CasADi [direct_single_shooting.py](https://github.com/casadi/casadi/blob/master/docs/examples/python/direct_single_shooting.py) Example problem: consider driving the Van der Pol oscillator to the origin:$$\text{minimize}_{x(\cdot) \in \mathbb{R}^2, u(\cdot) \in \mathbb{R}} \; \; \int_{t=0}^T (x_0^2 + x_1^2 + u^2) dt \\ \text{subject to} \; \; \begin{cases} \dot{x}_0 = (1 - x_1^2)x_0 - x_1 + u \\ \dot{x}_1 = x_0 \\ -1.0 \leq u \leq 1.0, x_1 \geq -0.25 \end{cases} \text{for} \; 0 \leq t \leq T \\ x_0(0) = 0, \; x_1(0) = 1$$where $T = 10$.
###Code
######## Packages ########
from casadi import *
import matplotlib.pyplot as plt
##########################
###Output
_____no_output_____ |
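###Markdown
The cell above only loads the packages. A rough sketch of a direct single shooting transcription of the problem stated above, following the structure of the referenced `direct_single_shooting.py` example (illustrative only, output not verified), could look like this:
###Code
T = 10.0   # time horizon
N = 20     # number of control intervals

# Model and stage cost from the problem statement
x = MX.sym('x', 2)
u = MX.sym('u')
xdot = vertcat((1 - x[1]**2)*x[0] - x[1] + u, x[0])
L = x[0]**2 + x[1]**2 + u**2

# Fixed-step RK4 integrator over one control interval
M = 4
DT = T/N/M
f = Function('f', [x, u], [xdot, L])
X0 = MX.sym('X0', 2)
U = MX.sym('U')
X, Q = X0, 0
for _ in range(M):
    k1, k1_q = f(X, U)
    k2, k2_q = f(X + DT/2*k1, U)
    k3, k3_q = f(X + DT/2*k2, U)
    k4, k4_q = f(X + DT*k3, U)
    X = X + DT/6*(k1 + 2*k2 + 2*k3 + k4)
    Q = Q + DT/6*(k1_q + 2*k2_q + 2*k3_q + k4_q)
F = Function('F', [X0, U], [X, Q], ['x0', 'p'], ['xf', 'qf'])

# NLP: the decision variables are the N piecewise-constant controls
w, lbw, ubw = [], [], []
g, lbg, ubg = [], [], []
J = 0
Xk = MX([0, 1])             # x_0(0) = 0, x_1(0) = 1
for k in range(N):
    Uk = MX.sym('U_' + str(k))
    w.append(Uk)
    lbw.append(-1.0)
    ubw.append(1.0)
    Fk = F(x0=Xk, p=Uk)
    Xk = Fk['xf']
    J = J + Fk['qf']
    g.append(Xk[1])         # path constraint x_1 >= -0.25 at interval ends
    lbg.append(-0.25)
    ubg.append(inf)

prob = {'f': J, 'x': vertcat(*w), 'g': vertcat(*g)}
solver = nlpsol('solver', 'ipopt', prob)
sol = solver(x0=[0.0]*N, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg)
print(sol['x'])
###Output
_____no_output_____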
Aufgaben/U_03-A_1.ipynb | ###Markdown
Exercise 1 a)
###Code
V_ra = 240*3.20 # m**3
n = 50 # persons
dV_sch = n*20e-3 # CO_2 volume flow rate in m**3/h
k_zul = k_inf = 1100e-6 # 1100 ppM
k_0 = k_au = 400e-6 # 400 ppM
dV_au = dV_sch/(k_zul-k_au) # m**3/h
dV_au
###Output
_____no_output_____
###Markdown
About $ 1430\,\frac{m^3}{h}$ of air is required
###Code
beta = dV_au/V_ra # in h**(-1)
beta
###Output
_____no_output_____
###Markdown
This corresponds to an air change rate of about 1.86 per hour. b)
###Code
t_beta = 1/beta
t_beta
###Output
_____no_output_____
###Markdown
The time constant is about half an hour.
###Code
from matplotlib import pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
lt = np.linspace(0,2.5,51)
df = pd.DataFrame(
{
't': lt,
'k': ((k_inf + (k_0-k_inf)*np.exp(-beta*lt))*1e6).round(2)
}
)
display(df.head().set_index('t').T)
ax = df.plot(x='t',y='k',label='$k=k(t)$')
ax.axhline(k_zul*1e6,c='r')# in ppM
ax.set(
xlim=(0,2.5), xlabel='Zeit $t$ in Stunden',
ylim=(0,1200),ylabel='Schadstoffkonzentration $k$ in ppM'
)
ax.grid(lw=0.5,c='k')
###Output
_____no_output_____
###Markdown
$$ 1100 ppM + (400-1100)ppM\cdot\mathrm{e}^{-\beta\,t} = 1050 ppM$$ $$ -700\,ppM \cdot\mathrm{e}^{-\beta\,t} = -50\,ppM$$ $$ \mathrm{e}^{-\beta\,t} = \dfrac{1}{14}$$ $$ t = -\dfrac{1}{\beta}\, \ln \left( \dfrac{1}{14} \right)$$
###Code
from math import log # this is the natural logarithm (ln)
-log(1/14)/beta
###Output
_____no_output_____
###Markdown
After about 1.4 hours the value $k=1050 ppM$ is reached.
###Code
ax = df.plot(x='t',y='k',label='$k=k(t)$')
ax.axhline(k_zul*1e6,c='r')# in ppM
ax.scatter(-log(1/14)/beta,1050,c='k')
ax.set(
xlim=(0,2.5), xlabel='Zeit $t$ in Stunden',
ylim=(0,1200),ylabel='Schadstoffkonzentration $k$ in ppM'
)
ax.grid(lw=0.5,c='k')
###Output
_____no_output_____ |
pead_accruals.ipynb | ###Markdown
Post Earnings Announcement Drift & Accruals anomalyThis notebook aims to examine if the [Accrual anomaly](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1793364) and [PEAD drift](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1510321) still exist today. Because of data availability, the timeframe is only from 1st Jan 2017- 1st Jan 2020. I've opted to exclude the months of COVID-19 due to extreme volatility in the market and the influx of liquidity introduced.The holding period for each stock will be monthly and the rebalancing is done at the end of each month. Thought process Part 1 - Figuring out data availability & data visualization1. In what timeframe are earnings available?2. Limit to stocks that have earnings in that timeframe3. Visualise PEAD effect on stocks Part 2 - Calculating accruals1. Calculate accruals for that time period - meaning know which columns contribute to accruals2. Calculate accruals as a portion of earnings Part 3 - Generating Positions for the time period1. Create a list of rebalancing dates for the time period2. Stitch together accruals & estimates, sort accordingly and get the tickers Part 4 - Backtesting1. To optimize speed, prepare a DataFrame with price data readily available Part 5 - Plot performance and benchmark against SPY1. Plot cumulative performance2. Plot monthly performance
###Code
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import pandas_datareader as wb
import dateutil.relativedelta
from pprint import pprint
plt.style.use('fast')
pd.options.mode.chained_assignment = None
%matplotlib inline
###Output
_____no_output_____
###Markdown
Part 1 - Figuring out data availability & data visualization
###Code
fundamentals = pd.read_csv('US_Financials.csv')
e_hist = pd.read_csv('US_earnings_History.csv')
e_hist['surprisePercent'] = e_hist['epsDifference'] / e_hist['epsEstimate'] * 100
e_hist.dropna(axis=0,subset=['epsActual','epsEstimate','surprisePercent'],inplace=True)
e_hist[["reportDate", "date"]] = e_hist[["reportDate", "date"]].apply(pd.to_datetime)
freq_table = e_hist.groupby('reportDate').count()[['epsActual']]
freq_table.plot(figsize=(15,6))
plt.title('Frequency of Earnings Announcements')
plt.xlabel('Annoucement Date')
plt.ylabel('Count of Announcements')
###Output
_____no_output_____
###Markdown
Quick eyeballing tells us that earnings data is not readily available prior to Q3 of 2016. Hence, we'll limit the data to 2017-2020.
###Code
end_date = datetime.strptime('2020-01-01', '%Y-%m-%d')
start_date = datetime.strptime('2017-01-01', '%Y-%m-%d')
e_hist = e_hist[(e_hist['reportDate'] >= start_date) & (e_hist['reportDate'] <= end_date)]
###Output
_____no_output_____
###Markdown
Now we want to check if the earnings data is complete, given that between 2017-2020 there should be approx 10-12 records of earnings.
###Code
valid_tickers = e_hist.groupby('Ticker').count()['reportDate'].reset_index()
min_records = valid_tickers['reportDate'].min()
max_records = valid_tickers['reportDate'].max()
print('Min No. of records:',min_records)
print('Max No. of records:',max_records)
###Output
Min No. of records: 1
Max No. of records: 13
###Markdown
This tells us that for some tickers, there is incomplete data. Here, we could choose to remove these tickers altogether or retain them for the sake of potentially holding them when the data is available. In this instance, to widen the universe for positions, I'll limit it to tickers with at least 5 records. (5 is just arbitrarily chosen)
###Code
tickers = valid_tickers[valid_tickers['reportDate'] >= 5]['Ticker']
e_hist_final = e_hist[e_hist['Ticker'].isin(tickers)]
e_hist_final.head()
###Output
_____no_output_____
###Markdown
Now to do some visualisations to see if the PEAD exists
###Code
## Plotting Function
def plot_hypothesis(ticker,e_hist_final,start_date,end_date):
ticker = ticker.upper()
tickers = [ticker]
df = pd.DataFrame()
for ticker in tickers:
df[ticker] = wb.DataReader(ticker,data_source='yahoo',start=start_date,end=end_date)['Adj Close']
idx = pd.date_range(start_date,end_date)
df = df.reindex(idx, method='ffill')
df.reset_index(inplace=True)
df.columns = ['Date','Adjusted_close']
df.dropna(inplace=True)
stock = e_hist_final[e_hist_final['Ticker']==ticker]
fig, ax1 = plt.subplots(figsize=(15,6))
ax2 = ax1.twinx()
ax1.plot(df['Date'], df['Adjusted_close'], 'black',label='{} Share Price'.format(ticker))
ax2.plot(stock['reportDate'], stock['surprisePercent'], 'blue',marker='o',linestyle='--',label='EPS Surprise %')
ax1.set_xlabel('Reported Date')
ax1.set_ylabel('{} Share Price'.format(ticker),color='black')
ax2.set_ylabel('EPS',color='black')
#plotting vertical & horizontal lines
plt.plot(df['Date'],[0 for i in range(len(df['Date']))],'black',linestyle='--',markersize=0.5,label='Zero line')
for point in stock['reportDate']:
plt.axvline(x=point,color='grey',alpha=0.5,linestyle=':')
plt.title('{} Post Earnings Drift Hypothesis'.format(ticker),color='black')
ax2.legend(loc='upper left')
ax1.legend(loc='lower left')
plt.show()
plot_hypothesis('EBAY',e_hist_final,start_date,end_date)
plot_hypothesis('NKE',e_hist_final,start_date,end_date)
#Some missing points for EPS due to data incompleteness
###Output
_____no_output_____
###Markdown
Part 2 - Calculating Accruals & selecting fundamental dataCalculating accruals as proportion of net income I've opted to use Cashflows to calculate accruals for ease of computing.Formulas as follows (taken from Sloan's paper) 1. ΔWC* = Income + Depreciation and Amortization* – Cash from Operating Activities* 2. ΔNOA* = Income – Cash from Operating Activities * – Cash from Investing Activities* 3. TACC* = Income – ΔCash*– Dividends4. Proportion of TACC to Net Income = TACC/Net Income (engineered feature)
###Code
#Using Cashflow to calculate Accruals
core = fundamentals[['Ticker','flag','date',
'totalCashflowsFromInvestingActivities',
'totalCashFromFinancingActivities',
'totalCashFromOperatingActivities',
'totalStockholderEquity',
'depreciation',
'dividendsPaid',
'netIncome_x',
]]
core.isna().sum()
core['totalCashflowsFromInvestingActivities'] = core.groupby('Ticker')['totalCashflowsFromInvestingActivities'].ffill()
core['totalCashFromFinancingActivities'] = core.groupby('Ticker')['totalCashFromFinancingActivities'].ffill()
core['totalCashFromOperatingActivities'] = core.groupby('Ticker')['totalCashFromOperatingActivities'].ffill()
core['totalStockholderEquity'] = core.groupby('Ticker')['totalStockholderEquity'].ffill()
core['depreciation'] = core.groupby('Ticker')['depreciation'].ffill()
core['dividendsPaid'] = core.groupby('Ticker')['dividendsPaid'].ffill()
core['netIncome_x'] = core.groupby('Ticker')['netIncome_x'].ffill()
core.dropna(inplace=True)
core.isna().sum()
core['ChangeInCash'] = core['totalCashFromOperatingActivities'] + core['totalCashFromFinancingActivities'] + core['totalCashflowsFromInvestingActivities']
core['WC'] = core['netIncome_x'] + core['depreciation'] - core['totalCashFromOperatingActivities']
core['NCO'] = core['netIncome_x'] - core['totalCashFromOperatingActivities'] - core['totalCashflowsFromInvestingActivities']
core['TACC'] = core['netIncome_x'] - core['ChangeInCash'] - core['dividendsPaid']
core['ACC_Income'] = core['TACC']/core['netIncome_x']
core[['date']] = core[['date']].apply(pd.to_datetime)
ava_tickers = e_hist_final['Ticker'].unique()
working_df = core[(core['Ticker'].isin(ava_tickers)) &
(core['date'] >= start_date) &
(core['date'] <= end_date)]
working_df.head()
###Output
_____no_output_____
###Markdown
Visualisations to see accruals wrt price
###Code
#Plotting Accruals overtime
def plot_accruals(ticker,working_df,start_date,end_date):
ticker = ticker.upper()
tickers = [ticker]
df = pd.DataFrame()
for ticker in tickers:
df[ticker] = wb.DataReader(ticker,data_source='yahoo',start=start_date,end=end_date)['Adj Close']
#re-indexing the date
idx = pd.date_range(start_date,end_date)
df = df.reindex(idx, method='ffill')
df.reset_index(inplace=True)
df.columns = ['Date',ticker]
stock = working_df[(working_df['Ticker']==ticker) & (working_df['date'] >= start_date)]
#plotting the graph
fig, ax1 = plt.subplots(figsize=(15,6))
ax2 = ax1.twinx()
ax1.plot(df['Date'], df[ticker.upper()], 'black',label='{} Share Price'.format(ticker))
ax2.plot(stock['date'], stock['ACC_Income'], 'blue',marker='o',label='Accruals as proportion of NI')
for point in stock['date']:
plt.axvline(x=point,color='grey',alpha=0.5,linestyle=':')
ax1.set_xlabel('Reported Date')
ax1.set_ylabel('{} Share Price'.format(ticker),color='black')
ax2.set_ylabel('TACC',color='black')
plt.title('{} TACC wrt Price'.format(ticker),color='black')
ax2.legend(loc='upper left')
ax1.legend(loc='lower left')
plt.show()
plot_accruals('NKE',working_df,start_date,end_date)
plot_accruals('EBAY',working_df,start_date,end_date)
###Output
_____no_output_____
###Markdown
Part 3 - Generating Positions for the timeperiod1. Stitch together accruals & estimates, sort accordingly and get the tickers For reference:- working_df - fundamentals data- e_hist_final - estimates data
###Code
# To visualise which dates have the bulk of earnings announcements
freq_table = e_hist_final.groupby('reportDate').count()[['epsActual']]
freq_table.plot(figsize=(15,6))
plt.title('Peak Earnings Announcements')
plt.xlabel('Annoucement Date')
plt.ylabel('Count of Announcements')
###Output
_____no_output_____
###Markdown
Ideally we would want to perform rebalancing as frequently as possible to best optimize. However, for practicality's sake I've excluded that for now and taken an approach where I rebalance at the end of each month.
###Code
# Creating columns with month & year for ease of creating positions later on
working_df['month'] = working_df['date'].dt.month
working_df['year'] = working_df['date'].dt.year
e_hist_final['month'] = e_hist_final['reportDate'].dt.month
e_hist_final['year'] = e_hist_final['reportDate'].dt.year
rebalancing_dates = pd.date_range(start_date,end_date,freq='M')
###Output
_____no_output_____
###Markdown
In generating positions, we prioritise PEAD and refine it further with the accrual anomaly. Essentially, what I am aiming to filter out are stocks that surprise positively (negatively) on earnings and persist with low (high) accruals.
###Code
%%time
positions_dict = {}
all_tickers = []
for i in range(len(rebalancing_dates)-1):
d = rebalancing_dates[i]
date = str(d.date())
month = d.month
year = d.year
#This identifies tickers available for that datetime based on estimates
estimates_list = e_hist_final[(e_hist_final['month'] == month) & (e_hist_final['year'] == year)]
tickers = estimates_list['Ticker']
#Selecting the top 10% to long/short
n = int(round(0.1 * len(estimates_list),0))
#This pulls the latest accrual figure for the relevant tickers
accruals = working_df[(working_df['Ticker'].isin(tickers)) & (working_df['date'] <= d) ]
accruals = accruals[['Ticker','date','ACC_Income']]
accruals.sort_values(by=['ACC_Income'],inplace=True,ascending=True)
accruals.drop_duplicates(subset=['Ticker'],inplace=True,keep='last')
#merging and sorting based on PEAD and Accruals
main = pd.merge(estimates_list,accruals,how='left',on='Ticker')
long = main.sort_values(by=['surprisePercent','ACC_Income'],ascending=[False,True])
short = main.sort_values(by=['surprisePercent','ACC_Income'],ascending=[False,False])
    #collecting the tickers to purchase
long = long[long['epsDifference'] > 0].head(n)['Ticker'].to_list()
short = short[short['epsDifference'] < 0].head(n)['Ticker'].to_list()
positions_dict[date] = {
'long':long,
'short':short
}
#this compiles all the tickers of interest
for t in long:
if t not in all_tickers:
all_tickers.append(t)
for s in short:
if s not in all_tickers:
all_tickers.append(s)
###Output
CPU times: user 958 ms, sys: 19.9 ms, total: 977 ms
Wall time: 988 ms
###Markdown
Part 4 - Backtesting1. To optimize speed, prepare a price_dictionary with Key: Ticker , Value: DataFrame with price data
###Code
def get_returns(ticker,start_date,end_date,pos_type,price_df):
'''
Function to calculate returns
'''
try:
p_1 = price_df[(price_df['Date']==start_date)]['Adjusted_close'].values[0]
p_2 = price_df[(price_df['Date']==end_date)]['Adjusted_close'].values[0]
except:
#if for some reason the price data does not exist, return false and continue to next ticker
return False
if pos_type == 'long':
returns = (p_2/p_1) - 1
else:
returns = -(p_2/p_1) + 1
return returns
p_df = pd.read_csv('US_price.csv')
price = p_df[['Ticker','Adjusted_close','Date']]
price[['Date']] = price[['Date']].apply(pd.to_datetime)
price = price[(price['Date']>=start_date) & (price['Date']<=end_date) & (price['Ticker'].isin(all_tickers))]
%%time
price_dict = {}
for t in all_tickers:
temp = price[price['Ticker']==t]
temp.set_index('Date',inplace=True)
#resampling index to contain all dates in the event the dates don't sync up
idx = pd.date_range(start_date,end_date)
temp = temp.reindex(idx, method='ffill')
temp.reset_index(inplace=True)
temp.columns = ['Date','Ticker','Adjusted_close']
temp.dropna(inplace=True)
temp = temp[temp['Date'].isin(rebalancing_dates)]
price_dict[t] = temp
price_dict['EBAY'].head()
###Output
_____no_output_____
###Markdown
Backtesting
###Code
%%time
backtest_result = 0.0
long_result = 0.0
short_result = 0.0
long_dict = {}
short_dict = {}
total_dict = {}
for i in range(0,len(rebalancing_dates)-1):
d = str(rebalancing_dates[i].date())
long = positions_dict[d]['long']
short = positions_dict[d]['short']
end_date = rebalancing_dates[i+1]
total_positions = len(long) + len(short)
long_returns = 0
short_returns = 0
long_count = 0
short_count = 0
for stock in long:
ret = get_returns(stock,d,end_date,'long',price_dict[stock])
#if for some reason the price data does not exist, return false and continue to next ticker
if ret == False:
continue
long_returns += ret
long_count += 1
for stock in short:
ret = get_returns(stock,d,end_date,'short',price_dict[stock])
#if for some reason the price data does not exist, return false and continue to next ticker
if ret == False:
continue
short_returns += ret
short_count += 1
#containing the data
long_dict[i] = [d,long_returns/len(long)]
short_dict[i] = [d,short_returns/len(short)]
backtest_result += (long_returns + short_returns)/total_positions
long_result += long_returns/long_count
short_result += short_returns/short_count
total_dict[i] = [d,(long_returns + short_returns)/total_positions]
print("Backtest completed.")
print("Total Performance:{}%".format(round(backtest_result*100,5)))
print("Long Performance:{}%".format(round(long_result*100,5)))
print("Short Performance:{}%".format(round(short_result*100,5)))
print("-------")
###Output
Backtest completed.
Total Performance:16.83023%
Long Performance:37.50103%
Short Performance:-2.07718%
-------
CPU times: user 10.2 s, sys: 95.7 ms, total: 10.3 s
Wall time: 10.4 s
###Markdown
Part 5 - Plot performance and benchmark against SPYWe benchmark against SPY.
###Code
#SPY Data for cumsum
df = pd.DataFrame()
df['SPY'] = wb.DataReader('SPY',data_source='yahoo',start=start_date,end=end_date)['Adj Close']
idx = pd.date_range(start_date,end_date)
df = df.reindex(idx, method='ffill')
df.reset_index(inplace=True)
df.columns = ['Date','SPY']
df.dropna(inplace=True)
spy_cumsum = df.copy()
spy_cumsum['Returns'] = (spy_cumsum['SPY']/spy_cumsum['SPY'].shift(1) - 1)
spy_cumsum.dropna(inplace=True)
spy_cumsum.head()
total = pd.DataFrame.from_dict(total_dict,orient='index',columns=['Date','Returns'])
total[['Date']] = total[['Date']].apply(pd.to_datetime)
long = pd.DataFrame.from_dict(long_dict,orient='index',columns=['Date','Returns'])
long[['Date']] = long[['Date']].apply(pd.to_datetime)
short = pd.DataFrame.from_dict(short_dict,orient='index',columns=['Date','Returns'])
short[['Date']] = short[['Date']].apply(pd.to_datetime)
fig, ax1 = plt.subplots(figsize=(15,6))
ax1.plot(spy_cumsum['Date'],spy_cumsum['Returns'].cumsum(),label='SPY Performance',alpha=0.5)
ax1.plot(total['Date'],total['Returns'].cumsum(),label='Combined Performance',linestyle='--',color='red',marker='.')
ax1.plot(long['Date'],long['Returns'].cumsum(),label='Long Performance',linestyle='--',color='green',marker='.')
ax1.plot(short['Date'],short['Returns'].cumsum(),label='Short Performance',linestyle='--',color='orange',marker='.')
ax1.set_xlabel('Date Date')
ax1.set_ylabel('Returns',color='black')
plt.title('Cumulative Performance',color='black')
ax1.legend(loc='upper left')
plt.show()
#SPY Data for monthly non-cumulative returns
s = wb.DataReader('SPY',data_source='yahoo',start=start_date,end=end_date)['Adj Close']
spy_monthly = pd.DataFrame(s.asfreq('M', method='ffill'))
spy_monthly.reset_index(inplace=True)
spy_monthly.columns = ['Date','SPY']
spy_monthly['Returns'] = (spy_monthly['SPY']/spy_monthly['SPY'].shift(1) - 1)
spy_monthly.dropna(inplace=True)  # drop the first NaN return row
fig, ax1 = plt.subplots(figsize=(15,6))
ax1.plot(spy_monthly['Date'],spy_monthly['Returns'],label='SPY Performance',linestyle='--',marker='.',alpha=0.5)
ax1.plot(total['Date'],total['Returns'],label='Combined Performance',linestyle='--',color='red',marker='.',alpha=0.5)
ax1.plot(long['Date'],long['Returns'],label='Long Performance',linestyle='--',color='green',marker='.')
ax1.plot(short['Date'],short['Returns'],label='Short Performance',linestyle='--',color='orange',marker='.')
ax1.set_xlabel('Date Date')
ax1.set_ylabel('Returns',color='black')
for point in spy_monthly['Date']:
plt.axvline(x=point,color='grey',alpha=0.2,linestyle='-')
plt.title('Non-cumulative Performance',color='black')
ax1.legend(loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Conclusions & potential follow upObservations1. The top 10% and bottom 10% portfolios appear to be inversely related between 2018-06 and 2019-01.2. Shorting really doesn't seem to go well.3. PEAD + Accruals underperforms, for when we factor in transaction costs, even if we only go long, the returns will be significantly less than SPY. 4. The strategy is missing key growth stocks that have contributed to SPY's performance and thus when we include them, the results could change drastically. (Refer to Appendix A)Follow up1. One could look at the data between 2018-06 and 2019-01 and understand what led to the inverse relation.2. Could also further refine this to target small cap stocks and verify the other anomalies presented in the book mentioned earlier3. Instead of solely using Earnings Estimates, could also attempt the same strategy with estimates on Revenue/Expenditure etc Appendix ANotable high performing tickers missing from the strategy above due to data unavailability, found by taking the difference between SPY and the tickers included. E.g GOOGL, FB, AAPL, MSFT
###Code
import bs4 as bs
import requests
resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup = bs.BeautifulSoup(resp.text, 'lxml')
table = soup.find('table', {'class': 'wikitable sortable'})
spy_tickers = []
for row in table.findAll('tr')[1:]:
spy_ticker = row.findAll('td')[0].text
spy_tickers.append(spy_ticker.strip())
print('Full list of unavailable tickers')
print('-----')
print(sorted(list(set(spy_tickers)-set(tickers))))
###Output
Full list of unavailable tickers
-----
['AAL', 'AAPL', 'ABC', 'ABMD', 'ABT', 'ACN', 'ADBE', 'ADM', 'ADP', 'ADS', 'AEP', 'AFL', 'AJG', 'AKAM', 'ALGN', 'ALK', 'ALL', 'ALLE', 'ALXN', 'AMCR', 'AMD', 'AME', 'AMGN', 'AMP', 'AMT', 'AMZN', 'ANTM', 'AON', 'AOS', 'APA', 'APD', 'APH', 'APTV', 'ARE', 'ATO', 'AVB', 'AVGO', 'AVY', 'AWK', 'AXP', 'AZO', 'BA', 'BAC', 'BAX', 'BDX', 'BEN', 'BF.B', 'BIIB', 'BK', 'BKR', 'BLK', 'BLL', 'BMY', 'BR', 'BRK.B', 'BSX', 'BWA', 'BXP', 'C', 'CAG', 'CAH', 'CARR', 'CAT', 'CB', 'CCI', 'CCL', 'CDNS', 'CDW', 'CE', 'CERN', 'CF', 'CFG', 'CHD', 'CHRW', 'CHTR', 'CI', 'CINF', 'CLX', 'CMA', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMS', 'CNC', 'COF', 'COG', 'COO', 'COP', 'COST', 'COTY', 'CPB', 'CRM', 'CSCO', 'CSX', 'CTAS', 'CTSH', 'CTVA', 'CTXS', 'CXO', 'DAL', 'DD', 'DFS', 'DG', 'DGX', 'DHI', 'DHR', 'DIS', 'DLR', 'DLTR', 'DOV', 'DOW', 'DPZ', 'DRE', 'DRI', 'DTE', 'EA', 'EBAY', 'ECL', 'EFX', 'EIX', 'EL', 'EMN', 'EMR', 'EQIX', 'EQR', 'ESS', 'ETFC', 'ETN', 'ETR', 'EW', 'EXC', 'EXR', 'F', 'FAST', 'FB', 'FBHS', 'FCX', 'FDX', 'FFIV', 'FITB', 'FLIR', 'FLS', 'FMC', 'FOX', 'FOXA', 'FRC', 'FRT', 'FTI', 'FTV', 'GD', 'GE', 'GILD', 'GIS', 'GL', 'GLW', 'GM', 'GOOG', 'GOOGL', 'GPC', 'GPN', 'GRMN', 'GS', 'GWW', 'HAL', 'HAS', 'HBAN', 'HBI', 'HCA', 'HES', 'HFC', 'HLT', 'HOG', 'HOLX', 'HON', 'HRB', 'HSY', 'HWM', 'IBM', 'ICE', 'IDXX', 'IEX', 'ILMN', 'INCY', 'INFO', 'INTC', 'IP', 'IPG', 'IPGP', 'IQV', 'IR', 'IRM', 'ISRG', 'IT', 'ITW', 'IVZ', 'J', 'JBHT', 'JCI', 'JKHY', 'JNJ', 'JNPR', 'JPM', 'K', 'KEY', 'KHC', 'KIM', 'KLAC', 'KMB', 'KMI', 'KMX', 'KO', 'KR', 'KSU', 'L', 'LDOS', 'LEG', 'LEN', 'LH', 'LHX', 'LKQ', 'LLY', 'LMT', 'LNC', 'LRCX', 'LUV', 'LVS', 'LW', 'MA', 'MAA', 'MAS', 'MCD', 'MCK', 'MCO', 'MDLZ', 'MDT', 'MET', 'MGM', 'MHK', 'MKC', 'MKTX', 'MLM', 'MMC', 'MMM', 'MO', 'MPC', 'MRK', 'MS', 'MSCI', 'MSFT', 'MSI', 'MTB', 'MU', 'MXIM', 'NDAQ', 'NEE', 'NFLX', 'NI', 'NKE', 'NLOK', 'NOC', 'NOV', 'NOW', 'NSC', 'NTAP', 'NTRS', 'NUE', 'NVR', 'NWS', 'NWSA', 'ODFL', 'OKE', 'OMC', 'ORCL', 'ORLY', 'OTIS', 'PAYC', 'PAYX', 'PBCT', 'PCAR', 'PEAK', 'PEG', 'PEP', 'PFE', 'PFG', 'PG', 'PGR', 'PH', 'PHM', 'PKG', 'PKI', 'PLD', 'PM', 'PNC', 'PNR', 'PPG', 'PSA', 'PSX', 'PWR', 'PYPL', 'QCOM', 'RCL', 'RE', 'REG', 'RF', 'RHI', 'RJF', 'RMD', 'ROK', 'ROL', 'ROP', 'RSG', 'RTX', 'SBAC', 'SBUX', 'SCHW', 'SHW', 'SIVB', 'SJM', 'SLB', 'SLG', 'SNA', 'SNPS', 'SO', 'SPG', 'SPGI', 'STT', 'STX', 'STZ', 'SWK', 'SWKS', 'SYF', 'SYK', 'SYY', 'T', 'TAP', 'TDG', 'TEL', 'TFC', 'TFX', 'TIF', 'TMO', 'TMUS', 'TPR', 'TROW', 'TRV', 'TSCO', 'TSN', 'TT', 'TWTR', 'TXN', 'TXT', 'UAL', 'UDR', 'UHS', 'ULTA', 'UNH', 'UNM', 'UNP', 'UPS', 'URI', 'USB', 'V', 'VAR', 'VFC', 'VIAC', 'VLO', 'VNO', 'VRSK', 'VRSN', 'VRTX', 'VTR', 'VZ', 'WAB', 'WAT', 'WBA', 'WDC', 'WELL', 'WFC', 'WHR', 'WLTW', 'WM', 'WMB', 'WRB', 'WRK', 'WST', 'WY', 'XEL', 'XLNX', 'XRX', 'XYL', 'YUM', 'ZBRA', 'ZION']
|
hrbook/files/models/decision_tree.ipynb | ###Markdown
Decision Tree Classifier
###Code
# Load the packages
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
# Load the data
train_df = pd.read_csv('./../../../../data/train/train.csv')
test_df = pd.read_csv('./../../../../data/test/test.csv')
# Load the feature selection result
feature_selector = pd.read_csv('./../../../../data/feature_ranking.csv')
feature_selector.set_index('Unnamed: 0', inplace=True)
# Separate feature space from target variable
y_train = train_df['Attrition']
X_train = train_df.drop('Attrition', axis=1)
y_test = test_df['Attrition']
X_test = test_df.drop('Attrition', axis=1)
###Output
_____no_output_____
###Markdown
We will be running models for different sets of features and evaluating their performance. We start with the complete dataset and then use feature subsets with maximum feature scores from 8 down to 5.
###Code
# Declare the model paramters for searching
param_grid = dict(
criterion = ['gini', 'entropy'],
splitter = ['best', 'random'],
max_depth = [20, 40, 60, None],
min_samples_split = [2, 10, 40]
)
# Declare and train the model
dt_clf = DecisionTreeClassifier(class_weight="balanced", max_features=None)
dt = GridSearchCV(estimator=dt_clf, param_grid=param_grid, scoring='f1', n_jobs=-1)
###Output
_____no_output_____
###Markdown
Complete data
###Code
# Train the model
dt.fit(X_train, y_train)
# Get the parameters for the best model
dt.best_estimator_
# Predict using model
y_pred = dt.predict(X_test)
# Make the classification report
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
False 0.88 0.85 0.87 255
True 0.21 0.26 0.23 39
accuracy 0.78 294
macro avg 0.55 0.56 0.55 294
weighted avg 0.79 0.78 0.78 294
###Markdown
The results are no better than those of logistic regression. The precision, recall and f1 for attrition are not nearly as good as those of the random forest. Feature score of 8
###Code
# Create the new dataset
# Get features with feature score of 8
features = feature_selector[feature_selector['Total']==8].index.tolist()
X_train_8 = X_train.loc[:, features]
X_test_8 = X_test.loc[:, features]
# Train the model
dt.fit(X_train_8, y_train)
# Predict with model
y_pred_8 = dt.predict(X_test_8)
# Make the report
print(classification_report(y_test, y_pred_8))
###Output
precision recall f1-score support
False 0.88 0.85 0.87 255
True 0.21 0.26 0.23 39
accuracy 0.78 294
macro avg 0.55 0.56 0.55 294
weighted avg 0.79 0.78 0.78 294
|
examples/tutorials/Basics.ipynb | ###Markdown
FlyingSquid Basics In this notebook, we'll use some synthetic data to introduce you to FlyingSquid's API. In this notebook, we'll cover the three steps of the FlyingSquid pipeline using synthetic data: First, we'll generate some synthetic labeling function outputs. Next, we'll use FlyingSquid to model the accuracies of these labeling functions (without any ground truth data). Finally, we'll generate probabilistic training labels for downstream model training. Step 1: Generate Synthetic Labeling Function Outputs Let's generate some synthetic labeling function outputs. For a real application, we would write `m` labeling functions that would generate any of the three following labels for each data point: Positive: return +1, Negative: return -1, Abstain: return 0. We would run the `m` labeling functions over `n` data points to get an `(n, m)`-sized matrix. For this tutorial, the `synthetic_data_basics` function will do that for us:
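As a quick illustration of what hand-written labeling functions might look like (a hypothetical sketch with made-up keyword rules, not part of the tutorial's synthetic setup), the `(n, m)` matrix is simply the stacked votes of each function on each data point:

```python
import numpy as np

# Hypothetical labeling functions for illustration only: each votes
# +1 (positive), -1 (negative) or 0 (abstain) on a single text example.
def lf_contains_free(text):
    return 1 if "free" in text.lower() else 0

def lf_starts_with_greeting(text):
    return -1 if text.lower().startswith("hi ") else 0

def lf_too_short(text):
    return -1 if len(text.split()) < 3 else 0

lfs = [lf_contains_free, lf_starts_with_greeting, lf_too_short]
examples = ["Hi there, how are you?", "FREE money now", "ok"]

# Stack the votes into the (n, m) label matrix that FlyingSquid consumes.
L = np.array([[lf(x) for lf in lfs] for x in examples])
print(L.shape)  # (3, 3)
```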
###Code
from tutorial_helpers import *
L_train, L_dev, Y_dev = synthetic_data_basics()
print(L_train.shape)
print(L_dev.shape)
print(Y_dev.shape)
###Output
(10000, 5)
(500, 5)
(500,)
###Markdown
As you can see, we have five synthetic labeling functions that have generated labels for an unlabeled training set with 10,000 data points, and a labeled dev set with 500 labeled data points. We can use the dev set to see how accurate our labeling functions are:
###Code
print_statistics(L_dev, Y_dev)
###Output
LF 0: Accuracy 93%, Abstain rate 78%
LF 1: Accuracy 63%, Abstain rate 87%
LF 2: Accuracy 62%, Abstain rate 30%
LF 3: Accuracy 59%, Abstain rate 37%
LF 4: Accuracy 46%, Abstain rate 48%
###Markdown
As you can see, we have two labeling functions that have high accuracies but also high abstain rates (LF 0 and LF 1), and three labeling functions with lower abstain rates but also lower accuracies. We can inspect the `L_dev` and `Y_dev` matrices to see the data formats:
###Code
print(L_dev[:10])
print(Y_dev[:10])
###Output
[ 1. -1. -1. -1. -1. 1. -1. -1. -1. 1.]
###Markdown
Step 2: Model the labeling functions with FlyingSquid Next, we're going to use FlyingSquid to model the five labeling functions. We'll use this dependency graph: As you can see, we have one (hidden) node for the latent ground truth variable Y, and five (observable) nodes for each labeling function. To model that in FlyingSquid, we just need to specify that we have `m = 5` labeling functions. Since we only have a single task, the dependencies are automatically inferred (see the video tutorial for more complex dependencies).
###Code
from flyingsquid.label_model import LabelModel
m = 5
label_model = LabelModel(m)
###Output
_____no_output_____
###Markdown
To train the label model, all we need to do is pass `L_train` to the fit function:
###Code
label_model.fit(L_train)
###Output
_____no_output_____
###Markdown
Evaluating the label model Now, let's use the dev set to evaluate the label model:
###Code
preds = label_model.predict(L_dev).reshape(Y_dev.shape)
accuracy = np.sum(preds == Y_dev) / Y_dev.shape[0]
print('Label model accuracy: {}%'.format(int(100 * accuracy)))
###Output
Label model accuracy: 70%
###Markdown
We can see that this performs better than majority vote:
###Code
majority_vote_preds = np.array([1 if pred > 0 else -1 for pred in np.sum(L_dev, axis=1)])
majority_vote_accuracy = np.sum(majority_vote_preds == Y_dev) / Y_dev.shape[0]
print('Majority vote accuracy: {}%'.format(int(100 * majority_vote_accuracy)))
###Output
Majority vote accuracy: 65%
###Markdown
Step 3: Training an End Model If necessary, we can also use FlyingSquid to generate probabilistic labels to train up an end model. Instead of calling the `predict` function, we can call `predict_proba_marginalized` over `L_train`:
###Code
probabilistic_labels = label_model.predict_proba_marginalized(L_train)
print(probabilistic_labels.shape)
print(probabilistic_labels[:10])
###Output
(10000,)
[0.46439535 0.89805256 0.72736331 0.48237588 0.2962007 0.2633458
0.66693893 0.53600092 0.72736331 0.3213108 ]
|
results_plots.ipynb | ###Markdown
Visualise the results
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from anomaly_delays.main_functions import share_delays
from anomaly_delays.helper_functions import read_nab, calc_cum_avg_loss
scores = pd.read_csv("results/scores.csv")
losses_log = pd.read_csv("results/losses_log.csv")
losses_square = pd.read_csv("results/losses_square.csv")
###Output
_____no_output_____
###Markdown
Predictions for the real data with known anomaly causes
###Code
scores_real = scores[scores["folder_name"] == "realKnownCause"]
files = [
"_ec2_request_latency_system_failure.csv",
"_machine_temperature_system_failure.csv",
"_ambient_temperature_system_failure.csv",
]
fig, axs = plt.subplots(1, len(files), figsize=(17, 5))
for i, _ in enumerate(files):
axs[i].plot(
scores_real[scores_real["file_name"] == files[i]][
"score_Fixed10d1"
].values,
label="predictions",
)
axs[i].plot(
scores_real[scores_real["file_name"] == files[i]]["label"].values,
label="anomalies",
)
plt.subplots_adjust(
left=None, bottom=None, right=None, top=None, wspace=0.06, hspace=0
)
for n, ax in enumerate(axs.flat):
if n == 1:
ax.set_xlabel("Time", fontsize=26)
ax.set_ylabel("Probability", fontsize=26)
for ax in axs.flat:
ax.label_outer()
ax.tick_params(labelsize=26)
handles, labels = ax.get_legend_handles_labels()
fig.legend(
handles, labels, loc="upper right", bbox_to_anchor=(1.11, 1), fontsize=26
)
fig.suptitle(r"Fixed-share, $\alpha = 0.1$, delay = 20", fontsize=26);
fig, axs = plt.subplots(1, len(files), figsize=(17, 5))
for i, _ in enumerate(files):
axs[i].plot(
scores_real[scores_real["file_name"] == files[i]][
"score_Variable10d3"
].values,
label="predictions",
)
axs[i].plot(
scores_real[scores_real["file_name"] == files[i]]["label"].values,
label="anomalies",
)
plt.subplots_adjust(
left=None, bottom=None, right=None, top=None, wspace=0.06, hspace=0
)
for n, ax in enumerate(axs.flat):
if n == 1:
ax.set_xlabel("Time", fontsize=26)
ax.set_ylabel("Probability", fontsize=26)
for ax in axs.flat:
ax.label_outer()
ax.tick_params(labelsize=26)
handles, labels = ax.get_legend_handles_labels()
fig.legend(
handles, labels, loc="upper right", bbox_to_anchor=(1.11, 1), fontsize=26
)
fig.suptitle(r"Variable-share, $\alpha = 0.1$, delay = 100", fontsize=26);
fig, axs = plt.subplots(1, len(files), figsize=(17, 5))
for i, _ in enumerate(files):
axs[i].plot(
scores_real[scores_real["file_name"] == files[i]][
"score_randomCutForest"
].values,
label="predictions",
)
axs[i].plot(
scores_real[scores_real["file_name"] == files[i]]["label"].values,
label="anomalies",
)
plt.subplots_adjust(
left=None, bottom=None, right=None, top=None, wspace=0.06, hspace=0
)
for n, ax in enumerate(axs.flat):
if n == 1:
ax.set_xlabel("Time", fontsize=26)
ax.set_ylabel("Probability", fontsize=26)
for ax in axs.flat:
ax.label_outer()
ax.tick_params(labelsize=26)
handles, labels = ax.get_legend_handles_labels()
fig.legend(
handles, labels, loc="upper right", bbox_to_anchor=(1.11, 1), fontsize=26
)
fig.suptitle("randomCutForest", fontsize=26);
###Output
_____no_output_____
###Markdown
Weights analysis for the real data with known anomaly causes of system failure
###Code
experts = [
"knncad",
"numentaTM",
"twitterADVec",
"skyline",
"earthgeckoSkyline",
"numenta",
"bayesChangePt",
"null",
"expose",
"relativeEntropy",
"htmjava",
"randomCutForest",
"random",
"contextOSE",
"windowedGaussian",
]
FOLDER_NAME = "realKnownCause"
FILE_NAME = "_machine_temperature_system_failure.csv"
dt = read_nab(experts, FOLDER_NAME, FILE_NAME)
score_experts = np.array(dt.filter(regex="^score", axis=1))
assert score_experts.shape[1] == len(experts)
target = dt["label"].values
score_AA, loss_AA, loss_experts, weights_experts = share_delays(
target, score_experts, share_type="Fixed", alpha=0, delays=100
)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for i, _ in enumerate(experts):
if max(weights_experts.T[i]) > 0.7:
plt.plot(
weights_experts.T[i], linewidth=6, label=f"{experts[i]}"
)
else:
plt.plot(weights_experts.T[i], linewidth=6)
axs.legend(loc="upper right", bbox_to_anchor=(1.6, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=36)
axs.set_ylabel("Weights", fontsize=36)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
plt.rcParams.update({"font.size": 36})
fig.suptitle("AAP, log-loss, delay = 100", fontsize=30);
score_AA, loss_AA, loss_experts, weights_experts = share_delays(
target, score_experts, share_type="Variable", alpha=0, delays=100
)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for i, _ in enumerate(experts):
if max(weights_experts.T[i]) > 0.7:
plt.plot(
weights_experts.T[i], linewidth=6, label=f"{experts[i]}"
)
else:
plt.plot(weights_experts.T[i], linewidth=6)
axs.legend(loc="upper right", bbox_to_anchor=(1.6, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=36)
axs.set_ylabel("Weights", fontsize=36)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
plt.rcParams.update({"font.size": 36})
fig.suptitle("AAP, square-loss, delay = 100", fontsize=30);
score_AA, loss_AA, loss_experts, weights_experts = share_delays(
target, score_experts, share_type="Fixed", alpha=0.05, delays=100
)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for i, _ in enumerate(experts):
if max(weights_experts.T[i]) > 0.3:
plt.plot(
weights_experts.T[i], linewidth=6, label=f"{experts[i]}"
)
else:
plt.plot(weights_experts.T[i], linewidth=6)
axs.legend(loc="upper right", bbox_to_anchor=(1.65, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=36)
axs.set_ylabel("Weights", fontsize=36)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
plt.rcParams.update({"font.size": 36})
fig.suptitle(r"Fixed-share, $\alpha = 0.05$, delay = 100", fontsize=30);
score_AA, loss_AA, loss_experts, weights_experts = share_delays(
target, score_experts, share_type="Variable", alpha=0.05, delays=100
)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for i, _ in enumerate(experts):
if max(weights_experts.T[i]) > 0.5:
plt.plot(
weights_experts.T[i], linewidth=6, label=f"{experts[i]}"
)
else:
plt.plot(weights_experts.T[i], linewidth=6)
axs.legend(loc="upper right", bbox_to_anchor=(1.65, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=36)
axs.set_ylabel("Weights", fontsize=36)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
plt.rcParams.update({"font.size": 36})
fig.suptitle(r"Variable-share, $\alpha = 0.05$, delay = 100", fontsize=30);
score_AA, loss_AA, loss_experts, weights_experts = share_delays(
target, score_experts, share_type="Fixed", alpha=0.3, delays=100
)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for i, _ in enumerate(experts):
if max(weights_experts.T[i] > 0.2):
plt.plot(
weights_experts.T[i], linewidth=6, label=f"{experts[i]}"
)
else:
plt.plot(weights_experts.T[i], linewidth=6)
axs.legend(loc="upper right", bbox_to_anchor=(1.65, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=36)
axs.set_ylabel("Weights", fontsize=36)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
plt.rcParams.update({"font.size": 36})
fig.suptitle(r"Fixed-share, $\alpha = 0.3$, delay = 100", fontsize=30);
score_AA, loss_AA, loss_experts, weights_experts = share_delays(
target, score_experts, share_type="Variable", alpha=0.3, delays=100
)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for i, _ in enumerate(experts):
if max(weights_experts.T[i] > 0.5):
plt.plot(
weights_experts.T[i], linewidth=6, label=f"{experts[i]}"
)
else:
plt.plot(weights_experts.T[i], linewidth=6)
axs.legend(loc="upper right", bbox_to_anchor=(1.65, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=36)
axs.set_ylabel("Weights", fontsize=36)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
plt.rcParams.update({"font.size": 36})
fig.suptitle(r"Variable-share, $\alpha = 0.3$, delay = 100", fontsize=30);
###Output
_____no_output_____
###Markdown
Plot theoretical bounds
###Code
losses_log_art = (
losses_log[
(losses_log["folder_name"] == "artificialNoAnomaly")
& (losses_log["file_name"] == "_art_daily_perfect_square_wave.csv")
]
.filter(regex="^loss", axis=1)
.reset_index()
.drop("index", axis=1)
)
losses_square_art = (
losses_square[
(losses_square["folder_name"] == "artificialNoAnomaly")
& (losses_square["file_name"] == "_art_daily_perfect_square_wave.csv")
]
.filter(regex="^loss", axis=1)
.reset_index()
.drop("index", axis=1)
)
###Output
_____no_output_____
###Markdown
Log-loss
###Code
ETA = 1
Losses_log_avg = calc_cum_avg_loss(losses_log_art, current_delay=1)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for algo_ind in ["numenta", "bayesChangePt", "earthgeckoSkyline"]:
axs.plot(
Losses_log_avg[f"loss_{algo_ind}"] - Losses_log_avg["loss_Fixed0d0"],
label=f"{algo_ind}",
linewidth=4,
)
axs.plot(
np.repeat(-np.log(len(experts)) / ETA, Losses_log_avg.shape[0]),
linewidth=4,
label="bound",
)
axs.legend(loc="upper right", bbox_to_anchor=(1.65, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=26)
axs.set_ylabel("Loss difference", fontsize=26)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
fig.suptitle("Cumulative logarithmic loss", fontsize=26);
ETA = 1
Losses_log_avg = calc_cum_avg_loss(losses_log_art, current_delay=50)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for algo_ind in ["numenta", "bayesChangePt", "earthgeckoSkyline"]:
axs.plot(
Losses_log_avg[f"loss_{algo_ind}"] - Losses_log_avg["loss_Fixed0d2"],
label=f"{algo_ind}",
linewidth=4,
)
axs.plot(
np.repeat(-np.log(len(experts)) / ETA, Losses_log_avg.shape[0]),
linewidth=4,
label="bound",
)
axs.legend(loc="upper right", bbox_to_anchor=(1.65, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=26)
axs.set_ylabel("Loss difference", fontsize=26)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
fig.suptitle("Average logarithmic loss, delay = 50", fontsize=26);
###Output
_____no_output_____
###Markdown
Square-loss
###Code
ETA = 2
Losses_square_avg = calc_cum_avg_loss(losses_square_art, current_delay=1)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for algo_ind in ["numenta", "bayesChangePt", "earthgeckoSkyline"]:
axs.plot(
Losses_square_avg[f"loss_{algo_ind}"]
- Losses_square_avg["loss_Variable0d0"],
label=f"{algo_ind}",
linewidth=4,
)
axs.plot(
np.repeat(-np.log(len(experts)) / ETA, Losses_square_avg.shape[0]),
linewidth=4,
label="bound",
)
axs.legend(loc="upper right", bbox_to_anchor=(1.65, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=26)
axs.set_ylabel("Loss difference", fontsize=26)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
fig.suptitle("Cumulative square loss", fontsize=26);
ETA = 2
Losses_square_avg = calc_cum_avg_loss(losses_square_art, current_delay=50)
fig, axs = plt.subplots(1, 1, figsize=(10, 5))
for algo_ind in ["numenta", "bayesChangePt", "earthgeckoSkyline"]:
axs.plot(
Losses_square_avg[f"loss_{algo_ind}"]
- Losses_square_avg["loss_Variable0d2"],
label=f"{algo_ind}",
linewidth=4,
)
axs.plot(
np.repeat(-np.log(len(experts)) / ETA, Losses_square_avg.shape[0]),
linewidth=4,
label="bound",
)
axs.legend(loc="upper right", bbox_to_anchor=(1.65, 1), fontsize=26)
axs.set_xlabel("Time", fontsize=26)
axs.set_ylabel("Loss difference", fontsize=26)
axs.xaxis.set_tick_params(labelsize=26)
axs.yaxis.set_tick_params(labelsize=26)
fig.suptitle("Average square loss, delay = 50", fontsize=26);
###Output
_____no_output_____ |
notebooks/Extracting Role Title Words.ipynb | ###Markdown
Senior is a valid title, but it can also be a seniority modifier
###Code
df[df.Title.str.lower().str.endswith('senior')].head()
df[df.Title.str.lower().str.endswith('support')].Title.value_counts().to_frame().T
role_words = [
'manager',
'engineer',
'executive',
'assistant',
'accountant',
'administrator',
'developer',
'analyst',
'controller',
'teacher',
'consultant',
'advisor',
'cleaner',
'officer',
'worker',
'nurse',
'operative',
'surveyor',
'technician',
'clerk',
'chef',
'director',
'coordinator',
'supervisor',
'partner',
'labourer',
'secretary',
'receptionist',
'buyer',
'planner',
'designer',
'estimator',
'senior',
'leader',
#'partie', # Part of chef de partie, specific title
'solicitor',
'driver',
'auditor',
'electrician',
'negotiator',
'fitter',
'operator',
'turner',
#'workers', # Plural of worker
'representative',
'handler',
'machinist',
'miller',
'welder',
'inspector',
'associate',
#'pa', # Acronym
'therapist',
'architect',
# 'dba', # Acronym
'bookkeeper',
'programmer',
'control', # Is "Quality Control" and "Quality controller" the same thing?
'telesales', # This is a linguistic exception; telesalesperson?
'resourcer',
'sales', # This is a linguistic exception; salesperson?
'merchandiser',
#'rgn', # Acronym
'bricklayer',
'toolmaker',
'groundworker',
# 'finance', # This needs to be part of a role, doesn't make sense by itself
'cook',
'optometrist',
'cashier',
'paraplanner',
'author',
'agent',
# 'marketing',
'paralegal',
'trainer',
'fellow',
#'service', # customer service
'fundraiser',
'technologist',
'carpenter',
'joiner',
'plumber',
'caretaker',
'housekeeper',
'telemarketer',
'ledger',
'studentship',
# 'english', # e.g. teacher of english
'guard',
#'receptionist/administrator', # Two roles
'plasterer',
'porter',
'writer',
'headteacher', # A contraction of head teacher?
'conveyancer',
#'operatives', # plural of operatives
'physiotherapist',
'wirer',
'draughtsperson',
'support',
# '(rgn)', # see rgn
'payroller',
'chemist',
'tester',
# 'hr', # acronym human resources, which is itself an exception
'generator',
# 'drivers', # plural of driver
# 'operations', # Work type
'underwriter',
# 'cleaners', # Plural of cleaner
# 'welder/fabricator', Two titles
'carer',
'typist',
# 'rmn', # Acronym
#'executives', # Plural
'picker',
'specialist',
# 'assistants', #plural
# 'payroll', # This is an edge case...
# 'payable', # Accounts payable, a deparment
'sprayer',
# 'teachers', #plural of teacher
'dentist',
'draughtsman',
# 'nqt', # Acronym
# 'adviser', # Variant of advisor
'practitioner',
# 'consultants', # Plural of consultants
'copywriter',
# 'nurses' # Plural of nurse
'head', # Added
]
len(role_words), len(set(role_words))
bs = 8
pd.DataFrame([role_words[i:i+bs] for i in range(0, len(role_words), bs)])
exceptions = ['Chef de Partie', 'Customer Service']
acronyms = {
'PA': 'Personal Assistant',
'DBA': 'Database Administrator',
'RGN': 'Registered General Nurse',
#'HR': 'Human Resources', # hr can also be short for hour, particularly as /hr
'RMN': 'Registered Mental Health Nurse', # Sometimes aliased to Registered Mental Nurse
'NQT': 'Newly Qualified Teacher',
'CEO': 'Chief Executive Officer',
'MD': 'Managing Director', # Medical doctor doesn't occur here
'EA': 'Executive Assistant',
}
variants = {
'Adviser': 'Advisor',
'Registered Mental Nurse': 'Registered Mental Health Nurse',
}
functions = [
'finance',
'marketing',
'service',
'english',
'operations',
'human resources',
'payroll',
'accounts payable',
]
# Can also be a role
ambiguous_functions = [
'sales',
'telesales',
]
df[df.Title.str.contains('payable$', case=False)].Title.value_counts().to_frame().T
###Output
_____no_output_____
###Markdown
Functions occur as part of a role title in the following ways: "Head of", "Teacher of", "Director of". Common modifiers include Deputy, Assistant or Interim. Otherwise they can be stuck on the end of a role title. Director and Teacher are both in the list of role titles; the 'of' just flips the order, e.g. Marketing Director == Director of Marketing, Teacher of English == English Teacher. But "Finance Head" sounds funny...
###Code
df[df.Title.str.lower().str.contains('(?:' + '|'.join(functions) + ')$')].Title.value_counts().to_frame().T
df[df.Title.str.contains('(?:head|teacher|director) of ', case=False)].Title.value_counts().to_frame().T
###Output
_____no_output_____
###Markdown
Head also makes sense without the 'of' as a seniority modifier
###Code
df[df.Title.str.contains(r'head\b [^o]', case=False)].Title.value_counts().to_frame().head(30).T
###Output
_____no_output_____
###Markdown
We can sometimes end in Head. Look at "Assistant to Head"
###Code
df[df.Title.str.contains(r'\bhead$', case=False)].Title.value_counts().to_frame().head(30).T
###Output
_____no_output_____
###Markdown
'Of' seems like a general rule: `<role> of <X>` is the same as `<X> <role>` (except for "head of"), e.g. Manager of Marine Equipment -> Marine Equipment Manager
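As a rough sketch of that rule (my own illustration, not part of the original analysis), the swap can be done with a single regex, treating "head of" as the exception:

```python
import re

def normalise_of(title):
    # Illustrative only: rewrite "<role> of <thing>" as "<thing> <role>",
    # leaving "Head of ..." titles untouched.
    match = re.match(r'^(?P<role>.+?) of (?P<thing>.+)$', title, flags=re.IGNORECASE)
    if match and match.group('role').strip().lower() != 'head':
        return f"{match.group('thing')} {match.group('role')}"
    return title

print(normalise_of('Manager of Marine Equipment'))  # Marine Equipment Manager
print(normalise_of('Head of Marketing'))            # Head of Marketing (unchanged)
```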
###Code
df[df.Title.str.contains(r'\bof\b', case=False)].Title.value_counts().to_frame().head(100).T
###Output
_____no_output_____
###Markdown
What about 'to'? There are exceptions like 'Business to Business' or 'Door to Door', but for `<role> to <person>` it's generally the first role that matters, e.g. PA to Director, PA to CEO, Clerk to Governors
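A minimal sketch of that heuristic (again my own illustration, ignoring edge cases beyond the two exceptions mentioned) could look like:

```python
def primary_role(title):
    # Illustrative heuristic only: for "<role> to <person>" keep the first role,
    # but skip "X to X" phrases such as "Business to Business" or "Door to Door".
    exceptions = ('business to business', 'door to door')
    lowered = title.lower()
    if ' to ' in lowered and not any(exc in lowered for exc in exceptions):
        return title.split(' to ')[0].strip()
    return title

print(primary_role('PA to Director'))      # PA
print(primary_role('Clerk to Governors'))  # Clerk
print(primary_role('Door to Door Sales'))  # Door to Door Sales (left as-is)
```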
###Code
df[df.Title.str.contains(r'\bto\b', case=False)].Title.value_counts().to_frame().head(30).T
df[df.Title.str.contains(r'\bMD\b', case=False)].Title.value_counts().to_frame().head(30).T
###Output
_____no_output_____
###Markdown
Let's now look for second order role titles
###Code
def expand_mapping(df, source_col, dest_col, mapping):
df = df.copy()
df[dest_col] = df[source_col]
for source, target in mapping.items():
df[dest_col] = df[dest_col].str.replace(fr'\b{source}\b', target)
return df
%time df = expand_mapping(df, 'Title', 'expanded_title', acronyms)
df[df.Title.str.contains('(?:RGN|PA)')][['Title', 'expanded_title']]
def extract_from_ending(series, role_words, n):
return (
series
.str.lower()
.str.extractall('(' + (r'[\w\d*]+ ' * n) + '(?:' + '|'.join(role_words) + r')\b)')[0]
.value_counts()
.to_frame()
)
two_title_words = extract_from_ending(df.expanded_title, role_words, 1)
def examine(term):
return df[df.expanded_title.str.contains(rf'{term}\b', case=False)].expanded_title.value_counts().to_frame().T
examine('ing manager')
two_title_words.T
###Output
_____no_output_____
###Markdown
This heuristic seems to work well *except* when it's part of a longer string (or when we have seniority in the title, if we consider that separate)
###Code
[
'account manager',
'project manager',
'design engineer',
'recruitment consultant',
'development manager', # often part of business development manager
#'general nurse', # almost always part of registered general nurse
'team leader',
'business analyst',
'software engineer',
'web developer',
'home manager',
'sales executive',
'personal assistant',
'staff nurse',
'marketing manager',
# 'health nurse', # almost always part of mental health nurse
'management accountant',
'quantity surveyor',
'social worker',
'software developer',
'maintenance engineer',
'support worker',
'java developer',
'finance manager',
'general manager',
'sous chef',
'net developer',
'service engineer',
'project engineer',
'operations manager',
'marketing executive',
'service advisor',
'teaching assistant',
'sales manager',
'field sales',
'care assistant',
'accounts assistant', # notice accounts vs account
'account executive',
'assistant manager',
'registered nurse',
'database administrator',
'store manager',
'financial controller',
'systems engineer',
'credit controller',
'business partner',
'account director',
'deputy manager',
'quality engineer',
'mechanical engineer',
'primary teacher',
'php developer',
# 'head chef', # seniority: head ...
'development executive',
'internal sales',
'technical support',
'product manager',
'financial accountant',
'purchase ledger',
'c developer',
'it support',
'travel consultant',
'service manager',
'area sales',
'team manager',
#'line support', # almost always 'first line support' or '2nd line support' or '3rd line support'
'site manager',
'test engineer',
'hr manager',
'branch manager',
'hr advisor',
'legal secretary',
'process engineer',
'systems administrator',
'electrical engineer',
'data analyst',
'services manager',
#'graduate sales', # seniority
'care worker',
'application support',
'test analyst',
'development engineer',
'business manager',
'cleaning operative',
'contracts manager',
'telesales executive',
'english teacher',
'office manager',
'area manager',
'assistant accountant',
'financial analyst',
'commercial manager',
'restaurant manager',
'finance analyst',
'technical sales',
'car sales',
'occupational therapist',
'programme manager',
'aspnet developer',
'quality manager',
'cnc miller',
'sales engineer',
'manufacturing engineer',
'research associate',
'sales administrator',
'hr administrator',
'audit manager',
#'senior sales', # seniority
'security officer',
'production manager',
# 'end developer', # always part of front end developer or back end developer
'nursery nurse',
'technical manager',
'marketing assistant',
'web designer',
'media sales',
'network engineer',
'cnc turner',
'finance assistant',
'science teacher',
'maths teacher',
'care manager',
'audit senior',
'structural engineer',
'payroll administrator',
'infrastructure engineer',
'warehouse operative',
'estate agent', # *mostly* real estate agent
'category manager',
'technical consultant',
'internal auditor',
'pastry chef',
'vehicle technician',
'delivery manager',
'account handler',
'brand manager',
'lettings negotiator',
'credit control',
'it sales',
'project coordinator',
'risk analyst',
'systems analyst',
'commis chef',
'sales support',
'electronics engineer',
'engineering manager',
'communications manager',
'business sales',
'qualified teacher',
'property manager',
'regional sales',
'relationship manager',
'sales consultant',
'risk manager',
'design manager',
'facilities manager',
'office administrator',
'tax manager',
'administration assistant',
'graphic designer',
'application developer',
'planning manager',
'supply teacher',
'production operative',
#'senior buyer', # seniority
'claims handler',
'registered manager',
'executive assistant',
'maintenance technician',
'sales advisor',
'admin assistant',
'sql developer',
'hgv technician',
'compliance manager',
'gas engineer',
'procurement manager',
'centre manager',
'healthcare assistant',
'hr officer',
'unit manager',
'recruitment manager',
'solutions architect',
'associate director',
'asbestos surveyor',
'cnc programmer',
'cnc machinist',
'medical sales',
'sales negotiator',
'mechanical fitter',
'customer support',
'production engineer',
'pensions administrator',
'building surveyor',
#'1 driver', # e.g. Class 1 Driver
'civil engineer',
'contract manager',
'solution architect',
'business support',
'regional manager',
'research fellow',
'service administrator',
'dental nurse',
'desk analyst',
'commissioning engineer',
'shift manager',
'chief executive',
'care coordinator',
'desktop support',
#'2 teacher', # key stage 2 teacher/ year 2 teacher
'sharepoint developer',
'learning support',
'senior manager',
'technical architect',
'catering assistant',
'senior estimator',
'quality inspector',
'sales ledger',
'senior developer',
'accounts senior',
'property solicitor',
'demi chef',
'service representative',
'trainee sales',
'senior support',
'service assistant',
'accounts administrator',
'digital designer',
'commercial analyst',
'development officer',
'hr assistant',
'maintenance electrician',
'bi developer',
'finance director',
'android developer',
'administrative assistant',
'ios developer',
'project support',
'security engineer',
'bid manager',
'reporting analyst',
'marketing coordinator',
'inside sales',
'assurance manager',
'accounts manager',
'site engineer',
'project accountant',
'campaign manager',
#'senior practitioner', # senior
'fundraising manager']
seniority_words = [
'deputy',
'senior',
'graduate',
'trainee',
'lead',
'head',
'interim',
'junior',
#'band 6', # occupational therapist
#'stage 1', # teacher
#'stage 2', # teacher
]
three_title_words = extract_from_ending(df.expanded_title, role_words, 2)
three_title_words.T
###Output
_____no_output_____
###Markdown
At 5 words it's mostly different things put together. While there are some exceptions, they are quite specific and it's fine to ignore these: "Personal Assistant to ...", Speech and Language Therapist, Financial Planning and Analysis Manager, Sales and Business Development Manager, Mechanical Building Services Design Engineer
###Code
extract_from_ending(df.expanded_title, role_words, 3).T
###Output
_____no_output_____
###Markdown
Can we find structure using a parse tree? Unfortunately the way language is used in job ad titles is quite different from what SpaCy was trained on, and so the parse trees tend to be wrong. This is partly why the NER went so badly for extracting role titles. I'm better off crafting my own rules.
###Code
import spacy
from spacy import displacy
###Output
_____no_output_____
###Markdown
Download the model if necessary
###Code
#!python -m spacy download en_core_web_lg
nlp = spacy.load('en_core_web_lg')
ls = list(nlp.pipe(list(sorted(df.expanded_title, key=len, reverse=True)[:100])))
displacy.render(ls[0])
displacy.render(ls[2])
displacy.render(ls[3])
###Output
_____no_output_____ |
third/orm/SQLAlchemy_vs_peewee.ipynb | ###Markdown
SQLAlchemy vs peewee SQLAlchemy
###Code
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///sqlalchemy.db', echo=False)
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(40))
age = Column(Integer)
password = Column(String)
Base.metadata.create_all(engine)
def create_user():
session.add(User(name='Mary', age=30, password='secret'))
session.commit()
%timeit create_user()
%timeit session.query(User).first()
session.close()
###Output
116 ms ± 49.3 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
472 µs ± 8.89 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
peewee
###Code
from peewee import *
db = SqliteDatabase('peewee.db')
class User(Model):
name = CharField(max_length=40)
age = IntegerField()
password = CharField()
class Meta:
database = db
User.create_table(fail_silently=True)
%timeit User.create(name='Mary', age=30, password='secret')
%timeit User.select()[0]
###Output
90.6 ms ± 24.2 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
269 µs ± 17.8 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
Comparison
###Code
# from __future__ import unicode_literals, division
sqlalchemy_line_count, peewee_line_count = 31, 20
print( 'Code length: peewee is %.2f× shorter than SQLAlchemy' % (sqlalchemy_line_count / peewee_line_count))
print( 'SQL INSERT : peewee is %.2f× faster than SQLAlchemy' % (116 / 90.6))
print( 'SQL SELECT : peewee is %.2f× faster than SQLAlchemy' % (472 / 269))
###Output
Code length: peewee is 1.55× shorter than SQLAlchemy
SQL INSERT : peewee is 1.28× faster than SQLAlchemy
SQL SELECT : peewee is 1.75× faster than SQLAlchemy
|
statistical_word.ipynb | ###Markdown
answer 可以由ocr直接产生
###Code
dataset
def tanswer(dataset):
sae = 0 # single answer and existing in ocr
san = 0
msae = 0
man = 0
mae = 0
for i in range(len(dataset) - 1):
signal_answer_list = dataset[i+1]["valid_answers"]
signal_answer_list = [an.strip() for an in signal_answer_list]
dd = dict.fromkeys(signal_answer_list,0)
ocr_tokens = [ocr.lower().strip() for ocr in dataset[i+1]["ocr_tokens"]]
answers = dd.keys()
if len(answers) == 1:
if answers[0] in ocr_tokens:
sae += 1
else:
# print(len(dd.keys()), dd, dataset[i+1]["ocr_tokens"])
san += 1
else:
na = True
for i in range(len(answers)):
answer_words = answers[i].split(" ")
if len(answer_words) == 1:
if answer_words[0] in ocr_tokens:
na = False
msae += 1
break
else:
all = True
for a in answer_words:
if a not in ocr_tokens:
all = False
break
if all:
mae += 1
# print(answers[i], ocr_tokens)
na = False
break
if na:
man += 1
print("total: %d", len(dataset[1:]))
print("single anser existing:%d, rates:%f"%(sae,float(sae)/len(dataset[1:])))
print("multiple single answer existing:%d, rates:%f"%(msae,float(msae)/len(dataset[1:])))
print("multiple words answer existing:%d, rates:%f"%(mae,float(mae)/len(dataset[1:])))
print("multiple words answer no existing:%d, rates:%f"%(man,float(man)/len(dataset[1:])))
print("single answer no exsiting:%d, rates:%f"%(san,float(san)/len(dataset[1:])))
print("no answer :%d, rates:%f"%(man + san,float(man + san)/len(dataset[1:])))
tanswer(dataset)
###Output
('total: %d', 34602)
single anser existing:3455, rates:0.099850
multiple single answer existing:8677, rates:0.250766
multiple words answer existing:4372, rates:0.126351
multiple words answer no existing:13667, rates:0.394977
single answer no exsiting:4431, rates:0.128056
no answer :18098, rates:0.523033
|
acc-models-ps-master/scenarios/sftpro/5_phase_rotation/notebooks/MTE_rotation_tracking.ipynb | ###Markdown
MTE phase rotation
###Code
# instances along the cycle at which the phase space will be evaluated
dt = 5
times = np.arange(800, 840, dt)
# number of turns to be tracked
turns = 2000
# flag to perform matching
match = 0
# flag to perform tracking
ptc_track = 1
MTE_table = pnd.read_pickle('./MTE_rotation_table.pkl')
phase_space = pnd.read_pickle('./MTE_rotation_phase_space.pkl')
# phase_space[phase_space.columns[1:8]] = np.nan
# phase_space[phase_space.columns[9:]] = np.nan
# x = ['X' + str(t) for t in times]
# px = ['PX' + str(t) for t in times]
# cols = x + px
# phase_space = pnd.DataFrame(columns = cols, index = np.arange(0, 40 * 2000, 1))
circuit_names = MTE_table.columns[:-5]
if 0:
for t in times:
# number of particles to be tracked
n_p = 28
# distance between phase space trajectories in mm and mrad
dx = 1
dpx = 0.11
print('Cycle time ' + str(t) + ' ms')
print('________________________________________________\n')
# Setting up the MAD-X environment
madx = Madx()
madx.chdir('/eos/user/a/ahuschau/www/test-acc-models/repository/PS/2019/scenarios/SFTPRO/4_horizontal_splitting/')
# madx = Madx(stdout=False)
madx.command.beam('PARTICLE=PROTON, PC = 14.;')
madx.input('BRHO := BEAM->PC * 3.3356;')
madx.call('../../../PS_MU.seq')
madx.call('../../../PS_SS.seq')
madx.call('../../../PS.str')
madx.call('../3_resonance_crossing/PS_RC_SFTPRO.str')
# madx.call('PS_HS_SFTPRO.madx')
madx.call('../../../matching_macros.ptc')
# defining the strenghts of the MTE elements
for c in circuit_names:
madx.input(c + ' = ' + str(MTE_table[c].loc[t]) + ';')
# define LEQ settings
for k in ['kd', 'kf']:
madx.input(k + ' = ' + str(MTE_table[k].loc[t]) + ';')
# redefining the horizontal tune
Qx = MTE_table['Qx_input'].loc[t] - 6
madx.input('Qx := ' + str(np.round(Qx, 6)) + ';' )
madx.input('Qy := 0.29826;')
if match:
# update LEQ settings based on matching results of previous time step
for k in ['kd', 'kf']:
madx.input(k + ' = ' + str(MTE_table[k].loc[t-dt]) + ';')
print('\n________________________________________________')
print('Matching of the horizontal tune with the LEQ...')
print('________________________________________________\n')
madx.command.use(sequence = 'PS')
madx.command.match('use_macro;')
madx.command.vary('name = kf, step = 1.0e-6;')
madx.command.vary('name = kd, step = 1.0e-6;')
madx.input('use_macro, name = ptc_twiss_tune_macro;')
madx.input('constraint, expr = table(ptc_twiss_summary,Q1) = Qx;')
madx.input('constraint, expr = table(ptc_twiss_summary,Q2) = Qy;')
madx.input('jacobian, calls=50000, bisec=3, tolerance=1.0E-21;')
madx.input('ENDMATCH;')
# fill the MTE table with the matching results
MTE_table['Qx'].loc[t] = madx.table['ptc_twiss_summary'].Q1
MTE_table['Qy'].loc[t] = madx.table['ptc_twiss_summary'].Q2
MTE_table['kd'].loc[t] = madx.globals['kd']
MTE_table['kf'].loc[t] = madx.globals['kf']
if ptc_track:
print('\n________________________________________________')
print('Tracking with PTC...')
print('________________________________________________\n')
madx.command.use(sequence = 'PS')
madx.command.ptc_create_universe()
madx.command.ptc_create_layout('time=false, model=2, exact=true, method=6, nst=5;')
if t == 800:
dx = 1.5
n_p = 40
x = np.arange(dx, (n_p + 1) * dx, dx)
px = x * 0.0
else:
x = np.arange(dx, (n_p + 1) * dx, dx)
px = np.arange(dpx, (n_p + 1) * dpx, dpx)
for i,j in enumerate(x):
madx.input('PTC_START, x=' + str(j*1e-3) + ', px=' + str(px[i]*1e-3) + ', y=0.0, py=0.0, t=0.0, pt=0.0;')
madx.command.ptc_track('icase=4, turns=' + str(turns) + ', element_by_element;')
madx.command.ptc_track_end()
madx.command.ptc_end()
# identify all output tables of ptc_track
tables = [table for table in madx.table.keys() if 'obs' in table]
# arange all particle coordinates in two vectors for X and PX
X = np.empty(0)
PX = np.empty(0)
for i in np.arange(0, len(tables), 1):
track = madx.table[tables[i]]
X = np.append(X, track.x)
PX = np.append(PX, track.px)
try:
phase_space['X' + str(t)].iloc[:len(X)] = X*1e3
phase_space['PX' + str(t)].iloc[:len(X)] = PX*1e3
except ValueError:
phase_space['X' + str(t)] = X[:n_p * turns]*1e3
phase_space['PX' + str(t)] = PX[:n_p * turns]*1e3
phase_space.to_pickle('./MTE_rotation_phase_space.pkl')
MTE_table.to_pickle('./MTE_rotation_table.pkl')
###Output
_____no_output_____
###Markdown
Create Bokeh plot
###Code
from bokeh.plotting import figure, output_file, output_notebook, show, save, ColumnDataSource
from bokeh.models import Legend, LinearAxis, Range1d, CustomJS, Slider, Span
from bokeh.layouts import row, column, gridplot
t0 = str(times[0])
tend = str(times[-1])
data = phase_space
data_visible = data[['X' + t0, 'PX' + t0]]
data_visible.rename(columns={'X' + t0: 'X', 'PX' + t0: 'PX'}, inplace = True)
source_available = ColumnDataSource(data)
source_visible = ColumnDataSource(data_visible)
MTE_elements = pnd.read_pickle('../../4_horizontal_splitting/notebooks/Strengths_of_MTE_elements.pkl')
f = figure(plot_width=450, plot_height=400, x_axis_label='x [mm]', y_axis_label="x' [mrad]" , x_range = Range1d(-65, 65, bounds="auto"), y_range = Range1d(-4.5, 4.5, bounds="auto"), tools="box_zoom, pan, reset", active_drag = 'box_zoom')
f.axis.major_label_text_font = 'times'
f.axis.axis_label_text_font = 'times'
f.axis.axis_label_text_font_style = 'normal'
f.outline_line_color = 'black'
f1 = figure(plot_width=500, plot_height=400, x_axis_label='Cycle time [ms]', y_axis_label="Current [A]" , x_range = Range1d(700, 840, bounds="auto"), y_range = Range1d(-500, 600, bounds="auto"), tools="box_zoom, pan, reset", active_drag = 'box_zoom', toolbar_location="right")
f1.axis.major_label_text_font = 'times'
f1.axis.axis_label_text_font = 'times'
f1.axis.axis_label_text_font_style = 'normal'
f1.outline_line_color = 'black'
# Adding the second y axis to the plot.
f1.extra_y_ranges = {"Qx": Range1d(start = 6.245, end = 6.3)}
f1.add_layout(LinearAxis(y_range_name="Qx", axis_label='Qx', axis_label_text_font = 'times', axis_label_text_font_style = 'normal', major_label_text_font = 'times'), 'right')
f.scatter('X', 'PX', source = source_visible, marker = "circle", size = 0.1, color = 'black')
col = ['black', 'gray', 'firebrick', 'royalblue', 'darkviolet', 'cadetblue']
legend_items = []
for i, circuit in enumerate(MTE_elements.columns[:6]):
if circuit == 'Qx':
c = f1.line(MTE_elements.index, MTE_elements[circuit], color = col[i], muted_color = col[i], muted_alpha = 0.2, y_range_name="Qx", line_dash = 'dashed')
legend_items.append((circuit, [c]))
else:
c = f1.line(MTE_elements.index, MTE_elements[circuit], color = col[i], muted_color = col[i], muted_alpha = 0.2)
legend_items.append((circuit, [c]))
time_slider = Slider(title = "Cycle time [ms]", start = int(t0), end = int(tend), value = int(t0), step=5)
vline = Span(location = time_slider.value, dimension = 'height', line_color = 'black', line_dash = 'dashed', line_width = 1)
f1.renderers.extend([vline])
time_slider.callback = CustomJS(
args=dict(source_visible = source_visible,
source_available = source_available,
span = vline),
code =
"""
var t = cb_obj.value;
// Get the data from the data sources
var data_visible = source_visible.data;
var data_available = source_available.data;
// Change y-axis data according to the selected value
data_visible['X'] = data_available['X' + t.toString()];
data_visible['PX'] = data_available['PX' + t.toString()];
span.location = t;
// Update the plot
source_visible.change.emit();
""")
layout = column(f, time_slider)
# grid = gridplot([[time_slider], [f], [f1]])
grid = gridplot([[f, f1], [time_slider], []])
legend = Legend(items = legend_items, location="bottom_center")
legend.orientation = "horizontal"
f1.add_layout(legend, 'below')
f1.legend.label_text_font_size = '9pt'
f1.legend.label_text_font = 'times'
legend.click_policy="mute"
# output_file('slider.html')
# output_notebook()
# show(grid)
output_file('../PS_PR_SFTPRO_2.html', mode="inline")
save(grid)
###Output
/cvmfs/sft.cern.ch/lcg/views/LCG_96/x86_64-centos7-gcc8-opt/lib/python2.7/site-packages/pandas/core/frame.py:4025: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
return super(DataFrame, self).rename(**kwargs)
|
pynq_composable/notebooks/custom_pipeline/04_modify_pipeline.ipynb | ###Markdown
Modify Composable Pipeline---- Please use Jupyter labs http://<board_ip_address>/lab for this notebook. This notebook shows you how to modify the composable pipeline using the available methods. Aims* Explore methods to modify the composable pipeline Table of Contents* [Download Composable Overlay](download)* [Start HDMI Video](start_hdmi)* [Let us Compose](compose)* [Replace IP object](replace)* [Remove IP object](remove)* [Insert IP objects](insert)* [Stop HDMI Video](stop_hdmi)* [Conclusion](conclusion)---- Revision History* v1.0 | 30 March 2021 | First notebook revision.* v1.1 | 11 August 2021 | Update notebook to composable overlay API 1.0.0---- Download Composable Overlay Import the pynq video libraries as well as the Composable class and the drivers for the IP. Download the Composable Overlay using `pynq.Overlay` and grab a handler to the `composable` hierarchy
###Code
from pynq import Overlay
from pynq.lib.video import *
from pynq_composable import *
ol = Overlay("cv_dfx_4_pr.bit")
cpipe = ol.composable
###Output
_____no_output_____
###Markdown
Start HDMI Video Get `VideoStream` object and start video Warning:Failure to connect HDMI cables to a valid video source and screen may cause the notebook to hang
###Code
video = VideoStream(ol)
video.start()
###Output
_____no_output_____
###Markdown
Let us Compose First we need to grab handlers to the IP objects to simplify the notebook
###Code
filter2d = cpipe.filter2d_accel
rgb2gray = cpipe.rgb2gray_accel
gray2rgb = cpipe.gray2rgb_accel
rgb2hsv = cpipe.rgb2hsv_accel
colorthr = cpipe.colorthresholding_accel
lut = cpipe.lut_accel
###Output
_____no_output_____
###Markdown
We will start with a simple pipeline that converts from [RGB color space]((https://en.wikipedia.org/wiki/RGB_color_space)) to [Grayscale color space](https://en.wikipedia.org/wiki/Grayscale)
###Code
video_pipeline = [cpipe.hdmi_source_in, rgb2gray, cpipe.hdmi_source_out]
cpipe.compose(video_pipeline)
cpipe.graph
###Output
_____no_output_____
###Markdown
Replace IP object We can replace the `rgb2gray` IP object with the `rgb2hsv` easily using the `.replace` method. This method takes a tuple with the IP object to be replaced and the new IP object.
###Code
cpipe.replace?
cpipe.replace((rgb2gray, rgb2hsv))
cpipe.graph
###Output
_____no_output_____
###Markdown
Remove IP object To visualize the RGB color space we can simply remove the `rgb2hsv` IP object from the composable pipeline using the `.remove` method. This method gets a list of IP object to be removed as argument
###Code
cpipe.remove?
cpipe.remove([rgb2hsv])
cpipe.graph
###Output
_____no_output_____
###Markdown
Insert IP objects The `.insert` method allows you to insert an IP object or list of IP object into a given index within the current pipeline
###Code
cpipe.insert?
cpipe.insert(([filter2d, lut], 1))
cpipe.graph
###Output
_____no_output_____
###Markdown
Change default kernel type on the filter2d
###Code
filter2d.kernel_type = xvF2d.sharpen
###Output
_____no_output_____
###Markdown
Insert the gray2rgb IP after the LUT IP
###Code
cpipe.insert(([gray2rgb], 3))
cpipe.graph
###Output
_____no_output_____
###Markdown
Stop HDMI Video Finally stop the HDMI video pipeline Warning:Failure to stop the HDMI Video may hang the board when trying to download another bitstream onto the FPGA
###Code
video.stop()
ol.free()
###Output
_____no_output_____ |
Instalacion.ipynb | ###Markdown
Installation In the following document we give a brief description of the programs that will be used during the course. To avoid affecting your system, it is recommended to install a virtual environment manager such as [Anaconda](https://www.anaconda.com/). The intention of using this software is to have access to the tools and packages of the [Python](https://www.python.org/) and [R](https://www.r-project.org/) programming languages. Anaconda This section describes how to install the [Anaconda](https://www.anaconda.com/) manager on the main operating systems; to install the packages you need the appropriate permissions (being an *"administrator"* user). All the links and instructions are for installing *Miniconda* (a lightweight version of *Anaconda*; the instructions for installing *Anaconda* can be considered "the same" as those described here). Linux * Download the file with the installation script from this [link](https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh) * Open a terminal and give execution permissions to the downloaded file. Within the terminal, in the directory containing the downloaded file, run the following command `bash Miniconda3-latest-Linux-x86_64.sh` * Follow the instructions that appear on the screen. If you are not sure about the settings, it is recommended to accept the defaults. For the changes to take effect you need to close the terminal and open a new one. * To verify that the installation went correctly, open a terminal and run the command `conda list`; a list with the installed packages will be displayed if the installation was correct. macOS The following instructions are taken from the *Anaconda* documentation at this [link](https://conda.io/projects/conda/en/latest/user-guide/install/macos.html). * Download the *Anaconda* installer for macOS from this [link](https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.pkg). * Double-click the downloaded file (.pkg) * Follow the installer's instructions. If you are not sure about the settings, accept the defaults; they can be modified later. * For the changes to take effect you have to reopen the terminal window. * To verify the installation, open a terminal window or an Anaconda command prompt and run the command `conda list`. A list with the installed packages will appear if the installation was correct. Windows The following instructions are taken from the *Anaconda* documentation. * Download the *Anaconda* installer for Windows from this [link](https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe) * Double-click the installer to run it (.exe). * Follow the instructions presented on the screen. * To test your installation, from the **START** menu open the Anaconda command prompt and run `conda list`. A list with the installed packages will appear if the installation was correct. Colab Google [Colaboratory (Colab)](https://www.youtube.com/watch?v=inN8seMm7UI) lets us run code on virtual machines created on Google servers. The interface for running the code is a *notebook*. This interface allows us to install *Anaconda* so we can use the libraries that can be installed with conda. 
To install it, run the following code
###Code
!wget -c https://repo.continuum.io/archive/Anaconda3-5.1.0-Linux-x86_64.sh
!chmod +x Anaconda3-5.1.0-Linux-x86_64.sh
!bash ./Anaconda3-5.1.0-Linux-x86_64.sh -b -f -p usr/local
import sys
sys.path.append('/usr/local/lib/python3.6/site-packages/')
###Output
_____no_output_____
###Markdown
Using Conda Once *miniconda* is installed we describe how to create a virtual environment; a virtual environment lets us install packages inside it without affecting the system in general. Within a terminal we run the command `conda create --name analisis_datos`; the created environment will be named *analisis_datos*, and to enter it within a terminal run `conda activate analisis_datos`. Once inside the virtual environment we can install the packages needed for this course. Jupyter-Lab [Jupyter-lab](https://jupyter.org/) is a notebook interface based on *ipython* that lets us run commands as if it were the *Python* terminal and presents the output in a more user-friendly way. This notebook uses [*Markdown*](https://en.wikipedia.org/wiki/Markdown) to generate text that is easy to read, and the ipython features to generate visualizations; neither of these two capabilities is available directly from the Python terminal. For these reasons we will use this tool during the course. To install it within the desired virtual environment run `conda install jupyterlab`. Once installed, to run it within the virtual environment execute the command `jupyter-lab`; this will start a web service from which we can access the interface to create notebooks. * note: The directory where the `jupyter-lab` command is run is taken as the root where the *jupyter-lab* interface will save the notebooks and scripts generated during the session. Pandas To analyse data and extract relevant information, it is necessary to be able to extract, transform and load (**ETL**) the data for its manipulation. The idea of this course is to learn the tools that *Python* offers for that purpose. The most common way to store information is through tables; we can see this in the formats where data is commonly stored, for example spreadsheets (*Excel*) or databases (*MySQL*, *Oracle*, *DB2*, etc). The *[Pandas](https://pandas.pydata.org/)* library allows us to handle data in tabular form; the objects and functions defined within the library will be the main tools used throughout the course. To install this library we will use anaconda; within our virtual environment run the following command: `conda install pandas`. To verify the installation and the installed version, within the Python IDE or a jupyter notebook run the commands `import pandas as pd` and `pd.__version__`; if no error is shown the installation was performed correctly.
###Code
import pandas as pd
print(pd.__version__)
###Output
1.1.0
|
API/java/example/TEGprocess.ipynb | ###Markdown
Test NeqSim process REST API: This notebook will test the NeqSim REST API for a simple process. Command to build: `mvn compile quarkus:dev`. How to use the NeqSim API from a Python script: The following code demonstrates how to use the TEG process API from a Python script.
###Code
import requests
import json
import pandas as pd
data = {
"feedGasFlowRate": 4.65,
"feedGasTemperature": 25.0,
"feedGasPressure": 70.0,
"absorberFeedGasTemperature": 35.0,
"absorberFeedGasPressure": 139.0,
"leanTEGFlowRate": 5500.0,
"leanTEGTemperature": 48.5,
"flashDrumPressure": 4.8,
"reboilerPressure": 1.2,
"reboilerTemperature": 197.5,
"condenserTemperature": 80.0,
"condenserPressure": 1.2,
'regenerationGasCoolerTemperature': 47.0,
"strippingGasRate": 180.0,
"strippingGasFeedTemperature": 78.3,
"bufferTankTemperatureTEG": 90.5,
'hotTEGpumpPressure': 3.0,
'finefilterdeltaP': 0.0,
"numberOfStagesTEGabsorber": 4,
"stageEfficiencyTEGabsorber": 0.7,
"numberOfStagesStripper": 2,
"stageEfficiencyStripper": 1,
"UAvalueLeanRichTEGHeatExchanger": 8316.0,
"UAvalueLeanRichTEGHeatExchanger2": 2224.0
}
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
params={'key':''}
response = requests.post('http://localhost:8080/ML/dehydTEGsim', data=json.dumps(data), params=params, headers=headers)
response.json().items()
###Output
_____no_output_____ |
Linear Regression - Crew Prediction.ipynb | ###Markdown
Import libraries
###Code
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("Linear Regression Model").getOrCreate()
from pyspark.ml.regression import LinearRegression
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import IndexToString, StringIndexer
###Output
_____no_output_____
###Markdown
Load and verify data
###Code
data = spark.read.csv('resources/cruise_ship_info.csv',header = True, inferSchema = True)
data.printSchema()
data.head(3)
for item in data.head(1)[0]:
print(item)
data.columns
data.groupBy('Cruise_line').count().show()
data.groupBy('Ship_name').count().show()
###Output
+------------+-----+
| Ship_name|count|
+------------+-----+
| Virgo| 1|
| Fortuna| 1|
| Shadow| 1|
| Empress| 1|
| Wind| 2|
| Paradise| 1|
| Surf| 1|
| Wonder| 1|
| Magic| 1|
| Symphony| 1|
| Sinfonia| 1|
| Inspiration| 1|
| Millenium| 1|
| Solstice| 1|
|PrideofAloha| 1|
| Majesty| 2|
| Ventura| 1|
| Romantica| 1|
| Spirit| 4|
| Oasis| 1|
+------------+-----+
only showing top 20 rows
###Markdown
Data Preprocessing
###Code
indexer = StringIndexer(inputCols=["Ship_name","Cruise_line"], outputCols=["Ship_name_Index","Cruise_line_Index"])
indexed = indexer.fit(data).transform(data)
indexed.show()
indexed.printSchema()
indexed.columns
assembler = VectorAssembler(inputCols =['Age','Tonnage','passengers','length', 'cabins', 'passenger_density', 'Ship_name_Index', 'Cruise_line_Index'],
outputCol='features')
output = assembler.transform(indexed)
output.printSchema()
print(output.features)
output.head(1)
final_data = output.select('features','crew')
final_data.show()
###Output
+--------------------+----+
| features|crew|
+--------------------+----+
|[6.0,30.276999999...|3.55|
|[6.0,30.276999999...|3.55|
|[26.0,47.262,14.8...| 6.7|
|[11.0,110.0,29.74...|19.1|
|[17.0,101.353,26....|10.0|
|[22.0,70.367,20.5...| 9.2|
|[15.0,70.367,20.5...| 9.2|
|[23.0,70.367,20.5...| 9.2|
|[19.0,70.367,20.5...| 9.2|
|[6.0,110.23899999...|11.5|
|[10.0,110.0,29.74...|11.6|
|[28.0,46.052,14.5...| 6.6|
|[18.0,70.367,20.5...| 9.2|
|[17.0,70.367,20.5...| 9.2|
|[11.0,86.0,21.24,...| 9.3|
|[8.0,110.0,29.74,...|11.6|
|[9.0,88.5,21.24,9...|10.3|
|[15.0,70.367,20.5...| 9.2|
|[12.0,88.5,21.24,...| 9.3|
|[20.0,70.367,20.5...| 9.2|
+--------------------+----+
only showing top 20 rows
###Markdown
Train Test split
###Code
train_data,test_data = final_data.randomSplit([0.7,0.3])
train_data.describe().show()
test_data.describe().show()
###Output
+-------+------------------+
|summary| crew|
+-------+------------------+
| count| 49|
| mean| 7.660204081632652|
| stddev|3.4570667297796907|
| min| 0.88|
| max| 21.0|
+-------+------------------+
###Markdown
Build Model
###Code
regressor = LinearRegression(labelCol='crew')
model = regressor.fit(train_data)
###Output
_____no_output_____
###Markdown
Evaluate Model
###Code
pred_data = model.evaluate(test_data)
pred_data.residuals.show()
pred_data.rootMeanSquaredError
pred_data.r2
pred_data.meanSquaredError
pred_data.meanAbsoluteError
from pyspark.sql import functions as f
data.select(f.corr('crew','passengers')).show()
data.select(f.corr('crew','cabins')).show()
unlabeled_data = test_data.select('features')
test_predictions = model.transform(unlabeled_data)
test_predictions.show()
###Output
+--------------------+------------------+
| features| prediction|
+--------------------+------------------+
|[4.0,220.0,54.0,1...|20.595915926382975|
|[5.0,86.0,21.04,9...| 9.260890019783105|
|[5.0,115.0,35.74,...|11.675118708493574|
|[5.0,122.0,28.5,1...| 6.863881770034197|
|[9.0,59.058,17.0,...| 7.397522627394963|
|[9.0,88.5,21.24,9...| 9.596830046526891|
|[9.0,105.0,27.2,8...|11.078047934903061|
|[9.0,113.0,26.74,...|11.298007777661951|
|[10.0,91.62700000...| 9.284874902917485|
|[11.0,58.6,15.66,...| 7.296078241589184|
|[11.0,85.0,18.48,...| 8.812971761600613|
|[11.0,90.09,25.01...| 9.086191741339752|
|[12.0,77.104,20.0...| 8.73593398089814|
|[12.0,138.0,31.14...|12.980054021311688|
|[13.0,61.0,13.8,7...| 6.576693122198025|
|[13.0,63.0,14.4,7...| 6.709660758304139|
|[13.0,91.0,20.32,...| 9.20563191430896|
|[14.0,76.8,19.6,8...| 8.714766215822314|
|[14.0,77.104,20.0...| 8.693427098474773|
|[14.0,138.0,31.14...| 12.96944755473286|
+--------------------+------------------+
only showing top 20 rows
|
L08-mlp/code/mlp-pytorch_sigmoid-crossentr.ipynb | ###Markdown
STAT 453: Deep Learning (Spring 2020) Instructor: Sebastian Raschka ([email protected]) Course website: http://pages.stat.wisc.edu/~sraschka/teaching/stat453-ss2020/ GitHub repository: https://github.com/rasbt/stat453-deep-learning-ss20---
###Code
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
###Output
Sebastian Raschka
CPython 3.7.1
IPython 7.12.0
torch 1.4.0
###Markdown
MLP With Different Loss Functions Imports
###Code
import matplotlib.pyplot as plt
import pandas as pd
import torch
%matplotlib inline
import time
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch
###Output
_____no_output_____
###Markdown
Settings and Dataset
###Code
##########################
### SETTINGS
##########################
RANDOM_SEED = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
##########################
### MNIST DATASET
##########################
# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.MNIST(root='data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = datasets.MNIST(root='data',
train=False,
transform=transforms.ToTensor())
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False)
# Checking the dataset
for images, labels in train_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
break
def to_onehot(y, num_classes):
y_onehot = torch.FloatTensor(y.size(0), num_classes)
y_onehot.zero_()
tmp = y.view(-1, 1).long().to(torch.device('cpu'))
y_onehot.scatter_(1, tmp, 1).float()
return y_onehot
###Output
_____no_output_____
###Markdown
Model
###Code
class MlpSigmoidMSE(torch.nn.Module):
def __init__(self, num_features, num_hidden, num_classes):
super(MlpSigmoidMSE, self).__init__()
self.num_classes = num_classes
### 1st hidden layer
self.linear_1 = torch.nn.Linear(num_features, num_hidden)
self.linear_1.weight.detach().normal_(0.0, 0.1)
self.linear_1.bias.detach().zero_()
### Output layer
self.linear_out = torch.nn.Linear(num_hidden, num_classes)
self.linear_out.weight.detach().normal_(0.0, 0.1)
self.linear_out.bias.detach().zero_()
def forward(self, x):
out = self.linear_1(x)
out = torch.sigmoid(out)
logits = self.linear_out(out)
probas = torch.softmax(logits, dim=1)
return logits, probas
#################################
### Model Initialization
#################################
torch.manual_seed(RANDOM_SEED)
model = MlpSigmoidMSE(num_features=28*28,
num_hidden=100,
num_classes=10)
model = model.to(DEVICE)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#################################
### Training
#################################
def compute_loss(net, data_loader):
curr_loss = 0.
with torch.no_grad():
for cnt, (features, targets) in enumerate(data_loader):
features = features.view(-1, 28*28).to(DEVICE)
targets = targets.to(DEVICE)
logits, probas = net.forward(features)
loss = F.nll_loss(torch.log(probas), targets)
# or better (more numerically stable):
# loss = F.cross_entropy(logits, targets)
# see
# ../../other/pytorch-lossfunc-cheatsheet.md
curr_loss += loss
return float(curr_loss)/cnt
start_time = time.time()
minibatch_cost = []
epoch_cost = []
for epoch in range(NUM_EPOCHS):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.view(-1, 28*28).to(DEVICE)
targets = targets.to(DEVICE)
### FORWARD AND BACK PROP
logits, probas = model(features)
#y_onehot = to_onehot(targets, model.num_classes).to(DEVICE)
cost = F.nll_loss(torch.log(probas), targets)
optimizer.zero_grad()
cost.backward()
minibatch_cost.append(cost)
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f'
%(epoch+1, NUM_EPOCHS, batch_idx,
len(train_loader), cost))
cost = compute_loss(model, train_loader)
epoch_cost.append(cost)
print('Epoch: %03d/%03d Train Cost: %.4f' % (
epoch+1, NUM_EPOCHS, cost))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
plt.plot(range(len(minibatch_cost)), minibatch_cost)
plt.ylabel('Cross Entropy')
plt.xlabel('Minibatch')
plt.show()
plt.plot(range(len(epoch_cost)), epoch_cost)
plt.ylabel('Cross Entropy')
plt.xlabel('Epoch')
plt.show()
def compute_accuracy(net, data_loader):
correct_pred, num_examples = 0, 0
with torch.no_grad():
for features, targets in data_loader:
features = features.view(-1, 28*28).to(DEVICE)
targets = targets.to(DEVICE)
a1, a2 = net.forward(features)
predicted_labels = torch.argmax(a2, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100
print('Training Accuracy: %.2f' % compute_accuracy(model, train_loader))
print('Test Accuracy: %.2f' % compute_accuracy(model, test_loader))
###Output
Training Accuracy: 99.00
Test Accuracy: 97.67
|
week2/week2-prework2.ipynb | ###Markdown
[Problem 1] Generating random numbers: create 500 random samples from a two-dimensional normal distribution with mean (-3, 0) and covariance matrix \[\[1.0, 0.8\], \[0.8, 1.0\]\].
###Code
import numpy as np
import matplotlib.pyplot as plt

mean = np.array([-3, 0])
cov = np.array([[1.0, 0.8],
[0.8, 1.0]])
num_array_q1 = np.random.multivariate_normal(mean, cov, size=500)
###Output
_____no_output_____
###Markdown
[Problem 2] Visualization with a scatter plot: visualize the data points created in Problem 1 as a scatter plot. A scatter plot can be drawn with matplotlib's plt.scatter().
###Code
plt.scatter(num_array_q1[:,0], num_array_q1[:,1])
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('scatter')
plt.show();
###Output
_____no_output_____
###Markdown
[Problem 3] Visualization with histograms: visualize the data created in Problem 1 as histograms. Histograms can be drawn with plt.hist().
###Code
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(5, 7))
ax1.hist(num_array_q1[:, 0], bins=50)
ax2.hist(num_array_q1[:, 1], bins=50)
ax1.set_title('histgram of x1')
ax2.set_title('histgram of x2')
ax1.set_xlabel('x1')
ax2.set_xlabel('x2')
ax1.set_ylabel('frequency')
ax2.set_ylabel('frequency')
ax1.set_xlim([-6, 3])
ax2.set_xlim([-6, 3])
plt.tight_layout()
plt.show();
###Output
_____no_output_____
###Markdown
[Problem 4] Adding data: create 500 new random samples from a two-dimensional normal distribution with mean (0, -3) and covariance matrix \[\[1.0, 0.8\], \[0.8, 1.0\]\]. Then visualize the data from Problems 1 and 4 together in a single scatter plot, labeling the Problem 1 data as 0 and the Problem 4 data as 1 in the legend.
###Code
mean = np.array([0, -3])
cov = np.array([[1.0, 0.8],
[0.8, 1.0]])
num_array_q4 = np.random.multivariate_normal(mean, cov, size=500)
plt.scatter(num_array_q1[:,0], num_array_q1[:,1], label='0')
plt.scatter(num_array_q4[:,0], num_array_q4[:,1], label='1')
plt.legend()
plt.title('scatter')
plt.xlabel('x1')
plt.ylabel('x2')
plt.show();
###Output
_____no_output_____
###Markdown
[Problem 5] Combining the data: data is easier to handle later when it is kept together. Concatenate the ndarrays created in Problems 1 and 4 into a single (1000, 2) ndarray. Concatenation can be done with np.concatenate() or np.vstack().
###Code
np.vstack?
#np.concatenate
stacked_array = np.concatenate([num_array_q1, num_array_q4], axis=0)
stacked_array.shape
#np.vstack
stacked_array = np.vstack([num_array_q1, num_array_q4])
stacked_array.shape
###Output
_____no_output_____
###Markdown
[Problem 6] Labeling: for each of the 1000 combined samples, add a label indicating whether it was created in Problem 1 or Problem 4, assigning 0 to the Problem 1 data and 1 to the Problem 4 data. Add the new column of 0/1 labels to obtain a (1000, 3) ndarray. Datasets used for machine learning often take this form.
###Code
num_array_q1_labeled = np.hstack([num_array_q1, np.zeros((num_array_q1.shape[0],1))])
num_array_q4_labeled = np.hstack([num_array_q4, np.ones((num_array_q4.shape[0],1))])
stacked_array = np.vstack([num_array_q1_labeled, num_array_q4_labeled])
stacked_array.shape
###Output
_____no_output_____ |
doc/rdf/odml_RDF_tools.ipynb | ###Markdown
What is the Semantic Web and RDF? **RDF (Resource Description Framework)** is one of the three foundational [Semantic Web](https://en.wikipedia.org/wiki/Semantic_Web) technologies, the other two being SPARQL and OWL. In particular, RDF is the data model of the Semantic Web. That means that all data in Semantic Web technologies is represented as RDF. If you store Semantic Web data, it's in RDF. If you query Semantic Web data (typically using the SPARQL query language), it's RDF data. If you send Semantic Web data to your friend, it's RDF. The RDF data model is based upon the idea of making statements about resources (in particular web resources) in the form of *subject–predicate–object* expressions, known as [*triples*](https://en.wikipedia.org/wiki/Semantic_triple). The *subject* denotes the resource, and the *predicate* denotes traits or aspects of the resource and expresses a relationship between the *subject* and the *object*. For example, one way to represent the notion "The sky has the color blue" in RDF is as the triple: a **subject** denoting *"the sky"*, a **predicate** denoting *"has the color"*, and an **object** denoting *"blue"*. RDF therefore uses a subject instead of an object (or entity), in contrast to the typical entity–attribute–value model of object-oriented design: entity (sky), attribute (color), and value (blue). (Resource Description Framework, Wikipedia, 2017) Find out more: - https://en.wikipedia.org/wiki/Resource_Description_Framework - https://www.cambridgesemantics.com/blog/semantic-university/learn-rdf/ odML to RDF converter: Here we will explore odML to RDF conversion using the `odml/tools/rdf_converter.py` module. If you are new to python-odML, please read the [tutorial](https://python-odml.readthedocs.io/en/latest/tutorial.html) first to familiarize yourself with odML. Let's create the example odML document.
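###Markdown
Before building the odML example, here is a small illustrative aside (not from the original notebook): the "sky has the color blue" triple described above, written with rdflib, the library used later in this notebook for querying. The `http://example.org/` namespace is made up for the illustration.
###Code
from rdflib import Graph, Literal, Namespace

EX = Namespace("http://example.org/")  # hypothetical example namespace

g = Graph()
# subject: the sky, predicate: has the color, object: "blue"
g.add((EX.sky, EX.hasColor, Literal("blue")))

print(g.serialize(format="turtle"))
###Output
_____no_output_____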
###Code
import datetime
import odml
doc = odml.Document(author="D. N. Adams", date=datetime.date(1979, 10, 12))
# CREATE AND APPEND THE MAIN SECTIONs
doc.append(odml.Section(name="Arthur Philip Dent",
type="crew/person",
definition="Information on Arthur Dent"))
# SET NEW PARENT NODE
parent = doc['Arthur Philip Dent']
# APPEND PROPERTIES WITH VALUES
parent.append(odml.Property(name="Species",
value="Human",
dtype=odml.DType.string,
definition="Species to which subject belongs to"))
###Output
_____no_output_____
###Markdown
The RDFWriter class The RDFWriter class is used to convert odML documents to one of the supported RDF formats:'xml', 'pretty-xml', 'trix', 'n3', 'turtle', 'ttl', 'ntriples', 'nt', 'nt11', 'trig'.'turtle' is the format that is best suited for storage and human readability which is why we will use it in our tutorial. For cross-tool usage, saving RDF in its 'XML' variant is probably the safest choice.The output can be returned as a string.
###Code
from odml.tools.rdf_converter import RDFWriter
print(RDFWriter(doc).get_rdf_str('turtle'))
###Output
@prefix odml: <https://g-node.org/odml-rdf#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
odml:Hub odml:hasDocument odml:40797785-2e1a-435e-b905-aeeac2ba2b3e .
odml:220489b8-2043-452b-863b-8ba6a4b5e536 a odml:Section ;
odml:hasDefinition "Information on Arthur Dent" ;
odml:hasName "Arthur Philip Dent" ;
odml:hasProperty odml:40ede84a-650b-4aab-af81-b4136c833e58 ;
odml:hasType "crew/person" .
odml:40797785-2e1a-435e-b905-aeeac2ba2b3e a odml:Document ;
odml:hasAuthor "D. N. Adams" ;
odml:hasDate "1979-10-12"^^xsd:date ;
odml:hasFileName "None" ;
odml:hasSection odml:220489b8-2043-452b-863b-8ba6a4b5e536 .
odml:40ede84a-650b-4aab-af81-b4136c833e58 a odml:Property ;
odml:hasDefinition "Species to which subject belongs to" ;
odml:hasDtype "string" ;
odml:hasName "Species" ;
odml:hasValue odml:4425ade2-5d03-4484-a272-764c1e933933 .
odml:4425ade2-5d03-4484-a272-764c1e933933 a rdf:Seq ;
rdf:_1 "Human" .
###Markdown
Or the output can be written to a specified file.
###Code
import tempfile
# Create temporary file
f = tempfile.NamedTemporaryFile(mode='w', suffix=".ttl")
path = f.name
RDFWriter(doc).write_file(path, "turtle")
with open(path) as ff:
data = ff.read()
print(data)
###Output
@prefix odml: <https://g-node.org/odml-rdf#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
odml:Hub odml:hasDocument odml:08f8c7fa-4ea0-4512-8927-ff73c117644d .
odml:08f8c7fa-4ea0-4512-8927-ff73c117644d a odml:Document ;
odml:hasAuthor "D. N. Adams" ;
odml:hasDate "1979-10-12"^^xsd:date ;
odml:hasFileName "None" ;
odml:hasSection odml:3c86174b-b183-47aa-9e0b-58dfc066a76d .
odml:15eb4c32-73fe-4da1-8cba-3fac965d4d17 a odml:Property ;
odml:hasDefinition "Species to which subject belongs to" ;
odml:hasDtype "string" ;
odml:hasName "Species" ;
odml:hasValue odml:1ad9c2d6-6055-465b-b281-51943569338b .
odml:1ad9c2d6-6055-465b-b281-51943569338b a rdf:Seq ;
rdf:_1 "Human" .
odml:3c86174b-b183-47aa-9e0b-58dfc066a76d a odml:Section ;
odml:hasDefinition "Information on Arthur Dent" ;
odml:hasName "Arthur Philip Dent" ;
odml:hasProperty odml:15eb4c32-73fe-4da1-8cba-3fac965d4d17 ;
odml:hasType "crew/person" .
###Markdown
Please note at this point that RDF does not respect order. Every time an unchanged file is written, the content will be identical, but the order of the statements will differ. Querying the data with rdflib and SPARQL: The following example depends on specific example files. If you do not already have these files, you can find and download them from https://github.com/G-Node/python-odml/tree/master/doc/example_rdfs/example_data. The example will load RDF triples from multiple files into a single, connected graph.
###Code
from glob import glob
from rdflib import Graph
graph = Graph()
for file_name in glob("odml_RDF_example_*.ttl"):
graph.parse(file_name, format="turtle")
print('Total number of triples: ', len(graph))
###Output
Total number of triples: 3041
###Markdown
The example query uses an rdflib tool to find each Section with type `Recording` also featuring a Property with the name `Recording duration`. The result prints the Values of the returned Properties.
###Code
from rdflib import Graph, Namespace, RDF
from rdflib.plugins.sparql import prepareQuery
from odml.tools.rdf_converter import ODML_NS
rdf_namespace = {"odml": ODML_NS, "rdf": RDF}
q = prepareQuery("""SELECT ?d ?s ?p ?value WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasType "Recording" .
?s odml:hasProperty ?p .
?p rdf:type odml:Property .
?p odml:hasName "Recording duration" .
?p odml:hasValue ?v .
?v rdf:type rdf:Bag .
?v rdf:li ?value .}""", initNs=rdf_namespace)
for row in graph.query(q):
print("Doc: {0}, Sec: {1}, \n"
"Prop: {2}, Val:{3}".format(row.d, row.s, row.p, row.value))
###Output
Doc: https://g-node.org/odml-rdf#cc66e78a-3742-490a-9fdb-1c66761d7652, Sec: https://g-node.org/odml-rdf#5365f7e5-603c-4154-a5ea-33bb1a07a956,
Prop: https://g-node.org/odml-rdf#41316903-80f1-45a3-9b06-400a02903531, Val:11.25
Doc: https://g-node.org/odml-rdf#cd24b60f-1d5e-4040-9881-5e5a597baef7, Sec: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd,
Prop: https://g-node.org/odml-rdf#9aeede78-678c-4db8-acb5-fbd6d408b762, Val:13.9
Doc: https://g-node.org/odml-rdf#537c6cc8-7dfe-4d53-a111-24b3ce0f3c1a, Sec: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee,
Prop: https://g-node.org/odml-rdf#1636af03-8e97-4ef2-9d7d-6c7db23dcd02, Val:11.88
Doc: https://g-node.org/odml-rdf#24066355-1ee8-4eb5-a715-96bbb6231cd5, Sec: https://g-node.org/odml-rdf#bbd44815-5016-49e0-9f4b-5b83778d00de,
Prop: https://g-node.org/odml-rdf#0ed215a2-5d20-48eb-b744-bf3b731459fc, Val:0.33
###Markdown
FuzzyFinder class: **FuzzyFinder** is a tool for querying an RDF graph through so-called *fuzzy* queries. The finder executes multiple queries to better match the input parameters, and returns sets of triples prioritized from more to fewer matched parameters. The function `find()` accepts several optional parameters: - `graph`: an rdflib graph object - `q_str`: the fuzzy query string, explored below - `q_params`: a dict with the parameters of a query - `mode`: either 'fuzzy' (the default) or 'match'. Each mode works with a specific type of fuzzy query (`q_str`). Let's check the `match` mode in an example.
###Code
from odml.rdf.fuzzy_finder import FuzzyFinder
query_string = 'prop(name:Date) section(name:Recording-2013-02-08-ak, type:Recording)'
f = FuzzyFinder(graph)
print(f.find(mode='match', q_str=query_string))
###Output
SELECT * WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasType "Recording" .
?s odml:hasProperty ?p .
?p rdf:type odml:Property .
?p odml:hasName "Date" .
}
Document: https://g-node.org/odml-rdf#cc66e78a-3742-490a-9fdb-1c66761d7652
Section: https://g-node.org/odml-rdf#5365f7e5-603c-4154-a5ea-33bb1a07a956
Property: https://g-node.org/odml-rdf#f1699eb6-4cab-4dd0-9327-120eab2089ae
Document: https://g-node.org/odml-rdf#24066355-1ee8-4eb5-a715-96bbb6231cd5
Section: https://g-node.org/odml-rdf#bbd44815-5016-49e0-9f4b-5b83778d00de
Property: https://g-node.org/odml-rdf#fadffec7-6b23-454e-bfd1-9d5884802abb
Document: https://g-node.org/odml-rdf#537c6cc8-7dfe-4d53-a111-24b3ce0f3c1a
Section: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee
Property: https://g-node.org/odml-rdf#138f08f7-23c7-4722-8577-85a6fa633ae1
Document: https://g-node.org/odml-rdf#cd24b60f-1d5e-4040-9881-5e5a597baef7
Section: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd
Property: https://g-node.org/odml-rdf#1d6db4ce-87f3-4e9c-b221-e76ba05b2759
SELECT * WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasName "Recording-2013-02-08-ak" .
?s odml:hasType "Recording" .
}
Document: https://g-node.org/odml-rdf#537c6cc8-7dfe-4d53-a111-24b3ce0f3c1a
Section: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee
SELECT * WHERE {
?s odml:hasProperty ?p .
?p rdf:type odml:Property .
?p odml:hasName "Date" .
}
Section: https://g-node.org/odml-rdf#bbd44815-5016-49e0-9f4b-5b83778d00de
Property: https://g-node.org/odml-rdf#fadffec7-6b23-454e-bfd1-9d5884802abb
Section: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd
Property: https://g-node.org/odml-rdf#1d6db4ce-87f3-4e9c-b221-e76ba05b2759
Section: https://g-node.org/odml-rdf#5365f7e5-603c-4154-a5ea-33bb1a07a956
Property: https://g-node.org/odml-rdf#f1699eb6-4cab-4dd0-9327-120eab2089ae
Section: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee
Property: https://g-node.org/odml-rdf#138f08f7-23c7-4722-8577-85a6fa633ae1
SELECT * WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasName "Recording-2013-02-08-ak" .
}
Document: https://g-node.org/odml-rdf#537c6cc8-7dfe-4d53-a111-24b3ce0f3c1a
Section: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee
SELECT * WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasType "Recording" .
}
Document: https://g-node.org/odml-rdf#cc66e78a-3742-490a-9fdb-1c66761d7652
Section: https://g-node.org/odml-rdf#5365f7e5-603c-4154-a5ea-33bb1a07a956
Document: https://g-node.org/odml-rdf#24066355-1ee8-4eb5-a715-96bbb6231cd5
Section: https://g-node.org/odml-rdf#bbd44815-5016-49e0-9f4b-5b83778d00de
Document: https://g-node.org/odml-rdf#537c6cc8-7dfe-4d53-a111-24b3ce0f3c1a
Section: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee
Document: https://g-node.org/odml-rdf#cd24b60f-1d5e-4040-9881-5e5a597baef7
Section: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd
###Markdown
As you can see from the output, the finder builds multiple SPARQL queries from `match` queries, executes them and returns some matched results. The first result always represents the most specific query (the biggest combination of input parameters that returned at least one triple).The query syntax is pretty straightforward. Just write the name of the entity `property`, `section` or `document` (also possible to use shortened names `prop`, `sec` and `doc`) and add attributes with their values inside the parentheses separated by a colon.As a code example: `prop(name:Date) section(name:Recording-2013-02-08-ak, type:Recording)`.Here we search for Sections and Properties where `property` has attribute the `name` and its Value is `Date`.For building `match` queries you should know exactly to which odML attribute the value(subject) is related. If you write `prop(name:Date) section(name:Recording, type:Recording-2013-02-08-ak)` the `find()` method would not return any triples with Section parameters, because it is unlikely that there is a Section with type `Recording-2013-02-08-ak`.Non-odML entity attributes will also be ignored (e.g. only `id, author, date, version, repository, sections` can exist in the `Document` object).In the example `section(not-odml-name:Recording-2013-02-08-ak, record:Recording)` the `find` method returns nothing.
###Code
from odml.rdf.fuzzy_finder import FuzzyFinder
query_string = 'section(not-odml-name:Recording-2013-02-08-ak, record:Recording)'
f = FuzzyFinder(graph)
print(f.find(mode='match', q_str=query_string))
###Output
###Markdown
This is often inconvenient if you do not know exactly how the diverse data in the graph is related. For situations like this *'fuzzy'* mode comes into play. It is also set by default.The output logic is similar to the previous mode, but there you can provide more broad information, the finder will match the parameters and create meaningful queries based on the input.The query string consists of two parts: *FIND* and *HAVING*.In the *FIND* part a user specifies the set of odML objects and its attributes. e.g. `FIND prop(name) section(name, type)`In the *HAVING* part a user specifies a set of search values which could relate to the attributes in the *FIND* part.e.g `HAVING Recording, Recording-2012-04-04-ab, Date`Finally, the complete query will look like this:`FIND sec(name, type) prop(name) HAVING Recording, Recording-2012-04-04-ab, Date`As you can see in the example you do not need to know to which attribute search values in the *HAVING* part relate to, the finder can do it for you.
###Code
from odml.rdf.fuzzy_finder import FuzzyFinder
query_string = 'FIND sec(name, type) prop(name) HAVING Recording, Recording-2012-04-04-ab, Date, Some_value'
f = FuzzyFinder(graph)
print(f.find(mode='fuzzy', q_str=query_string))
###Output
SELECT * WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasType "Recording" .
?s odml:hasProperty ?p .
?p rdf:type odml:Property .
?p odml:hasName "Date" .
}
Document: https://g-node.org/odml-rdf#cc66e78a-3742-490a-9fdb-1c66761d7652
Section: https://g-node.org/odml-rdf#5365f7e5-603c-4154-a5ea-33bb1a07a956
Property: https://g-node.org/odml-rdf#f1699eb6-4cab-4dd0-9327-120eab2089ae
Document: https://g-node.org/odml-rdf#24066355-1ee8-4eb5-a715-96bbb6231cd5
Section: https://g-node.org/odml-rdf#bbd44815-5016-49e0-9f4b-5b83778d00de
Property: https://g-node.org/odml-rdf#fadffec7-6b23-454e-bfd1-9d5884802abb
Document: https://g-node.org/odml-rdf#537c6cc8-7dfe-4d53-a111-24b3ce0f3c1a
Section: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee
Property: https://g-node.org/odml-rdf#138f08f7-23c7-4722-8577-85a6fa633ae1
Document: https://g-node.org/odml-rdf#cd24b60f-1d5e-4040-9881-5e5a597baef7
Section: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd
Property: https://g-node.org/odml-rdf#1d6db4ce-87f3-4e9c-b221-e76ba05b2759
SELECT * WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasName "Recording" .
?s odml:hasType "Recording" .
}
Document: https://g-node.org/odml-rdf#cd24b60f-1d5e-4040-9881-5e5a597baef7
Section: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd
SELECT * WHERE {
?s odml:hasProperty ?p .
?p rdf:type odml:Property .
?p odml:hasName "Date" .
}
Section: https://g-node.org/odml-rdf#bbd44815-5016-49e0-9f4b-5b83778d00de
Property: https://g-node.org/odml-rdf#fadffec7-6b23-454e-bfd1-9d5884802abb
Section: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd
Property: https://g-node.org/odml-rdf#1d6db4ce-87f3-4e9c-b221-e76ba05b2759
Section: https://g-node.org/odml-rdf#5365f7e5-603c-4154-a5ea-33bb1a07a956
Property: https://g-node.org/odml-rdf#f1699eb6-4cab-4dd0-9327-120eab2089ae
Section: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee
Property: https://g-node.org/odml-rdf#138f08f7-23c7-4722-8577-85a6fa633ae1
SELECT * WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasName "Recording" .
}
Document: https://g-node.org/odml-rdf#cd24b60f-1d5e-4040-9881-5e5a597baef7
Section: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd
SELECT * WHERE {
?d odml:hasSection ?s .
?s rdf:type odml:Section .
?s odml:hasType "Recording" .
}
Document: https://g-node.org/odml-rdf#cc66e78a-3742-490a-9fdb-1c66761d7652
Section: https://g-node.org/odml-rdf#5365f7e5-603c-4154-a5ea-33bb1a07a956
Document: https://g-node.org/odml-rdf#24066355-1ee8-4eb5-a715-96bbb6231cd5
Section: https://g-node.org/odml-rdf#bbd44815-5016-49e0-9f4b-5b83778d00de
Document: https://g-node.org/odml-rdf#537c6cc8-7dfe-4d53-a111-24b3ce0f3c1a
Section: https://g-node.org/odml-rdf#346773f2-abee-4892-b052-840ddcff35ee
Document: https://g-node.org/odml-rdf#cd24b60f-1d5e-4040-9881-5e5a597baef7
Section: https://g-node.org/odml-rdf#782bd29d-e4b0-4c14-a417-1772a4851ffd
|
translation/XLM/XLM/PKM-layer.ipynb | ###Markdown
Product-Key Memory (PKM): **Minimalist implementation of a Product-Key Memory layer**, https://arxiv.org/abs/1907.05242. This notebook contains a simple implementation of a PKM layer. Overall, the PKM layer can be seen as a network with very high capacity that maps elements from $R^d$ to $R^n$, but very efficiently. In particular, a 12-layer transformer model that leverages a PKM layer outperforms a 24-layer model without memory, and is almost twice as fast at inference. A more detailed implementation can be found at https://github.com/facebookresearch/XLM/tree/master/src/model/memory, with options to make the query network more powerful, to shuffle the key indices, to compute the value scores differently than with a softmax, etc., but the code below is much simpler and implements a configuration that worked well in our experiments (and that we used to report the majority of our results). Note: at training time, we recommend using a different optimizer for the values, as these are learned with sparse updates. In particular, we obtained our best performance with the Adam optimizer and a constant learning rate of 1e-3 for the values, independently of the optimizer / learning rate used to learn the rest of the network (a short sketch of this two-optimizer setup is added at the end of this notebook).
###Code
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def get_uniform_keys(n_keys, dim, seed):
"""
Generate random uniform keys (same initialization as nn.Linear).
"""
rng = np.random.RandomState(seed)
bound = 1 / math.sqrt(dim)
keys = rng.uniform(-bound, bound, (n_keys, dim))
return keys.astype(np.float32)
class HashingMemory(nn.Module):
def __init__(self, input_dim, output_dim, params):
super().__init__()
# global parameters
self.input_dim = input_dim
self.output_dim = output_dim
self.k_dim = params.k_dim
self.v_dim = output_dim
self.n_keys = params.n_keys
self.size = self.n_keys ** 2
self.heads = params.heads
self.knn = params.knn
assert self.k_dim >= 2 and self.k_dim % 2 == 0
# dropout
self.input_dropout = params.input_dropout
self.query_dropout = params.query_dropout
self.value_dropout = params.value_dropout
# initialize keys / values
self.initialize_keys()
self.values = nn.EmbeddingBag(self.size, self.v_dim, mode='sum', sparse=params.sparse)
nn.init.normal_(self.values.weight, mean=0, std=self.v_dim ** -0.5)
# query network
self.query_proj = nn.Sequential(*filter(None, [
nn.Linear(self.input_dim, self.heads * self.k_dim, bias=True),
nn.BatchNorm1d(self.heads * self.k_dim) if params.query_batchnorm else None
]))
if params.query_batchnorm:
print("WARNING: Applying batch normalization to queries improves the performance "
"and memory usage. But if you use it, be sure that you use batches of "
"sentences with the same size at training time (i.e. without padding). "
"Otherwise, the padding token will result in incorrect mean/variance "
"estimations in the BatchNorm layer.\n")
def initialize_keys(self):
"""
Create two subkey sets per head.
`self.keys` is of shape (heads, 2, n_keys, k_dim // 2)
"""
half = self.k_dim // 2
keys = nn.Parameter(torch.from_numpy(np.array([
get_uniform_keys(self.n_keys, half, seed=(2 * i + j))
for i in range(self.heads)
for j in range(2)
])).view(self.heads, 2, self.n_keys, half))
self.keys = nn.Parameter(keys)
def _get_indices(self, query, subkeys):
"""
Generate scores and indices for a specific head.
"""
assert query.dim() == 2 and query.size(1) == self.k_dim
bs = query.size(0)
knn = self.knn
half = self.k_dim // 2
n_keys = len(subkeys[0])
# split query for product quantization
q1 = query[:, :half] # (bs,half)
q2 = query[:, half:] # (bs,half)
# compute indices with associated scores
scores1 = F.linear(q1, subkeys[0], bias=None) # (bs,n_keys)
scores2 = F.linear(q2, subkeys[1], bias=None) # (bs,n_keys)
scores1, indices1 = scores1.topk(knn, dim=1) # (bs,knn)
scores2, indices2 = scores2.topk(knn, dim=1) # (bs,knn)
# cartesian product on best candidate keys
all_scores = (
scores1.view(bs, knn, 1).expand(bs, knn, knn) +
scores2.view(bs, 1, knn).expand(bs, knn, knn)
).view(bs, -1) # (bs,knn**2)
all_indices = (
indices1.view(bs, knn, 1).expand(bs, knn, knn) * n_keys +
indices2.view(bs, 1, knn).expand(bs, knn, knn)
).view(bs, -1) # (bs,knn**2)
# select best scores with associated indices
scores, best_indices = torch.topk(all_scores, k=knn, dim=1) # (bs,knn)
indices = all_indices.gather(1, best_indices) # (bs,knn)
assert scores.shape == indices.shape == (bs, knn)
return scores, indices
def get_indices(self, query):
"""
Generate scores and indices.
"""
assert query.dim() == 2 and query.size(1) == self.k_dim
query = query.view(-1, self.heads, self.k_dim)
bs = len(query)
outputs = [self._get_indices(query[:, i], self.keys[i]) for i in range(self.heads)]
s = torch.cat([s.view(bs, 1, self.knn) for s, _ in outputs], 1) # (bs,heads,knn)
i = torch.cat([i.view(bs, 1, self.knn) for _, i in outputs], 1) # (bs,heads,knn)
return s.view(-1, self.knn), i.view(-1, self.knn)
def forward(self, input):
"""
Read from the memory.
"""
# input dimensions
assert input.shape[-1] == self.input_dim
prefix_shape = input.shape[:-1]
bs = np.prod(prefix_shape)
# compute query
input = F.dropout(input, p=self.input_dropout, training=self.training) # (...,i_dim)
query = self.query_proj(input.contiguous().view(-1, self.input_dim)) # (bs,heads*k_dim)
query = query.view(bs * self.heads, self.k_dim) # (bs*heads,k_dim)
query = F.dropout(query, p=self.query_dropout, training=self.training) # (bs*heads,k_dim)
assert query.shape == (bs * self.heads, self.k_dim)
# retrieve indices and scores
scores, indices = self.get_indices(query) # (bs*heads,knn)
scores = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs*heads,knn)
# merge heads / knn (since we sum heads)
indices = indices.view(bs, self.heads * self.knn) # (bs,heads*knn)
scores = scores.view(bs, self.heads * self.knn) # (bs,heads*knn)
# weighted sum of values
output = self.values(indices, per_sample_weights=scores) # (bs,v_dim)
output = F.dropout(output, p=self.value_dropout, training=self.training)# (bs,v_dim)
# reshape output
if len(prefix_shape) >= 2:
output = output.view(prefix_shape + (self.v_dim,)) # (...,v_dim)
return output
@staticmethod
def register_args(parser):
"""
Register memory parameters.
"""
# memory parameters
parser.add_argument("--sparse", type=bool_flag, default=False,
help="Perform sparse updates for the values")
parser.add_argument("--k_dim", type=int, default=256,
help="Memory keys dimension")
parser.add_argument("--heads", type=int, default=4,
help="Number of memory heads")
parser.add_argument("--knn", type=int, default=32,
help="Number of memory slots to read / update - k-NN to the query")
parser.add_argument("--n_keys", type=int, default=512,
help="Number of keys")
parser.add_argument("--query_batchnorm", type=bool_flag, default=False,
help="Query MLP batch norm")
# dropout
parser.add_argument("--input_dropout", type=float, default=0,
help="Input dropout")
parser.add_argument("--query_dropout", type=float, default=0,
help="Query dropout")
parser.add_argument("--value_dropout", type=float, default=0,
help="Value dropout")
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
params = AttrDict({
"sparse": False,
"k_dim": 128,
"heads": 4,
"knn": 32,
"n_keys": 512, # the memory will have (n_keys ** 2) values
"query_batchnorm": True,
"input_dropout": 0,
"query_dropout": 0,
"value_dropout": 0,
})
device = 'cuda' # cpu / cuda
input_dim = 50
output_dim = 100
memory = HashingMemory(input_dim, output_dim, params).to(device=device)
print(memory)
x = torch.randn(2, 3, 4, input_dim).to(device=device)
output = memory(x)
print(output.sum().item())
print(output.shape)
###Output
0.14277362823486328
torch.Size([2, 3, 4, 100])
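###Markdown
As a follow-up to the optimizer note in the introduction, here is a minimal sketch (added for illustration, not part of the original notebook) of training the memory values with their own Adam optimizer at a constant learning rate of 1e-3, separately from the remaining parameters. The 1e-4 learning rate for the other parameters and the dummy objective are assumptions made only for this example.
###Code
# Separate the value parameters from the rest of the memory's parameters.
value_params = list(memory.values.parameters())
value_ids = {id(p) for p in value_params}
other_params = [p for p in memory.parameters() if id(p) not in value_ids]

opt_values = torch.optim.Adam(value_params, lr=1e-3)  # constant LR for the values
opt_rest = torch.optim.Adam(other_params, lr=1e-4)    # hypothetical LR for everything else

# One illustrative update step with a dummy objective.
loss = memory(x).pow(2).mean()
opt_values.zero_grad()
opt_rest.zero_grad()
loss.backward()
opt_values.step()
opt_rest.step()
print(loss.item())
###Output
_____no_output_____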
|
notebooks/welter_issue026-01_Text_and_analysis_for_results_section.ipynb | ###Markdown
Welter issue 26: Text and Analysis for the results section. Michael Gully-Santiago, Monday, July 5, 2016. See [Issue26](https://github.com/BrownDwarf/welter/issues/26)
###Code
import warnings
warnings.filterwarnings("ignore")
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
% matplotlib inline
% config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_context('notebook')
import pandas as pd
###Output
_____no_output_____
###Markdown
Single order results
###Code
orders = pd.read_csv('../data/analysis/orders_LkCa4_oneTeff.csv')
orders.tail()
IG_orders = orders[orders.Instrument == 'IGRINS']
ES_orders = orders[orders.Instrument == 'ESPaDoNs']
len(IG_orders), len(IG_orders.dropna())
sns.distplot(IG_orders.vz_50p.dropna(), rug=True)
sns.distplot(IG_orders.vz_05p.dropna(), hist=False)
sns.distplot(IG_orders.vz_95p.dropna(), hist=False)
IG_orders.vz_95p.dropna().median(), IG_orders.vz_95p.dropna().std()
sns.distplot(IG_orders.vi_50p.dropna(), rug=True)
sns.distplot(ES_orders.vi_50p.dropna(), rug=False, hist=False, color='k')
sns.distplot(IG_orders.vi_05p.dropna(), hist=False)
sns.distplot(IG_orders.vi_95p.dropna(), hist=False)
IG_orders.vi_95p.dropna().median(), IG_orders.vi_95p.dropna().std()
sns.distplot(ES_orders.FeH_50p.dropna(), rug=False, hist=False, color='k')
sns.distplot(IG_orders.FeH_50p.dropna(), rug=True)
sns.distplot(IG_orders.FeH_05p.dropna(), hist=False)
sns.distplot(IG_orders.FeH_95p.dropna(), hist=False)
IG_orders.FeH_95p.dropna().median(), IG_orders.FeH_95p.dropna().std()
sns.distplot(IG_orders.logg_50p.dropna(), rug=True)
sns.distplot(IG_orders.logg_05p.dropna(), hist=False)
sns.distplot(IG_orders.logg_95p.dropna(), hist=False)
#IG_orders.FeH_95p.dropna().median(), IG_orders.FeH_95p.dropna().std()
plt.plot(IG_orders.wl_center, IG_orders.vz_50p, '.')
plt.plot(orders.wl_center, orders.FeH_50p, '.')
###Output
_____no_output_____
###Markdown
Multi order results
###Code
mo = pd.read_csv('../data/analysis/IGRINS_mix_emcee_last200.csv')
mo.columns
len(mo), len(mo.dropna())
gi = mo.SA_50p < 1.0
mo.SA_50p[gi].median(), mo.SA_50p[gi].std()
plt.plot(mo.m_val, mo.Teff_50p, '.')
mo.columns
#plt.plot(mo.m_val, mo.vz_95p, '.')
plt.plot(mo.m_val, mo.vz_50p, '.')
#plt.plot(mo.m_val, mo.vz_05p, '.')
#plt.ylim(0, 20)
mo.vz_50p.median(), mo.vz_50p.std()
sns.distplot(mo.vi_95p.dropna(), hist=False)
sns.distplot(mo.vi_50p.dropna(), rug=True)
sns.distplot(mo.vi_05p.dropna(), hist=False)
mo.vi_50p.dropna().median(), mo.vi_50p.dropna().std()
bi = mo.vi_95p > 40
mo[bi][['m_val', 'vi_50p']]
sns.distplot(mo.vz_95p.dropna(), hist=False)
sns.distplot(mo.vz_50p.dropna(), rug=True)
sns.distplot(mo.vz_05p.dropna(), hist=False)
mo.vz_50p.dropna().median(), mo.vz_50p.dropna().std()
bi = mo.vz_05p < 0
mo[bi][['m_val', 'vi_50p', 'vz_50p']]
mo.columns
plt.plot(mo.m_val, mo.logg_50p, '.')
sns.distplot(mo.logg_50p.dropna(), rug=True)
sns.distplot(mo.logg_05p.dropna(), hist=False)
sns.distplot(mo.logg_95p.dropna(), hist=False)
mo.logg_50p.dropna().median(), mo.logg_50p.dropna().std()
mo_full = mo
mo = mo.dropna()
weights = 1.0/(mo.logg_95p - mo.logg_05p)
val = (mo.logg_50p*weights).sum()/(weights.sum())
val
sns.distplot(mo.FeH_50p.dropna(), rug=True)
sns.distplot(mo.FeH_05p.dropna(), hist=False)
sns.distplot(mo.FeH_95p.dropna(), hist=False)
yerr_hi = mo.FeH_95p - mo.FeH_50p
yerr_lo = mo.FeH_50p - mo.FeH_05p
plt.errorbar(mo.wl_center, mo.FeH_50p, yerr=[yerr_lo, yerr_hi], fmt='.')
#sns.distplot(mo.FeH_50p, rug=True)
bins = np.arange(-0.5, 0.51, 0.2)
sns.distplot(mo.FeH_50p[mo.band == 'H'], bins, hist=True, kde=False, rug=True, label='$H-$band')
sns.distplot(mo.FeH_50p[mo.band == 'K'], bins, hist=True, kde=False, rug=True, label='$K-$band')
plt.xlim(-0.5, 0.5)
plt.legend(loc='best')
tips = sns.load_dataset("tips")
ax = sns.violinplot(x="day", y="total_bill", data=tips)
tips
###Output
_____no_output_____ |
Python/Python-Completo/Python Completo/Notebooks Traduzidos/Python Debugger (pdb).ipynb | ###Markdown
Python Debugger: You have probably used a variety of print statements to try to find errors in your code. A better way to do this is with Python's built-in debugging module (pdb). The pdb module implements an interactive debugging environment for Python programs. It includes features that let you pause your program, look at the values of variables, and watch the program execute step by step, so that you can understand what your program is actually doing and find errors in its logic. This is a bit difficult to show, since it requires creating an error on purpose, but hopefully this simple example illustrates the power of the pdb module. *Note: keep in mind that it would be fairly unusual to use pdb in an iPython Notebook setting.* ___ Here we will create an error on purpose by trying to add a list to an integer.
###Code
x = [1,3,4]
y = 2
z = 3
result = y + z
print(result)
result2 = y+x
print(result2)
###Output
5
###Markdown
Hmmm, it looks like we have an error! Let's add a set_trace() call using the pdb module. This essentially lets us pause the code at the trace point and check whether something is wrong.
###Code
import pdb
x = [1,3,4]
y = 2
z = 3
result = y + z
print(result)
# Use the set_trace() method to pause the code at this point.
pdb.set_trace()
result2 = y + x
print(result2)
###Output
5
--Return--
> <ipython-input-4-0a2880872cf0>(11)<module>()->None
-> pdb.set_trace()
(Pdb) x
[1, 3, 4]
(Pdb) y
2
(Pdb) z
3
(Pdb) x+y
*** TypeError: can only concatenate list (not "int") to list
(Pdb) q
|
transform_v1.ipynb | ###Markdown
Stage: transform_v1 input_variable: df_c
###Code
df_t = df_c *5
###Output
_____no_output_____ |
Deskripsi data masing2/Achyar.ipynb | ###Markdown
Credit Scoring_Achyar Import Library
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, roc_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor,GradientBoostingClassifier
from sklearn.preprocessing import StandardScaler, Imputer, MinMaxScaler
from imblearn.over_sampling import SMOTE
from xgboost import XGBClassifier
import lightgbm as lgb
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from imblearn.over_sampling import SMOTE
###Output
C:\Users\achyar059232\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\ensemble\weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release.
from numpy.core.umath_tests import inner1d
###Markdown
Import Data
###Code
train=pd.read_csv('npl_train.csv') # note: pass an encoding if needed; alternatively use delimiter=',' if required
test=pd.read_csv('npl_test.csv') # if there is no header and the first data row is taken as one, add header=None
###Output
_____no_output_____
###Markdown
View, Cleansing , EDA + View
###Code
pd.set_option('display.max_columns',None)
pd.set_option('display.max_rows',None)
# number of rows and columns of the dataframe
train.shape
train.head()
train[train['sisa_tagihan_tidak_terbayar']>=train['tagihan']].shape
# show the data types of all variables in the dataframe
print(train.dtypes.to_string())
train['flag_kredit_macet'].value_counts()
train['flag_kredit_macet'].value_counts().plot.bar()
###Output
_____no_output_____
###Markdown
The number of performing loans is roughly 10 times the number of non-performing loans (an imbalanced-class problem); a resampling sketch is added below. Number of cards vs. the bad-credit flag
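###Markdown
Before looking at the card counts, a brief aside on the imbalance just noted: the SMOTE import from the first cell could be used to oversample the minority class, as in the hedged sketch below (an illustrative addition, not part of the original analysis; the column selection is an assumption, and older imblearn releases name the method `fit_sample` rather than `fit_resample`).
###Code
# Sketch: oversample the minority class of the bad-credit flag with SMOTE.
num = train.select_dtypes(include='number').dropna()   # numeric columns only, no missing values
X = num.drop(columns=['flag_kredit_macet'])
y = num['flag_kredit_macet']
X_res, y_res = SMOTE(random_state=0).fit_resample(X, y)
print(y.value_counts())
print(pd.Series(y_res).value_counts())
###Output
_____no_output_____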
###Code
train['jumlah_kartu'].value_counts()
# number of cards vs. the bad-credit flag
plt.figure(figsize=(10,10))
train['jumlah_kartu'].value_counts().plot.bar()
pd.crosstab(train['jumlah_kartu'],train['flag_kredit_macet']).plot.bar(rot=45,stacked=True, figsize=(10,7))
pd.crosstab(train['jumlah_kartu'],train['flag_kredit_macet'],normalize='index').plot.bar(rot=45,stacked=True, figsize=(10,7))
###Output
_____no_output_____
###Markdown
Outstanding balance vs. the bad-credit flag
###Code
plt.figure(figsize=(20,10))
plt.subplot(211)
sns.distplot(train['outstanding'], bins=None, hist=True, kde=True)
plt.figure(figsize=(20,10))
plt.subplot(212)
sns.boxplot(train['outstanding'])
plt.figure(figsize=(20,10))
train.groupby('flag_kredit_macet').outstanding.plot.density(title='Age',legend=True)
###Output
_____no_output_____
###Markdown
Credit limit vs. the bad-credit flag
###Code
plt.figure(figsize=(20,10))
plt.subplot(211)
sns.distplot(train['limit_kredit'], bins=None, hist=True, kde=True)
plt.figure(figsize=(20,10))
plt.subplot(212)
sns.boxplot(train['limit_kredit'])
plt.figure(figsize=(20,10))
train.groupby('flag_kredit_macet').limit_kredit.plot.density(title='Age',legend=True)
###Output
_____no_output_____
###Markdown
Billed amount vs. the bad-credit flag
###Code
plt.figure(figsize=(20,10))
plt.subplot(211)
sns.distplot(train['tagihan'], bins=None, hist=True, kde=True)
plt.figure(figsize=(20,10))
plt.subplot(212)
sns.boxplot(train['tagihan'])
plt.figure(figsize=(20,10))
train.groupby('flag_kredit_macet').tagihan.plot.density(title='Age',legend=True)
###Output
_____no_output_____
###Markdown
Total cash usage vs. the bad-credit flag
###Code
plt.figure(figsize=(20,10))
plt.subplot(211)
sns.distplot(train['total_pemakaian_tunai'], bins=None, hist=True, kde=True)
plt.figure(figsize=(20,10))
plt.subplot(212)
sns.boxplot(train['total_pemakaian_tunai'])
plt.figure(figsize=(20,10))
train.groupby('flag_kredit_macet').total_pemakaian_tunai.plot.density(title='Age',legend=True)
###Output
_____no_output_____
###Markdown
Total retail usage vs. the bad-credit flag
###Code
plt.figure(figsize=(20,10))
plt.subplot(211)
sns.distplot(train['total_pemakaian_retail'], bins=None, hist=True, kde=True)
plt.figure(figsize=(20,10))
plt.subplot(212)
sns.boxplot(train['total_pemakaian_retail'])
plt.figure(figsize=(20,10))
train.groupby('flag_kredit_macet').total_pemakaian_retail.plot.density(title='Age',legend=True)
###Output
_____no_output_____
###Markdown
Unpaid remaining bill vs. the bad-credit flag
###Code
plt.figure(figsize=(20,10))
plt.subplot(211)
sns.distplot(train['sisa_tagihan_tidak_terbayar'], bins=None, hist=True, kde=True)
plt.figure(figsize=(20,10))
plt.subplot(212)
sns.boxplot(train['sisa_tagihan_tidak_terbayar'])
plt.figure(figsize=(20,10))
train.groupby('flag_kredit_macet').sisa_tagihan_tidak_terbayar.plot.density(title='Age',legend=True)
###Output
_____no_output_____
###Markdown
Branch code
###Code
# branch code vs. the bad-credit flag
plt.figure(figsize=(10,10))
train['kode_cabang'].value_counts()
# branch code vs. the bad-credit flag
plt.figure(figsize=(10,10))
train['kode_cabang'].value_counts().plot.bar()
pd.crosstab(train['kode_cabang'],train['flag_kredit_macet']).plot.bar(stacked=False, figsize=(10,7))
pd.crosstab(train['kode_cabang'],train['flag_kredit_macet'],normalize='index').plot.bar(rot=45,stacked=True, figsize=(10,7))
###Output
_____no_output_____ |
klima/.ipynb_checkpoints/2_preprocessor-checkpoint.ipynb | ###Markdown
0: NoCloud, 1: Cloud, 4: Fog, 6: Rain, 7: Snow, 11: Hail
###Code
weather={
2:1,
3:7,
5:6,
8:6,
9:6,
100:0,
101:0,
102:0,
103:0,
104:0,
105:0,
106:0,
107:0,
108:0,
109:0,
110:4,
111:4,
112:4,
113:1,
114:6,
115:6,
116:6,
117:6,
118:6,
119:1,
120:7,
121:6,
122:7,
123:6,
124:11,
125:6,
126:7,
127:11,
128:4,
129:6,
130:1,
131:1,
132:1,
133:1,
134:1,
135:1,
136:7,
137:7,
138:7,
139:7,
140:4,
141:4,
142:4,
143:4,
144:4,
145:4,
146:4,
147:4,
148:4,
149:4,
150:6,
151:6,
152:6,
153:6,
154:6,
155:6,
156:6,
157:6,
158:6,
159:6,
160:6,
161:6,
162:6,
163:6,
164:6,
165:6,
166:11,
167:11,
168:6,
169:6,
170:7,
171:7,
172:7,
173:7,
174:7,
175:7,
176:7,
177:7,
178:7,
179:7,
180:6,
181:6,
182:6,
183:7,
184:7,
185:7,
186:7,
187:7,
188:7,
189:11,
190:11,
191:6,
192:6,
193:7,
194:7,
195:6,
196:11,
197:6,
198:1,
199:11}
measures=['XTEMP','XSPD','XPCP','XSD','XVSB','YFOG','YPCP','YSNW','YHAL']
ycolumns={1:'YCLD',4:'YFOG',6:'YPCP',7:'YSNW',10:'YCLR',11:'YHAL'}
def load_data(stn,d='high_res',p=p,stations=stations,verbose=True):
if verbose:
print('loading...',stn,stations.loc[int(stn)]['LOC'],d)
df=pd.read_csv(p+'/'+d+'/export/'+stn+'.csv',dtype={' FRSHTT':str})
df.columns=[i.strip() for i in df.columns]
df['time']=pd.to_datetime(df['time'])
df['XTEMP']=(pd.to_numeric(df['TEMP'], errors='coerce').replace(9999.9,np.nan)-32)*5/9 #Fahrenheit to Celsiu
if d=='high_res':
df['XSPD']=pd.to_numeric(df['SPD'], errors='coerce')*1.61 #MPH to Km/h
df['XVSB']=(pd.to_numeric(df['VSB'], errors='coerce')*1.61).apply(lambda x: min(x,10)) #miles to Km, max VSB=20Km
df['XPCP']=pd.to_numeric(df['PCP06'], errors='coerce')*25.4 #inch to mm
df['XSD']=pd.to_numeric(df['SD'], errors='coerce')*25.4 #inch to mm
df['PCP01']=pd.to_numeric(df['PCP01'], errors='coerce')
df['PCP06']=pd.to_numeric(df['PCP06'], errors='coerce')
df['PCP24']=pd.to_numeric(df['PCP24'], errors='coerce')
df['PCPXX']=pd.to_numeric(df['PCPXX'], errors='coerce')
df['PCP06'].loc[~df['PCP06'].isnull()] = 6
df['PCPXX'].loc[~df['PCPXX'].isnull()] = 6
df['PCP01'].loc[~df['PCP01'].isnull()] = 6
df['PCP24'].loc[~df['PCP24'].isnull()] = 6
df['AW']=pd.to_numeric(df['AW'], errors='coerce')+100
df['MW']=pd.to_numeric(df['MW'], errors='coerce')+100
df['W']=pd.to_numeric(df['W'], errors='coerce')
dz=df[['PCP01','PCP06','PCP24','PCPXX','AW','MW','W']]
df['W']=dz.ffill(axis=1)['W'].replace(weather).replace(0,10)
dz=df.groupby(['time','W']).count()['TEMP'].unstack().fillna(0)
dz.columns=[ycolumns[i] for i in dz.columns]
df=df.set_index('time').join(dz).reset_index()
else:
df['year']=df['time'].dt.year
df['month']=df['time'].dt.month
df['day']=df['time'].dt.day
df['hour']=df['time'].dt.hour
df['XSPD']=pd.to_numeric(df['WDSP'], errors='coerce').replace(999.9,np.nan)*1.85 #knots to Km/h
df['XVSB']=(pd.to_numeric(df['VISIB'], errors='coerce').replace(999.9,np.nan)*1.61).apply(lambda x: min(x,10)) #miles to Km, max VSB=20Km
df['XPCP']=pd.to_numeric(df['PRCP'].str[:-1], errors='coerce').replace(99.99,np.nan)*25.4 #inch to mm
df['XSD']=pd.to_numeric(df['SNDP'], errors='coerce').replace(999.9,np.nan)*25.4 #inch to mm
df['YFOG']=pd.to_numeric(df['FRSHTT'].str[0], errors='coerce')
df['YPCP']=pd.to_numeric(df['FRSHTT'].str[1], errors='coerce')
df['YSNW']=pd.to_numeric(df['FRSHTT'].str[2], errors='coerce')
df['YHAL']=pd.to_numeric(df['FRSHTT'].str[3], errors='coerce')
for m in measures:
if m not in df.columns:
df[m]=np.nan
return df[['time','year','month','day','hour']+measures].set_index('time')
# stn='154200' #aurel vlaicu
stn='151700' #mciuc
daily=load_data(stn,'daily')
hires=load_data(stn,'high_res')
def comparison_getter(measure,daily=daily,hires=hires):
if type(measure)!=list:
measure=[measure]
d=daily[measure]
h=hires.groupby(['year','month','day','hour']).mean()[measure]
ymeasures=[m for m in measure if 'Y' in m]
h[ymeasures]=h[ymeasures][h[ymeasures]==0].fillna(1)
h=h.reset_index()
h['time']=pd.to_datetime(dict(year=h['year'], month=h['month'], day=h['day'], hour=h['hour'])).values
h=h.set_index('time')[measure]
return d,h
def comaprison_plotter(measure,daily=daily,hires=hires):
d,h=comparison_getter(measure,daily,hires)
d.columns=['d']
h.columns=['h']
x=h.join(d,how='outer').dropna()
x['diff']=(x['h']-x['d'])
fig,ax=plt.subplots(1,3,figsize=(15,4))
x['diff'].plot(ax=ax[0],title='diff')
x['h'].plot(ax=ax[1],title='high res')
x['d'].plot(ax=ax[2],title='daily')
return x,d,h
comaprison_plotter('XTEMP');
comaprison_plotter('XSPD');
x,d,h=comaprison_plotter('XPCP')
x,d,h=comaprison_plotter('XSD');
x,d,h=comaprison_plotter('XVSB');
x,d,h=comaprison_plotter('YSNW');
x,d,h=comaprison_plotter('YPCP');
x,d,h=comaprison_plotter('YHAL');
hu=['127720',
'128050',
'128120',
'128220',
'128250',
'128300',
'128390',
'128430',
'128510',
'128600',
'128820',
'128920',
'129100',
'129150',
'129420',
'129600',
'129700',
'129820',
'129920',
'129350']
ro=['150040',
'150100',
'150140',
'150150',
'150200',
'150230',
'150800',
'150850',
'150900',
'151080',
'151200',
'151450',
'151500',
'151700',
'151970',
'152000',
'152300',
'152350',
'152470',
'152600',
'152800',
'152920',
'153100',
'153350',
'153460',
'153500',
'153600',
'154100',
'154200',
'154210',
'154500',
'154600',
'154700',
'154800',
'154810',
'154990']
import os
hu=[i[:-4] for i in os.listdir(p+'/high_res/export') if int(i[:-4])<140000]
ro=[i[:-4] for i in os.listdir(p+'/high_res/export') if int(i[:-4])>140000]
hs=[]
ds=[]
for stn in ro:
try:
d_ok=True
daily=load_data(stn,'daily')
except:
d_ok=False
print('failed')
try:
h_ok=True
hires=load_data(stn,'high_res')
except:
h_ok=False
print('failed')
d,h=comparison_getter(measures,daily,hires)
d['ID']=stn
h['ID']=stn
if d_ok: ds.append(d)
if h_ok: hs.append(h)
ds=pd.concat(ds)
hs=pd.concat(hs)
ds.to_csv('data/ro_ds.csv')
hs.to_csv('data/ro_hs.csv')
hs=[]
ds=[]
for stn in hu:
try:
d_ok=True
daily=load_data(stn,'daily')
except:
d_ok=False
print('failed')
try:
h_ok=True
hires=load_data(stn,'high_res')
except:
h_ok=False
print('failed')
d,h=comparison_getter(measures,daily,hires)
d['ID']=stn
h['ID']=stn
if d_ok: ds.append(d)
if h_ok: hs.append(h)
ds=pd.concat(ds)
hs=pd.concat(hs)
ds.to_csv('data/hu_ds.csv')
hs.to_csv('data/hu_hs.csv')
###Output
_____no_output_____ |
dreams.ipynb | ###Markdown
Dreams of the GLG101 class of 2018. Data Collection Detail: This is a fun project that I did while I was TAing for the GLG101 class at ASU in Spring 2018. During one of the classes we had an in-class exercise where students were required to submit a piece of paper answering what their major is and what their dream job would be if money and security were of no concern. I collected all the papers and digitised them, and during this process I neglected some details and made some generalizations of their descriptions. I started this project out of curiosity about the following questions: * What is the class major composition? * What are the most frequently used words to describe dreams? * Are we working on our dreams? This project uses the NLTK package to do the natural language processing, and many thanks to the tutorial by [bonzanini](https://github.com/bonzanini/nlp-tutorial).
###Code
import os
import pandas as pd
infile = './data/dreams.data'
# read csv file into dataframe
# col 0 fields, major
# col 1 dreams descriptions
df = pd.read_csv(infile,header=None)
dreams_long = df[1].str.split().as_matrix()
fields_long = df[0].str.split().as_matrix()
###Output
/usr/local/lib/python3.7/site-packages/ipykernel_launcher.py:11: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.
# This is added back by InteractiveShellApp.init_path()
/usr/local/lib/python3.7/site-packages/ipykernel_launcher.py:12: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.
if sys.path[0] == '':
###Markdown
Tokenisation: split the text into tokens and count the frequency of each token
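As a quick sanity check of what `word_tokenize` does, here is a sketch on a made-up answer (it assumes the NLTK `punkt` tokenizer data has already been downloaded):

```python
from nltk.tokenize import word_tokenize

# Made-up answer, just to illustrate the token split
word_tokenize("I want to travel and design video games")
# -> ['I', 'want', 'to', 'travel', 'and', 'design', 'video', 'games']
```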
###Code
from nltk.tokenize import word_tokenize
# make major string frame flat
flat_major = []
for sublist in fields_long:
for item in sublist:
flat_major.append(str(item))
major = ' '.join([i for i in flat_major])
#print("Number of major characters: {}".format(len(major)))
tokens_major = [t for t in word_tokenize(major)]
print("Major token num: {}".format(len(tokens_major)))
# make dreams string frame flat
flat_dreams = []
for sublist in dreams_long:
for item in sublist:
flat_dreams.append(str(item))
dreams = ' '.join([i for i in flat_dreams])
#print("Number of characters: {}".format(len(dreams)))
# all_tokens are dreams token
all_tokens = [t for t in word_tokenize(dreams)]
print("Dreams token number is: {}".format(len(all_tokens)))
###Output
Major token num: 167
Dreams token number is: 510
###Markdown
Check frequency of key words on Major and Dreams
###Code
# count each token frequency
from collections import Counter
major_token_frequency = Counter(tokens_major)
token_frequency = Counter(all_tokens)
# print major token frequency
print("{0:15s}\t\t\t{1}".format("Major Token(top30)","freq"))
for word, freq in major_token_frequency.most_common(30):
print("{0:15s}\t\t\t{1}".format(word,freq))
print("======================================")
# print dreams token frequency
print("{0:15s}\t\t\t{1}".format("Dreams Token(top30)","freq"))
for word, freq in token_frequency.most_common(30):
print("{0:15s}\t\t\t{1}".format(word,freq))
###Output
Major Token(top30) freq
CS 46
Enginneering 13
Civil 9
psychology 8
business 5
Management 4
and 4
Industrial 3
Education 3
Engineering 3
Supply 3
chain 2
Accounting 2
design 2
Elementary 2
Mechanical 2
Geology 2
secondary 2
education 2
in 2
study 2
law 2
Finance 2
Chain 2
marketing 2
physhology 2
Global 2
Economics 2
supply 1
Conservation 1
======================================
Dreams Token(top30) freq
and 63
or 24
traveling 12
for 10
engineer 9
company 9
game 8
software 8
travel 7
of 6
in 6
player 5
video 5
the 5
volunteer 4
on 4
with 4
& 3
CEO 3
national 3
startup 3
professor 3
makeup 3
design 3
designer 3
at 3
media 3
social 3
work 3
family 3
###Markdown
Remove Stop-Words: remove common English stop-words and punctuation
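A minimal sketch of the same filtering on a made-up, lower-cased answer (assumes the NLTK stopwords corpus is available; the exact result depends on the stop-list version):

```python
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string

stop_list = stopwords.words('english') + list(string.punctuation)
tokens = word_tokenize("i want to be a travel photographer")
[t for t in tokens if t not in stop_list]
# roughly -> ['want', 'travel', 'photographer']
```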
###Code
from nltk.corpus import stopwords
import string
# Construct stop list
stop_list = stopwords.words('english') + list(string.punctuation)
token_no_stop = [token for token in all_tokens if token not in stop_list]
major_token_no_stop = [token for token in tokens_major if token not in stop_list]
token_freq_no_stop = Counter(token_no_stop)
major_token_freq_no_stop = Counter(major_token_no_stop)
# major token without stop-words
print("{0:15s}\t\t\t{1}".format("Major Token(top30)","freq"))
for word,freq in major_token_freq_no_stop.most_common(30):
print("{0:15s}\t\t\t{1}".format(word,freq))
print("\n=====================================")
# dreams token without stop-words
print("{0:15s}\t\t\t{1}".format("Dreams Token(top30)","freq"))
for word,freq in token_freq_no_stop.most_common(30):
print("{0:15s}\t\t\t{1}".format(word,freq))
###Output
Major Token(top30) freq
CS 46
Enginneering 13
Civil 9
psychology 8
business 5
Management 4
Industrial 3
Education 3
Engineering 3
Supply 3
chain 2
Accounting 2
design 2
Elementary 2
Mechanical 2
Geology 2
secondary 2
education 2
study 2
law 2
Finance 2
Chain 2
marketing 2
physhology 2
Global 2
Economics 2
supply 1
Conservation 1
Biology 1
anthropology 1
=====================================
Dreams Token(top30) freq
traveling 12
engineer 9
company 9
game 8
software 8
travel 7
player 5
video 5
volunteer 4
CEO 3
national 3
startup 3
professor 3
makeup 3
design 3
designer 3
media 3
social 3
work 3
family 3
actor 2
ocean 2
home 2
help 2
people 2
football 2
teaching 2
artist 2
world 2
google 2
###Markdown
Text Normalisation, Stemming: replace tokens with a canonical form so we can group different spellings/variations of the same word
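A small sketch of what the Porter stemmer does to a few of the words that appear in this dataset (it should collapse related spellings onto one stem):

```python
from nltk.stem import PorterStemmer

stemmer = PorterStemmer()
[stemmer.stem(w) for w in ['traveling', 'travel', 'engineering', 'engineer']]
# expected: ['travel', 'travel', 'engin', 'engin']
```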
###Code
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
# majors
major_all_token_lower = [t.lower() for t in tokens_major]
major_all_token_lower_no_stop = [t for t in major_all_token_lower if t not in stop_list]
major_tokens_norm = [stemmer.stem(t) for t in major_all_token_lower if t not in stop_list]
major_token_freq_norm = Counter(major_tokens_norm)
major_label = []
major_size = []
print("{0:15s}\t\t\t{1}".format("Major Token(top25)","freq"))
for word,freq in major_token_freq_norm.most_common(25):
print("{0:20s}\t{1}".format(word,freq))
major_label.append(word)
major_size.append(freq)
total_count = sum(major_token_freq_no_stop.values())
other_count = total_count - sum(major_size)
#major_label.append('other')
#major_size.append(other_count)
print("==============================================")
# dreams
all_token_lower = [t.lower() for t in all_tokens]
all_token_lower_no_stop = [t for t in all_token_lower if t not in stop_list]
tokens_norm = [stemmer.stem(t) for t in all_token_lower if t not in stop_list]
token_freq_norm = Counter(tokens_norm)
dream_label = []
dream_size = []
print("{0:15s}\t\t\t{1}".format("Dreams Token(top25)","freq"))
for word,freq in token_freq_norm.most_common(25):
print("{0:20s}\t{1}".format(word,freq))
dream_label.append(word)
dream_size.append(freq)
###Output
Major Token(top25) freq
cs 46
enginn 13
civil 9
psycholog 8
manag 5
busi 5
educ 5
suppli 4
chain 4
industri 3
engin 3
market 3
account 2
design 2
elementari 2
mechan 2
geolog 2
secondari 2
studi 2
law 2
financ 2
sociolog 2
scienc 2
physholog 2
global 2
==============================================
Dreams Token(top25) freq
travel 19
softwar 9
engin 9
game 9
compani 9
design 6
player 5
video 5
volunt 4
tech 4
ceo 3
nation 3
comput 3
startup 3
professor 3
makeup 3
code 3
media 3
social 3
work 3
famili 3
appl 2
actor 2
ocean 2
home 2
###Markdown
Use Pie Chart to plot Major/Dreams composition
###Code
import matplotlib.pyplot as plt
from pylab import rcParams
import matplotlib as mpl
mpl.rcParams['font.size'] = 10.0
rcParams['figure.figsize'] = 10, 10
plt.pie(major_size,labels=major_label,autopct='%1.1f%%')
plt.title("Pie Chart for Major Composition for top 25",size=25)
plt.show()
# print(major_token_freq_no_stop)
plt.pie(dream_size,labels=dream_label,autopct='%1.1f%%')
plt.title("Pie Chart for Dreams Composition for top 25",size=25)
plt.show()
# print(major_token_freq_no_stop)
###Output
_____no_output_____
###Markdown
Play with n-grams
###Code
# for majors
from nltk import ngrams
phase_num = 2
title_label = []
title_size = []
phrases = Counter(ngrams(tokens_norm,phase_num))
# phrases = Counter(ngrams(all_token_lower_no_stop,phase_num))
print("{0:15s}\t\t\t{1}".format("Major ngrams(top15)","freq"))
for phrase,freq in phrases.most_common(15):
print("{}\t{}".format(phrase,freq))
title_label.append(phrase)
title_size.append(freq)
#print(title_label)
rcParams['figure.figsize'] = 10, 10
plt.pie(title_size,labels=title_label,autopct='%1.1f%%')
plt.title("2-words phrases for Dreams for top 15",size=25)
plt.show()
###Output
_____no_output_____
###Markdown
###Code
# 2words ngrams for majors
from nltk import ngrams
phase_num = 2
phrases = Counter(ngrams(major_all_token_lower_no_stop,phase_num))
print("{0:15s}\t\t\t{1}".format("Dreams ngrams","freq"))
for phrase,freq in phrases.most_common(20):
print("{0}\t\t\t\t\t{1}".format(phrase,freq))
###Output
Dreams ngrams freq
('cs', 'cs') 22
('civil', 'enginneering') 6
('supply', 'chain') 4
('enginneering', 'cs') 4
('psychology', 'cs') 4
('cs', 'supply') 3
('chain', 'cs') 3
('cs', 'civil') 3
('civil', 'engineering') 3
('enginneering', 'management') 2
('cs', 'business') 2
('psychology', 'psychology') 2
('industrial', 'design') 2
('elementary', 'education') 2
('mechanical', 'enginneering') 2
('secondary', 'education') 2
('enginneering', 'psychology') 2
('business', 'law') 2
('cs', 'finance') 2
('cs', 'global') 2
###Markdown
Are We Working on Dreams? In this part we use a pre-trained NLP model (GloVe word vectors) to check the similarity between each major and dream job. Please refer to the actual data file (_./data/dreams.data_) for more insights.
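The cell below loads the GloVe vectors through pandas, which treats the first row as a header (so the first word, `<user>`, ends up as the column-index name rather than as a usable word). A more conventional loader, sketched here under the assumption that the file is the usual whitespace-separated GloVe text format, keeps every word including the first one:

```python
import numpy as np

embeddings = {}
with open('./data/glove.twitter.27B.50d.txt') as f:
    for line in f:
        parts = line.rstrip().split(' ')
        # first token is the word, the rest are the 50 vector components
        embeddings[parts[0]] = np.asarray(parts[1:], dtype='float32')
```

With a dict like this, the distance helper below could simply do `embeddings.get(word)` instead of probing `glove.columns`.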
###Code
# read in model
import csv
import numpy as np
glove = pd.read_csv('./data/glove.twitter.27B.50d.txt',delimiter=' ',engine='python', quoting=csv.QUOTE_NONE)
glove.head()
glove.index = glove['<user>']
glove.drop('<user>',axis=1,inplace=True)
glove = glove.T
glove.head()
stop_list = stopwords.words('english') + list(string.punctuation)
def get_dist(k1,k2):
    # Euclidean distance between the GloVe vectors of two words.
    # Stop words, punctuation and out-of-vocabulary words get a large
    # sentinel distance of 100 so they never count as a match.
    k1 = k1.lower()
    k2 = k2.lower()
    if k1 in stop_list or k2 in stop_list:
        return 100
    words = glove.columns
    if k1 in words:
        v1 = glove[k1].values
    else:
        return 100
    if k2 in words:
        v2 = glove[k2].values
    else:
        return 100
    return np.linalg.norm(v1-v2)
get_dist("cat",'dog')
vector_dist = []
for dream, major in zip(dreams_long,fields_long):
mindist = 1000
for k1 in dream:
for k2 in major:
dist = get_dist(k1,k2)
# print(k1,k2,dist)
if dist < mindist:
mindist = dist
print(dream,major,mindist)
vector_dist.append(mindist)
plt.hist(vector_dist,bins=np.linspace(0,8,50))
plt.xlabel('GloVe distance (lower = more similar)')
plt.ylabel('Frequency')
print(" --> Percentage of students working on dream jobs: {:.2f}%".format( sum( [x<5 for x in vector_dist]) / len(vector_dist) * 100))
# matches = match_long.tolist()
# yes_percentage = (len(matches.count('yes')) / len(matches))
# yes_percentage = matches.count('yes') / len(matches)
yes_percentage = sum( [x<5 for x in vector_dist]) / len(vector_dist)
match_label = ["yes","no"]
match_size = [yes_percentage,1-yes_percentage]
plt.pie(match_size,labels=match_label,autopct='%1.1f%%',startangle=90)
plt.title("Whether current Major matches dream job",size=20)
plt.show()
###Output
_____no_output_____ |
notebooks/mario2.ipynb | ###Markdown
Partially complete import of Ks files and output to stats.csv
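The nested loops further down that build `combos` can be written more compactly with `itertools.product` (imported in the cell below as `it`); a sketch, assuming the `*_names` index objects defined in that cell:

```python
# Same set of (char, body, tire, glider) combinations, possibly in a different order
combos = list(it.product(char_names, body_names, tire_names, glider_names))
```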
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import itertools as it
from sklearn.cluster import KMeans
bodies = pd.read_csv('correctedframes.csv')
chars = pd.read_csv('correctedchars.csv')
gliders = pd.read_csv('correctedgliders.csv')
tires = pd.read_csv('correctedtires.csv')
# use only stock (non-DLC) characters / karts / tires
#chars = chars.loc[chars['DLC']==0]
#bodies = bodies.loc[bodies['DLC']==0]
#tires = tires.loc[tires['DLC']==0]
#gliders = gliders.loc[gliders['DLC']==0]
stat_cols = bodies.columns[3:-1]
main_cols = ['Weight','Speed','Acceleration','Handling','Traction']
# lots of characters/karts/tires are exactly the same. here we just want one from each stat type
chars_unique = chars.drop_duplicates(subset=stat_cols).set_index('CBTG')[stat_cols]
bodies_unique = bodies.drop_duplicates(subset=stat_cols).set_index('CBTG')[stat_cols]
tires_unique = tires.drop_duplicates(subset=stat_cols).set_index('CBTG')[stat_cols]
gliders_unique = gliders.drop_duplicates(subset=stat_cols).set_index('CBTG')[stat_cols]
combos=[]
body_names=bodies_unique.index
tire_names=tires_unique.index
char_names=chars_unique.index
glider_names=gliders_unique.index
for body in body_names:
for tire in tire_names:
for char in char_names:
for glider in glider_names:
thiscombo=(char,body,tire,glider)
combos.append(thiscombo)
stats=pd.DataFrame(columns=['speed','accel','hand'], index=combos)
for combo in combos:
#print(combo)
char=combo[0]
body=combo[1]
tire=combo[2]
glider=combo[3]
speed=sum([gliders_unique.loc[glider,'Speed'],bodies_unique.loc[body,'Speed'],tires_unique.loc[tire,'Speed'],chars_unique.loc[char,'Speed']] )
accel= sum([gliders_unique.loc[glider,'Acceleration'],bodies_unique.loc[body,'Acceleration'],tires_unique.loc[tire,'Acceleration'],chars_unique.loc[char,'Acceleration'] ])
hand= sum([gliders_unique.loc[glider,'Handling'],bodies_unique.loc[body,'Handling'],tires_unique.loc[tire,'Handling'],chars_unique.loc[char,'Handling'] ])
index=combo
# print(index)
stats.loc[(index),'speed':'hand']= [speed, accel, hand]
# stats.loc[(index),'accel']=accel
# stats.loc[(index),'hand']=hand
# print(speed, accel, hand)
stats.to_csv('stats.csv')
maxes=[max(stats.loc[:,'speed']),max(stats.loc[:,'accel']),max(stats.loc[:,'hand'])]
print(maxes)
def is_pareto_front(row, maxes):
    # Keep a combo if it attains the maximum in at least one stat.
    # Note: this is a simpler criterion than the full Pareto front.
    for i in range(len(row)):
        if row[i]==maxes[i]:
            return True
    return False
import numpy as np
def is_pareto_efficient_dumb(costs):
"""
:param costs: An (n_points, n_costs) array
:return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
"""
is_efficient = np.ones(costs.shape[0], dtype = bool)
for i, c in enumerate(costs):
is_efficient[i] = np.all(np.any(costs>=c, axis=1))
return is_efficient
pareto=pd.DataFrame()
for index,row in stats.iterrows():
# print(index,row)
if is_pareto_front(row, maxes):
print(index,row)
pareto[index]=row
pareto
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(221, projection='3d')
x = stats.loc[:,'speed']
y =stats.loc[:,'accel']
z =stats.loc[:,'hand']
ax.scatter(x, y, z, c='r', marker='o')
ax.set_xlabel('speed')
ax.set_ylabel('accel')
ax.set_zlabel('hand')
axy=fig.add_subplot(222)
axy.scatter(x, y)
ayz=fig.add_subplot(223)
ayz.scatter(y, z)
axz=fig.add_subplot(224)
axz.scatter(x, z)
plt.show()
###Output
_____no_output_____ |
beer_recipes/beer.ipynb | ###Markdown
There are too many styles to make a strong model. I can experiment with different style groupings to figure out which work best in the model.
###Code
## Count nulls
null_count = beer.isnull().sum()
null_count[null_count>0]
###Output
_____no_output_____
###Markdown
'PrimingMethod' and 'PrimingAmount' have over 90% nulls and I don't need 'URL' or 'Name'. I can drop them. To see if I can drop the other high-null columns I need to see how strongly they correlate with other columns.
###Code
beer.corr()
###Output
_____no_output_____
###Markdown
'OG','BoilGravity','BoilSize' are strongly correlated with other variables. I can drop them. I can keep the high-null columns for now.
###Code
# drop unnecessary and high-null columns
beer2 = beer.drop(columns = ['PrimingMethod','PrimingAmount','URL','Name','OG','BoilGravity','BoilSize'])
###Output
_____no_output_____
###Markdown
Now I'll try grouping the different styles into larger groups. I'll store them in their own columns.
###Code
# Multiplying a boolean Series by a string yields the string where True and '' where False.
# Note the trailing space in 'Light ': a 'Light Lager' style matches both the Light and
# Lager patterns, so the concatenation below yields 'Light Lager' for those rows.
v = beer2['IPA'] = (beer['Style'].str.contains(' IPA'))*'IPA'
d = beer2['Light Lager'] = (beer['Style'].str.contains('Light Lager'))*'Light '
o = beer2['Lager'] = (beer['Style'].str.contains(' Lager|Common |Steam '))*'Lager'
f = beer2['Pale Ale'] = (beer['Style'].str.contains('Saison|Strong Bitter|Pale Ale'))*'Pale Ales'
k = beer2['Stout-Porter'] = (beer['Style'].str.contains(' Stout| Porter'))*'Stout-Porter'
q = beer2['Weissbier'] = (beer['Style'].str.contains('Witbier|Weissbier|Weizenbier|Weizen/Weissbier'))*'Weissbier'
#add new style groups to form 'kind' of beer column
beer2['kind'] = d+f+k+o+q+v+''
beer2['kind'].value_counts()
beer2['kind'].nunique()
def cut_beercols(df):
df2 = pd.DataFrame(df.iloc[:,:-int(df['kind'].nunique())])
df2['kind'] = df['kind']
df2['kind'] = df2['kind'].replace('',np.nan)
print(df2['kind'].value_counts())
return df2
#cut unnecessary style group columns
beer3 = cut_beercols(beer2)
###Output
IPA 17064
Pale Ales 11963
Stout-Porter 8654
Weissbier 3181
Lager 2969
Light Lager 2277
Name: kind, dtype: int64
###Markdown
The nan counts for MashThickness, PitchRate and PrimaryTemp are fairly high. We can keep these features if they don't seem skewed toward one style or another.
###Code
#Plot MashThickness value counts over StyleIDs.
beergroup = pd.DataFrame(beer3.groupby('StyleID')['MashThickness'].count().reset_index())
sns.lmplot(x = 'StyleID', y = 'MashThickness', data = beergroup)
#Plot PrimaryTemp value counts over StyleIDs.
beergroup2 = pd.DataFrame(beer3.groupby('StyleID')['PrimaryTemp'].count().reset_index())
sns.lmplot(x = 'StyleID', y = 'PrimaryTemp', data = beergroup2)
#Plot PitchRate value counts over StyleIDs.
beergroup3 = pd.DataFrame(beer3.groupby('StyleID')['PitchRate'].count().reset_index())
sns.lmplot(x = 'StyleID', y = 'PitchRate', data = beergroup3)
###Output
_____no_output_____
###Markdown
The data-points for all of these high-nan variables seem to be pretty consistent across styles. These columns could be useful. Now how do I fill nan? Let me check the distributions for each variable for each kind of beer.
###Code
beerind = beer3.set_index('kind')
beerind.index.unique()
#boxplot of PrimaryTemp for each beer category
sns.boxplot(x =beerind.index, y = 'PrimaryTemp', data = beerind)
#boxplot of PrimaryTemp for each beer category
sns.boxplot(x =beerind.index, y = 'MashThickness', data = beerind)
#boxplot of PrimaryTemp for each beer category
sns.boxplot(x =beerind.index, y = 'PitchRate', data = beerind)
#Median is probably best for PitchRate because of Pale Ales, and Weissbier. Otherwise I could use mean.
###Output
_____no_output_____
###Markdown
It looks like the best choice is to fill with the variable median for each category of beer, as there are a lot of outliers.
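A more concise alternative to the `fill_median` helper defined below, sketched with `groupby().transform()` (it assumes `beer3` already has the `kind` column and fills each column's NaNs with its per-category median):

```python
cols = ['PrimaryTemp', 'PitchRate', 'MashThickness']
beer_filled = beer3.copy()
for c in cols:
    beer_filled[c] = beer_filled[c].fillna(
        beer_filled.groupby('kind')[c].transform('median'))
```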
###Code
# make list of beers to iterate over
beerlist =list(beerind.index.unique())
beerlist.remove(np.nan)
beerlist
#make list of columns to iterate over
columnlist = ['PrimaryTemp','PitchRate','MashThickness']
def fill_median(df,columnlist,beerlist):
for j in columnlist:
fin = pd.DataFrame(None)
for i in beerlist:
df2 = df.set_index('kind')
medians= pd.DataFrame(df2.loc[i,:].median(axis =0,numeric_only = True))
result = df2.loc[i,:][[j,'BeerID']].fillna(int(medians.loc[j]))
fin = pd.concat([fin,result])
df = df.merge(fin, how = 'inner', on ='BeerID')
df = df.drop(columns=['MashThickness_x','PitchRate_x','PrimaryTemp_x'])
return df
#fill variables nans with variable medians for each beer
beer4 = fill_median(beer3,columnlist,beerlist)
beer4.shape
beer4.head()
###Output
_____no_output_____
###Markdown
Finally I can drop all the empty fields from the 'kind' column I created and see how many rows I have left.
###Code
beer4 = pd.DataFrame(beer4.dropna(how = 'any',axis = 0))
beer4.shape
###Output
_____no_output_____
###Markdown
Now I'll use get_dummies to prepare for modeling.
###Code
beer4dum = pd.get_dummies(beer4.drop(columns = ['kind','Style','StyleID','BeerID']))
beer4dum.corr()
###Output
_____no_output_____
###Markdown
These 3 columns don't seem to correlate strongly with anything else. I'll keep them.
###Code
#The Sugarscales are too strongly correlated with FG.
beer4dum = pd.DataFrame(beer4dum.drop(columns = ['SugarScale_Specific Gravity','SugarScale_Plato']))
beer4dum.head()
dropcols = ['kind','Style','StyleID','BeerID']
def preproc(df,dropcols):
df = pd.DataFrame(df.dropna(how = 'any',axis = 0))
df2 = pd.get_dummies(df.drop(columns = dropcols))
exes = df2
columns = exes.columns
scaler = MinMaxScaler()
scaled_df = scaler.fit_transform(exes)
scaled_df = pd.DataFrame(scaled_df, columns = columns)
print(scaled_df.columns)
return scaled_df
#Use new preprocessing function to prepare for modeling
beer4 = pd.DataFrame(beer4.drop(columns = ['SugarScale']))
scaled_df = preproc(beer4,dropcols)
#logistic regression
y = np.ravel(beer4['kind'])
y = y.astype(str)
X = np.asarray(scaled_df)
# Declare a logistic regression classifier.
lr = LogisticRegression(C = 1e6)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.20)
##Fit the model.
fit = lr.fit(X_train, Y_train)
# logistic regression output
print('Coefficients')
print(fit.coef_)
print('Intercepts')
print (fit.intercept_)
pred_y_sklearn = lr.predict(X_test)
pred_y_sklearn = lr.predict(X_train)
print('\n Percentage accuracy')
print('Test',lr.score(X_test, Y_test))
print('Train',lr.score(X_train, Y_train))
#random forest classifier
from sklearn import ensemble
from sklearn.model_selection import cross_val_score
rfc = ensemble.RandomForestClassifier(n_jobs = -1)
y = np.ravel(beer4['kind'])
X = pd.DataFrame(beer4dum)
cross_val_score(rfc,X,y,cv=5)
#random forest classifier feature importance
rfc.fit(X,y)
feature_importance = rfc.feature_importances_
# Make importances relative to max importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
fig, ax = plt.subplots(figsize =(10,10))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
# Create training and test sets for gradient boosting.
offset = int(X.shape[0] * 0.8)
# Put 80% of the data in the training set.
X_train, y_train = X[:offset], y[:offset]
# And put 20% in the test set.
X_test, y_test = X[offset:], y[offset:]
#gradient boosting classifier
params = {'n_estimators': 500,
'max_depth': 2,
'loss': 'deviance'}
# Initialize and fit the model.
clf = ensemble.GradientBoostingClassifier(**params,subsample=.5)
clf.fit(X_train, y_train)
predict_train = clf.predict(X_train)
predict_test = clf.predict(X_test)
#gradient boosting scores
score = accuracy_score(y_train, predict_train, normalize=True, sample_weight=None)
print("Train {}".format(score))
score2 = accuracy_score(y_test, predict_test, normalize=True, sample_weight=None)
print("Test {}".format(score2))
#plot important features
fig, ax = plt.subplots(figsize=(7, 4))
sns.boxplot(x = 'kind', y = 'Color', data = beer4)
ax.set_xlabel('Category')
#plot important features
fig, ax = plt.subplots(figsize=(7, 4))
sns.boxplot(x = 'kind', y = 'IBU', data = beer4)
plt.ylim(0,350)
ax.set_xlabel('Category')
#Outliers cut off for better visualization
#plot important features
fig, ax = plt.subplots(figsize=(7, 4))
sns.boxplot(x = 'kind', y = 'ABV', data = beer4)
ax.set_xlabel('Category')
plt.ylim(0,20)
#Outliers cut off for better visualization
#plot important features
fig, ax = plt.subplots(figsize=(7, 4))
sns.boxplot(x = 'kind', y = 'PrimaryTemp_y', data = beer4)
ax.set_xlabel('Category')
ax.set_ylabel('Primary Temp')
plt.ylim(0,60)
#Outliers cut off for better visualization
###Output
_____no_output_____
###Markdown
Now I'd like to do a binary classification with Stout and Porter since I couldn't classify them in my multinomial classification.
###Code
#dropping unnecessary columns.
stout = beer.drop(columns = ['PrimingMethod','PrimingAmount','URL','Name','OG','BoilGravity','BoilSize','SugarScale'])
#find and group stout and porter
a = stout['Stout'] = (beer['Style'].str.contains(' Stout'))*'Stout'
b = stout['Porter'] = (beer['Style'].str.contains(' Porter'))*'Porter'
#create kind of beer column
stout['kind'] = a+b+''
#cut unnecessary stout and porter columns
stout2 = cut_beercols(stout)
#fill nan
stout3 = fill_median(stout2,columnlist,['Stout','Porter'])
stout3.head()
#pre-processing
scaled_df = preproc(stout3,['Style','StyleID','BeerID','kind'])
#logistic regression
y = np.ravel(stout3['kind'])
y = y.astype(str)
X = np.asarray(scaled_df)
# Declare a logistic regression classifier.
lr = LogisticRegression(C = 1e6)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.20)
##Fit the model.
fit = lr.fit(X_train, Y_train)
#logistic regression output
print('Coefficients')
print(fit.coef_)
print('Intercepts')
print (fit.intercept_)
pred_y_sklearn = lr.predict(X_test)
pred_y_sklearn = lr.predict(X_train)
print('\n Percentage accuracy')
print('Test',lr.score(X_test, Y_test))
print('Train',lr.score(X_train, Y_train))
#random forest classifier
from sklearn import ensemble
from sklearn.model_selection import cross_val_score
rfc = ensemble.RandomForestClassifier(n_jobs = -1)
y = np.ravel(stout3['kind'])
X = pd.DataFrame(scaled_df)
cross_val_score(rfc,X,y,cv=5)
#rfc feature importance
rfc.fit(X,y)
feature_importance = rfc.feature_importances_
# Make importances relative to max importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
fig, ax = plt.subplots(figsize =(10,10))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
# Create training and test sets for gradient boosting.
offset = int(X.shape[0] * 0.8)
# Put 80% of the data in the training set.
X_train, y_train = X[:offset], y[:offset]
# And put 20% in the test set.
X_test, y_test = X[offset:], y[offset:]
# gradient boosting classifier
params = {'n_estimators': 500,
'max_depth': 2,
'loss': 'deviance'}
# Initialize and fit the model.
clf = ensemble.GradientBoostingClassifier(**params,subsample=.5)
clf.fit(X_train, y_train)
predict_train = clf.predict(X_train)
predict_test = clf.predict(X_test)
#gradient boosting scores
score = accuracy_score(y_train, predict_train, normalize=True, sample_weight=None)
print("Train {}".format(score))
score2 = accuracy_score(y_test, predict_test, normalize=True, sample_weight=None)
print("Test {}".format(score2))
#dropping unnecessary columns.
IPA = beer.drop(columns = ['PrimingMethod','PrimingAmount','URL','Name','OG','BoilGravity','BoilSize','SugarScale'])
#find and group IPA and pale ales.
y = IPA['IPA'] = (beer['Style'].str.contains(' IPA'))*'IPA'
z = IPA['Pale Ale'] = (beer['Style'].str.contains('Saison|Strong Bitter|Pale Ale'))*'Pale Ales'
#create kind of beer column
IPA['kind'] = y+z+''
#cut unnecessary IPA and Pale Ale columns
IPA2 = cut_beercols(IPA)
#fill nan
IPA3 = fill_median(IPA2,columnlist,['IPA','Pale Ales'])
#preprocessing
scaled_df = preproc(IPA3,['Style','StyleID','BeerID','kind'])
#logistic regression
y = np.ravel(IPA3['kind'])
y = y.astype(str)
X = np.asarray(scaled_df)
# Declare a logistic regression classifier.
lr = LogisticRegression(C = 1e6)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.20)
##Fit the model.
fit = lr.fit(X_train, Y_train)
#logistic regression output
print('Coefficients')
print(fit.coef_)
print('Intercepts')
print (fit.intercept_)
pred_y_sklearn = lr.predict(X_test)
pred_y_sklearn = lr.predict(X_train)
print('\n Percentage accuracy')
print('Test',lr.score(X_test, Y_test))
print('Train',lr.score(X_train, Y_train))
#random forest classifier
from sklearn import ensemble
from sklearn.model_selection import cross_val_score
rfc = ensemble.RandomForestClassifier(n_jobs = -1)
y = np.ravel(IPA3['kind'])
X = pd.DataFrame(scaled_df)
cross_val_score(rfc,X,y,cv=5)
# rfc feature importance
rfc.fit(X,y)
feature_importance = rfc.feature_importances_
# Make importances relative to max importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
fig, ax = plt.subplots(figsize =(10,10))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
# Create training and test sets for gradient boosting.
offset = int(X.shape[0] * 0.8)
# Put 80% of the data in the training set.
X_train, y_train = X[:offset], y[:offset]
# And put 20% in the test set.
X_test, y_test = X[offset:], y[offset:]
# Gradient boosting classifier
params = {'n_estimators': 500,
'max_depth': 2,
'loss': 'deviance'}
# Initialize and fit the model.
clf = ensemble.GradientBoostingClassifier(**params,subsample=.5)
clf.fit(X_train, y_train)
predict_train = clf.predict(X_train)
predict_test = clf.predict(X_test)
#gradient boosting scores
score = accuracy_score(y_train, predict_train, normalize=True, sample_weight=None)
print("Train {}".format(score))
score2 = accuracy_score(y_test, predict_test, normalize=True, sample_weight=None)
print("Test {}".format(score2))
###Output
Train 0.8533654881357392
Test 0.8098518773682397
|
2017-12-15_CCMI_workshop/notebooks/2017-12-15_08_CCMI_Hierarchical+Clustering+-+RNASeq.ipynb | ###Markdown
Hierarchical Clustering in GenePattern Notebook. Cluster genes and/or samples based on how close they are to one another. The result is a tree structure, referred to as a dendrogram. Before you begin: * Sign in to GenePattern by entering your username and password into the form below. * Gene expression data must be in a [GCT or RES file](https://genepattern.broadinstitute.org/gp/pages/protocols/GctResFiles.html) - we have provided files in the correct format. * Example file: [all_aml_test.gct](https://software.broadinstitute.org/cancer/software/genepattern/data/all_aml/all_aml_test.gct). * Learn more by reading about [file formats](http://www.broadinstitute.org/cancer/software/genepattern/file-formats-guideGCT).
###Code
# Requires GenePattern Notebook: pip install genepattern-notebook
import gp
import genepattern
# Username and password removed for security reasons.
genepattern.GPAuthWidget(genepattern.register_session("https://genepattern.broadinstitute.org/gp", "", ""))
###Output
_____no_output_____
###Markdown
Step 1: HierarchicalClustering. Run hierarchical clustering on genes and/or samples to create dendrograms for the clustered genes (*.gtr) and/or clustered samples (*.atr), as well as a file (*.cdt) that contains the original gene expression data ordered to reflect the clustering. Considerations: * Best practice is to normalize (row/column normalize parameters) and center (row/column center parameters) the data being clustered. * The CDT output file must be converted to a GCT file before it can be used as an input file for another GenePattern module (other than HierarchicalClusteringViewer). For instructions on converting a CDT file to a GCT file, see [Creating Input Files](http://www.broadinstitute.org/cancer/software/genepattern/file-formats-guidecreating-input-files). * Learn more by reading about the [HierarchicalClustering](https://genepattern.broadinstitute.org/gp/getTaskDoc.jsp?name=HierarchicalClustering) module. Instructions: for the input.filename parameter, click and drag BRCA_HUGO_symbols.preprocessed.gct into the "Enter Path or URL" text box. Click Run.
###Code
hierarchicalclustering_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00009')
hierarchicalclustering_job_spec = hierarchicalclustering_task.make_job_spec()
hierarchicalclustering_job_spec.set_parameter("input.filename", "")
hierarchicalclustering_job_spec.set_parameter("column.distance.measure", "2")
hierarchicalclustering_job_spec.set_parameter("row.distance.measure", "0")
hierarchicalclustering_job_spec.set_parameter("clustering.method", "a")
hierarchicalclustering_job_spec.set_parameter("log.transform", "")
hierarchicalclustering_job_spec.set_parameter("row.center", "mean.row")
hierarchicalclustering_job_spec.set_parameter("row.normalize", "")
hierarchicalclustering_job_spec.set_parameter("column.center", "mean.column")
hierarchicalclustering_job_spec.set_parameter("column.normalize", "")
hierarchicalclustering_job_spec.set_parameter("output.base.name", "<input.filename_basename>")
genepattern.GPTaskWidget(hierarchicalclustering_task)
###Output
_____no_output_____
###Markdown
Step 2: HierarchicalClusteringViewer. Display a heat map of the clustered gene expression data, with dendrograms showing how the genes and/or samples were clustered. Considerations: * Select File > Save Image to save the heat map and dendrograms to an image file. Supported formats include bmp, eps, jpeg, png, and tiff. * Learn more by reading about the [HierarchicalClusteringViewer](https://genepattern.broadinstitute.org/gp/getTaskDoc.jsp?name=HierarchicalClusteringViewer) module. Instructions: - For the **cdt file** parameter, click the down arrow in the file input box and choose the result of the HierarchicalClustering job. - For the **atr file** parameter, click the down arrow in the file input box and choose the result of the HierarchicalClustering job. - Click **Run**.
###Code
hierarchicalclusteringviewer_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.visualizer:00031')
hierarchicalclusteringviewer_job_spec = hierarchicalclusteringviewer_task.make_job_spec()
hierarchicalclusteringviewer_job_spec.set_parameter("cdt.file", "")
hierarchicalclusteringviewer_job_spec.set_parameter("gtr.file", "")
hierarchicalclusteringviewer_job_spec.set_parameter("atr.file", "")
genepattern.GPTaskWidget(hierarchicalclusteringviewer_task)
###Output
_____no_output_____ |
notebooks/dev/paper_plots_angularcl_64.ipynb | ###Markdown
Prototyping Settings for Simplistic DESC SRD Y1 after PGD implementation For reference, the DESC SRD can be found [here](https://arxiv.org/pdf/1809.01669.pdf). Appendix D2 specifies some of the analysis choices for the Y1 data. In particular: - neff for lensing sources: 10 gal/arcmin^2 - sigma_e: 0.26 per component - lmax: 3000. In this notebook, we will see the effects of the PGD implementation on the angular power spectrum. We will use a kappa TNG map as a reference point.
###Code
%pylab inline
import tensorflow_addons as tfa
import tensorflow as tf
import flowpm
from flowpm.tfpower import linear_matter_power
import DifferentiableHOS as DHOS
import flowpm.scipy.interpolate as interpolate
import pickle
from flowpm import tfpm
from DifferentiableHOS.pk import power_spectrum
from nbodykit.cosmology import Cosmology
from nbodykit.cosmology.power.halofit import HalofitPower
from astropy.cosmology import Planck15
import astropy.units as u
n_lens = 11
box_size= 205
nc=128
field_size = 5.
field_npix = 1024
B=1
batch_size = 1
z_source = np.array([1.])
cosmology = flowpm.cosmology.Planck15()
r = tf.linspace(0., box_size*n_lens, n_lens+1)
r_center = 0.5*(r[1:] + r[:-1])
a = flowpm.tfbackground.a_of_chi(cosmology, r)
a_center =flowpm.tfbackground.a_of_chi(cosmology, r_center)
init_stages = tf.linspace(0.1, a[-1], 4)
stages = tf.concat([init_stages, a_center.numpy()[::-1]], axis=0)
k = tf.constant(np.logspace(-4, 1, 128), dtype=tf.float32)
pk = linear_matter_power(cosmology, k)
pk_fun = lambda x: tf.cast(tf.reshape(interpolate.interp_tf(tf.reshape(tf.cast(x, tf.float32), [-1]), k, pk), x.shape), tf.complex64)
initial_conditions = flowpm.linear_field(
[nc, nc, nc],
[box_size, box_size, box_size],
pk_fun,
batch_size=1)
initial_state = flowpm.lpt_init(cosmology, initial_conditions, 0.1)
pgd_data = pickle.load(open("results_fit_PGD_205_128.pkl", 'rb'))
pgdparams = pgd_data['params']
states = flowpm.nbody(cosmology,
initial_state,
stages, [nc, nc, nc],
pm_nc_factor=B,
pgdparams=pgdparams)
###Output
_____no_output_____
###Markdown
Check that everything works properly by comparing the matter power spectrum to the reference one
###Code
cosmology = flowpm.cosmology.Planck15()
# Create a simple Planck15 cosmology without neutrinos, and makes sure sigma8
# is matched
nbdykit_cosmo = Cosmology.from_astropy(Planck15.clone(m_nu=0 * u.eV))
nbdykit_cosmo = nbdykit_cosmo.match(sigma8=cosmology.sigma8.numpy())
# `new_states` (per-snapshot states), `dx` (PGD displacements) and `pkl`
# (a 3D power-spectrum estimator) are assumed to be defined in earlier cells.
corrected_states=[]
pk_PGD=[]
pk_NO_PGD=[]
pk_halo=[]
for i in range(len(new_states)):
corrected_states.append(dx[i]+new_states[i][1][0])
final_field = flowpm.cic_paint(tf.zeros_like(initial_conditions), corrected_states[i])
final_field=tf.reshape(final_field, [nc, nc, nc])
k, power_spectrum = pkl(final_field,shape=final_field.shape,boxsize=np.array([box_size, box_size,
box_size]),kmin=0.1,dk=2*np.pi/box_size)
final_field1 = flowpm.cic_paint(tf.zeros_like(initial_conditions), new_states[i][1][0])
final_field1=tf.reshape(final_field1, [nc, nc, nc])
k1, power_spectrum1 = pkl(final_field1,shape=final_field.shape,boxsize=np.array([box_size, box_size,
box_size]),kmin=0.1,dk=2*np.pi/box_size)
pk_PGD.append(power_spectrum)
pk_NO_PGD.append(power_spectrum1)
pk_halo.append(HalofitPower(nbdykit_cosmo, 1. / new_states[i][0] - 1.)(k))
def trim_axs(axs, N):
"""
Reduce *axs* to *N* Axes. All further Axes are removed from the figure.
"""
axs = axs.flat
for ax in axs[N:]:
ax.remove()
return axs[:N]
figsize = (23, 10)
fig =plt.figure(figsize=figsize, constrained_layout=True)
cols = 5
rows =7
axs = fig.subplots(rows, cols)
axs = trim_axs(axs, len(pk_PGD))
for ax, pi in zip(axs, pk_PGD):
ax.loglog(k, pi,label='DLL with PGD')
for ax, px in zip(axs, pk_NO_PGD):
ax.loglog(k, px,label='DLL without PGD')
for ax, pj in zip(axs, pk_halo):
ax.loglog(k, pj,'--',label='Analytical $halofit$ predictions')
ax.set_xlabel('k')
ax.set_ylabel('$P_k$')
legend(loc='center left')
###Output
_____no_output_____
###Markdown
Implement the raytracing
###Code
lensplanes_PGD = []
lensplanes_NO_PGD = []
for i in range(len(corrected_states)):
plane_PGD = flowpm.raytracing.density_plane(corrected_states[i],
[nc, nc, nc],
nc//2,
width=nc,
plane_resolution=256,
shift=flowpm.raytracing.random_2d_shift())
plane_NO_PGD = flowpm.raytracing.density_plane(new_states[i][1],
[nc, nc, nc],
nc//2,
width=nc,
plane_resolution=256,
shift=flowpm.raytracing.random_2d_shift())
plane_PGD = tf.expand_dims(plane_PGD, axis=-1)
plane_PGD = tf.image.random_flip_left_right(plane_PGD)
plane_PGD = tf.image.random_flip_up_down(plane_PGD)
plane_NO_PGD = tf.expand_dims(plane_NO_PGD, axis=-1)
plane_NO_PGD = tf.image.random_flip_left_right(plane_NO_PGD)
plane_NO_PGD = tf.image.random_flip_up_down(plane_NO_PGD)
lensplanes_PGD.append((r_center[i], new_states[i][0], plane_PGD[...,0]))
lensplanes_NO_PGD.append((r_center[i], new_states[i][0], plane_NO_PGD[...,0]))
xgrid, ygrid = np.meshgrid(np.linspace(0, field_size, field_npix, endpoint=False), # range of X coordinates
np.linspace(0, field_size, field_npix, endpoint=False)) # range of Y coordinates
coords = np.stack([xgrid, ygrid], axis=0)*u.deg
c = coords.reshape([2, -1]).T.to(u.rad)
m_PGD = flowpm.raytracing.convergenceBorn(cosmology,
lensplanes_PGD,
dx=box_size/256,
dz=box_size,
coords=c,
z_source=z_source)
m_NO_PGD = flowpm.raytracing.convergenceBorn(cosmology,
lensplanes_NO_PGD,
dx=box_size/256,
dz=box_size,
coords=c,
z_source=z_source)
m_PGD = m_PGD.numpy().reshape([batch_size, field_npix, field_npix])
m_NO_PGD = m_NO_PGD.numpy().reshape([batch_size, field_npix, field_npix])
imshow(m_PGD[0])
colorbar()
imshow(m_NO_PGD[0])
colorbar()
l_PGD, ps_PGD= DHOS.statistics.power_spectrum(m_PGD[0],field_size,field_npix)
l_NO_PGD, ps_NO_PGD= DHOS.statistics.power_spectrum(m_NO_PGD[0],field_size,field_npix)
l=l_PGD
import jax_cosmo as jc
cosmo=jc.Planck15()
nz =jc.redshift.delta_nz(z_source)
probes = [jc.probes.WeakLensing([nz])]
cls = jc.angular_cl.angular_cl(cosmo, l.numpy(), probes)
loglog(l, l*(l+1)*ps_PGD/(2*np.pi),label='DLL with PGD ')
loglog(l, l*(l+1)*ps_NO_PGD/(2*np.pi),label='DLL without PGD')
loglog(l, l*(l+1)*cls[0]/(2*np.pi),'--',label='Analytical $halofit$ predictions')
xlim(1e2,1e4)
#ylim(4e-9,4e-2)
axvline(300)
axvline(3000)
ylabel('$\ell(\ell+1)C_\ell /2\pi$')
xlabel('$\ell$')
legend()
#savefig('cl_comp1_64.png',dpi=250)
###Output
_____no_output_____
###Markdown
Comparison to kappa TNG. For comparison, we are using a map at redshift 1 from the kappa TNG simulations.
###Code
kTNG = np.load('kappa_tng.npy')
###Output
_____no_output_____
###Markdown
So, obviously our 64^3 simulation is not as precise as kappa TNG, but we won't be working at the native 0.3 arcmin resolution anyway; in practice we'll have noise and smoothing. So let's see how much smoothing gets us in the right ballpark.
###Code
ngal = 10 # gal/arcmin **2
pix_scale = 5/1024*60 # arcmin
ngal_per_pix = ngal * pix_scale**2 # galaxies per pixels (I think)
sigma_e = 0.26 / sqrt(2 * ngal_per_pix) # Rescaled noise sigma
sigma_pix_2ar=2/pix_scale
l, ps_FLP_2arc= DHOS.statistics.power_spectrum(tfa.image.gaussian_filter2d(m_PGD[0],51,sigma=sigma_pix_2ar),field_size,field_npix)
l, ps_TNG_2arc=DHOS.statistics.power_spectrum(tfa.image.gaussian_filter2d(kTNG,51,sigma=sigma_pix_2ar),field_size,field_npix)
l, ps_FLP_NOPGD_2arc= DHOS.statistics.power_spectrum(tfa.image.gaussian_filter2d(m_NO_PGD[0],51,sigma=sigma_pix_2ar),field_size,field_npix)
figure(figsize=[10,5])
loglog(l, l*(l+1)*ps_FLP_2arc/(2*np.pi), label='DLL with PGD')
loglog(l, l*(l+1)*ps_FLP_NOPGD_2arc/(2*np.pi), label='DLL without PGD')
loglog(l, l*(l+1)*ps_TNG_2arc/(2*np.pi), label='$\kappa$TNG')
axvline(3000, ls='--')
axvline(300,ls='--')
ylim(10e-9,10e-3)
xlim(1e2,1e4)
ylabel('$\ell(\ell+1)C_\ell /2\pi$')
xlabel('$\ell$')
legend()
title('Comparison to $\kappa$TNG 2 arcmin smoothing')
###Output
_____no_output_____
###Markdown
Adding noise. We'll now try to get to a realistic setting that matches some of the SRD Y1 settings.
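The per-pixel noise level computed in the next cell follows the usual shape-noise estimate (written here with the per-component convention for the factor of 2, which is what the code uses):

$$\sigma_{\rm pix} = \frac{\sigma_e}{\sqrt{2\, n_{\rm gal}\, A_{\rm pix}}},
\qquad \sigma_e = 0.26,\quad n_{\rm gal} = 10\ {\rm arcmin}^{-2},\quad
A_{\rm pix} = \Big(\tfrac{5\times 60}{1024}\Big)^2\ {\rm arcmin}^2 .$$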
###Code
# Each of these is a (noisy map, field size) tuple; only element [0] (the map) is used below.
knTNG_n = np.load('kappa_tng.npy')+ sigma_e * randn(1024,1024), 5*u.deg
knFPM_n = m_PGD[0]+ sigma_e * randn(1024,1024), 5*u.deg
knFPM_n_NOPGD = m_NO_PGD[0]+ sigma_e * randn(1024,1024), 5*u.deg
l, ps_FLP_2ar_n= DHOS.statistics.power_spectrum(tfa.image.gaussian_filter2d(knFPM_n[0],51,sigma=sigma_pix_2ar),field_size,field_npix)
l, ps_FLP_NOPGD_2ar_n= DHOS.statistics.power_spectrum(tfa.image.gaussian_filter2d(knFPM_n_NOPGD[0],51,sigma=sigma_pix_2ar),field_size,field_npix)
l, ps_TNG_2ar_n=DHOS.statistics.power_spectrum(tfa.image.gaussian_filter2d(knTNG_n[0],51,sigma=sigma_pix_2ar),field_size,field_npix)
figure(figsize=[10,5])
loglog(l, l*(l+1)*ps_FLP_2ar_n/(2*np.pi), label='DLL with PGD')
loglog(l, l*(l+1)*ps_FLP_NOPGD_2ar_n/(2*np.pi), label='DLL without PGD')
loglog(l, l*(l+1)*ps_TNG_2ar_n/(2*np.pi), label='$\kappa$TNG')
axvline(3000, ls='--')
axvline(300,ls='--')
ylim(10e-9,10e-3)
xlim(1e2,1e4)
ylabel('$\ell(\ell+1)C_\ell /2\pi$')
xlabel('$\ell$')
legend()
title('Comparison to $\kappa$TNG 2 arcmin smoothing and noise')
###Output
_____no_output_____ |
Pandas Practical Guide.ipynb | ###Markdown
Pandas Practical Guide. Pandas is an essential package for data engineers, data analysts, and data scientists. It is an easy-to-use Python library for data manipulation and analysis. If you are already familiar with SQL or even MS Excel, it will not be difficult to get used to the functions in pandas. Pandas has a commonly used data format called DataFrame. A Pandas DataFrame is a 2D data structure: data is organized like a table containing rows and columns, making it easy to query. Rows represent data records and columns represent fields. Dataset: I created simple data for this post, making it easier to understand Pandas. The data was taken from the Indonesian Central Bureau of Statistics (bps.go.id). The dataset contains some information about provinces in Indonesia in 2015. This dataset has 10 columns: 1. province: province name 2. rainfall: amount of rainfall in mm, taken from the observation stations owned by BMKG 3. rainy_day: number of days it rains 4. expenses_food_urban: average monthly food expenses per capita in urban areas 5. expenses_other_urban: average monthly non-food expenses per capita in urban areas 6. expenses_food_rural: average monthly food expenses per capita in rural areas 7. expense_other_rural: average monthly non-food expenses per capita in rural areas 8. unemployment: the unemployment rate calculated in August (percentage) 9. time_zone: time zone classification 10. island: island group. The dataset can be downloaded at [github](https://raw.githubusercontent.com/project303/dataset/master/data-province-2015.cvs). Importing the pandas package: before we can use pandas, we need to import the package and give it a shorter name, namely pd.
###Code
import pandas as pd
print('Pandas version: {}'.format(pd.__version__))
###Output
Pandas version: 1.1.0
###Markdown
Loading a .csv file into a Pandas DataFrame. To read it as a Pandas DataFrame, we can simply use the read_csv() command.
###Code
url = "https://raw.githubusercontent.com/project303/dataset/master/data-province-2015.cvs"
df = pd.read_csv(url, sep='\t')
###Output
_____no_output_____
###Markdown
View Data Sample. The **head()** function displays the first 5 records.
###Code
df.head()
###Output
_____no_output_____
###Markdown
Display the first 10 records from the DataFrame
###Code
df.head(10)
###Output
_____no_output_____
###Markdown
Display last 5 records
###Code
df.tail()
###Output
_____no_output_____
###Markdown
Displays 10 random records
###Code
df.sample(10)
###Output
_____no_output_____
###Markdown
Let's display all records we have in DataFrame:
###Code
df
###Output
_____no_output_____
###Markdown
Count Number of Records. To get the number of records in the dataframe, you can use the **count()** function.
###Code
df.count()
###Output
_____no_output_____
###Markdown
Another way to count the number of records is to use the **shape** property.
###Code
df.shape[0]
###Output
_____no_output_____
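A third option, not shown above, is Python's built-in `len()`, which also returns the number of rows:

```python
len(df)   # same value as df.shape[0]
```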
###Markdown
Data Structure Information. The **shape** property can be used to get the dimensions of the DataFrame.
###Code
df.shape
###Output
_____no_output_____
###Markdown
Another dataframe property that can be used to display the dataframe structure is **dtypes**
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
More detailed information about the structure can be displayed using **info()**
###Code
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 34 entries, 0 to 33
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 province 34 non-null object
1 rainfall 34 non-null float64
2 rainy_day 34 non-null int64
3 expenses_food_urban 34 non-null int64
4 expenses_other_urban 34 non-null int64
5 expenses_food_rural 34 non-null object
6 expense_other_rural 34 non-null object
7 unemployment 34 non-null float64
8 time_zone 34 non-null int64
9 island 34 non-null object
dtypes: float64(2), int64(4), object(4)
memory usage: 2.8+ KB
###Markdown
Dataframe Statistical Information. Statistical information for each column, such as the minimum value, maximum value, standard deviation, average and so on, can be displayed with a command like the following.
###Code
df.describe(include='all')
###Output
_____no_output_____
###Markdown
Selecting Columns. We can choose which columns to display simply by mentioning the column names in a list.
###Code
df[['province', 'unemployment', 'island']].head()
###Output
_____no_output_____
###Markdown
Filtering data. We want to display the data where the island equals 'Sumatera'.
###Code
df[(df.island == "Sumatera")].head()
###Output
_____no_output_____
###Markdown
We want to get all provinces **located on Sumatera island with an unemployment rate less than 5**.
###Code
df[(df.island == "Sumatera") & (df.unemployment < 5)]
###Output
_____no_output_____
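The same filter can also be written with `DataFrame.query()`, which some readers find closer to SQL; a small sketch:

```python
df.query("island == 'Sumatera' and unemployment < 5")
```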
###Markdown
It can be written in a different way, but it has the same meaning.
###Code
df[(df['island'] == "Sumatera") & (df['unemployment'] < 5)].head()
###Output
_____no_output_____
###Markdown
The isin() function can be used to filter a column against values specified in a list. For example, we want to show the provinces on **Sumatera and Kalimantan** islands that have an unemployment rate less than 5.
###Code
df[ (df['island'].isin(['Sumatera', 'Kalimantan']))
& (df['unemployment'] < 5)
]
###Output
_____no_output_____
###Markdown
Show all provinces that are **NOT** on Sumatera or Kalimantan, but have an unemployment rate less than 5.
###Code
df[ ~(df['island'].isin(['Sumatera', 'Kalimantan']))
& (df['unemployment'] < 5)
].head()
###Output
_____no_output_____
###Markdown
If the condition statement is too complex, it is recommended to create a new dataframe to simplify the rest of the process.
###Code
df2 = df[ ~(df['island'].isin(['Sumatera', 'Kalimantan']))
& (df['unemployment'] < 5)
]
df2.sample(5)
###Output
_____no_output_____
###Markdown
Sorting data. The **sort_values()** function is used to sort data based on the specified column, starting from the smallest value. The following command displays the data sorted by the **rainfall** column.
###Code
df.sort_values('rainfall').head()
###Output
_____no_output_____
###Markdown
To sort the data starting from the largest value, the **ascending** parameter is set to **False**.
###Code
df.sort_values('rainfall', ascending=False).head()
###Output
_____no_output_____
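When you only need the top rows, `nlargest()` (and its counterpart `nsmallest()`) gives the same result as sorting and then taking `head()`:

```python
df.nlargest(5, 'rainfall')    # top 5 provinces by rainfall
df.nsmallest(5, 'rainfall')   # bottom 5 provinces by rainfall
```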
###Markdown
If you want to sort data using more than one column, specify the column names to sort by as a list.
###Code
df[['province', 'rainfall', 'rainy_day', 'island', 'time_zone']]\
.sort_values('rainfall', ascending=False)\
.head()
df[['province', 'rainfall', 'rainy_day', 'island', 'time_zone']]\
.sort_values(['rainfall', 'rainy_day'])\
.head()
df.sort_values(['rainfall', 'rainy_day' ]).head()
###Output
_____no_output_____
###Markdown
If you want to sort data, but with a different direction for each column, then the **ascending** parameter must be specified. The value 0 sorts the largest value first; the value 1 sorts the smallest value first.
###Code
df[['province', 'rainfall', 'rainy_day', 'island', 'time_zone']]\
.sort_values(['rainfall', 'time_zone'], ascending=[0, 1])\
.head()
###Output
_____no_output_____
###Markdown
Summarising Data. Pandas provides statistical functions such as count, sum, min, max and more. These functions can be applied to columns. Take the **count()** function, for example: it can be used to count the number of records in each column.
###Code
df.count()
###Output
_____no_output_____
###Markdown
But keep in mind that **count()** only counts records that are not null. In the example, the number of records for each column is the same, 34, and none of them has a null value. To count the number of records in a single column, use the following command.
###Code
df.rainfall.count()
###Output
_____no_output_____
###Markdown
or it could be written as follows
###Code
df['rainfall'].count()
###Output
_____no_output_____
###Markdown
Other functions such as **sum()**, **min()**, **max()** and **mean()** are used in almost the same way.
###Code
df.sum()
###Output
_____no_output_____
###Markdown
Other uses of the statistical functions
###Code
print('Total rainfall \t\t:', df.rainfall.sum())
print('Minimum rainfall value \t:', df.rainfall.min())
print('Maximum rainfall value \t:', df.rainfall.max())
print('Average rainfall value \t:', df.rainfall.mean())
###Output
Total rainfall : 63615.1
Minimum rainfall value : 460.9
Maximum rainfall value : 3548.0
Average rainfall value : 1871.0323529411764
###Markdown
Grouping. Like SQL, pandas has a **groupby()** function to summarize column values based on the unique values of a selected column. For example, we want to count the number of records grouped by the unique values of **time_zone**.
###Code
df.groupby('time_zone').count()
###Output
_____no_output_____
###Markdown
From the displayed data above, it can be seen that time_zone has 3 unique values: 1, 2 and 3. Other summary functions such as sum, min, max, mean, first and last can be used with groupby() to get the statistical value of each group. Suppose we want to get the first value for each time_zone.
###Code
df.groupby('time_zone').first()
###Output
_____no_output_____
###Markdown
Calculating the total amount of rainfall for each time_zone can be done as below.
###Code
df.groupby('time_zone')[['rainfall']].sum()
###Output
_____no_output_____
###Markdown
Calculating the total amount of rainfall and expenses_food_urban for each time_zone can be done as below.
###Code
df.groupby('time_zone')[['rainfall', 'expenses_food_urban']].sum()
###Output
_____no_output_____
###Markdown
To perform multiple statistical calculations grouped by the unique values of a column, you can combine the **groupby()** and **agg()** functions.
###Code
df.groupby('time_zone').agg(['sum', 'min', 'max', 'mean', 'count'])
###Output
_____no_output_____
###Markdown
To apply a different summary function to each column for each time_zone, see below.
###Code
df.groupby('time_zone').agg(
{
'rainfall': ['mean', 'sum'],
'expenses_food_urban': ['min', 'max']
})
###Output
_____no_output_____
###Markdown
The **NamedAgg()** function can be used to rename the aggregated columns, making the result easier to understand.
###Code
df.groupby('time_zone', as_index=False)\
.agg(
total_record=pd.NamedAgg('rainfall', 'count'),
avg_rainfall=pd.NamedAgg('rainfall', 'mean'),
min_rainy_day=pd.NamedAgg('rainy_day', 'min'),
max_rainy_day=pd.NamedAgg('rainy_day', 'max')
)
###Output
_____no_output_____
###Markdown
We can do it in another way
###Code
df.groupby('time_zone', as_index=False)\
.agg(
total_record=pd.NamedAgg(column ='rainfall', aggfunc='count'),
avg_rainfall=pd.NamedAgg(column ='rainfall', aggfunc='mean'),
min_rainy_day=pd.NamedAgg(column='rainy_day', aggfunc='min'),
max_rainy_day=pd.NamedAgg(column='rainy_day', aggfunc='max')
)
###Output
_____no_output_____
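The same named aggregation can also be written with plain `(column, function)` tuples instead of `pd.NamedAgg`, which is a bit shorter:

```python
df.groupby('time_zone', as_index=False).agg(
    total_record=('rainfall', 'count'),
    avg_rainfall=('rainfall', 'mean'),
    min_rainy_day=('rainy_day', 'min'),
    max_rainy_day=('rainy_day', 'max'),
)
```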
###Markdown
Column transformation. Another common task is to perform column transformations, for example adding new columns calculated from existing ones. We will add a new column, expenses_urban, as the sum of expenses_food_urban and expenses_other_urban.
###Code
df['expenses_urban'] = df['expenses_food_urban'] + df['expenses_other_urban']
df[['province', 'expenses_food_urban', 'expenses_other_urban', 'expenses_urban']].head()
###Output
_____no_output_____
###Markdown
To delete a column, use the **drop()** function.
###Code
df = df.drop(columns=['expenses_urban'])
df.head()
###Output
_____no_output_____
###Markdown
Changing the column order can be done in a simple way as follows
###Code
df = df[['province', 'island', 'time_zone', 'rainfall', 'rainy_day', 'expenses_food_urban', 'expenses_other_urban', 'expenses_food_rural', 'expense_other_rural', 'unemployment']]
df.head()
###Output
_____no_output_____
###Markdown
Join the reference data. In many cases, we add columns with new data using reference data. Reference data is usually used to make data maintenance easier, so we don't need to change the code. As an example, we will add a time zone name from the timezone reference data.
###Code
timezone_data = {
'time_zone': [1, 2, 3],
'zone_name': ['WIB', 'WITA', 'WIT']}
timezone_df = pd.DataFrame(timezone_data, columns = ['time_zone', 'zone_name'])
timezone_df
###Output
_____no_output_____
###Markdown
In this example, we will enrich the **df** dataframe by adding a new column, zone_name, based on time_zone. The function used is **merge** with the how='left' parameter, which means we do a **left join** between df and timezone_df.
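To check that every row of df found a match in the reference data, the merge can also be run with indicator=True, which adds a `_merge` column; a quick sketch:

```python
check = pd.merge(df, timezone_df, on='time_zone', how='left', indicator=True)
check['_merge'].value_counts()   # rows marked 'left_only' had no matching time_zone
```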
###Code
df_full = pd.merge(df, timezone_df, on='time_zone', how='left')
df_full.sample(5)
###Output
_____no_output_____ |
Fake_and_Legitimate_Task_Identification_Based_on_User_Movement.ipynb | ###Markdown
Import And Data Exploration
###Code
df = pd.read_csv('MCSDatasetNEXTCONLab.csv')
df
df.info()
# Absolute values: MultinomialNB (used below) cannot handle negative feature values
df = abs(df)
X, y = df.iloc[:, :-1].values, df.iloc[:, -1].values
###Output
_____no_output_____
###Markdown
Data Splitting 20% - 80%
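The split in the next cell is a plain random split. Because the two classes are clearly imbalanced here, a stratified split keeps the class ratio the same in the train and test sets; a sketch with the same variables:

```python
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)
```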
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
###Output
_____no_output_____
###Markdown
Modeling the data using algorithms (AdaBoost, Random Forest, Naive Bayes)
###Code
models = [
AdaBoostClassifier(n_estimators=100, random_state=0),
RandomForestClassifier(max_depth=20, random_state=0),
MultinomialNB()
]
estimators = []
estsNames = []
estsAccs = []
labels = ['Fake Task', 'Legitimate Task']
for model in models:
name = type(model).__name__
estsNames.append(name)
estimators.append((name, clone(model)))
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
accVal = accuracy_score(y_predict, y_test) * 100
estsAccs.append(accVal)
print('Using {} Algorithm'.format(name))
print('============================')
print('Accuracy: {}%'.format(accVal))
print('Classification Report: \n', classification_report(y_predict, y_test))
plot_confusion_matrix(model, X_test, y_test, display_labels=labels, values_format='d')
plt.show()
# End For
###Output
Using AdaBoostClassifier Algorithm
============================
Accuracy: 97.44563341387642%
Classification Report:
precision recall f1-score support
0 0.84 0.94 0.89 318
1 0.99 0.98 0.99 2579
accuracy 0.97 2897
macro avg 0.92 0.96 0.94 2897
weighted avg 0.98 0.97 0.98 2897
###Markdown
Majority voting aggregation
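The cell below uses the default hard (majority) voting. Since all three base models expose `predict_proba`, a soft-voting variant that averages predicted probabilities is also possible; a sketch:

```python
soft_model = VotingClassifier(estimators=estimators, voting='soft')
soft_model.fit(X_train, y_train)
```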
###Code
model = VotingClassifier(estimators = estimators)
name = type(model).__name__
estsNames.append(name)
estimators.append((name, clone(model)))
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
accVal = accuracy_score(y_predict, y_test) * 100
estsAccs.append(accVal)
print('Using {} Algorithm'.format(name))
print('============================')
print('Accuracy: {}%'.format(accVal))
print('Classification Report: \n', classification_report(y_predict, y_test))
plot_confusion_matrix(model, X_test, y_test, display_labels=labels, values_format='d')
plt.show()
###Output
Using VotingClassifier Algorithm
============================
Accuracy: 98.82637210907835%
Classification Report:
precision recall f1-score support
0 0.94 0.96 0.95 346
1 0.99 0.99 0.99 2551
accuracy 0.99 2897
macro avg 0.97 0.98 0.97 2897
weighted avg 0.99 0.99 0.99 2897
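###Markdown
`VotingClassifier` as used above defaults to hard (majority) voting. Since all three base models expose `predict_proba`, a soft-voting ensemble, which averages the predicted class probabilities instead of counting votes, could be tried the same way; the sketch below only builds the estimator and does not re-fit or evaluate it.
###Code
# Soft-voting variant (sketch): averages predict_proba outputs of the three base models.
soft_voting = VotingClassifier(
    estimators=[(type(m).__name__, clone(m)) for m in models],
    voting='soft'
)
# It would be fitted and evaluated exactly like the hard-voting model above, e.g.:
# soft_voting.fit(X_train, y_train); accuracy_score(soft_voting.predict(X_test), y_test)
###Output
_____no_output_____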
###Markdown
Models Accuracy Plotting
###Code
champModelId = np.argmax(estsAccs)
print("-> The champion model is '{}'".format(estsNames[champModelId]))
plt.figure(figsize=(8,8))
plt.axhline(y=estsAccs[champModelId], linewidth=1, color='k')
plt.bar(estsNames, estsAccs, color=['#1f77b4', '#ff7f0e', '#1f77b4', '#1f77b4'])
plt.show()
###Output
-> The champion model is 'RandomForestClassifier'
|
hcls/tabular/diabetic-readmission-prediction/data-wrangler/save-to-sm-feature-store.ipynb | ###Markdown
Save to Feature Store with a SageMaker Processing Job 💡 Quick Start To save your processed data to feature store, Click here to create a feature group and follow the instruction to run a SageMaker processing job.This notebook uses Amazon SageMaker Feature Store (Feature Store) to create a feature group, executes your Data Wrangler Flow `feature-transformations.flow` on the entire dataset using a SageMaker Processing Job and ingest processed data to Feature Store. --- Contents1. [Create Feature Group](Create-Feature-Group) 1. [Define Feature Group](Define-Feature-Group) 1. [Configure Feature Group](Configure-Feature-Group) 1. [Initialize & Create Feature Group](Initialize-&-Create-Feature-Group)1. [Processing Job: Inputs and Outputs](Inputs-and-Outputs)1. [Run Processing Job](Run-Processing-Job) 1. [Job Configurations](Job-Configurations) 1. [Create Processing Job](Create-Processing-Job) 1. [Job Status & Output Location](Job-Status-&-Output-Location) Create Feature Group_What is a feature group_A single feature corresponds to a column in your dataset. A feature group is a predefined schema for a collection of features - each feature in the feature group has a specified data type and name. A single record in a feature group corresponds to a row in your dataframe. A feature store is a collection of feature groups. To learn more about SageMaker Feature Store, see [Amazon Feature Store Documentation](http://docs.aws.amazon.com/sagemaker/latest/dg/feature-store.html). Define Feature GroupSelect Record identifier and Event time feature name. These are required parameters for feature groupcreation.* **Record identifier name** is the name of the feature defined in the feature group's feature definitions whose value uniquely identifies a Record defined in the feature group's feature definitions.* **Event time feature name** is the name of the EventTime feature of a Record in FeatureGroup. An EventTime is a timestamp that represents the point in time when a new event occurs that corresponds to the creation or update of a Record in the FeatureGroup. All Records in the FeatureGroup must have a corresponding EventTime. 💡Record identifier and Event time feature name are required for feature group. After filling in the values, you can choose Run Selected Cell and All Below from the Run Menu from the menu bar.
###Code
record_identifier_feature_name = None
if record_identifier_feature_name is None:
raise SystemExit("Select a column name as the feature group record identifier.")
event_time_feature_name = None
if event_time_feature_name is None:
raise SystemExit("Select a column name as the event time feature name.")
###Output
_____no_output_____
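###Markdown
As an illustration only, the assignments might look like the following. Both column names here are hypothetical placeholders, not taken from the flow above, and must be replaced with columns that actually exist in the processed dataset.
###Code
# Hypothetical placeholder names -- replace with real columns from the processed dataset.
record_identifier_feature_name = "encounter_id"  # assumption: a unique per-record ID column
event_time_feature_name = "event_time"           # assumption: an EventTime column added in the flow
###Output
_____no_output_____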
###Markdown
Feature DefinitionsThe following is a list of the feature names and feature types of the final dataset that will be produced when your data flow is used to process your input dataset. These are automatically generated from the step `Custom Pyspark` from `Source: Answers.Csv`. To save from a different step, go to Data Wrangler to select a new step to export. 💡 Configurable Settings 1. You can select a subset of the features. By default all columns of the result dataframe will be used as features.2. You can change the Data Wrangler data type to one of the Feature Store supported types (Integral, Fractional, or String). The default type is set to String. This means that, if a column in your dataset is not a float or long type, it will default to String in your Feature Store.For Event Time features, make sure the format follows the feature store Event Time feature format The following is a list of the feature names and data types of the final dataset that will be produced when your data flow is used to process your input dataset.
###Code
column_schemas = [
{
"name": "readmitted",
"type": "string"
},
{
"name": "age",
"type": "long"
},
{
"name": "time_in_hospital",
"type": "long"
},
{
"name": "num_lab_procedures",
"type": "long"
},
{
"name": "num_medications",
"type": "long"
},
{
"name": "number_emergency",
"type": "long"
},
{
"name": "number_inpatient",
"type": "long"
},
{
"name": "number_diagnoses",
"type": "long"
},
{
"name": "change",
"type": "long"
},
{
"name": "diabetes_med",
"type": "long"
},
{
"name": "race_caucasian",
"type": "float"
},
{
"name": "race_african_american",
"type": "float"
},
{
"name": "race_hispanic",
"type": "float"
},
{
"name": "race_other",
"type": "float"
},
{
"name": "race_asian",
"type": "float"
}
]
###Output
_____no_output_____
###Markdown
Below we create the SDK input for those feature definitions. Some schema types in Data Wrangler are not supported by Feature Store. The following will create a default_FG_type set to String for these types.
###Code
from sagemaker.feature_store.feature_definition import FeatureDefinition
from sagemaker.feature_store.feature_definition import FeatureTypeEnum
default_feature_type = FeatureTypeEnum.STRING
column_to_feature_type_mapping = {
"float": FeatureTypeEnum.FRACTIONAL,
"long": FeatureTypeEnum.INTEGRAL
}
feature_definitions = [
FeatureDefinition(
feature_name=column_schema['name'],
feature_type=column_to_feature_type_mapping.get(column_schema['type'], default_feature_type)
) for column_schema in column_schemas
]
###Output
_____no_output_____
###Markdown
Configure Feature Group 💡 Configurable Settings 1. feature_group_name: name of the feature group.1. feature_store_offline_s3_uri: SageMaker FeatureStore writes the data in the OfflineStore of a FeatureGroup to a S3 location owned by you.1. enable_online_store: controls if online store is enabled. Enabling the online store allows quick access to the latest value for a Record via the GetRecord API.1. iam_role: IAM role for executing the processing job.
###Code
from time import gmtime, strftime
import uuid
import sagemaker
# Sagemaker session
sess = sagemaker.Session()
# You can configure this with your own bucket name, e.g.
# bucket = <my-own-storage-bucket>
bucket = sess.default_bucket()
# IAM role for executing the processing job.
iam_role = sagemaker.get_execution_role()
# flow name and an unique ID for this export (used later as the processing job name for the export)
flow_name = "feature-transformations"
flow_export_id = f"{strftime('%d-%H-%M-%S', gmtime())}-{str(uuid.uuid4())[:8]}"
flow_export_name = f"flow-{flow_export_id}"
# feature group name, with flow_name and an unique id. You can give it a customized name
feature_group_name = f"FG-{flow_name}-{str(uuid.uuid4())[:8]}"
print(f"Feature Group Name: {feature_group_name}")
# SageMaker FeatureStore writes the data in the OfflineStore of a FeatureGroup to a
# S3 location owned by you.
feature_store_offline_s3_uri = 's3://' + bucket
# controls if online store is enabled. Enabling the online store allows quick access to
# the latest value for a Record via the GetRecord API.
enable_online_store = True
###Output
_____no_output_____
###Markdown
Initialize & Create Feature Group
###Code
# Initialize Boto3 session that is required to create feature group
import boto3
from sagemaker.session import Session
region = boto3.Session().region_name
boto_session = boto3.Session(region_name=region)
sagemaker_client = boto_session.client(service_name='sagemaker', region_name=region)
featurestore_runtime = boto_session.client(service_name='sagemaker-featurestore-runtime', region_name=region)
feature_store_session = Session(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
sagemaker_featurestore_runtime_client=featurestore_runtime
)
###Output
_____no_output_____
###Markdown
Feature group is initialized and created below
###Code
from sagemaker.feature_store.feature_group import FeatureGroup
feature_group = FeatureGroup(
name=feature_group_name, sagemaker_session=feature_store_session, feature_definitions=feature_definitions)
feature_group.create(
s3_uri=feature_store_offline_s3_uri,
record_identifier_name=record_identifier_feature_name,
event_time_feature_name=event_time_feature_name,
role_arn=iam_role,
enable_online_store=enable_online_store
)
###Output
_____no_output_____
###Markdown
Invoke the Feature Store API to create the feature group and wait until it is ready
###Code
import time
def wait_for_feature_group_creation_complete(feature_group):
"""Helper function to wait for the completions of creating a feature group"""
response = feature_group.describe()
status = response.get("FeatureGroupStatus")
while status == "Creating":
print("Waiting for Feature Group Creation")
time.sleep(5)
response = feature_group.describe()
status = response.get("FeatureGroupStatus")
if status != "Created":
print(f"Failed to create feature group, response: {response}")
failureReason = response.get("FailureReason", "")
raise SystemExit(
f"Failed to create feature group {feature_group.name}, status: {status}, reason: {failureReason}"
)
print(f"FeatureGroup {feature_group.name} successfully created.")
wait_for_feature_group_creation_complete(feature_group=feature_group)
###Output
_____no_output_____
###Markdown
Now that the feature group is created, You will use a processing job to process your data at scale and ingest the transformed data into this feature group. Inputs and OutputsThe below settings configure the inputs and outputs for the flow export. 💡 Configurable Settings In Input - Source you can configure the data sources that will be used as input by Data Wrangler1. For S3 sources, configure the source attribute that points to the input S3 prefixes2. For all other sources, configure attributes like query_string, database in the source's DatasetDefinition object.If you modify the inputs the provided data must have the same schema and format as the data used in the Flow. You should also re-execute the cells in this section if you have modified the settings in any data sources.
###Code
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.dataset_definition.inputs import AthenaDatasetDefinition, DatasetDefinition, RedshiftDatasetDefinition
data_sources = []
###Output
_____no_output_____
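###Markdown
For reference, a non-S3 source would be configured through a `DatasetDefinition` instead. The sketch below shows what an Athena-backed input might look like; the database, query, and staging prefix are hypothetical, the exact parameter names should be checked against your SageMaker SDK version, and the object is not appended to `data_sources`, so it does not change the job defined in this notebook.
###Code
# Hypothetical Athena-backed input (sketch only; intentionally not added to data_sources).
athena_input = ProcessingInput(
    input_name="athena-example",
    dataset_definition=DatasetDefinition(
        local_path="/opt/ml/processing/athena-example",
        data_distribution_type="FullyReplicated",
        athena_dataset_definition=AthenaDatasetDefinition(
            catalog="AwsDataCatalog",
            database="my_database",                 # assumption
            query_string="SELECT * FROM my_table",  # assumption
            output_s3_uri=f"s3://{bucket}/athena/staging/",
            output_format="PARQUET",
        ),
    ),
)
###Output
_____no_output_____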
###Markdown
Input - S3 Source: diabetic_readmission.csv
###Code
data_sources.append(ProcessingInput(
source="s3://sagemaker-us-east-1-119174016168/datasets/diabetic_readmission.csv", # You can override this to point to other dataset on S3
destination="/opt/ml/processing/diabetic_readmission.csv",
input_name="diabetic_readmission.csv",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated"
))
###Output
_____no_output_____
###Markdown
Output: Feature Store Below are the inputs required by the SageMaker Python SDK to launch a processing job with feature store as an output.
###Code
from sagemaker.processing import FeatureStoreOutput
# Output name is auto-generated from the select node's ID + output name from the flow file.
output_name = "84a97a0e-0bef-45eb-b660-0ef1ca3e4655.default"
processing_job_output = ProcessingOutput(
output_name=output_name,
app_managed=True,
feature_store_output=FeatureStoreOutput(feature_group_name=feature_group_name),
)
###Output
_____no_output_____
###Markdown
Upload Flow to S3To use the Data Wrangler as an input to the processing job, first upload your flow file to Amazon S3.
###Code
import os
import json
import boto3
# name of the flow file which should exist in the current notebook working directory
flow_file_name = "feature-transformations.flow"
# Load .flow file from current notebook working directory
!echo "Loading flow file from current notebook working directory: $PWD"
with open(flow_file_name) as f:
flow = json.load(f)
# Upload flow to S3
s3_client = boto3.client("s3")
s3_client.upload_file(flow_file_name, bucket, f"data_wrangler_flows/{flow_export_name}.flow", ExtraArgs={"ServerSideEncryption": "aws:kms"})
flow_s3_uri = f"s3://{bucket}/data_wrangler_flows/{flow_export_name}.flow"
print(f"Data Wrangler flow {flow_file_name} uploaded to {flow_s3_uri}")
###Output
_____no_output_____
###Markdown
The Data Wrangler Flow is also provided to the Processing Job as an input source which we configure below.
###Code
## Input - Flow: feature-transformations.flow
flow_input = ProcessingInput(
source=flow_s3_uri,
destination="/opt/ml/processing/flow",
input_name="flow",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated"
)
###Output
_____no_output_____
###Markdown
Run Processing Job Job Configurations 💡 Configurable Settings You can configure the following settings for Processing Jobs. If you change any configurations you will need to re-execute this and all cells below it by selecting the Run menu above and click Run Selected Cells and All Below1. IAM role for executing the processing job. 2. A unique name of the processing job. Give a unique name every time you re-execute processing jobs3. Data Wrangler Container URL.4. Instance count, instance type and storage volume size in GB.5. Content type for each output. Data Wrangler supports CSV as default and Parquet.6. Network Isolation settings7. KMS key to encrypt output data
###Code
# IAM role for executing the processing job.
iam_role = sagemaker.get_execution_role()
# Unique processing job name. Give a unique name every time you re-execute processing jobs
processing_job_name = f"data-wrangler-flow-processing-{flow_export_id}"
# Data Wrangler Container URL.
container_uri = "663277389841.dkr.ecr.us-east-1.amazonaws.com/sagemaker-data-wrangler-container:1.x"
# Pinned Data Wrangler Container URL.
container_uri_pinned = "663277389841.dkr.ecr.us-east-1.amazonaws.com/sagemaker-data-wrangler-container:1.11.0"
# Processing Job Instance count and instance type.
instance_count = 2
instance_type = "ml.m5.4xlarge"
# Size in GB of the EBS volume to use for storing data during processing
volume_size_in_gb = 30
# Content type for each output. Data Wrangler supports CSV as default and Parquet.
output_content_type = "CSV"
# Network Isolation mode; default is off
enable_network_isolation = False
# Output configuration used as processing job container arguments
output_config = {
output_name: {
"content_type": output_content_type
}
}
# KMS key for per object encryption; default is None
kms_key = None
###Output
_____no_output_____
###Markdown
Create Processing JobTo launch a Processing Job, you will use the SageMaker Python SDK to create a Processor function.
###Code
from sagemaker.processing import Processor
from sagemaker.network import NetworkConfig
processor = Processor(
role=iam_role,
image_uri=container_uri,
instance_count=instance_count,
instance_type=instance_type,
volume_size_in_gb=volume_size_in_gb,
network_config=NetworkConfig(enable_network_isolation=enable_network_isolation),
sagemaker_session=sess,
output_kms_key=kms_key
)
# Start Job
processor.run(
inputs=[flow_input] + data_sources,
outputs=[processing_job_output],
arguments=[f"--output-config '{json.dumps(output_config)}'"],
wait=False,
logs=False,
job_name=processing_job_name
)
###Output
_____no_output_____
###Markdown
Job Status & S3 Output LocationBelow you wait for processing job to finish. If it finishes successfully, your feature group should be populated with transformed feature values. In addition the raw parameters used by the Processing Job will be printed.
###Code
job_result = sess.wait_for_processing_job(processing_job_name)
job_result
###Output
_____no_output_____ |
pittsburgh-bridges-data-set-analysis/.ipynb_checkpoints/Data Space Report -Testing - Pie Charts-compact-checkpoint.ipynb | ###Markdown
Data Space Report Pittsburgh Bridges Data Set Andy Warhol Bridge - Pittsburgh. Report created by student Francesco Maria Chiarlo s253666, for A.A. 2019/2020.**Abstract**: The aim of this report is to evaluate the effectiveness of distinct statistical learning approaches, focusing in particular on their characteristics as well as on their advantages and drawbacks when applied to a relatively small dataset such as the one employed within this report, that is, the Pittsburgh Bridges dataset.**Key words**: Statistical Learning, Machine Learning, Bridge Design. TOC:* [Imports Section](imports-section)* [Dataset's Attributes Description](attributes-description)* [Data Preparation and Investigation](data-preparation)* [Learning Models](learning-models)* [Improvements and Conclusions](improvements-and-conclusions)* [References](references) Imports Section
###Code
# =========================================================================== #
# STANDARD IMPORTS
# =========================================================================== #
print(__doc__)
# Critical Imports
# --------------------------------------------------------------------------- #
import warnings; warnings.filterwarnings("ignore")
# Imports through 'from' syntax
# --------------------------------------------------------------------------- #
from pprint import pprint
from IPython.display import display
from itertools import islice
# Standard Imports
# --------------------------------------------------------------------------- #
import copy; import os
import sys; import time
import itertools
# Imports through 'as' syntax
# --------------------------------------------------------------------------- #
import numpy as np; import pandas as pd
# Imports for handling graphics
# --------------------------------------------------------------------------- #
%matplotlib inline
# Matplotlib pyplot provides plotting API
import matplotlib as mpl
from matplotlib import pyplot as plt
import chart_studio.plotly.plotly as py
import seaborn as sns; sns.set(style="ticks", color_codes=True) # sns.set()
# =========================================================================== #
# UTILS IMPORTS (Done by myself)
# =========================================================================== #
from utils.load_dataset_pittsburg_utils import load_brdiges_dataset
from utils.utilities_functions import *
from utils.display_utils import *
from utils.preprocessing_utils import *
from utils.training_utils import *
from utils.sklearn_functions_custom import *
from utils.training_utils_v2 import fit_by_n_components, fit_all_by_n_components, grid_search_all_by_n_components
# =========================================================================== #
# sklearn IMPORT
# =========================================================================== #
from sklearn.decomposition import PCA, KernelPCA
# Import scikit-learn classes: models (Estimators).
from sklearn.naive_bayes import GaussianNB # Non-parametric Generative Model
from sklearn.naive_bayes import MultinomialNB # Non-parametric Generative Model
from sklearn.linear_model import LinearRegression # Parametric Linear Discriminative Model
from sklearn.linear_model import LogisticRegression # Parametric Linear Discriminative Model
from sklearn.linear_model import Ridge, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC # Parametric Linear Discriminative "Support Vector Classifier"
from sklearn.tree import DecisionTreeClassifier # Non-parametric Model
from sklearn.ensemble import BaggingClassifier # Non-parametric Model (Meta-Estimator, that is, an Ensemble Method)
from sklearn.ensemble import RandomForestClassifier # Non-parametric Model (Meta-Estimator, that is, an Ensemble Method)
# =========================================================================== #
# READ INPUT DATASET
# =========================================================================== #
dataset_path = 'C:\\Users\\Francesco\\Documents\\datasets\\pittsburgh_dataset'
dataset_name = 'bridges.data.csv'
TARGET_COL = 'T-OR-D' # Target variable name
dataset, feature_vs_values = load_brdiges_dataset(dataset_path, dataset_name)
feature_vs_values
# sns.pairplot(dataset, hue='T-OR-D', size=1.5)
columns_2_avoid = ['ERECTED', 'LENGTH', 'LOCATION']
show_frequency_distribution_predictor(dataset, predictor_name='RIVER', columns_2_avoid=columns_2_avoid,
features_vs_values=feature_vs_values, hue=TARGET_COL, verbose=1)
# show_frequency_distribution_predictors(dataset, columns_2_avoid)
show_frequency_distribution_predictor(dataset, predictor_name='T-OR-D', columns_2_avoid=columns_2_avoid,
features_vs_values=feature_vs_values, hue=None, verbose=1)
# show_frequency_distribution_predictors(dataset, columns_2_avoid)
show_frequency_distribution_predictor(dataset, predictor_name='CLEAR-G', columns_2_avoid=columns_2_avoid,
features_vs_values=feature_vs_values, hue=TARGET_COL)
# show_frequency_distribution_predictors(dataset, columns_2_avoid)
show_frequency_distribution_predictor(dataset, predictor_name='SPAN', columns_2_avoid=columns_2_avoid,
features_vs_values=feature_vs_values, hue=TARGET_COL)
# show_frequency_distribution_predictors(dataset, columns_2_avoid)
show_frequency_distribution_predictor(dataset, predictor_name='MATERIAL', columns_2_avoid=columns_2_avoid,
features_vs_values=feature_vs_values, hue=TARGET_COL)
# show_frequency_distribution_predictors(dataset, columns_2_avoid)
show_frequency_distribution_predictor(dataset, predictor_name='REL-L', columns_2_avoid=columns_2_avoid,
features_vs_values=feature_vs_values, hue=TARGET_COL)
# show_frequency_distribution_predictors(dataset, columns_2_avoid)
show_frequency_distribution_predictor(dataset, predictor_name='TYPE', columns_2_avoid=columns_2_avoid,
features_vs_values=feature_vs_values, hue=TARGET_COL)
###Output
_____no_output_____
###Markdown
Correlation Matrix Analysis
###Code
corr_result = dataset.corr()
display_heatmap(corr_result)
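# Equivalent view using seaborn directly (display_heatmap above is a helper from
# utils.display_utils); centering at 0 separates positive from negative correlations.
plt.figure(figsize=(10, 8))
sns.heatmap(corr_result, cmap='coolwarm', center=0)
plt.show()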
show_pie_charts_corr_matrix(corr_result)
feature = "PURPOSE"
dataset[[TARGET_COL, feature]].groupby([feature, TARGET_COL]).count()
dataset[[TARGET_COL, feature]].groupby([feature]).count()
dataset[[TARGET_COL, feature]].groupby([feature])[TARGET_COL].plot(kind = 'hist', stacked=True)
dataset.pivot(columns=TARGET_COL)[feature].plot(kind = 'hist', stacked=True)
show_full_stacktrace_error = True
try:
g = sns.PairGrid(dataset)
g.map_upper(plt.scatter)
g.map_lower(sns.kdeplot)
g.map_diag(sns.kdeplot, lw=3, legend=False);
except Exception as err:
if show_full_stacktrace_error is True:
raise err
else:
print(str(err))
pass
g = sns.PairGrid(dataset)
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter);
show_full_stacktrace_error = True
try:
g = sns.pairplot(dataset, hue=TARGET_COL, palette="Set2", diag_kind="kde", height=2.5)
except Exception as err:
if show_full_stacktrace_error is True:
raise err
else:
print(str(err))
pass
g = sns.PairGrid(dataset, hue=TARGET_COL)
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter)
g.add_legend();
###Output
_____no_output_____ |
cca.ipynb | ###Markdown
1k Samples
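Note: the cells in this notebook rely on `data`, `data_validation`, a `rank()` evaluation helper, and a `CCA` implementation that are not defined in the cells shown here. A minimal setup sketch follows; the cca-zoo import is an assumption (its `latent_dims` / `fit((X, Y))` API matches the usage below), and the `latent_dims` values are taken from the printed results.
###Code
import pickle
import numpy as np
# Assumption: the CCA implementation is cca-zoo's; the notebook does not show the import.
from cca_zoo.models import CCA
# Latent dimensionalities evaluated below (taken from the printed results).
latent_dims = [2, 10, 50, 100, 200, 500, 1000]
# `data` and `data_validation` (image/text embedding tuples) and `rank()` are assumed to be
# defined in cells that are not included here.
###Output
_____no_output_____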
###Code
for dims in latent_dims:
model=CCA(latent_dims=dims)
model.fit((data[0],data[1]))
filename='models/average_embeddings_img2text'+str(dims)+'_model.pkl'
pickle.dump(model, open(filename, 'wb'))
data_transform=model.transform([data_validation[0],data_validation[1]])
median,recall=rank('image', data_transform[0], data_transform[1], 1000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
Dimension: 2
203.1 {1: 0.0031, 5: 0.016300000000000002, 10: 0.031400000000000004}
******************************
Dimension: 10
22.5 {1: 0.055400000000000005, 5: 0.19730000000000003, 10: 0.32010000000000005}
******************************
Dimension: 50
2.7 {1: 0.34270000000000006, 5: 0.6896000000000001, 10: 0.8087}
******************************
Dimension: 100
2.0 {1: 0.4082, 5: 0.7459, 10: 0.8452}
******************************
Dimension: 200
1.7 {1: 0.48069999999999996, 5: 0.7776000000000002, 10: 0.8543000000000001}
******************************
Dimension: 500
1.0 {1: 0.5474, 5: 0.7953, 10: 0.8505}
******************************
Dimension: 1000
1.0 {1: 0.5549000000000001, 5: 0.7716, 10: 0.8160000000000001}
******************************
###Markdown
10k Samples
###Code
for dims in latent_dims:
filename='models/average_embeddings_img2text'+str(dims)+'_model.pkl'
with open(filename, 'rb') as files:
model= pickle.load(files)
data_transform=model.transform([data_validation[0],data_validation[1]])
median,recall=rank('image', data_transform[0], data_transform[1], 10000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
Dimension: 2
2033.65 {1: 0.0004, 5: 0.0016899999999999999, 10: 0.00326}
******************************
Dimension: 10
218.8 {1: 0.00682, 5: 0.030390000000000007, 10: 0.05706}
******************************
Dimension: 50
17.1 {1: 0.10355, 5: 0.28631, 10: 0.40496}
******************************
Dimension: 100
12.0 {1: 0.14282, 5: 0.35245, 10: 0.47501999999999994}
******************************
Dimension: 200
8.0 {1: 0.19366, 5: 0.43262999999999996, 10: 0.5537299999999999}
******************************
Dimension: 500
5.0 {1: 0.26037, 5: 0.51621, 10: 0.62177}
******************************
Dimension: 1000
4.9 {1: 0.28311, 5: 0.5301199999999999, 10: 0.62432}
******************************
###Markdown
BEST MODEL TEST
###Code
with open('embeddings_test1.pkl', 'rb') as f:
data_test = pickle.load(f)
filename='models/average_embeddings_img2text'+str(500)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data_test[0],data_test[1]])
median,recall=rank('image', data_transform[0], data_transform[1], 1000)
print("1K Samples:")
print(median,recall)
print("*"*30)
median,recall=rank('image', data_transform[0], data_transform[1], 10000)
print("10K Samples:")
print(median,recall)
print("*"*30)
###Output
1K Samples:
1.0 {1: 0.5495000000000001, 5: 0.7911, 10: 0.8480000000000001}
******************************
10K Samples:
5.0 {1: 0.25931999999999994, 5: 0.51165, 10: 0.6194999999999999}
******************************
###Markdown
Image to title embeddings
###Code
with open('title_embeddings_train.pkl', 'rb') as files:
data_title = pickle.load(files)
with open('title_embeddings_val.pkl', 'rb') as f:
data_validation_title = pickle.load(f)
for dims in latent_dims:
model=CCA(latent_dims=dims)
model.fit((data[0],data_title[0]))
filename='models/title_embeddings_img2text'+str(dims)+'_model.pkl'
pickle.dump(model, open(filename, 'wb'))
data_transform=model.transform([data_validation[0],data_validation_title[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 1000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
for dims in latent_dims:
filename='models/title_embeddings_img2text'+str(dims)+'_model.pkl'
with open(filename, 'rb') as files:
model= pickle.load(files)
data_transform=model.transform([data_validation[0],data_validation_title[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 10000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
Dimension: 2
2170.8 {1: 0.00037, 5: 0.00204, 10: 0.00404}
******************************
Dimension: 10
554.15 {1: 0.00243, 5: 0.011859999999999999, 10: 0.02329}
******************************
Dimension: 50
89.85 {1: 0.02521, 5: 0.09215, 10: 0.15421999999999997}
******************************
Dimension: 100
67.4 {1: 0.03272, 5: 0.11724000000000001, 10: 0.18913}
******************************
Dimension: 200
57.85 {1: 0.04543, 5: 0.15033000000000002, 10: 0.23278000000000004}
******************************
Dimension: 500
84.15 {1: 0.0613, 5: 0.18009000000000003, 10: 0.25791000000000003}
******************************
Dimension: 1000
243.5 {1: 0.06056, 5: 0.16706, 10: 0.23132999999999998}
******************************
###Markdown
BEST MODEL TO TEST
###Code
with open('title_embeddings_test.pkl', 'rb') as f:
data_test_title = pickle.load(f)
filename='models/title_embeddings_img2text'+str(200)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data_test[0],data_test_title[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 1000)
print("1K Samples:")
print(median,recall)
print("*"*30)
median,recall=rank('image', data_transform[0], data_transform[1], 10000)
print("10K Samples:")
print(median,recall)
print("*"*30)
###Output
1K Samples:
6.95 {1: 0.19620000000000004, 5: 0.4614, 10: 0.5660999999999999}
******************************
10K Samples:
60.9 {1: 0.04489, 5: 0.14948, 10: 0.22981}
******************************
###Markdown
Image to ingredients embeddings
###Code
with open('ingredients_embeddings_train.pkl', 'rb') as files:
data_ingre = pickle.load(files)
with open('ingredients_embeddings_val.pkl', 'rb') as f:
data_validation_ingre = pickle.load(f)
for dims in latent_dims:
model=CCA(latent_dims=dims)
model.fit((data[0],data_ingre[0]))
filename='models/ingre_embeddings_img2text'+str(dims)+'_model.pkl'
pickle.dump(model, open(filename, 'wb'))
data_transform=model.transform([data_validation[0],data_validation_ingre[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 1000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
for dims in latent_dims:
filename='models/ingre_embeddings_img2text'+str(dims)+'_model.pkl'
with open(filename, 'rb') as files:
model= pickle.load(files)
data_transform=model.transform([data_validation[0],data_validation_ingre[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 10000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
_____no_output_____
###Markdown
BEST MODEL TO TEST
###Code
with open('ingredients_embeddings_test.pkl', 'rb') as f:
data_test_ingre = pickle.load(f)
filename='models/ingre_embeddings_img2text'+str(500)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data_test[0],data_test_ingre[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 1000)
print("1K Samples:")
print(median,recall)
print("*"*30)
median,recall=rank('image', data_transform[0], data_transform[1], 10000)
print("10K Samples:")
print(median,recall)
print("*"*30)
###Output
1K Samples:
3.0 {1: 0.35409999999999997, 5: 0.5980000000000001, 10: 0.6675}
******************************
10K Samples:
19.5 {1: 0.13733, 5: 0.32005999999999996, 10: 0.41430000000000006}
******************************
###Markdown
Image to instructions embeddings
###Code
with open('instructions_embeddings_train.pkl', 'rb') as files:
data_instr = pickle.load(files)
with open('instructions_embeddings_val.pkl', 'rb') as f:
data_validation_instr = pickle.load(f)
for dims in latent_dims:
model=CCA(latent_dims=dims)
model.fit((data[0],data_instr[0]))
filename='models/instr_embeddings_img2text'+str(dims)+'_model.pkl'
pickle.dump(model, open(filename, 'wb'))
data_transform=model.transform([data_validation[0],data_validation_instr[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 1000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
for dims in latent_dims:
filename='models/instr_embeddings_img2text'+str(dims)+'_model.pkl'
with open(filename, 'rb') as files:
model= pickle.load(files)
data_transform=model.transform([data_validation[0],data_validation_instr[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 10000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
Dimension: 2
1868.75 {1: 0.00048000000000000007, 5: 0.00201, 10: 0.0038099999999999996}
******************************
Dimension: 10
319.9 {1: 0.004999999999999999, 5: 0.022909999999999996, 10: 0.04071}
******************************
Dimension: 50
43.4 {1: 0.05493, 5: 0.16638999999999998, 10: 0.24852999999999997}
******************************
Dimension: 100
29.9 {1: 0.07239999999999999, 5: 0.21003, 10: 0.30542}
******************************
Dimension: 200
23.0 {1: 0.09956, 5: 0.25765000000000005, 10: 0.36102}
******************************
Dimension: 500
19.6 {1: 0.13107, 5: 0.30842, 10: 0.40619000000000005}
******************************
Dimension: 1000
26.1 {1: 0.13413000000000003, 5: 0.30510000000000004, 10: 0.39122}
******************************
###Markdown
BEST MODEL TO TEST
###Code
with open('instructions_embeddings_test.pkl', 'rb') as f:
data_test_instr = pickle.load(f)
filename='models/instr_embeddings_img2text'+str(500)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data_test[0],data_test_instr[0]])
median,recall=rank('image', data_transform[0], data_transform[1], 1000)
print("1K Samples:")
print(median,recall)
print("*"*30)
median,recall=rank('image', data_transform[0], data_transform[1], 10000)
print("10K Samples:")
print(median,recall)
print("*"*30)
###Output
1K Samples:
3.0 {1: 0.3545, 5: 0.6084999999999998, 10: 0.6870999999999999}
******************************
10K Samples:
20.0 {1: 0.12906, 5: 0.30744000000000005, 10: 0.4029}
******************************
###Markdown
Text To Image Average Text Embedding to Image
###Code
for dims in latent_dims:
model=CCA(latent_dims=dims)
model.fit((data[1],data[0]))
filename='models/average_embeddings_text2img'+str(dims)+'_model.pkl'
pickle.dump(model, open(filename, 'wb'))
data_transform=model.transform([data_validation[1],data_validation[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 1000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
for dims in latent_dims:
filename='models/average_embeddings_text2img'+str(dims)+'_model.pkl'
with open(filename, 'rb') as files:
model= pickle.load(files)
data_transform=model.transform([data_validation[1],data_validation[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 10000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
Dimension: 2
2086.9 {1: 0.00031000000000000005, 5: 0.00159, 10: 0.00315}
******************************
Dimension: 10
218.55 {1: 0.00717, 5: 0.0312, 10: 0.058219999999999994}
******************************
Dimension: 50
16.95 {1: 0.10942, 5: 0.29805000000000004, 10: 0.41212}
******************************
Dimension: 100
11.9 {1: 0.14948, 5: 0.36008, 10: 0.47969999999999996}
******************************
Dimension: 200
7.9 {1: 0.20578, 5: 0.43889999999999996, 10: 0.5559000000000001}
******************************
Dimension: 500
5.0 {1: 0.27622, 5: 0.5214700000000001, 10: 0.6249}
******************************
Dimension: 1000
4.1 {1: 0.29741, 5: 0.5365, 10: 0.62957}
******************************
###Markdown
BEST MODEL TO TEST
###Code
filename='models/average_embeddings_text2img'+str(500)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data_test[1],data_test[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 1000)
print("1K Samples:")
print(median,recall)
print("*"*30)
median,recall=rank('text', data_transform[1], data_transform[0], 10000)
print("10K Samples:")
print(median,recall)
print("*"*30)
###Output
1K Samples:
1.0 {1: 0.5499, 5: 0.7941, 10: 0.849}
******************************
10K Samples:
5.0 {1: 0.27286, 5: 0.5184599999999999, 10: 0.61974}
******************************
###Markdown
Title Embedding to Image
###Code
for dims in latent_dims:
model=CCA(latent_dims=dims)
model.fit((data_title[0],data[0]))
filename='models/title_embeddings_text2img'+str(dims)+'_model.pkl'
pickle.dump(model, open(filename, 'wb'))
data_transform=model.transform([data_validation_title[0],data_validation[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 1000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
for dims in latent_dims:
filename='models/title_embeddings_text2img'+str(dims)+'_model.pkl'
with open(filename, 'rb') as files:
model= pickle.load(files)
data_transform=model.transform([data_validation_title[0],data_validation[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 10000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
Dimension: 2
2174.2 {1: 0.00041999999999999996, 5: 0.0018199999999999998, 10: 0.00381}
******************************
Dimension: 10
557.65 {1: 0.0023499999999999997, 5: 0.011630000000000001, 10: 0.02212}
******************************
Dimension: 50
93.2 {1: 0.023649999999999997, 5: 0.08762, 10: 0.14683000000000002}
******************************
Dimension: 100
70.65 {1: 0.032659999999999995, 5: 0.11604999999999999, 10: 0.18774000000000002}
******************************
Dimension: 200
61.65 {1: 0.04705000000000001, 5: 0.14939999999999998, 10: 0.23092000000000001}
******************************
Dimension: 500
87.6 {1: 0.06342, 5: 0.17813999999999997, 10: 0.25476}
******************************
Dimension: 1000
245.75 {1: 0.06312999999999999, 5: 0.16746999999999998, 10: 0.23321999999999998}
******************************
###Markdown
BEST MODEL TO TEST
###Code
filename='models/title_embeddings_text2img'+str(200)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data_test_title[0],data_test[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 1000)
print("1K Samples:")
print(median,recall)
print("*"*30)
median,recall=rank('text', data_transform[1], data_transform[0], 10000)
print("10K Samples:")
print(median,recall)
print("*"*30)
###Output
1K Samples:
7.25 {1: 0.19840000000000002, 5: 0.44600000000000006, 10: 0.5579000000000001}
******************************
10K Samples:
63.85 {1: 0.04649, 5: 0.14817, 10: 0.22626}
******************************
###Markdown
Ingredients Embedding to Image
###Code
for dims in latent_dims:
model=CCA(latent_dims=dims)
model.fit((data_ingre[0],data[0]))
filename='models/ingre_embeddings_text2img'+str(dims)+'_model.pkl'
pickle.dump(model, open(filename, 'wb'))
data_transform=model.transform([data_validation_ingre[0],data_validation[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 1000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
for dims in latent_dims:
filename='models/ingre_embeddings_text2img'+str(dims)+'_model.pkl'
with open(filename, 'rb') as files:
model= pickle.load(files)
data_transform=model.transform([data_validation_ingre[0],data_validation[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 10000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
Dimension: 2
2038.8 {1: 0.0003800000000000001, 5: 0.00162, 10: 0.0032799999999999995}
******************************
Dimension: 10
260.55 {1: 0.00565, 5: 0.02595, 10: 0.046669999999999996}
******************************
Dimension: 50
38.4 {1: 0.0653, 5: 0.19013, 10: 0.27865}
******************************
Dimension: 100
26.9 {1: 0.08896, 5: 0.24023, 10: 0.33733}
******************************
Dimension: 200
20.1 {1: 0.11981, 5: 0.29298, 10: 0.3952}
******************************
Dimension: 500
17.3 {1: 0.15158999999999997, 5: 0.33453, 10: 0.43034999999999995}
******************************
Dimension: 1000
25.6 {1: 0.15188, 5: 0.32114, 10: 0.40158000000000005}
******************************
###Markdown
BEST MODEL TO TEST
###Code
filename='models/ingre_embeddings_text2img'+str(500)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data_test_ingre[0],data_test[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 1000)
print("1K Samples:")
print(median,recall)
print("*"*30)
median,recall=rank('text', data_transform[1], data_transform[0], 10000)
print("10K Samples:")
print(median,recall)
print("*"*30)
###Output
1K Samples:
2.9 {1: 0.3651, 5: 0.601, 10: 0.6733}
******************************
10K Samples:
18.35 {1: 0.14953999999999998, 5: 0.3317, 10: 0.4239}
******************************
###Markdown
Instructions To Image
###Code
for dims in latent_dims:
model=CCA(latent_dims=dims)
model.fit((data_instr[0],data[0]))
filename='models/instr_embeddings_text2img'+str(dims)+'_model.pkl'
pickle.dump(model, open(filename, 'wb'))
data_transform=model.transform([data_validation_instr[0],data_validation[0]])
median,recall=rank('image', data_transform[1], data_transform[0], 1000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
for dims in latent_dims:
filename='models/instr_embeddings_text2img'+str(dims)+'_model.pkl'
with open(filename, 'rb') as files:
model= pickle.load(files)
data_transform=model.transform([data_validation_instr[0],data_validation[0]])
median,recall=rank('image', data_transform[1], data_transform[0], 10000)
print("Dimension: "+str(dims))
print(median,recall)
print("*"*30)
###Output
Dimension: 2
1868.75 {1: 0.00048000000000000007, 5: 0.00201, 10: 0.0038099999999999996}
******************************
Dimension: 10
319.9 {1: 0.004999999999999999, 5: 0.022909999999999996, 10: 0.04071}
******************************
Dimension: 50
43.4 {1: 0.05493, 5: 0.16638999999999998, 10: 0.24852999999999997}
******************************
Dimension: 100
29.9 {1: 0.07239999999999999, 5: 0.21003, 10: 0.30542}
******************************
Dimension: 200
23.0 {1: 0.09956, 5: 0.25765000000000005, 10: 0.36102}
******************************
Dimension: 500
19.6 {1: 0.13107, 5: 0.30842, 10: 0.40619000000000005}
******************************
Dimension: 1000
26.1 {1: 0.13413000000000003, 5: 0.30510000000000004, 10: 0.39122}
******************************
###Markdown
BEST MODEL TO TEST
###Code
filename='models/instr_embeddings_text2img'+str(500)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data_test_instr[0],data_test[0]])
median,recall=rank('text', data_transform[1], data_transform[0], 1000)
print("1K Samples:")
print(median,recall)
print("*"*30)
median,recall=rank('text', data_transform[1], data_transform[0], 10000)
print("10K Samples:")
print(median,recall)
print("*"*30)
###Output
_____no_output_____
###Markdown
PLOT RECIPE TO IMAGE 10K SAMPLES Image to Average Embeddings
###Code
import matplotlib.pyplot as plt
median_average=[]
recall_1_average=[]
recall_5_average=[]
recall_10_average=[]
for dims in latent_dims:
file_store='outputs/recipe2img_10K_samples/average_embeddings_text2img'+str(dims)+'_model_output.pkl'
with open(file_store, 'rb') as files:
data_output= pickle.load(files)
median_average.append(data_output[0])
recall_1_average.append(data_output[1][1])
recall_5_average.append(data_output[1][5])
recall_10_average.append(data_output[1][10])
median_title=[]
recall_1_title=[]
recall_5_title=[]
recall_10_title=[]
for dims in latent_dims:
file_store='outputs/recipe2img_10K_samples/title_embeddings_text2img'+str(dims)+'_model_output.pkl'
with open(file_store, 'rb') as files:
data_output= pickle.load(files)
median_title.append(data_output[0])
recall_1_title.append(data_output[1][1])
recall_5_title.append(data_output[1][5])
recall_10_title.append(data_output[1][10])
median_ingre=[]
recall_1_ingre=[]
recall_5_ingre=[]
recall_10_ingre=[]
for dims in latent_dims:
file_store='outputs/recipe2img_10K_samples/ingre_embeddings_text2img'+str(dims)+'_model_output.pkl'
with open(file_store, 'rb') as files:
data_output= pickle.load(files)
median_ingre.append(data_output[0])
recall_1_ingre.append(data_output[1][1])
recall_5_ingre.append(data_output[1][5])
recall_10_ingre.append(data_output[1][10])
median_instr=[]
recall_1_instr=[]
recall_5_instr=[]
recall_10_instr=[]
for dims in latent_dims:
file_store='outputs/recipe2img_10K_samples/instr_embeddings_text2img'+str(dims)+'_model_output.pkl'
with open(file_store, 'rb') as files:
data_output= pickle.load(files)
median_instr.append(data_output[0])
recall_1_instr.append(data_output[1][1])
recall_5_instr.append(data_output[1][5])
recall_10_instr.append(data_output[1][10])
plt.plot(latent_dims,median_average,label='Average')
plt.plot(latent_dims,median_title,label='Title')
plt.plot(latent_dims,median_ingre,label='Ingredients')
plt.plot(latent_dims,median_instr,label='Instructions')
plt.legend()
plt.title("Recipe to Image Median Rank 10K samples")
plt.savefig('image_output/recipe2img_10k_samples.png')
plt.show()
ids=data[2]
chicken_lasagna_idx=np.where(ids == 'f79f91650c')[0][0]
lasagna_idx=np.where(ids == '003971cf31')[0][0]
salad_idx=np.where(ids == '001f8b08ac')[0][0]
chicken_salad_idx=np.where(ids == '09f70a1c31')[0][0]
filename='models/average_embeddings_img2text'+str(500)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data[0],data[1]])
data_transform[0].shape
chicken_lasagna_imgvec=data_transform[0][chicken_lasagna_idx]
lasagna_imgvec=data_transform[0][lasagna_idx]
salad_imgvec=data_transform[0][salad_idx]
chicken_lasagna_textvec=data_transform[1][chicken_lasagna_idx]
lasagna_textvec=data_transform[1][lasagna_idx]
salad_textvec=data_transform[1][salad_idx]
chicken_vec=np.subtract(chicken_lasagna_imgvec,lasagna_imgvec)
chicken_salad_vec=np.add(chicken_vec,salad_imgvec)
sims = np.dot(chicken_salad_vec,data_transform[1].T)
sorting = np.argsort(sims)[::-1].tolist()
ans_index=sorting[0]
ans_id=data[2][ans_index]
ans_id
###Output
_____no_output_____
###Markdown
ABOVE ID IS FOR MANGO CURRY SALAD TITLE TO IMAGE
###Code
filename='models/title_embeddings_img2text'+str(200)+'_model.pkl'
with open(filename, 'rb') as files:
best_model= pickle.load(files)
data_transform=best_model.transform([data[1],data[0]])
data_transform[0].shape
chicken_lasagna=data_transform[0][chicken_lasagna_idx]
lasagna=data_transform[0][lasagna_idx]
salad=data_transform[0][salad_idx]
chicken_vec=np.subtract(chicken_lasagna,lasagna)
chicken_salad_vec=np.add(chicken_vec,salad)
sims = np.dot(chicken_salad_vec,data_transform[1].T) # for recipe2im
sorting = np.argsort(sims)[::-1].tolist()
ans_index=sorting[0]
ans_id=data[2][ans_index]
ans_id
###Output
_____no_output_____ |
licensed_sponsors_uk/DownloadPDF.ipynb | ###Markdown
Extract Organisation Name
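The extraction cells below assume that the sponsor-register PDF has already been converted to an HTML rendering and parsed into an lxml tree named `doc`, with pandas and numpy imported. A minimal setup sketch follows; the file name is an assumption, not taken from this notebook.
###Code
import numpy as np
import pandas as pd
from lxml import etree, html

# Assumption: an HTML rendering of the register PDF (e.g. from a pdf-to-html converter)
# was produced earlier in the notebook and saved under this name.
with open("register_of_licensed_sponsors.html", "r", encoding="utf-8") as f:
    doc = html.fromstring(f.read())
###Output
_____no_output_____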
###Code
name = []
top_skipped = []
isFirst = False
for col in doc.xpath("//div/span"):
if ("left:30px;" in col.getparent().get("style")):
if isFirst or ("top:266px;" in col.getparent().get("style")):
p_style = col.getparent().get("style")
pairs = {'style': p_style, 'name': col.text.strip()}
p_styles = p_style.split(';')
for i in range(0, (len(p_styles)-1)):
key_value = p_styles[i].split(':')
if key_value[0].strip() in ['top', 'left', 'width', 'height']:
pairs[key_value[0].strip()] = int(key_value[1].replace('px','').strip())
# else:
# pairs[key_value[0].strip()] = key_value[1].replace('px','').strip()
# print(pairs)
if pairs['top'] in top_skipped:
print("skipped as already imported once, top = ",pairs['top'])
continue
## handle the city - start
if (df_city_sorted[ (df_city_sorted['top'] - pairs['top']).abs() < 10 ].shape[0]) == 1:
pairs['city_top'] = (df_city_sorted[ (df_city_sorted['top'] - pairs['top']).abs() < 10 ].iloc[0]['top'])
pairs['city'] = (df_city_sorted[ (df_city_sorted['top'] - pairs['top']).abs() < 10 ].iloc[0]['name'])
else:
if (len(col.getparent().getchildren())==2):
print("more than one sibling", len(col.getparent().getchildren()), pairs['top']);
elem_0 = len(col.getparent().getchildren()[0].text.strip())
elem_1 = len(col.getparent().getchildren()[1].text.strip())
# print(elem_0)
# print(elem_1)
if(elem_0 > elem_1):
pairs['name'] = col.getparent().getchildren()[0].text.strip()
pairs['city'] = col.getparent().getchildren()[1].text.strip()
pairs['city_top'] = pairs['top']
else:
pairs['name'] = col.getparent().getchildren()[1].text.strip()
pairs['city'] = col.getparent().getchildren()[0].text.strip()
pairs['city_top'] = pairs['top']
top_skipped.append(pairs['top'])
elif (len(col.getparent().getchildren())==1):
print("one sibling, top=",pairs['top']);
else:
print("sibling : else, top=",pairs['top']);
# break;
## handle the city - end
try:
next_top = df_city_sorted[ (df_city_sorted['top'] > pairs['top'] + 10)].iloc[0]['top']
except:
next_top = pairs['top'] + 100
## handle the Tier & rating - start
# filtered_df_tier_and_rating_sorted = df_tier_and_rating_sorted[ (df_tier_and_rating_sorted['adjected_top'] - pairs['top']).abs() < 10 ]
# if (filtered_df_tier_and_rating_sorted.shape[0]) == 1:
# pairs['tier_and_rating_top'] = (filtered_df_tier_and_rating_sorted.iloc[0]['top'])
# pairs['tier_and_rating'] = filtered_df_tier_and_rating_sorted.iloc[0]['name']
# elif filtered_df_tier_and_rating_sorted.shape[0] > 1:
# pairs['tier_and_rating_top'] = filtered_df_tier_and_rating_sorted.iloc[0]['top']
# pairs['tier_and_rating'] = "\n".join( filtered_df_tier_and_rating_sorted['name'].tolist() )
filtered_df_tier_and_rating_sorted = df_tier_and_rating_sorted[ (df_tier_and_rating_sorted['top'] > pairs['top'] ) & (df_tier_and_rating_sorted['top'] < next_top ) ]
pairs['tier_and_rating_top'] = filtered_df_tier_and_rating_sorted.iloc[0]['top']
pairs['tier_and_rating'] = "\n".join( filtered_df_tier_and_rating_sorted['name'].tolist() )
# if (df_sub_tier_sorted[ (df_sub_tier_sorted['adjected_top'] - pairs['top']).abs() < 10 ].shape[0]) == 1:
# pairs['df_sub_tier_top'] = (df_sub_tier_sorted[ (df_sub_tier_sorted['adjected_top'] - pairs['top']).abs() < 10 ].iloc[0]['top'])
# pairs['sub_tier'] = (df_sub_tier_sorted[ (df_sub_tier_sorted['adjected_top'] - pairs['top']).abs() < 10 ].iloc[0]['name'])
filtered_df_sub_tier_sorted = df_sub_tier_sorted[ (df_sub_tier_sorted['top'] > pairs['top'] ) & (df_sub_tier_sorted['top'] < next_top ) ]
pairs['sub_tier_top'] = filtered_df_sub_tier_sorted.iloc[0]['top']
pairs['sub_tier'] = "\n".join( filtered_df_sub_tier_sorted['name'].tolist() )
name.append(pairs)
isFirst = True
# break
df_name = pd.DataFrame(name)
df_name_sorted = df_name.sort_values(by=['top'])
# df_name_sorted[0:]
df_name_sorted
df_name_sorted[df_name_sorted['top'] == 5755 ]
df_name_sorted[df_name_sorted['sub_tier'].isnull() ]
df_tier_and_rating_sorted[ df_tier_and_rating_sorted['top'] == 2967]
###Output
_____no_output_____
###Markdown
Extract Organisation Location
###Code
city = []
for col in doc.xpath("//div/span"):
if ("left:357px;" in col.getparent().get("style")):
if isFirst or ("top:266px;" in col.getparent().get("style")):
p_style = col.getparent().get("style")
pairs = {'style': p_style, 'name': col.text.strip()}
p_styles = p_style.split(';')
for i in range(0, (len(p_styles)-1)):
key_value = p_styles[i].split(':')
if key_value[0].strip() in ['top', 'left', 'width', 'height']:
pairs[key_value[0].strip()] = int(key_value[1].replace('px','').strip())
# else:
# pairs[key_value[0].strip()] = key_value[1].replace('px','').strip()
# print(pairs)
# print(pairs['top'])
if (df_name_sorted[ (df_name_sorted['top'] - pairs['top']).abs() < 10 ].shape[0]) == 1:
pairs['name_top'] = (df_name_sorted[ (df_name_sorted['top'] - pairs['top']).abs() < 10 ].iloc[0]['top'])
# print(pairs)
city.append(pairs)
isFirst = True
# break
df_city = pd.DataFrame(city)
df_city_sorted = df_city.sort_values(by=['top'])
df_city_sorted
df_city_sorted[df_city_sorted['name_top'].isnull() ]
###Output
_____no_output_____
###Markdown
Extract Tier & Rating
###Code
tier_and_rating = []
for col in doc.xpath("//div/span"):
if ("left:582px;" in col.getparent().get("style")):
if isFirst or ("top:266px;" in col.getparent().get("style")):
p_style = col.getparent().get("style")
# print(col.text_content().strip())
# print(etree.tostring(col))
pairs = {'style': p_style, 'name': col.text_content().strip()}
p_styles = p_style.split(';')
for i in range(0, (len(p_styles)-1)):
key_value = p_styles[i].split(':')
if key_value[0].strip() in ['top', 'left', 'width', 'height']:
pairs[key_value[0].strip()] = int(key_value[1].replace('px','').strip())
# else:
# pairs[key_value[0].strip()] = key_value[1].replace('px','').strip()
# print(pairs)
tier_and_rating.append(pairs)
isFirst = True
# break
###Output
_____no_output_____
###Markdown
Extract Sub Tier
###Code
sub_tier = []
for col in doc.xpath("//div/span"):
if ("left:684px;" in col.getparent().get("style")):
if isFirst or ("top:266px;" in col.getparent().get("style")):
p_style = col.getparent().get("style")
# print(col.text_content().strip())
# print(etree.tostring(col))
pairs = {'style': p_style, 'name': col.text_content().strip()}
p_styles = p_style.split(';')
for i in range(0, (len(p_styles)-1)):
key_value = p_styles[i].split(':')
if key_value[0].strip() in ['top', 'left', 'width', 'height']:
pairs[key_value[0].strip()] = int(key_value[1].replace('px','').strip())
# else:
# pairs[key_value[0].strip()] = key_value[1].replace('px','').strip()
# print(pairs)
sub_tier.append(pairs)
isFirst = True
# break
len(city)
name[101]
df_city_sorted[0:]
df_name_sorted[25:]
df_sub_tier = pd.DataFrame(sub_tier)
df_sub_tier_sorted = df_sub_tier.sort_values(by=['top'])
df_sub_tier_sorted['adjected_top'] = df_sub_tier_sorted['top'] - 12
df_sub_tier_sorted[0:]
df_tier_and_rating = pd.DataFrame(tier_and_rating)
df_tier_and_rating_sorted = df_tier_and_rating.sort_values(by=['top'])
df_tier_and_rating_sorted['adjected_top'] = df_tier_and_rating_sorted['top'] - 12
df_tier_and_rating_sorted
# for content in contents:
# text_object = content.getObject()
# print(text_object)
# with open("raw_text.csv", "w") as text_file:
# text_file.write(all_text)
# contents = pageObj.getContents()
###Output
_____no_output_____
###Markdown
Export dataframe to parquet
###Code
# pip install fastparquet ( if not already installed )
df_name_sorted.to_parquet('df_name_sorted.parquet.gzip',
compression='gzip')
df_name_sorted.to_csv('df_name_sorted.csv.gzip',
compression='gzip')
df_city_sorted.to_parquet('df_city_sorted.parquet.gzip',
compression='gzip')
df_tier_and_rating_sorted.to_parquet('df_tier_and_rating_sorted.parquet.gzip',
compression='gzip')
df_sub_tier_sorted.to_parquet('df_sub_tier_sorted.parquet.gzip',
compression='gzip')
df_sub_tier_sorted
###Output
_____no_output_____
###Markdown
Import dataframe from parquet
###Code
df_name_sorted_imported = pd.read_parquet('df_name_sorted.parquet.gzip')
df_name_sorted_imported[0:]
df_name_sorted_imported['lowcase_name'] = df_name_sorted_imported['name'].str.lower()
df_name_sorted_imported[0:]
dict_name_sorted_imported = df_name_sorted_imported.T.to_dict().values()
import psycopg2
try:
connection = psycopg2.connect(user="postgres",
password="hidden",
host="hidden",
port="5432",
database="uk-immigration")
cursor = connection.cursor()
processd = 0
postgres_insert_query = """ INSERT INTO "find-job"."licensed-sponsors"("company-name", "city", "tier-and-rating", "sub-tier", "company-name-lowcase") VALUES (%s,%s,%s,%s,%s)"""
for record in dict_name_sorted_imported:
record_to_insert = (record['name'], record['city'], record['tier_and_rating'], record['sub_tier'],record['lowcase_name'])
cursor.execute(postgres_insert_query, record_to_insert)
connection.commit()
processd = processd + 1
if (processd % 10 == 0):
print(processd)
count = cursor.rowcount
print (count, "Record inserted successfully into mobile table")
except (Exception, psycopg2.Error) as error :
if(connection):
print("Failed to insert record into mobile table", error)
finally:
#closing database connection.
if(connection):
# cursor.close()
connection.close()
print("PostgreSQL connection is closed")
for record in dict_name_sorted_imported:
print(record)
print(record['name'])
break
# table.put_item(Item=student)
# df_name_sorted_imported['top'] = (df_name_sorted_imported['top'] / 2).apply(np.round) * 2
# df_name_sorted_imported
df_city_sorted_imported = pd.read_parquet('df_city_sorted.parquet.gzip')
df_city_sorted_imported
# df_city_sorted_imported['top'] = (df_city_sorted_imported['top'] / 2).apply(np.round) * 2
# df_city_sorted_imported
df_tier_and_rating_sorted_imported = pd.read_parquet('df_tier_and_rating_sorted.parquet.gzip')
df_tier_and_rating_sorted_imported
df_tier_and_rating_sorted_imported['top'] = df_tier_and_rating_sorted_imported['top']/5
df_sub_tier_sorted_imported = pd.read_parquet('df_sub_tier_sorted.parquet.gzip')
df_sub_tier_sorted_imported['top'] = (df_sub_tier_sorted_imported['top'] / 3).apply(np.ceil) * 3
df_sub_tier_sorted_imported
df_newdata = df_name_sorted_imported
df_newdata['city_top'] = df_city_sorted_imported['top']
df_newdata['city_name'] = df_city_sorted_imported['name']
df_newdata['city_index'] = df_city_sorted_imported['index']
df_newdata
df_newdata['diff'] = df_newdata['top'] - df_newdata['city_top']
df_newdata['diff'] = df_newdata['diff'].abs()
df_newdata['diff']
df_newdata_big_diff = df_newdata[df_newdata['diff'] > 1 ]
df_newdata.shape
df_newdata_big_diff.shape
df_newdata_big_diff
df_newdata_big_diff.index
df_newdata_big_diff.index[0]
df_newdata['top']
###Output
_____no_output_____ |
ChessGM.ipynb | ###Markdown
 Data Analysis of Chess Grandmasters (GM)Recently, the Indian-American Abhimanyu Mishra became the [youngest Grandmaster](https://www.chess.com/news/view/abhimanyu-mishra-youngest-grandmaster-in-chess-history) in chess history, qualifying for the title at the age of 12 years 4 months and 25 days, whereas the Venezuelan [Salvador Diaz Carias](https://www.chess.com/news/view/venezuela-chess-player-fm-title-88-salvador-diaz-carias) got the FIDE Master (FM) title at the age of 88. Motivated by this news, which I learned about through the Brazilian YouTube channel [Xadrez Brasil](https://www.youtube.com/c/XadrezBrasil), I decided to do a data analysis and visualization of GMs based on the Wikipedia "[List of chess grandmasters](https://en.wikipedia.org/wiki/List_of_chess_grandmasters)". General Research Questions about GMsWe aim at approaching the following questions. These questions guide our exploration, but do not limit it.1. What is the distribution of GM titles since 1950 (when the title was introduced)?2. What is the relationship, if any, between age and receiving the GM title?3. What is the distribution of GM titles among countries and sexes? Python Libraries
###Code
# Importing the necessary Python libraries to our tasks:
import numpy as np # data analysis
import pandas as pd # data analysis and manipulation
import matplotlib.pyplot as plt # data visualization
%matplotlib inline
import seaborn as sns # data visualization
from IPython.core.display import display, HTML
# This package allows embedding here the visualization made by Flourish website: https://app.flourish.studio/.
###Output
_____no_output_____
###Markdown
Data Extraction
###Code
# Our data source: the Wikipedia "List of chess grandmasters".
url = "https://en.wikipedia.org/wiki/List_of_chess_grandmasters"
###Output
_____no_output_____
###Markdown
Data Cleaning and Preparation
###Code
# Reading and selecting the table we are interested in:
html = pd.read_html(url, match = "Birthplace")
html
# Notice the output is a list.
# Taking the table as a DataFrame from the list:
html[0]
# Reducing the table to just the columns we are interested in:
table1 = html[0][["Name", "Born", "TitleYear", "Federation", "Sex"]]
table1
# Dropping the first nonsense line (line zero):
table2 = table1.drop(0)
table2
# Changing the value of the column 'Born' so that we have only the year:
table2['Born'] = table2['Born'].apply(lambda x: x[:4])
table2['Born']
# This introduces a small imprecision in our analysis since we are not going to consider the exact birth date.
# However, as we do not have the exact date of the GM title acquisition, this is the best we can do.
# Changing the data type of the values in the column 'Born' to a number (float) instead of a string:
table2['Born'] = table2['Born'].apply(lambda x: float(x))
table2['Born']
# Adding a column with TitleAge.
# The age of the GMs when they got the title is the title year minus their birth year.
table2['TitleAge'] = table2['TitleYear'] - table2['Born']
table2
###Output
_____no_output_____
###Markdown
Data Analysis and Visualization The age of the GM title receivers
###Code
# Descriptive statistics summary about the age of GMs when they got the title:
table2['TitleAge'].describe()
# Scatter plot showing the distribution of title age of the GM and the year of the title:
sns.scatterplot(table2['TitleYear'], table2['TitleAge'])
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
###Markdown
An interactive scatter plot is displayed at the end of this document.
###Code
# Box plot about the title age of the GMs when they received the title.
sns.boxplot(table2['TitleAge'])
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
###Markdown
 By the analysis and visualization above, we notice that:* The oldest GM title receiver is about 88 years old. But this is **not** entirely correct, because among the 10 oldest chess players (all being at least 77 years old) only [Jacques Mieses](https://en.wikipedia.org/wiki/Jacques_Mieses) (1865-1954) was an active player. He received his title at the age of 85 in 1950 (inauguration of the GM title), but it is said his chess strength was not that great anymore. The other 9 players received honorary titles.* The mean age of GM title receivers is 27 years. Indeed, 75% of them are at most 31 years old. The standard deviation is about 10 years.* Both the scatter plot and, especially, the box plot highlight that whoever receives the GM title at the age of 45 years or above is already an outlier among the GMs. We see this is even more true once we know most elderly GM title receivers got an honorary title, not a regular one.* The scatter plot also shows a huge growth of GM titles, in particular since 1990.
###Code
# Distribution of age of GM title receivers.
sns.distplot(table2['TitleAge'])
# Distribution of GM titles along the years.
sns.distplot(table2['TitleYear'])
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
 The two graphs above show that:* Again, the age of GM title receivers is concentrated in the 20s.* Since 1990 we have observed a lot more GM titles. Most likely this is due to the fact that chess became much more popular and accessible, particularly after the spread of home computers and the internet. The more people interested in chess, the more GMs.
###Code
# Correlations
table2.corr()
# We see the year of birth and the title year are highly correlated with each other
# The first 10 oldest GM title receivers.
table2.sort_values('TitleAge')[-10:]
# We notice very old GMs often have received an honorary title for their career.
# By the way, there are only 19 GMs whose age is at least 70 years old.
###Output
_____no_output_____
###Markdown
Distributions of GMs per Sex and Country
###Code
# GMs per country:
countries = dict()
for i in table2['Federation']:
#print(i)
countries[i] = countries.get(i, 0) + 1
countries
# GM per sex
sex = dict()
for i in table2['Sex']:
#print(i)
sex[i] = sex.get(i, 0) + 1
sex
# Female GMs represent only 1.95% of the total of GMs.
# Distributions of GMs per sex and country displayed using Flourish.
# Interactive visualization!
display(HTML('https://public.flourish.studio/visualisation/6633672/'))
# Scatter plot (with regression line) showing the distribution of title age of the GM and the year of the title:
# Interactive visualization!
display(HTML('https://public.flourish.studio/visualisation/6633947/'))
# For converting the dataset seen in the variable table2 (DataFrame) into a csv file:
table2.to_csv('chess.csv')
###Output
_____no_output_____ |
ml/Machine_Learning_Code_Implementation/charpter21_Bayesian_models/bayesian_network.ipynb | ###Markdown
bayesian network
###Code
# Import the relevant pgmpy modules
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
letter_model = BayesianModel([('D', 'G'),
('I', 'G'),
('G', 'L'),
('I', 'S')])
# Conditional probability distribution of the student's grade
grade_cpd = TabularCPD(
    variable='G', # node name
    variable_card=3, # number of possible values of the node
    values=[[0.3, 0.05, 0.9, 0.5], # probability table of this node
[0.4, 0.25, 0.08, 0.3],
[0.3, 0.7, 0.02, 0.2]],
    evidence=['I', 'D'], # parent nodes this node depends on
    evidence_card=[2, 2] # number of possible values of each parent node
)
# Conditional probability distribution of the exam difficulty
difficulty_cpd = TabularCPD(
variable='D',
variable_card=2,
values=[[0.6], [0.4]]
)
# Conditional probability distribution of the student's aptitude (intelligence)
intel_cpd = TabularCPD(
variable='I',
variable_card=2,
values=[[0.7], [0.3]]
)
# Conditional probability distribution of the recommendation letter quality
letter_cpd = TabularCPD(
variable='L',
variable_card=2,
values=[[0.1, 0.4, 0.99],
[0.9, 0.6, 0.01]],
evidence=['G'],
evidence_card=[3]
)
# Conditional probability distribution of the SAT score
sat_cpd = TabularCPD(
variable='S',
variable_card=2,
values=[[0.95, 0.2],
[0.05, 0.8]],
evidence=['I'],
evidence_card=[2]
)
# Add each node's CPD to the model to build the Bayesian network
letter_model.add_cpds(
grade_cpd,
difficulty_cpd,
intel_cpd,
letter_cpd,
sat_cpd
)
# Import the pgmpy Bayesian inference module
from pgmpy.inference import VariableElimination
# Bayesian network inference
letter_infer = VariableElimination(letter_model)
# Infer the quality of the recommendation letter for a talented student facing an easy exam
prob_G = letter_infer.query(
variables=['G'],
evidence={'I': 1, 'D': 0})
print(prob_G)
###Output
WARNING:root:Replacing existing CPD for G
WARNING:root:Replacing existing CPD for D
WARNING:root:Replacing existing CPD for I
WARNING:root:Replacing existing CPD for L
WARNING:root:Replacing existing CPD for S
Finding Elimination Order: : 100%|██████████████████████████████████████████████████████| 2/2 [00:00<00:00, 668.95it/s]
Eliminating: L: 100%|███████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 285.72it/s] |
examples/retinanet/RetinaNet Exporter.ipynb | ###Markdown
Object Detection Model (RetinaNet) export using Nyoka
###Code
from keras_retinanet.models import load_model
from PIL import ImageDraw
from nyoka import RetinanetToPmml
import requests
import warnings
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
Download the pre-trained RetinaNet model
###Code
model = requests.get("https://github.com/fizyr/keras-retinanet/releases/download/0.5.1/resnet50_coco_best_v2.1.0.h5")
with open('resnet50_coco_best_v2.1.0.h5','wb') as file:
file.write(model.content)
###Output
_____no_output_____
###Markdown
Load the downloaded modelThe model is loaded using `load_model` function from keras_retinanet.The model was trained with `coco` dataset and `resnet50` was used as backbone
###Code
model = load_model('resnet50_coco_best_v2.1.0.h5', backbone_name='resnet50')
###Output
_____no_output_____
###Markdown
The pre-trained model has `score_threshold=0.05`, which means it will consider all classes whose predicted probability is greater than 5%. To remove noisy predictions, it is updated to 0.5 (50%)
###Code
model.layers[-1].score_threshold = 0.5
model.save("Retinanet_with_new_threshold.h5")
print("The updated model is saved and it needs to be loaded again to reflect the change")
model = load_model("Retinanet_with_new_threshold.h5",backbone_name='resnet50')
print("The model is loaded again")
###Output
_____no_output_____
###Markdown
Libraries to load and preprocess the image.Since the model was trained using `resnet50` as backbone, we need to preprocess the image to convert it to the format used by resnet
###Code
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
###Output
_____no_output_____
###Markdown
Load and preprocess the image
###Code
file = "test_1"
orig_img = load_img(file+'.png')
img = img_to_array(orig_img)
img = preprocess_input(img)
###Output
_____no_output_____
###Markdown
Predict using the preprocessed image. The model will return boundary boxes, scores and classes/labels
###Code
bboxes, scores, labels = model.predict(np.expand_dims(img, axis=0))
###Output
_____no_output_____
###Markdown
Extracting valid predictions
###Code
score_range=list(scores.ravel()).index(-1.0)
scores = scores.ravel()[:score_range]
labels = labels.ravel()[:score_range]
bboxes = bboxes[0][:score_range]
###Output
_____no_output_____
###Markdown
List of classes used to train the model
###Code
import json
classes = json.load(open("categories_coco.json",'r'))
classes = list(classes.values())
###Output
_____no_output_____
###Markdown
Drawing boxes and labels on the original image Draw the boxes and labels
###Code
img_with_boxes=orig_img.copy()
drawer = ImageDraw.Draw(img_with_boxes)
for i in range(score_range):
drawer.rectangle(bboxes[i],outline='red')
drawer.text([bboxes[i][0], bboxes[i][1]],text=classes[labels[i]]+" "+"{:.2f}".format(scores[i]))
###Output
_____no_output_____
###Markdown
Original Image
###Code
orig_img
###Output
_____no_output_____
###Markdown
Annotated image
###Code
img_with_boxes
###Output
_____no_output_____
###Markdown
 Generate the PMML The exporter needs the following parameters - * `model` : The trained RetinaNet model* `input_shape` : The expected shape of the image to be scored* `input_format` : The format of the input during inference* `backbone_name` : Name of the backbone used to train the model* `trained_classes` : List of classes the model was trained on* `pmml_file_name` : Name of the PMML file
###Code
RetinanetToPmml(
model=model,
input_shape=(224,224,3),
input_format='image',
backbone_name='resnet',
trained_classes=classes,
pmml_file_name="RetinaNet.pmml"
)
###Output
_____no_output_____ |
Arctic Heat/Flight Analysis/AXCTD-XBT Visualization - Clean Fall2018.ipynb | ###Markdown
 Arctic Heat - Fall 2018**AXCTD and XBT Profiles - all QC'd**Purpose: plot cleaned XBT and AXCTD files. Files with no data were removed, and the remaining files were truncated so they contain only data recorded once the probe hit the water. Bottom depth is estimated by the nearest point on the ARDEMv2 Bathymetry Grid: [ArdemV2_Depth_Finder.ipynb](ArdemV2_Depth_Finder.ipynb).
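###Markdown
 As a rough illustration of that lookup (a hypothetical sketch, not the code from ArdemV2_Depth_Finder.ipynb; the `grid_lon`, `grid_lat` and `grid_depth` arrays are assumed to be loaded from the ARDEMv2 file elsewhere), a nearest-grid-point depth can be found by a simple distance minimisation:
###Code
# Hypothetical sketch of a nearest-grid-point depth lookup (not the original ArdemV2_Depth_Finder code)
import numpy as np
def nearest_depth(drop_lon, drop_lat, grid_lon, grid_lat, grid_depth):
    """Return the depth of the grid cell closest to a sonde drop (simple squared-distance minimum)."""
    dist2 = (grid_lon - drop_lon)**2 + (grid_lat - drop_lat)**2
    idx = np.unravel_index(np.argmin(dist2), dist2.shape)
    return grid_depth[idx]
###Output
_____no_output_____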
###Code
import pandas as pd
import os
import datetime
import numpy as np
source_dir = '/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/'
source_file = [os.path.join(dp, f) for dp, dn,
fn in os.walk(os.path.expanduser(source_dir)) for f in fn if 'clean.dta' in f]
###Output
_____no_output_____
###Markdown
 Data Ingestion and data summary/statisticsA few lines to list and read the .dta files into a dictionary of pandas dataframes.The original .dta files have a header as follows: ```Probe Type = AXBT Date = 2018/05/26 Time = 00:29:35.620 Time Depth Frequency (C) (F) ```
###Code
dfs = {}
for i,filename in enumerate(sorted(source_file)):
try:
header = pd.read_csv(filename,nrows=4,header=None)
#parse date in header to add delta-t in columns to
sd = header[0][1].split('= ')[-1].split('.')
nofrag, frag = header[0][2].split('= ')[-1].split('.')
st = datetime.datetime.strptime(sd[0] + ' ' + nofrag,'%Y/%m/%d %H:%M:%S')
st = st.replace(microsecond=int(frag))
columns = ['Time','Depth','Frequency','DegreeC','DegreeF']
temp_data = pd.read_csv(filename,delimiter='\s+',skiprows=4,na_values='******')
temp_data['DateTime'] = [st +datetime.timedelta(seconds=x[1]['Time']) for x in temp_data.iterrows()]
temp_data = temp_data.set_index(pd.DatetimeIndex(temp_data['DateTime']))
dfs.update({filename:temp_data})
print(filename)
except ValueError:
print("{} failed to load".format(filename))
continue
except KeyError:
columns = ['Frame#','Data','CRC','Depth','Temp','Cond','Salinity']
temp_data = pd.read_csv(filename,delimiter='\s+',skiprows=4,na_values='*****')
dfs.update({filename:temp_data})
print(filename)
###Output
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180525/log00002.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180525/log00003.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180525/log00004.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180527/log00000.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180527/log00001.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180527/log00002.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180527/log00003.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180529/log00002.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180529/log00003.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180529/log00004.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180529/log00009.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180531/15126156_2018_06_01_00_53_34.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180531/15126160_2018_06_01_01_21_03.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180531/15126161_2018_05_31_23_50_38.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180531/15126162_2018_06_01_02_06_11.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180531/15126163_2018_06_01_02_15_55.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180913L1/log00000.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180913L1/log00001.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180913L1/log00002.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180913L1/log00004.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180913L1/log00006.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180913L1/log00007.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180913L1/log00009.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L1/log00001.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L1/log00003.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L1/log00004.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L1/log00006.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L1/log00007.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L1/log00008.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L2/log00001.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L2/log00002.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L2/log00004.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L2/log00005.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L2/log00006.clean.dta
/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/AXBT_Data/AXBT_20180914L2/log00007.clean.dta
###Markdown
XBT
###Code
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(7,8.25))
for ds, df in dfs.items():
if '201805' not in ds:
try:
ax.plot(df['(C)'],df['Depth'],label=ds.split('/')[-2:])
except:
pass
plt.ylabel('Depth (m)')
plt.xlabel('Temperature (degC)')
ax.invert_yaxis()
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
for ds, df in dfs.items():
if '201805' not in ds:
fig, ax = plt.subplots(figsize=(8.5,11))
try:
plt.plot(df['(C)'],df['Depth'])
plt.ylabel('Depth (m)')
plt.xlabel('Temperature (degC)')
ax = plt.gca()
ax.invert_yaxis()
plt.title(ds.split('/')[-2:])
fig.savefig(ds.replace('.clean.dta','.png'))
except:
pass
###Output
_____no_output_____
###Markdown
 AXCTDNo AXCTD on Fall 2018 Flights. The fall flights are designed to give a transect of the Chukchi from South to North. See (AXCTD-XBT Flight Ops Locations Fall2018.ipynb). So contour as a function of total distance along the transect. Beginning
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cmocean
def make_map(projection=ccrs.PlateCarree(),figsize=(16, 16)):
fig, ax = plt.subplots(figsize=figsize,
subplot_kw=dict(projection=projection))
if projection == ccrs.PlateCarree():
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
        gl.xformatter = LongitudeFormatter()  # formatter classes imported from cartopy.mpl.ticker above
        gl.yformatter = LatitudeFormatter()
return fig, ax, plt
fl=pd.read_excel('/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/Event_Log-all.xlsx',sheet_name='Fall')
projection=ccrs.LambertConformal(central_longitude=-160.0)
transformation=ccrs.PlateCarree()
land_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m',
edgecolor='face',
facecolor='1.0')
fig,ax,plt = make_map(projection=projection,figsize=(8,8))
extent = [-176, -150, 62.5, 73.5]
t = ax.scatter(fl['lon'], fl['lat'], s=100, facecolors='none', edgecolors='r', transform=transformation)
ax.add_feature(land_50m)
ax.coastlines(resolution='50m')
ax.set_extent(extent)
plt.title('All Sonde Drops')
fig,ax,plt = make_map(projection=projection,figsize=(8,8))
extent = [-176, -150, 62.5, 73.5]
t = ax.scatter(fl['lon'][fl['DataQuality'] == 'Good'], fl['lat'][fl['DataQuality'] == 'Good'],
s=100, facecolors='none', edgecolors='k', transform=transformation)
ax.add_feature(land_50m)
ax.coastlines(resolution='50m')
ax.set_extent(extent)
plt.title('Good Sonde Drops')
### great circle calculation
from geopy import distance
fln = fl[~fl.lat.isna()]
flng = fln[fln['DataQuality'] == 'Good']
flng.reset_index(inplace=True)
for i,row in flng.iterrows():
if i == 0:
xdx = [0]
tsum = 0
else:
tsum = (distance.distance((flng['lat'][i],flng['lon'][i]),
(flng['lat'][i-1],flng['lon'][i-1])).km) + tsum
xdx = xdx + [ tsum ]
fig, ax = plt.subplots(figsize=(16,8))
count=0
for ds, df in dfs.items():
if '201805' not in ds:
cs = ax.scatter(np.ones_like(df['Depth'])*xdx[count], df['Depth'], s=35, c=df['(C)'], marker='o', edgecolor='none', vmin=-2, vmax=10,cmap=cmocean.cm.thermal)
count+=1
ax.invert_yaxis()
cbar = fig.colorbar(cs, orientation='vertical', extend='both')
cbar.ax.set_ylabel('Temperature (DegC)')
ax.set_ylabel('Depth (m)')
ax.set_xlabel('Distance Along Transect')
###Output
_____no_output_____ |
DataVisualization/1_Basic_Plotting_to_Matplotlib.ipynb | ###Markdown
 Basic Plotting: Introduction to ```matplotlib```In this section, we will:- Create basic plots using ```matplotlib.pyplot```- Put axis labels and titles- Create multiple plots (subplots) in the same figure- Change the scales of x and y axes- Create common types of plots: Histograms, boxplots, scatter plots etc. - Working with images```matplotlib``` is a Python library. It contains the ```pyplot``` module, which is basically a collection of functions such as ```plot```, ```title```, ```show()``` etc. ```pyplot``` is one of the most commonly used modules for creating a variety of plots such as line plots, bar plots, histograms etc. Let's start with the basics. Basic Plotting, Axes Labels and Titles
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Plotting two 1-D numpy arrays
x = np.linspace(5, 100, 100)
y = np.linspace(10, 1000, 100)
plt.plot(x, y)
# need to call plt.show() explicitly to display the plot
plt.show()
# can also work with lists, though it converts lists to np arrays internally
plt.plot([1, 4, 6, 8], [3, 8, 3, 5])
plt.show()
###Output
_____no_output_____
###Markdown
 Let's see how to put labels on the x and y axes and set the chart title. Also, you can specify the limits of the x and y axes as a range using ```xlim([xmin, xmax])``` and ```ylim([ymin, ymax])```.
###Code
# Axis labels and title
plt.plot(x, y)
# x and y labels, and title
plt.xlabel("Current")
plt.ylabel("Voltage")
plt.title("Ohm's Law")
# Define the range of labels of the axis
# Arguments: plt.axis(xmin, xmax, ymin, ymax)
plt.xlim([20, 80])
plt.ylim([200, 800])
plt.show()
# Change the colors and line type
# initialising x and y arrays
x = np.linspace(0, 10, 20)
y = x*2
# color blue, line type '+'
plt.plot(x, y, 'b+')
# put x and y labels, and the title
plt.xlabel("Current")
plt.ylabel("Voltage")
plt.title("Ohm's Law")
plt.show()
# Plotting multiple lines on the same plot
x = np.linspace(0, 5, 10)
y = np.linspace(3, 6, 10)
# plot three curves: y, y**2 and y**3 with different line types
plt.plot(x, y, 'r-', x, y**2, 'b+', x, y**3, 'g^')
plt.show()
###Output
_____no_output_____
###Markdown
Figures and SubplotsYou often need to create multiple plots in the same figure, as we'll see in some upcoming examples.```matplotlib``` has the concept of **figures and subplots** using which you can create *multiple subplots inside the same figure*. To create multiple plots in the same figure, you can use the method ```plt.subplot(nrows, ncols, nsubplot)```.
###Code
x = np.linspace(1, 10, 100)
y = np.log(x)
# initiate a new figure explicitly
plt.figure(1)
# Create a subplot with 1 row, 2 columns
# create the first subplot in figure 1
plt.subplot(121) # equivalent to plt.subplot(1, 2, 1)
plt.title("y = log(x)")
plt.plot(x, y)
# create the second subplot in figure 1
plt.subplot(122)
plt.title("y = log(x)**2")
plt.plot(x, y**2)
plt.show()
###Output
_____no_output_____
###Markdown
Let's see another example - say you want to create 4 subplots in two rows and two columns.
###Code
# Example: Create a figure having 4 subplots
x = np.linspace(1, 10, 100)
# Optional command, since matplotlib creates a figure by default anyway
plt.figure(1)
# subplot 1
plt.subplot(2, 2, 1)
plt.title("Linear")
plt.plot(x, x)
# subplot 2
plt.subplot(2, 2, 2)
plt.title("Cubic")
plt.plot(x, x**3)
# subplot 3
plt.figure(2)
plt.subplot(2, 2, 1)
plt.title("Log")
plt.plot(x, np.log(x))
# subplot 4
plt.subplot(2, 2, 2)
plt.title("Exponential")
plt.plot(x, x**2)
plt.show()
###Output
_____no_output_____
###Markdown
 You can see the list of colors and shapes here: https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot Types of Commonly Used PlotsLet's now use the retail store's sales data to create some commonly used plots such as:- Boxplots- Histograms- Scatter plots- Bar plots
###Code
# Example: Globals sales data
df = pd.read_csv("./global_sales_data/market_fact.csv")
df.head()
###Output
_____no_output_____
###Markdown
Boxplot
###Code
# Boxplot: Visualise the distribution of a continuous variable
plt.boxplot(df['Order_Quantity'])
plt.show()
# Boxplot of Sales is quite unreadable, since Sales varies
# across a wide range
plt.boxplot(df['Sales'])
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, the boxplot of ```Sales``` is pretty unreadable, since Sales varies across a wide range as shown below.
###Code
# Range of sales: min is 2.24, median is 449, max is 89061
df['Sales'].describe()
###Output
_____no_output_____
###Markdown
The solution to this problem is to **change the scale of the axis** (in this case, the y axis) so that the range can fit into the size of the plot.One commonly used technique is to transform an axis into the **logarithmic scale**. You can transform the scale of an axis using ```plt.yscale('log')```.
###Code
# Usual (linear) scale subplot
plt.subplot(1, 2, 1)
plt.boxplot(df['Sales'])
# log scale subplot
plt.subplot(1, 2, 2)
plt.boxplot(df['Sales'])
plt.yscale('log')
plt.show()
###Output
_____no_output_____
###Markdown
 Clearly, the log scale subplot is far more readable - you can infer that the minimum sales is around 0, the median is approximately midway between 100 and 1000 (on the log scale), and the max is reaching 100,000. HistogramHistograms are useful for visualising the distribution of single variables.
###Code
# Histograms
plt.hist(df['Sales'])
plt.show()
# The histogram can be made more readable by using
# a log scale
plt.hist(df['Sales'])
plt.yscale('log')
plt.show()
###Output
_____no_output_____
###Markdown
 Scatter PlotScatter plots are used to visualise two variables, one on each axis.
###Code
# Scatter plots with two variables: Profit and Sales
plt.scatter(df['Sales'], df['Profit'])
plt.show()
###Output
_____no_output_____
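###Markdown
 The list above also mentions bar plots, but none is created below, so here is a minimal sketch (it only assumes the `Order_Quantity` column already used above):
###Code
# Bar plot sketch: number of orders at each order quantity
counts = df['Order_Quantity'].value_counts().sort_index()
plt.bar(counts.index, counts.values)
plt.xlabel("Order Quantity")
plt.ylabel("Number of orders")
plt.show()
###Output
_____no_output_____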
###Markdown
Working with Images```matplotlib``` can also read images using the ```plt.imread()``` method. Internally, it reads and stores images as an ```array```. The array can then be used for various data manipulation tasks, just as a normal array.Let's look at an example.
###Code
# reading a PNG image
image = plt.imread("number.png")
plt.imshow(image)
plt.show()
# looking at attributes of the image
print(type(image))
print(image.shape)
print(image.dtype)
###Output
<class 'numpy.ndarray'>
(254, 255, 4)
float32
###Markdown
 Note that it is a 3-D array of size 254 x 255 x 4 (the fourth channel is the PNG alpha channel), and each element is stored as type float32. Let's look at the content of the array.
###Code
# print the array
image
###Output
_____no_output_____ |
Extraccion/LimpiezaExtracData.ipynb | ###Markdown
 read_csv parameters. Some information may come out incorrectly when loading: everything dumped into a single column, or the headers a mess.
###Code
import pandas as pd  # needed for the read_csv calls used throughout this notebook
'''pd.read_csv(filepath="",sep=",", dtype={"a":np.float64,"b":np.int32},header=0,names={"ingresos","edad"},skiprows=None,
index_col=None,skip_blank_lines=False,na_filter=False) '''
###Output
_____no_output_____
###Markdown
 **filepath** is for the absolute path.**sep** is for the delimiter; CSVs are separated by commas, but you can specify another character (|, ;, etc.).**dtype** assigns a type to particular columns (maybe there are dates that are not of date type); so column "a" can be float64.**header** indicates which row will be the header.**names** gives names to unnamed or badly named columns, etc. (an array or list of names must be provided).**skiprows** skips lines when reading; for example, skiprows=12 will skip 12 rows before reading.**index_col** -> lets you indicate that some column should be the identifier (index) of the table.**skip_blank_lines** Boolean. Blank lines in the file are skipped instead of being filled with NaN. **na_filter** Boolean. Detects missing-value markers (NaN, null) in the rows. CSV use cases
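###Markdown
 A small illustrative example combining several of these parameters (the file name and column names below are made up for the sketch):
###Code
# Illustrative only: 'example.csv' and the column names are assumptions for this sketch
df_example = pd.read_csv("example.csv",
                         sep=",",
                         header=0,
                         names=["ingresos", "edad"],
                         skiprows=None,
                         index_col=None,
                         skip_blank_lines=True,
                         na_filter=True)
df_example.head()
###Output
_____no_output_____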
###Code
url = "Customer Churn Model.txt" #Saves typing the whole path again
df1 = pd.read_csv(url)
df1.head()
###Output
_____no_output_____
###Markdown
 Perhaps we want to rename some columns
###Code
df1.columns.values #Gives the column names
#New df to change the column names
df1_columnas = pd.read_csv("Customer Churn Columns.csv") #Df from the csv holding the column names
df1_columnas.head()
df1_columnas_list = df1_columnas["Column_Names"].tolist() #Build the list of new names for relabeling
df1_columnas_list
df1 = pd.read_csv("Customer Churn Model.txt",header=None,names=df1_columnas_list) #Suppress the current header and
#rename with names
df1.head()
###Output
_____no_output_____
###Markdown
 Done! The open method for manual data loading.When reading information with read_csv the RAM tends to get saturated because the whole DF is loaded at once (when the DFs are large). **open** reads the data set line by line or in chunks, and lets you store these pieces in different parts of the code, or distribute them across computers.
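###Markdown
 As a complement (a small sketch, not part of the original notebook), pandas itself can also read in chunks with the `chunksize` parameter; the chunk size below is arbitrary:
###Code
# Sketch: chunked reading with pandas; each chunk is a DataFrame of up to 1000 rows
total_rows = 0
for chunk in pd.read_csv("Customer Churn Model.txt", chunksize=1000):
    total_rows += len(chunk)
print("Rows read in chunks:", total_rows)
###Output
_____no_output_____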
###Code
df2 = open("Customer Churn Model.txt",'r') #Read mode
cols = df2.readline().strip().split(",") #readline returns a single string
# strip removes whitespace (or whatever is specified) at the start and end of the line.
#split divides the line of text at the commas and returns an array
n_cols = len(cols) #Number of columns
n_cols
###Output
_____no_output_____
###Markdown
 Number of rows
###Code
counter = 0
main_dict = {}
for col in cols:
main_dict[col] = []
main_dict #Build an empty dictionary, to be filled with the values of each column
for line in df2: #Loop to advance through the remaining lines
    values = line.strip().split(",")
    for i in range(len(cols)): #i is the column where the data will be inserted
        main_dict[cols[i]].append(values[i]) #append the values of the line to main_dict[column i]
    counter += 1
print("The data set has %d rows and %d columns" % (counter, n_cols)) #The counter tracks how many times the
#operation ran, i.e. how many lines were read, and that is the number of rows
main_dict #The dictionary, now filled
###Output
_____no_output_____
###Markdown
 Then this is converted into a DataFrame, since the file was read efficiently and given the corresponding treatment. This manual or chunked loading option is useful when several machines have distributed workloads (one reads the file, another converts, another analyses; that way it is more efficient).
###Code
df3 = pd.DataFrame(main_dict)
df3.head()
###Output
_____no_output_____
###Markdown
 Reading and writing a file. Sometimes the element separator is not a comma (,). The delimiter can be another character, sometimes tabs (\t). Writing example:
###Code
in_file = "Customer Churn Model.txt"
out_file = "Tab Customer Churn Model.txt"
with open(in_file) as infile1: #Open the input file
    with open(out_file,"w") as outfile1: #Open the output file in write mode
        for line in infile1: #For each line in in_file
            fields = line.strip().split(",") #Build the array of elements
            outfile1.write("\t".join(fields)) #Then write them to the output file joined with \t
            outfile1.write("\n")#Add line breaks
df3 = pd.read_csv(out_file,sep = "\t")
df3.head()
###Output
_____no_output_____
###Markdown
 Reading data from a URL. Sometimes the information is in the cloud
###Code
url_winter = "http://winterolympicsmedals.com/medals.csv"
medals_data = pd.read_csv(url_winter)
medals_data.head()
###Output
_____no_output_____
###Markdown
 Libraries for handling CSVs or URLs
###Code
import csv
import urllib3
#One way to get info from a server
http = urllib3.PoolManager()
r = http.request('GET',url_winter)
r.status #200 if it worked
response = r.data.decode("utf-8") #This is not a DF yet, so it can be processed into one
type(response) #UTF-8 decodes from bytes to str
response
#Using csv
cr = csv.reader(response.strip().split("\n")) #Splits on \n
cr
colum_names = cr.__next__()
df_csv = pd.DataFrame(cr,columns=colum_names)
df_csv
colum_names
len(colum_names)
###Output
_____no_output_____
###Markdown
 Loading data from an xlsx or xls spreadsheet
###Code
path = "titanic3.xls"
path2 = "titanic3.xlsx"
###Output
_____no_output_____
###Markdown
 The difference with an Excel file is that it has sheets (tabs) in the document, each with its own name. You only need to specify the sheet name.
###Code
titanicDF = pd.read_excel(path,"titanic3") #titanic3 is the name of the Excel sheet the data is extracted from.
titanicDF2 = pd.read_excel(path2,"titanic3")
###Output
_____no_output_____
###Markdown
 Create a CSV or Excel file from what we have worked on
###Code
titanicDF2.to_csv("titanicCSV.csv")
'''titanicDF2.to_json("titanicJ.json")
titanicDF2.to_excel("titanicEx.xls")'''
###Output
_____no_output_____ |
Assignments/1.5 Plotting in Python.ipynb | ###Markdown
Assignment 1.5 MatplotlibMatplotlib is a Python data visualization library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. See: [Official Documentation](https://matplotlib.org/).We also need to use one of the main feature of IPython kernel to show rich output for [plotting](https://ipython.readthedocs.io/en/stable/interactive/plotting.html). Pandas also provides many [visualisation tools](https://pandas.pydata.org/pandas-docs/stable/visualization.html) to plot data.
###Code
# Import all the required libraries
# YOUR CODE HERE
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Please go through the [Matplotlib Tutorial](https://matplotlib.org/tutorials/introductory/pyplot.html) and try out the functions- plot- subplot- scatter- bar Diabetes DatasetThe dataset consists of several medical predictor variables and one target variable, whether someone has diabetes or not.**Attribute details**:- preg: Number of times pregnant- plas: Plasma glucose concentration a 2 hours in an oral glucose tolerance test- pres: Diastolic blood pressure (mm Hg)- skin: Triceps skin fold thickness (mm)- test: 2-Hour serum insulin (mu U/ml)- mass: Body mass index (weight in kg/(height in m)^2)- pedi: Diabetes pedigree function- age: Age (years)- class: Class variable (0 or 1);
###Code
# load diabetes.csv to continue, use names list for the column names
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
# YOUR CODE HERE
diabetes = pd.read_csv('diabetes.csv')
diabetes.columns = names
diabetes.head()
###Output
_____no_output_____
###Markdown
 HistogramsA fast way to get an idea of the distribution of each attribute is to look at histograms. Histograms group data into bins and provide a count of the number of observations in each bin. From the shape of the bins you can quickly get a feeling for whether an attribute is Gaussian, skewed, or even has an exponential distribution. It can also help you see possible outliers. Exercise 1Plot a histogram of each column and see if any data approximates a Gaussian distribution.Ref: [Normal or Gaussian Distribution](https://www.itl.nist.gov/div898/handbook/eda/section3/eda3661.htm)You could also use the pandas histogram function. Also, change `figsize` as necessary.
###Code
# YOUR CODE HERE
diabetes.hist(figsize=(20, 20), bins=100)
###Output
_____no_output_____
###Markdown
Density PlotsDensity plots are continuous graphs, just another way of getting a quick idea of the distribution of each attribute. The plots look like an abstracted histogram with a smooth curve drawn through the top of each bin, much like your eye tried to do with the histograms. Exercise 2Plot density plots of each column.
###Code
# YOUR CODE HERE
diabetes.plot.kde(figsize=(10, 10), bw_method=1)
###Output
_____no_output_____
###Markdown
Box and Whisker Plots (Boxplot)Boxplots summarize the distribution of each attribute, drawing a line for the median (middle value) and a box around the 25th and 75th percentiles (the middle 50% of the data). The whiskers give an idea of the spread of the data and dots outside of the whiskers show candidate outlier values (values that are 1.5 times greater than the size of spread of the middle 50% of the data). Exercise 3Plot Boxplots and see how many of the columns are affected by outliers.
###Code
# YOUR CODE HERE
diabetes.boxplot(figsize=(20, 20), column=names)
###Output
_____no_output_____
###Markdown
Scatterplot MatrixA scatterplot shows the relationship between two variables as dots in two dimensions, one axis for each attribute. You can create a scatterplot for each pair of attributes in your data. Drawing all these scatterplots together is called a scatterplot matrix.Scatter plots are useful for spotting structured relationships between variables, like whether you could summarize the relationship between two variables with a line. Attributes with structured relationships may also be correlated and good candidates for removal from your dataset. Exercise 4Plot a scatter plot of pressure ('pres') and BMI ('mass') colored based on 'class' with size of each plot determined by plasma glucose concentration ('plas'). ***Note: Only consider rows where pressure and mass are not zero***.
###Code
# YOUR CODE HERE
subset = diabetes[(diabetes['pres'] != 0) & (diabetes['mass'] != 0)]  # keep only rows where pressure and BMI are non-zero, as the note asks
fig, scatter_plot = plt.subplots()
scatter_plot.scatter(subset['pres'], subset['mass'], c=subset['class'], s=subset['plas'])
scatter_plot.set_xlabel("pres", fontsize=15)
scatter_plot.set_ylabel("mass", fontsize=15)
scatter_plot.set_title('pres vs mass')
plt.show()
###Output
_____no_output_____
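###Markdown
 A full scatterplot matrix over all pairs of attributes, as described above, can be drawn with pandas; this is a small additional sketch, not part of the exercise answer:
###Code
# Sketch: scatterplot matrix of all attribute pairs using pandas
pd.plotting.scatter_matrix(diabetes, figsize=(20, 20))
plt.show()
###Output
_____no_output_____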
###Markdown
 Correlation Matrix PlotA correlation matrix plot shows how related the changes in two variables are. If two variables change in the same direction they are positively correlated. If they change in opposite directions, then they are negatively correlated. A plot of the correlation between each pair of attributes, called a correlation matrix, can provide an idea of which variables have a high correlation with each other.This is useful to know, because some machine learning algorithms like linear and logistic regression can have poor performance if there are highly correlated input variables in your data. Exercise 5Plot a correlation matrix for the given data and see if you can find any correlation between the variables; is the matrix symmetrical? What is `class` most correlated with?See: [Pandas Correlation function](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.corr.html)
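###Markdown
 One way to answer the last question numerically (a small additional sketch, not required by the exercise) is to sort the correlations of `class` with every other column:
###Code
# Sketch: correlation of every attribute with 'class', strongest first
diabetes.corr()['class'].drop('class').sort_values(ascending=False)
###Output
_____no_output_____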
###Code
# YOUR CODE HERE
plt.matshow(diabetes.corr())
###Output
_____no_output_____ |
notebooks/mix_k_dihedral_2.ipynb | ###Markdown
Part 1: Initialize
###Code
host = 'a_tract_21mer'
strand_id = 'STRAND1'
make_df = False
make_dihedral = False
plot_agent = MixPlot3(host, strand_id, big_traj_folder, dihedral_folder, backbone_data_folder, make_df)
###Output
/home/yizaochen/codes/dna_rna/backbone_data/a_tract_21mer exists
/home/yizaochen/codes/dna_rna/backbone_data/a_tract_21mer/STRAND1 exists
/home/yizaochen/codes/dna_rna/backbone_data/a_tract_21mer/STRAND1/ndx exists
/home/yizaochen/codes/dna_rna/backbone_data/a_tract_21mer/STRAND1/plumed_input exists
/home/yizaochen/codes/dna_rna/backbone_data/a_tract_21mer/STRAND1/plumed_out exists
/home/yizaochen/codes/backbone_rigidity/plumed_test/a_tract_21mer exists
###Markdown
Part 2: Make/Read DataFrame
###Code
if make_df:
plot_agent.make_k_df()
else:
plot_agent.read_k_df()
if make_dihedral:
plot_agent.make_all_dihedral_df()
else:
plot_agent.read_all_diehdral_df()
###Output
_____no_output_____
###Markdown
Part 2: Plot
###Code
figsize = (6, 12)
hspace = 0
bottom = 0
top = 5
fig, d_axes = plot_agent.plot_main(figsize, hspace, bottom, top)
png_out = path.join(drawzone_folder, f'{host}_{strand_id}.png')
plt.tight_layout()
#plt.savefig(png_out, dpi=100)
plt.show()
###Output
_____no_output_____
###Markdown
Batch Drawing
###Code
hosts = ['a_tract_21mer', 'g_tract_21mer', 'atat_21mer', 'gcgc_21mer']
strands = ['STRAND1', 'STRAND2']
make_df = False
make_dihedral = False
for host in hosts:
for strand_id in strands:
plot_agent = MixPlot3(host, strand_id, big_traj_folder, dihedral_folder, backbone_data_folder, make_df)
if make_df:
plot_agent.make_k_df()
else:
plot_agent.read_k_df()
if make_dihedral:
plot_agent.make_all_dihedral_df()
else:
plot_agent.read_all_diehdral_df()
figsize = (6, 12)
hspace = 0
bottom = 0
top = 5
fig, d_axes = plot_agent.plot_main(figsize, hspace, bottom, top)
png_out = path.join(drawzone_folder, f'{host}_{strand_id}.png')
plt.tight_layout()
#plt.savefig(png_out, dpi=100)
plt.show()
hosts = ['g_tract_21mer', 'atat_21mer', 'gcgc_21mer']
strands = ['STRAND1', 'STRAND2']
make_df = True
make_dihedral = True
for host in hosts:
for strand_id in strands:
plot_agent = MixPlot2(host, strand_id, big_traj_folder, dihedral_folder, backbone_data_folder, make_df)
if make_df:
plot_agent.make_k_df()
else:
plot_agent.read_k_df()
if make_dihedral:
plot_agent.make_all_dihedral_df()
else:
plot_agent.read_all_diehdral_df()
figsize = (6, 12)
hspace = 0
bottom = 0
top = 6.0
fig, d_axes = plot_agent.plot_main(figsize, hspace, bottom, top)
png_out = path.join(drawzone_folder, f'{host}_{strand_id}.png')
plt.tight_layout()
plt.savefig(png_out, dpi=100)
plt.show()
###Output
_____no_output_____
###Markdown
 Additional Part: Data Matrix Max, Min
###Code
dihedral_name_lst = ["O4prime-O5prime", "C2prime-C8orC6", "O4prime-C8orC6"]
d_min_max = {label: {'Min': list(), 'Max': list()} for label in dihedral_name_lst}
hosts = ['a_tract_21mer', 'g_tract_21mer', 'atat_21mer', 'gcgc_21mer']
strands = ['STRAND1', 'STRAND2']
make_df = False
make_dihedral = False
for host in hosts:
for strand_id in strands:
plot_agent = MixPlot3(host, strand_id, big_traj_folder, dihedral_folder, backbone_data_folder, make_df)
if make_df:
plot_agent.make_k_df()
else:
plot_agent.read_k_df()
if make_dihedral:
plot_agent.make_all_dihedral_df()
else:
plot_agent.read_all_diehdral_df()
for label in dihedral_name_lst:
data_mat = plot_agent.assemble_data_mat(plot_agent.d_dihedral_df[label])
d_min_max[label]['Min'].append(data_mat.min())
d_min_max[label]['Max'].append(data_mat.max())
for label in dihedral_name_lst:
print(label)
minimum = np.array(d_min_max[label]['Min']).min()
maximum = np.array(d_min_max[label]['Max']).max()
print(f'Min: {minimum:.3f}')
print(f'Max: {maximum:.3f}')
plot_agent.get_data_mat_min_max()
###Output
C2'-C3'-O3'-P
Min: 0.0 Max: 0.029
epsilon - zeta:
Min: 0.0 Max: 0.022
###Markdown
Additional Part: Spring Constant Minimum Maximum
###Code
k_labels = ["C1'-N3/C1'-O2", "C2'-C8/C2'-C6", "O4'-O5'"]
d_min_max = {label: {'Min': list(), 'Max': list()} for label in k_labels}
hosts = ['a_tract_21mer', 'g_tract_21mer', 'atat_21mer', 'gcgc_21mer']
strands = ['STRAND1', 'STRAND2']
make_df = False
make_dihedral = False
for host in hosts:
for strand_id in strands:
plot_agent = MixPlot3(host, strand_id, big_traj_folder, dihedral_folder, backbone_data_folder, make_df)
if make_df:
plot_agent.make_k_df()
else:
plot_agent.read_k_df()
if make_dihedral:
plot_agent.make_all_dihedral_df()
else:
plot_agent.read_all_diehdral_df()
figsize = (6, 12)
hspace = 0
bottom = 0
top = 6.0
fig, d_axes = plot_agent.plot_main(figsize, hspace, bottom, top)
png_out = path.join(drawzone_folder, f'{host}_{strand_id}.png')
#plt.tight_layout()
#plt.savefig(png_out, dpi=100)
#plt.show()
for idx, label in enumerate(k_labels):
ylim = d_axes[idx].get_ylim()
d_min_max[label]['Min'].append(ylim[0])
d_min_max[label]['Max'].append(ylim[1])
for label in k_labels:
print(label)
minimum = np.array(d_min_max[label]['Min']).min()
maximum = np.array(d_min_max[label]['Max']).max()
print(f'Min: {minimum:.3f}')
print(f'Max: {maximum:.3f}')
idx = 2
ylim = d_axes[idx].get_ylim()
print(f'Min: {ylim[0]:.3f}')
print(f'Max: {ylim[1]:.3f}')
###Output
Min: 2.665
Max: 6.763
###Markdown
Additional Part : Color Bar
###Code
figsize = (8,4)
dihedral_name = "C2prime-P" #"C2prime-P", "C4prime-P", "C3prime-O5prime", "epsilon-zeta"
fig, ax1, cb1 = plot_agent.draw_color_bar(figsize, dihedral_name)
plt.show()
###Output
_____no_output_____ |
II Machine Learning & Deep Learning/nuevo programa/#05. Neural Networks for Classification. Part II/05session.ipynb | ###Markdown
 05. Neural Networks for Classification. Part II - Book + Private Lessons [Here ↗](https://sotastica.com/reservar)- Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)- Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄 Load the Data
###Code
import tensorflow as tf  # moved here from the markdown above; needed for tf.keras below
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255
test_images = test_images / 255
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
_____no_output_____
###Markdown
Get to Know the Data Visualize some Samples
###Code
import matplotlib.pyplot as plt
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
###Output
_____no_output_____
###Markdown
Visualize One Sample/Row/Image/Explanatory Variables
###Code
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
###Output
_____no_output_____
###Markdown
Target Variable Value
###Code
idx_label = train_labels[0]
class_names[idx_label]
###Output
_____no_output_____
###Markdown
Neural Network Concepts in Python Initializing the `Weights` > - https://keras.io/api/layers/initializers/ How to `kernel_initializer` the weights?
###Code
from tensorflow.keras import Sequential, Input
from tensorflow.keras.layers import Dense, Flatten
train_images.shape
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(layer=Dense(units=128, kernel_initializer='zeros'))
model.add(layer=Dense(units=10))
###Output
_____no_output_____
###Markdown
Make a Prediction with the Neural Network
###Code
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images[0].shape
###Output
_____no_output_____
###Markdown
Observe the numbers for the `weights`
###Code
model.get_weights()
###Output
_____no_output_____
###Markdown
 Predictions vs Reality > 1. Calculate the predicted labels and> 2. Compare them with the real labels `fit()` the `model` and compare again
###Code
model.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=20, verbose=0)
###Output
_____no_output_____
###Markdown
Observe the numbers for the `weights` Predictions vs Reality
###Code
y_pred = model.predict(train_images)
(y_pred.argmax(axis=1) == train_labels).mean()
###Output
_____no_output_____
###Markdown
How to `kernel_initializer` the weights to 1? How to `kernel_initializer` the weights to `glorot_uniform` (default)? Play with the Activation Function > - https://keras.io/api/layers/activations/
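###Markdown
 A minimal sketch answering the two initializer questions above (same architecture as before; 'ones' and 'glorot_uniform' are standard Keras initializer names, and `model_ones` / `model_glorot` are names introduced here for illustration):
###Code
# Sketch: same architecture with all-ones weights
model_ones = Sequential()
model_ones.add(Flatten(input_shape=(28, 28)))
model_ones.add(Dense(units=128, kernel_initializer='ones'))
model_ones.add(Dense(units=10))
# Sketch: same architecture with the Keras default initializer, Glorot uniform
model_glorot = Sequential()
model_glorot.add(Flatten(input_shape=(28, 28)))
model_glorot.add(Dense(units=128, kernel_initializer='glorot_uniform'))
model_glorot.add(Dense(units=10))
###Output
_____no_output_____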
###Code
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/IHZwWFHWa-w?start=558" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
Use `sigmoid` activation in last layer
###Code
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(layer=Dense(units=128, kernel_initializer='zeros'))
model.add(layer=Dense(units=10, activation='sigmoid'))  # sigmoid in the last layer, as this section's title asks
model.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
`fit()` the Model
###Code
model.fit(train_images, train_labels, epochs=20, verbose=0)
###Output
_____no_output_____
###Markdown
Predictions vs Reality
###Code
y_pred = model.predict(train_images)
(y_pred.argmax(axis=1) == train_labels).mean()
###Output
_____no_output_____
###Markdown
Observe the numbers for the `weights`> - Have they changed?
###Code
model.get_weights()
###Output
_____no_output_____
###Markdown
 Use `linear` activation in last layer Use `tanh` activation in last layer Use `relu` activation in last layer How are the predictions changing? Why? Optimizer > - https://keras.io/api/optimizers/available-optimizers Optimizers comparison in GIF → https://mlfromscratch.com/optimizers-explained/adam Tesla's neural network system is composed of 48 models trained over 70,000 GPU hours → https://tesla.com/ai 1 year with an 8-GPU computer → https://twitter.com/thirdrowtesla/status/1252723358342377472 Use Gradient Descent `SGD`
###Code
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(layer=Dense(units=128, kernel_initializer='zeros'))
model.add(layer=Dense(units=10))
###Output
_____no_output_____
###Markdown
`compile()` the model
###Code
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
`fit()` the Model
###Code
history = model.fit(train_images, train_labels, validation_split=0.2, epochs=20, verbose=0)  # X, y were undefined; validation_split provides the val_loss plotted below
###Output
_____no_output_____
###Markdown
Predictions vs Reality
###Code
y_pred = model.predict(train_images)
(y_pred.argmax(axis=1) == train_labels).mean()
###Output
_____no_output_____
###Markdown
Observe the numbers for the `weights`> - Have they changed?
###Code
model.get_weights()
###Output
_____no_output_____
###Markdown
View History
###Code
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
###Output
_____no_output_____ |
predict_horse_racing.2.ipynb | ###Markdown
###Code
from google.colab import drive
drive.mount("/content/drive")
import pandas as pd
df_all = pd.read_csv("drive/My Drive/ml_data/race_meta_and_scores.csv")
df_all.info()
df_all.head()
df_subset = df_all[["course_length",
"weather",
"course_condition",
"race_class",
"prize_class",
"gender",
"age",
"coat_color",
"horse_weight",
"trainer_id",
"jockey_id",
"jockey_weight",
"rank"]]
print(df_subset.info())
print("----------")
print("isnull.sum")
print("----------")
print(df_subset.isnull().sum())
print("----------")
print("dropna")
print("----------")
df = df_subset.dropna()
print("----------")
print("isnull.sum")
print("----------")
print(df.isnull().sum())
print("----------")
print(df.info())
df.head()
df_course_length = pd.get_dummies(df["course_length"])
df_weather = pd.get_dummies(df["weather"])
df_course_condition = pd.get_dummies(df["course_condition"])
df_race_class = pd.get_dummies(df["race_class"])
df_prize_class = pd.get_dummies(df["prize_class"])
df_gender = pd.get_dummies(df["gender"])
df_age = pd.get_dummies(df["age"])
df_coat_color = pd.get_dummies(df["coat_color"])
df_trainer_id = pd.get_dummies(df["trainer_id"])
df_jockey_id = pd.get_dummies(df["jockey_id"])
df_father_horse_name = pd.get_dummies(df_all["father_horse_name"])
df_mother_horse_name = pd.get_dummies(df_all["mother_horse_name"])
import sklearn.preprocessing as sp
import numpy as np
df_input = pd.concat([df.drop(["course_length"], axis=1), df_course_length], axis=1)
df_input = pd.concat([df_input.drop(["weather"], axis=1), df_weather], axis=1)
df_input = pd.concat([df_input.drop(["course_condition"], axis=1), df_course_condition], axis=1)
#df_input = pd.concat([df_input.drop(["race_class"], axis=1), df_race_class], axis=1)
df_input = df_input.drop(["race_class"], axis=1)
#df_input = pd.concat([df_input.drop(["prize_class"], axis=1), df_prize_class], axis=1)
df_input = df_input.drop(["prize_class"], axis=1)
df_input = pd.concat([df_input.drop(["gender"], axis=1), df_gender], axis=1)
df_input = pd.concat([df_input.drop(["age"], axis=1), df_age], axis=1)
df_input = pd.concat([df_input.drop(["coat_color"], axis=1), df_coat_color], axis=1)
#df_input = pd.concat([df_input.drop(["trainer_id"], axis=1), df_trainer_id], axis=1)
df_input = df_input.drop(["trainer_id"], axis=1)
#df_input = pd.concat([df_input.drop(["jockey_id"], axis=1), df_jockey_id], axis=1)
df_input = df_input.drop(["jockey_id"], axis=1)
df_input["horse_weight"] = sp.minmax_scale(df_input["horse_weight"])
df_input["jockey_weight"] = sp.minmax_scale(df_input["jockey_weight"])
#df_input["rank"] = sp.minmax_scale(df_input["rank"])
rank_3 = []
for index, row in df_input.iterrows():
rank_3.append(1 if row["rank"] <= 3.0 else 0)
df_input["rank_3"] = rank_3
df_input = df_input.drop(["rank"], axis=1)
df_input.info()
df_input
x = df_input.drop(["rank_3"], axis=1)
y = df_input["rank_3"]
x.head()
y.head()
import sklearn.model_selection as sm
x_train, x_test, y_train, y_test = sm.train_test_split(x, y)
print("x_train.shape: {0}".format(x_train.shape))
print("x_test.shape: {0}".format(x_test.shape))
print("y_train.shape: {0}".format(y_train.shape))
print("y_test.shape: {0}".format(y_test.shape))
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(x_train, y_train)
print("train score: {0}".format(clf.score(x_train, y_train)))
print("test score: {0}".format(clf.score(x_test, y_test)))
results = clf.predict(x_test)
df_result = pd.DataFrame()
df_result["rank"] = y_test
df_result["rank_result"] = results
df_result
###Output
_____no_output_____ |
home-credit-default-risk/3. LightGBM_GSCV1.ipynb | ###Markdown
3. LightGBM_GSCV1Reference:- https://www.kaggle.com/ogrellier/good-fun-with-ligthgbm/code Run name
###Code
import time
project_name = 'HomeCreditDefaultRisk'
step_name = 'LightGBM_GSCV1'
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
run_name = project_name + '_' + step_name + '_' + time_str
print('run_name: ' + run_name)
t0 = time.time()
###Output
run_name: HomeCreditDefaultRisk_LightGBM_GSCV1_20180603_204528
###Markdown
Important params Import PKGs
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
from IPython.display import display
import seaborn as sns
import os
import sys
import gc
import math
import tqdm
import shutil
import zipfile
import pickle
import h5py
# import cv2
from PIL import Image
from tqdm import tqdm
import multiprocessing
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils import shuffle
random_num = np.random.randint(10000)
cpu_amount = multiprocessing.cpu_count()
print('cpu_amount: %s' % (cpu_amount - 1))
print('random_num: %s' % random_num)
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold
import xgboost
# from xgboost import plot_importance
###Output
_____no_output_____
###Markdown
Project folders
###Code
cwd = os.getcwd()
feature_folder = os.path.join(cwd, 'feature')
input_folder = os.path.join(cwd, 'input')
output_folder = os.path.join(cwd, 'output')
model_folder = os.path.join(cwd, 'model')
application_test_csv_file = os.path.join(input_folder, 'application_test.csv')
application_train_csv_file = os.path.join(input_folder, 'application_train.csv')
bureau_csv_file = os.path.join(input_folder, 'bureau.csv')
bureau_balance_csv_file = os.path.join(input_folder, 'bureau_balance.csv')
credit_card_balance_csv_file = os.path.join(input_folder, 'credit_card_balance.csv')
installments_payments_csv_file = os.path.join(input_folder, 'installments_payments.csv')
POS_CASH_balance_csv_file = os.path.join(input_folder, 'POS_CASH_balance.csv')
previous_application_csv_file = os.path.join(input_folder, 'previous_application.csv')
sample_submission_csv_file = os.path.join(input_folder, 'sample_submission.csv')
print(application_test_csv_file)
print(application_train_csv_file)
print(bureau_csv_file)
print(bureau_balance_csv_file)
print(credit_card_balance_csv_file)
print(installments_payments_csv_file)
print(POS_CASH_balance_csv_file)
print(previous_application_csv_file)
print(sample_submission_csv_file)
###Output
D:\bitbucket\kaggle\home-credit-default-risk\input\application_test.csv
D:\bitbucket\kaggle\home-credit-default-risk\input\application_train.csv
D:\bitbucket\kaggle\home-credit-default-risk\input\bureau.csv
D:\bitbucket\kaggle\home-credit-default-risk\input\bureau_balance.csv
D:\bitbucket\kaggle\home-credit-default-risk\input\credit_card_balance.csv
D:\bitbucket\kaggle\home-credit-default-risk\input\installments_payments.csv
D:\bitbucket\kaggle\home-credit-default-risk\input\POS_CASH_balance.csv
D:\bitbucket\kaggle\home-credit-default-risk\input\previous_application.csv
D:\bitbucket\kaggle\home-credit-default-risk\input\sample_submission.csv
###Markdown
Load data
###Code
def build_model_input():
buro_bal = pd.read_csv(bureau_balance_csv_file)
print('Buro bal shape : ', buro_bal.shape)
print('transform to dummies')
buro_bal = pd.concat([buro_bal, pd.get_dummies(buro_bal.STATUS, prefix='buro_bal_status')], axis=1).drop('STATUS', axis=1)
print('Counting buros')
buro_counts = buro_bal[['SK_ID_BUREAU', 'MONTHS_BALANCE']].groupby('SK_ID_BUREAU').count()
buro_bal['buro_count'] = buro_bal['SK_ID_BUREAU'].map(buro_counts['MONTHS_BALANCE'])
print('averaging buro bal')
avg_buro_bal = buro_bal.groupby('SK_ID_BUREAU').mean()
avg_buro_bal.columns = ['avg_buro_' + f_ for f_ in avg_buro_bal.columns]
del buro_bal
gc.collect()
print('Read Bureau')
buro = pd.read_csv(bureau_csv_file)
print('Go to dummies')
buro_credit_active_dum = pd.get_dummies(buro.CREDIT_ACTIVE, prefix='ca_')
buro_credit_currency_dum = pd.get_dummies(buro.CREDIT_CURRENCY, prefix='cu_')
buro_credit_type_dum = pd.get_dummies(buro.CREDIT_TYPE, prefix='ty_')
buro_full = pd.concat([buro, buro_credit_active_dum, buro_credit_currency_dum, buro_credit_type_dum], axis=1)
# buro_full.columns = ['buro_' + f_ for f_ in buro_full.columns]
del buro_credit_active_dum, buro_credit_currency_dum, buro_credit_type_dum
gc.collect()
print('Merge with buro avg')
buro_full = buro_full.merge(right=avg_buro_bal.reset_index(), how='left', on='SK_ID_BUREAU', suffixes=('', '_bur_bal'))
print('Counting buro per SK_ID_CURR')
nb_bureau_per_curr = buro_full[['SK_ID_CURR', 'SK_ID_BUREAU']].groupby('SK_ID_CURR').count()
buro_full['SK_ID_BUREAU'] = buro_full['SK_ID_CURR'].map(nb_bureau_per_curr['SK_ID_BUREAU'])
print('Averaging bureau')
avg_buro = buro_full.groupby('SK_ID_CURR').mean()
print(avg_buro.head())
del buro, buro_full
gc.collect()
print('Read prev')
prev = pd.read_csv(previous_application_csv_file)
prev_cat_features = [
f_ for f_ in prev.columns if prev[f_].dtype == 'object'
]
print('Go to dummies')
prev_dum = pd.DataFrame()
for f_ in prev_cat_features:
prev_dum = pd.concat([prev_dum, pd.get_dummies(prev[f_], prefix=f_).astype(np.uint8)], axis=1)
prev = pd.concat([prev, prev_dum], axis=1)
del prev_dum
gc.collect()
print('Counting number of Prevs')
nb_prev_per_curr = prev[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
prev['SK_ID_PREV'] = prev['SK_ID_CURR'].map(nb_prev_per_curr['SK_ID_PREV'])
print('Averaging prev')
avg_prev = prev.groupby('SK_ID_CURR').mean()
print(avg_prev.head())
del prev
gc.collect()
print('Reading POS_CASH')
pos = pd.read_csv(POS_CASH_balance_csv_file)
print('Go to dummies')
pos = pd.concat([pos, pd.get_dummies(pos['NAME_CONTRACT_STATUS'])], axis=1)
print('Compute nb of prevs per curr')
nb_prevs = pos[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
pos['SK_ID_PREV'] = pos['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
print('Go to averages')
avg_pos = pos.groupby('SK_ID_CURR').mean()
del pos, nb_prevs
gc.collect()
print('Reading CC balance')
cc_bal = pd.read_csv(credit_card_balance_csv_file)
print('Go to dummies')
cc_bal = pd.concat([cc_bal, pd.get_dummies(cc_bal['NAME_CONTRACT_STATUS'], prefix='cc_bal_status_')], axis=1)
nb_prevs = cc_bal[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
cc_bal['SK_ID_PREV'] = cc_bal['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
print('Compute average')
avg_cc_bal = cc_bal.groupby('SK_ID_CURR').mean()
avg_cc_bal.columns = ['cc_bal_' + f_ for f_ in avg_cc_bal.columns]
del cc_bal, nb_prevs
gc.collect()
print('Reading Installments')
inst = pd.read_csv(installments_payments_csv_file)
nb_prevs = inst[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
inst['SK_ID_PREV'] = inst['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
avg_inst = inst.groupby('SK_ID_CURR').mean()
avg_inst.columns = ['inst_' + f_ for f_ in avg_inst.columns]
print('Read data and test')
data = pd.read_csv(application_train_csv_file)
test = pd.read_csv(application_test_csv_file)
print('Shapes : ', data.shape, test.shape)
id_test = test['SK_ID_CURR']
y = data['TARGET']
del data['TARGET']
categorical_feats = [
f for f in data.columns if data[f].dtype == 'object'
]
categorical_feats
for f_ in categorical_feats:
data[f_], indexer = pd.factorize(data[f_])
test[f_] = indexer.get_indexer(test[f_])
data = data.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
del avg_buro, avg_prev
gc.collect()
return data, test, y, id_test
x_data, x_test, y_data, id_test = build_model_input()
###Output
Buro bal shape : (27299925, 3)
transform to dummies
Counting buros
averaging buro bal
Read Bureau
Go to dummies
Merge with buro avg
Counting buro per SK_ID_CURR
Averaging bureau
SK_ID_BUREAU DAYS_CREDIT CREDIT_DAY_OVERDUE \
SK_ID_CURR
100001 7.0 -735.000000 0.0
100002 8.0 -874.000000 0.0
100003 4.0 -1400.750000 0.0
100004 2.0 -867.000000 0.0
100005 3.0 -190.666667 0.0
DAYS_CREDIT_ENDDATE DAYS_ENDDATE_FACT AMT_CREDIT_MAX_OVERDUE \
SK_ID_CURR
100001 82.428571 -825.500000 NaN
100002 -349.000000 -697.500000 1681.029
100003 -544.500000 -1097.333333 0.000
100004 -488.500000 -532.500000 0.000
100005 439.333333 -123.000000 0.000
CNT_CREDIT_PROLONG AMT_CREDIT_SUM AMT_CREDIT_SUM_DEBT \
SK_ID_CURR
100001 0.0 207623.571429 85240.928571
100002 0.0 108131.945625 49156.200000
100003 0.0 254350.125000 0.000000
100004 0.0 94518.900000 0.000000
100005 0.0 219042.000000 189469.500000
AMT_CREDIT_SUM_LIMIT ... \
SK_ID_CURR ...
100001 0.00000 ...
100002 7997.14125 ...
100003 202500.00000 ...
100004 0.00000 ...
100005 0.00000 ...
avg_buro_MONTHS_BALANCE avg_buro_buro_bal_status_0 \
SK_ID_CURR
100001 -11.785714 0.336651
100002 -21.875000 0.406960
100003 NaN NaN
100004 NaN NaN
100005 -3.000000 0.735043
avg_buro_buro_bal_status_1 avg_buro_buro_bal_status_2 \
SK_ID_CURR
100001 0.007519 0.0
100002 0.255682 0.0
100003 NaN NaN
100004 NaN NaN
100005 0.000000 0.0
avg_buro_buro_bal_status_3 avg_buro_buro_bal_status_4 \
SK_ID_CURR
100001 0.0 0.0
100002 0.0 0.0
100003 NaN NaN
100004 NaN NaN
100005 0.0 0.0
avg_buro_buro_bal_status_5 avg_buro_buro_bal_status_C \
SK_ID_CURR
100001 0.0 0.441240
100002 0.0 0.175426
100003 NaN NaN
100004 NaN NaN
100005 0.0 0.128205
avg_buro_buro_bal_status_X avg_buro_buro_count
SK_ID_CURR
100001 0.214590 24.571429
100002 0.161932 13.750000
100003 NaN NaN
100004 NaN NaN
100005 0.136752 7.000000
[5 rows x 46 columns]
Read prev
Go to dummies
Counting number of Prevs
Averaging prev
SK_ID_PREV AMT_ANNUITY AMT_APPLICATION AMT_CREDIT \
SK_ID_CURR
100001 1.0 3951.000 24835.50 23787.00
100002 1.0 9251.775 179055.00 179055.00
100003 3.0 56553.990 435436.50 484191.00
100004 1.0 5357.250 24282.00 20106.00
100005 2.0 4813.200 22308.75 20076.75
AMT_DOWN_PAYMENT AMT_GOODS_PRICE HOUR_APPR_PROCESS_START \
SK_ID_CURR
100001 2520.0 24835.5 13.000000
100002 0.0 179055.0 9.000000
100003 3442.5 435436.5 14.666667
100004 4860.0 24282.0 5.000000
100005 4464.0 44617.5 10.500000
NFLAG_LAST_APPL_IN_DAY RATE_DOWN_PAYMENT RATE_INTEREST_PRIMARY \
SK_ID_CURR
100001 1.0 0.104326 NaN
100002 1.0 0.000000 NaN
100003 1.0 0.050030 NaN
100004 1.0 0.212008 NaN
100005 1.0 0.108964 NaN
... \
SK_ID_CURR ...
100001 ...
100002 ...
100003 ...
100004 ...
100005 ...
PRODUCT_COMBINATION_Cash X-Sell: low \
SK_ID_CURR
100001 0.000000
100002 0.000000
100003 0.333333
100004 0.000000
100005 0.000000
PRODUCT_COMBINATION_Cash X-Sell: middle \
SK_ID_CURR
100001 0.0
100002 0.0
100003 0.0
100004 0.0
100005 0.0
PRODUCT_COMBINATION_POS household with interest \
SK_ID_CURR
100001 0.000000
100002 0.000000
100003 0.333333
100004 0.000000
100005 0.000000
PRODUCT_COMBINATION_POS household without interest \
SK_ID_CURR
100001 0.0
100002 0.0
100003 0.0
100004 0.0
100005 0.0
PRODUCT_COMBINATION_POS industry with interest \
SK_ID_CURR
100001 0.000000
100002 0.000000
100003 0.333333
100004 0.000000
100005 0.000000
PRODUCT_COMBINATION_POS industry without interest \
SK_ID_CURR
100001 0.0
100002 0.0
100003 0.0
100004 0.0
100005 0.0
PRODUCT_COMBINATION_POS mobile with interest \
SK_ID_CURR
100001 1.0
100002 0.0
100003 0.0
100004 0.0
100005 0.5
PRODUCT_COMBINATION_POS mobile without interest \
SK_ID_CURR
100001 0.0
100002 0.0
100003 0.0
100004 1.0
100005 0.0
PRODUCT_COMBINATION_POS other with interest \
SK_ID_CURR
100001 0.0
100002 1.0
100003 0.0
100004 0.0
100005 0.0
PRODUCT_COMBINATION_POS others without interest
SK_ID_CURR
100001 0.0
100002 0.0
100003 0.0
100004 0.0
100005 0.0
###Markdown
Get feature
###Code
# id_data = train_csv['SK_ID_CURR']
# id_test = test_csv['SK_ID_CURR']
# useless_features = []
# x_data = train_csv.drop(columns=['SK_ID_CURR'] + useless_features)
# x_test = test_csv.drop(columns=['SK_ID_CURR'] + useless_features)
# train_csv.loc[2][:20]
# plt.hist(x_data[['EXT_SOURCE_1']], bins=100, normed=True)
# plt.xlabel(('x'))
# plt.ylabel('EXT_SOURCE_1')
# plt.show()
# log_columns = ['EXT_SOURCE_1']
# for data_set in [x_data, x_test]:
# data_set = data_set[log_columns].apply(lambda x: np.log(x + 1))
# plt.hist(x_data[['EXT_SOURCE_1']], bins=100, normed=True)
# plt.xlabel(('x'))
# plt.ylabel('EXT_SOURCE_1')
# plt.show()
x_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size=0.05, random_state=random_num, shuffle=False)
# x_train, y_train = shuffle(x_train, y_train, random_state=random_num)
print(x_train.shape)
print(y_train.shape)
print(x_val.shape)
print(y_val.shape)
###Output
(292135, 380)
(292135,)
(15376, 380)
(15376,)
###Markdown
Train
###Code
%%time
import warnings
warnings.filterwarnings('ignore')
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
# lgb_train = lgb.Dataset(x_train, label=y_train)
# lgb_val = lgb.Dataset(x_val, label=y_val, reference=lgb_train)
# LightGBM parameters
param_grid = {
# 'task': 'train',
# 'num_boost_round': [200],
# 'early_stopping_rounds': [10],
# 'boosting_type': ['gbdt'], # (default="gbdt")
# 'num_leaves': [300], # (default=31)
'max_depth': [6,7,8], # (default=-1)
# 'learning_rate': [0.1], # (default=0.1)
# 'n_estimators': [1000, 500], # (default=10)
# 'max_bin': [1000, 255], # (default=255)
# 'subsample_for_bin': [100*10000], # (default=50000)
# 'objective': ['binary'], # (default=None)
# 'min_split_gain': [0.], # (default=0.)
# 'min_child_weight': [1e-3], # (default=1e-3)
# 'min_child_samples': [10], # (default=20)
# 'subsample': [0.7], # (default=1.)
# 'subsample_freq': [1], # (default=1)
'colsample_bytree': [0.2, 0.8], # (default=1.)
# 'reg_alpha': [0.], # (default=0.)
# 'reg_lambda': [0.], # (default=0.)
# 'random_state': [random_num], # (default=None)
# 'n_jobs': [-1], # (default=-1)
# 'silent': [False], # (default=True)
# 'metric': ['auc', 'binary_logloss'],
}
# print('params: ', params)
# train
clf = lgb.LGBMClassifier(
# 'num_boost_round'=200,
# 'early_stopping_rounds'=10,
boosting_type='gbdt', # (default="gbdt")
num_leaves=300, # (default=31)
max_depth=-1, # (default=-1)
learning_rate=0.03, # (default=0.1)
n_estimators=4000, # (default=10)
# 'max_bin'=255, # (default=255)
subsample_for_bin=500, # (default=50000)
objective='binary', # (default=None)
class_weight=None,
min_split_gain=0.01, # (default=0.)
min_child_weight=2, # (default=1e-3)
min_child_samples=10, # (default=20)
subsample=0.9, # (default=1.)
# 'subsample_freq'=1, # (default=1)
colsample_bytree=0.2, # (default=1.)
reg_alpha=0.1, # (default=0.)
reg_lambda=0.1, # (default=0.)
random_state=random_num, # (default=None)
n_jobs=-1, # (default=-1)
silent=False, # (default=True)
# 'metric'=['auc', 'binary_logloss'],
)
# gbm = lgb.train(
# params,
# train_set=lgb_train,
# valid_sets=lgb_val
# )
grid_search = GridSearchCV(estimator=clf, param_grid=param_grid, verbose=2, cv=3, n_jobs=1, scoring='roc_auc')
grid_search.fit(x_train, y_train)
%%time
print('*' * 80)
y_train_proba = grid_search.predict_proba(x_train)
print(y_train.shape)
print(y_train_proba.shape)
print(y_train_proba[:10])
y_train_pred = (y_train_proba[:, 1]>=0.5).astype(int)
acc_train = accuracy_score(y_train, y_train_pred)
roc_train = roc_auc_score(y_train, y_train_proba[:, 1])
print('acc_train: %.4f \t roc_train: %.4f' % (acc_train, roc_train))
# y_train_pred = grid_search.predict(x_train)
# acc_train = accuracy_score(y_train, y_train_pred)
# roc_train = roc_auc_score(y_train, y_train_proba[:, 1])
# print('acc_train: %.4f \t roc_train: %.4f' % (acc_train, roc_train))
y_val_proba = grid_search.predict_proba(x_val)
print(y_val.shape)
print(y_val_proba.shape)
print(y_val_proba[:10])
y_val_pred = (y_val_proba[:, 1]>=0.5).astype(int)
print(y_val.shape)
print(y_val_pred.shape)
acc_val = accuracy_score(y_val, y_val_pred)
roc_val = roc_auc_score(y_val, y_val_proba[:, 1])
print('acc_val: %.4f \t roc_val: %.4f' % (acc_val, roc_val))
print(grid_search.cv_results_)
print('*' * 60)
# print(grid_search.grid_scores_)  # grid_scores_ was deprecated and removed in newer scikit-learn; use grid_search.cv_results_ (printed above) instead
print(grid_search.best_estimator_)
print(grid_search.best_score_)
print(grid_search.best_params_)
print(grid_search.scorer_)
print('*' * 60)
print(type(grid_search.best_estimator_))
print(dir(grid_search.best_estimator_))
cv_results = pd.DataFrame(grid_search.cv_results_)
display(cv_results)
fe_times = grid_search.best_estimator_.booster_.feature_importance()
fe_name = grid_search.best_estimator_.booster_.feature_name()
print(fe_times)
print(fe_name)
importance_score = pd.DataFrame(data={'feature': fe_name, 'importance': fe_times})
display(importance_score.head())
plt.figure(figsize=(18,60))
# sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
sns.barplot(x="importance", y="feature", data=importance_score.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
importance_score=importance_score.sort_values(by='importance', ascending=False)
display(importance_score['feature'][:20])
for item in importance_score.values:
print('%s\t%s' % (item[1], item[0]))
###Output
_____no_output_____
###Markdown
Predict
###Code
run_name_acc = run_name + '_' + str(int(roc_val*10000)).zfill(4)
print(run_name_acc)
y_test_proba = grid_search.predict_proba(x_test)
print(y_test_proba.shape)
print(y_test_proba[:10])
def save_proba(y_val_proba, y_val, y_test_proba, id_test, file_name):
print(id_test[:5])
if os.path.exists(file_name):
os.remove(file_name)
print('File removed: %s' % file_name)
with h5py.File(file_name) as h:
h.create_dataset('y_val_proba', data=y_val_proba)
h.create_dataset('y_val', data=y_val)
h.create_dataset('y_test_proba', data=y_test_proba)
h.create_dataset('id_test', data=id_test)
print('File saved: %s' % file_name)
def load_proba(file_name):
with h5py.File(file_name, 'r') as h:
y_val_proba = np.array(h['y_val_proba'])
y_val = np.array(h['y_val'])
y_test_proba = np.array(h['y_test_proba'])
id_test = np.array(h['id_test'])
print('File loaded: %s' % file_name)
print(id_test[:5])
return y_val_proba, y_val, y_test_proba, id_test
y_proba_file = os.path.join(model_folder, 'proba_%s.p' % run_name_acc)
save_proba(
y_val_proba[:, 1],
y_val,
y_test_proba[:, 1],
id_test,
y_proba_file
)
y_val_proba_true, y_val, y_test_proba_true, id_test = load_proba(y_proba_file)
print(y_val_proba_true.shape)
print(y_val.shape)
print(y_test_proba_true.shape)
print(len(id_test))
# %%time
submission_csv_file = os.path.join(output_folder, 'pred_%s.csv' % run_name_acc)
print(submission_csv_file)
submission_csv = pd.DataFrame({ 'SK_ID_CURR': id_test , 'TARGET': y_test_proba_true })
submission_csv.to_csv(submission_csv_file, index = False)
display(submission_csv.head())
print('Time cost: %.2f s' % (time.time() - t0))
print('random_num: ', random_num)
print(run_name_acc)
print('Done!')
###Output
_____no_output_____ |
03_pytorch-sm-bert-data-parallel.ipynb | ###Markdown
Imports
###Code
from sagemaker import get_execution_role, Session
from sagemaker.huggingface import HuggingFace
import sagemaker
import logging
###Output
_____no_output_____
###Markdown
Setup logger
###Code
logger = logging.getLogger('__name__')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
logger.info(f'[Using SageMaker: {sagemaker.__version__}]')
###Output
[Using SageMaker: 2.59.5]
###Markdown
Essentials
###Code
session = Session()
role = get_execution_role()
bucket = session.default_bucket()
###Output
_____no_output_____
###Markdown
Create a HuggingFace estimator and start a SageMaker training job
###Code
!pygmentize ./src/train.py
###Output
[34mfrom[39;49;00m [04m[36mtransformers[39;49;00m [34mimport[39;49;00m AutoModelForSequenceClassification, Trainer, TrainingArguments, AutoTokenizer
[34mfrom[39;49;00m [04m[36msklearn[39;49;00m[04m[36m.[39;49;00m[04m[36mmetrics[39;49;00m [34mimport[39;49;00m accuracy_score, precision_recall_fscore_support
[34mfrom[39;49;00m [04m[36mdatasets[39;49;00m [34mimport[39;49;00m load_from_disk
[34mimport[39;49;00m [04m[36margparse[39;49;00m
[34mimport[39;49;00m [04m[36mlogging[39;49;00m
[34mimport[39;49;00m [04m[36mrandom[39;49;00m
[34mimport[39;49;00m [04m[36mtorch[39;49;00m
[34mimport[39;49;00m [04m[36msys[39;49;00m
[34mimport[39;49;00m [04m[36mos[39;49;00m
[34mif[39;49;00m [31m__name__[39;49;00m == [33m'[39;49;00m[33m__main__[39;49;00m[33m'[39;49;00m:
parser = argparse.ArgumentParser()
[37m# Hyperparameters sent by the client are passed as command-line arguments to the script[39;49;00m
parser.add_argument([33m'[39;49;00m[33m--epochs[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mint[39;49;00m, default=[34m10[39;49;00m)
parser.add_argument([33m'[39;49;00m[33m--train_batch_size[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mint[39;49;00m, default=[34m32[39;49;00m)
parser.add_argument([33m'[39;49;00m[33m--eval_batch_size[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mint[39;49;00m, default=[34m32[39;49;00m)
parser.add_argument([33m'[39;49;00m[33m--warmup_steps[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mint[39;49;00m, default=[34m500[39;49;00m)
parser.add_argument([33m'[39;49;00m[33m--model_name[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mstr[39;49;00m)
parser.add_argument([33m'[39;49;00m[33m--learning_rate[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mstr[39;49;00m, default=[34m5e-5[39;49;00m)
[37m# Data, model, and output directories[39;49;00m
parser.add_argument([33m'[39;49;00m[33m--output-data-dir[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mstr[39;49;00m, default=os.environ[[33m'[39;49;00m[33mSM_OUTPUT_DATA_DIR[39;49;00m[33m'[39;49;00m])
parser.add_argument([33m'[39;49;00m[33m--model-dir[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mstr[39;49;00m, default=os.environ[[33m'[39;49;00m[33mSM_MODEL_DIR[39;49;00m[33m'[39;49;00m])
parser.add_argument([33m'[39;49;00m[33m--n_gpus[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mstr[39;49;00m, default=os.environ[[33m'[39;49;00m[33mSM_NUM_GPUS[39;49;00m[33m'[39;49;00m])
parser.add_argument([33m'[39;49;00m[33m--training_dir[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mstr[39;49;00m, default=os.environ[[33m'[39;49;00m[33mSM_CHANNEL_TRAIN[39;49;00m[33m'[39;49;00m])
parser.add_argument([33m'[39;49;00m[33m--test_dir[39;49;00m[33m'[39;49;00m, [36mtype[39;49;00m=[36mstr[39;49;00m, default=os.environ[[33m'[39;49;00m[33mSM_CHANNEL_TEST[39;49;00m[33m'[39;49;00m])
args, _ = parser.parse_known_args()
[37m# Set up logging[39;49;00m
logger = logging.getLogger([31m__name__[39;49;00m)
logging.basicConfig(
level=logging.getLevelName([33m'[39;49;00m[33mINFO[39;49;00m[33m'[39;49;00m),
handlers=[logging.StreamHandler(sys.stdout)],
[36mformat[39;49;00m=[33m"[39;49;00m[33m%(asctime)s[39;49;00m[33m - [39;49;00m[33m%(name)s[39;49;00m[33m - [39;49;00m[33m%(levelname)s[39;49;00m[33m - [39;49;00m[33m%(message)s[39;49;00m[33m"[39;49;00m,
)
[37m# Load train and test datasets[39;49;00m
train_dataset = load_from_disk(args.training_dir, keep_in_memory=[34mTrue[39;49;00m)
test_dataset = load_from_disk(args.test_dir, keep_in_memory=[34mTrue[39;49;00m)
logger.info([33mf[39;49;00m[33m'[39;49;00m[33m[Loaded train_dataset length is: [39;49;00m[33m{[39;49;00m[33mlen(train_dataset)}][39;49;00m[33m'[39;49;00m)
logger.info([33mf[39;49;00m[33m'[39;49;00m[33m[Loaded test_dataset length is: [39;49;00m[33m{[39;49;00m[33mlen(test_dataset)}][39;49;00m[33m'[39;49;00m)
[37m# Compute metrics function for binary classification[39;49;00m
[34mdef[39;49;00m [32mcompute_metrics[39;49;00m(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-[34m1[39;49;00m)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average=[33m'[39;49;00m[33mbinary[39;49;00m[33m'[39;49;00m)
acc = accuracy_score(labels, preds)
[34mreturn[39;49;00m {[33m'[39;49;00m[33maccuracy[39;49;00m[33m'[39;49;00m: acc, [33m'[39;49;00m[33mf1[39;49;00m[33m'[39;49;00m: f1, [33m'[39;49;00m[33mprecision[39;49;00m[33m'[39;49;00m: precision, [33m'[39;49;00m[33mrecall[39;49;00m[33m'[39;49;00m: recall}
[37m# Download model from model hub[39;49;00m
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
[37m# Define training args[39;49;00m
training_args = TrainingArguments(
output_dir=args.model_dir,
num_train_epochs=args.epochs,
per_device_train_batch_size=args.train_batch_size,
per_device_eval_batch_size=args.eval_batch_size,
warmup_steps=args.warmup_steps,
evaluation_strategy=[33m'[39;49;00m[33mepoch[39;49;00m[33m'[39;49;00m,
logging_dir=[33mf[39;49;00m[33m'[39;49;00m[33m{args.output_data_dir}[39;49;00m[33m/logs[39;49;00m[33m'[39;49;00m,
learning_rate=[36mfloat[39;49;00m(args.learning_rate),
)
[37m# Create Trainer instance[39;49;00m
trainer = Trainer(
model=model,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=train_dataset,
eval_dataset=test_dataset,
tokenizer=tokenizer
)
[37m# Train model[39;49;00m
trainer.train()
[37m# Evaluate model[39;49;00m
eval_result = trainer.evaluate(eval_dataset=test_dataset)
[37m# Write evaluation results to a file which can be accessed later in S3 output[39;49;00m
[34mwith[39;49;00m [36mopen[39;49;00m(os.path.join(args.output_data_dir, [33m'[39;49;00m[33meval_results.txt[39;49;00m[33m'[39;49;00m), [33m'[39;49;00m[33mw[39;49;00m[33m'[39;49;00m) [34mas[39;49;00m writer:
[34mfor[39;49;00m key, value [35min[39;49;00m [36msorted[39;49;00m(eval_result.items()):
writer.write([33mf[39;49;00m[33m'[39;49;00m[33m{key}[39;49;00m[33m = [39;49;00m[33m{value}[39;49;00m[33m\n[39;49;00m[33m'[39;49;00m)
[37m# Save model to S3[39;49;00m
trainer.save_model(args.model_dir)
###Markdown
Define hyperparameters
###Code
hyperparameters={'epochs': 3,
'train_batch_size': 32,
'eval_batch_size': 32,
'model_name': 'distilbert-base-uncased'}
###Output
_____no_output_____
###Markdown
Configuration for running training on smdistributed (Data Parallelism)
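(Enabling `smdistributed` data parallelism below tells SageMaker to launch the training script with its data-parallel library, so each GPU processes a shard of every batch and gradients are averaged across workers; with the 2 x ml.p3.16xlarge instances configured later, that amounts to 16 GPUs in total.)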
###Code
distribution = {'smdistributed': {'dataparallel': { 'enabled': True }}}
###Output
_____no_output_____
###Markdown
Define metric definitions
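(SageMaker applies each `Regex` below to the training job's log stream and publishes the captured group under the corresponding `Name`, which is how these train/eval metrics become visible in the console and CloudWatch.)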
###Code
metric_definitions = [
    {"Name": "epoch", "Regex": r"epoch.*=\D*(.*?)$"},
    {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
    {"Name": "train_samples_per_second", "Regex": r"train_samples_per_second.*=\D*(.*?)$"},
    {"Name": "train_accuracy", "Regex": r"train_accuracy.*=\D*(.*?)$"},
    {"Name": "train_loss", "Regex": r"train_loss.*=\D*(.*?)$"},
    {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
    {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
    {"Name": "f1", "Regex": r"f1.*=\D*(.*?)$"}]
###Output
_____no_output_____
###Markdown
Instance configurations
###Code
instance_type = 'ml.p3.16xlarge'
instance_count = 2
volume_size = 200
###Output
_____no_output_____
###Markdown
Create HuggingFace estimator
###Code
huggingface_estimator = HuggingFace(entry_point='train.py',
source_dir='./src',
metric_definitions=metric_definitions,
instance_type=instance_type,
instance_count=instance_count,
volume_size=volume_size,
role=role,
transformers_version='4.6',
pytorch_version='1.7',
py_version='py36',
distribution= distribution,
hyperparameters = hyperparameters)
###Output
_____no_output_____
###Markdown
Fit model
###Code
training_input_path = f's3://{bucket}/imdb/train'
test_input_path = f's3://{bucket}/imdb/test'
%%time
huggingface_estimator.fit({'train': training_input_path, 'test': test_input_path}, logs=False)
###Output
2021-10-14 19:54:52 Starting - Starting the training job.
2021-10-14 19:54:59 Starting - Launching requested ML instances........................
2021-10-14 19:57:04 Starting - Preparing the instances for training...........................
2021-10-14 19:59:26 Downloading - Downloading input data.......
2021-10-14 20:00:08 Training - Downloading the training image.................
2021-10-14 20:01:35 Training - Training image download completed. Training in progress..................................
2021-10-14 20:04:29 Uploading - Uploading generated training model........
2021-10-14 20:05:15 Completed - Training job completed
CPU times: user 549 ms, sys: 89.1 ms, total: 638 ms
Wall time: 10min 24s
###Markdown
Retrieve estimator parameters
###Code
logger.info(f'S3 uri where the trained model is located: {huggingface_estimator.model_data}')
logger.info(f'Latest training job name for this estimator: {huggingface_estimator.latest_training_job.name}')
###Output
Latest training job name for this estimator: huggingface-pytorch-training-2021-10-14-19-54-52-044
###Markdown
Deploying the endpoint
###Code
predictor = huggingface_estimator.deploy(1, 'ml.g4dn.xlarge')
###Output
----------------!
###Markdown
Run inference using the deployed sentiment classifier model
###Code
sentiment_input= {"inputs": "I love using the new Inference DLC."}
response = predictor.predict(sentiment_input)
response
###Output
_____no_output_____ |
supplementary-material/Optional-Assignments/Introduction to Financial Concepts Using Python/Bonus_Assignment_Chapter_2.ipynb | ###Markdown
Project Proposals and Cash Flow Projections
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
In this notebook, imagine you are the CEO of a New York City-based transportation company. You will learn the basics of financial decision-making and project financing in order to figure out what types of projects would be most beneficial for your team to undertake. Your project managers are evaluating the projected cash flows for 2 proposals. Project 1 provides higher short-term cash flows, but Project 2 becomes more profitable over time. The cash flow projections for both projects are as follows, listed in 1,000s of dollars: Create variables cf_project_1 and cf_project_2 and set them equal to a numpy array of the projected cash flows. Then, scale these projected cash flows 1,000x by multiplying each array by 1,000.
###Code
import numpy as np
cf_project_1 = np.array([-1000, 200, 250, 300, 350, 400, 450, 500, 550, 600])
cf_project_2 = np.array([-1000, 150, 225, 300, 375, 425, 500, 575, 600, 625])
cf_project1 = cf_project_1 * 1000
cf_project2 = cf_project_2 * 1000
###Output
_____no_output_____
###Markdown
Internal Rate of Return Now that you have the cash flow projections ready to go for each project, you want to compare the internal rate of return (IRR) of each project to help you decide which project would be most beneficial for your company in terms of yield (rate of return). In this exercise, you will calculate the internal rate of return for each project using np.irr(values). Set the internal rate of return for Project 1 equal to irr_project1 and Project 2 equal to irr_project2.
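(For reference: the IRR is the discount rate $r$ at which the project's net present value is zero, i.e. the $r$ solving $\sum_{t=0}^{N} CF_t/(1+r)^t = 0$; `np.irr` simply finds this root numerically.)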
###Code
# Calculate the internal rate of return for Project 1
irr_project1 = np.irr(cf_project1)
print("Project 1 IRR: " + str(round(100*irr_project1, 2)) + "%")
# Calculate the internal rate of return for Project 2
irr_project2 = np.irr(cf_project2)
print("Project 2 IRR: " + str(round(100*irr_project2, 2)) + "%")
###Output
Project 2 IRR: 28.78%
###Markdown
If you were making the decision solely based on internal rate of return, which project would you be more interested in (assuming the IRR is greater than your required rate of return)?
###Code
print("Project 2")
###Output
Project 2
###Markdown
Debt and Equity Financing using WACC Imagine your company has outstanding debt and financing costs, which you will have to adjust for. You will use the WACC as your discount rate. Assume you take out a $1,000,000 loan to finance the project, which will be your company's only outstanding debt. This loan will represent 50% of your company's total financing of $2,000,000. The remaining funding comes from the market value of equity. Set the market value of your company's debt, mval_debt, equal to the amount of the loan you will be issuing to finance the project.
###Code
mval_debt = 1000000
###Output
_____no_output_____
###Markdown
Set the market value of your company's equity, mval_equity, equal to the remaining amount of funding after the loan.
###Code
mval_equity = 1000000
###Output
_____no_output_____
###Markdown
Calculate the total market value of your company's financing, mval_total, by taking the sum of the debt and equity.
###Code
mval_total = mval_equity + mval_debt
###Output
_____no_output_____
###Markdown
Calculate and print the proportion of your company's financing from debt (percent_debt) and from equity (percent_equity).
###Code
percent_debt = mval_debt / mval_total
print("Debt Financing: " + str(round(100*percent_debt, 2)) + "%")
percent_equity = mval_equity / mval_total
print("Equity Financing: " + str(round(100*percent_equity, 2)) + "%")
###Output
Debt Financing: 50.0%
Equity Financing: 50.0%
###Markdown
Calculating WACC The Weighted Average Cost of Capital, or WACC, is essential for our NPV calculation of projects, among other things. Now that you have determined the proportion of both equity and debt financing, you will need to set up variables for the cost of financing via both debt and equity in order to estimate your WACC. The **cost of equity** financing can be estimated as the return on equity of similar companies. Calculating the return on equity is a simple accounting exercise, but all you need to know is that, essentially, investors will require a rate of return that is close to what could be earned by a similar investment. Assume a cost of equity of 18% based on similar companies and assign it to **cost_equity**:
###Code
cost_equity = 0.18
###Output
_____no_output_____
###Markdown
The **cost of debt** financing can be estimated as the amount you will have to pay on a new loan. This can be estimated by looking at the interest rates of loans of similar sizes to similar companies, or could be based on previous loans your company may already have been issued. The bank is willing to lend at an interest rate of 12%, which you should assign to **cost_debt**:
###Code
cost_debt = 0.12
###Output
_____no_output_____
###Markdown
Finally, assume a corporate tax rate of 35% and that your debt financing is tax-deductible. Assign to **tax_rate**.
###Code
tax_rate = 0.35
###Output
_____no_output_____
###Markdown
Calculate and print **wacc** by using the formula: WACC = (% equity * cost of equity) + (% debt * cost of debt) * (1 - tax rate)
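(As a quick sanity check with the numbers above: 0.5 * 0.18 + 0.5 * 0.12 * (1 - 0.35) = 0.09 + 0.039 = 0.129, i.e. roughly 12.9%, which should match the value printed below.)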
###Code
wacc = (percent_equity*cost_equity) + (percent_debt*cost_debt) * (1 - tax_rate)
print("WACC: " + str(round(100*wacc, 2)) + "%")
###Output
WACC: 12.9%
###Markdown
Comparing Project NPV Companies use their WACC as the discount rate when calculating the net present value of potential projects. In the same way that you discounted values by inflation in the previous chapter to account for costs over time, companies adjust the cash flows of potential projects by their cost of financing (the WACC) to account for their investors' required rate of return based on market conditions. Now that you have calculated the **wacc**, you can determine the net present value (NPV) of the project's cash flows. Numpy has an npv function **```np.npv()```** that uses the **wacc** and an **array** of cash flows to calculate NPV. Find and print the NPV of **cf_project1** and **cf_project2**.
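(Here the NPV is just the discounted sum of the projected cash flows, $NPV = \sum_{t=0}^{N} CF_t/(1+r)^t$ with $r$ set to the WACC and $t=0$ being the undiscounted initial investment; `np.npv` evaluates this sum directly.)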
###Code
# Calculate the net present value for Project 1
npv_project1 = np.npv(wacc, cf_project1)
print("Project 1 NPV: " + str(round(npv_project1, 2)))
# Calculate the net present value for Project 2
npv_project2 = np.npv(wacc, cf_project2)
print("Project 2 NPV: " + str(round(npv_project2, 2)))
###Output
Project 1 NPV: 302744.98
Project 2 NPV: 231228.39
###Markdown
Two Projects with Different Lifespans The board of the company has decided to go in a different direction, involving slightly shorter-term projects and lower initial investments. Your project managers have come up with two new ideas, and projected the cash flows for each of the proposals. Project 1 has a lifespan of 8 years, but Project 2 only has a lifespan of 7 years. Project 1 requires an initial investment of $700,000, but Project 2 only requires $400,000. The cash flow projections for both projects are as follows (in 1,000s of dollars). Create new numpy arrays **cf_project_1** and **cf_project_2** for the cash flows above, then scale them up 1,000x.
###Code
# Create a numpy array of cash flows for Project 1
cf_project_1 = np.array([-700, 100, 150, 200, 250, 300, 350, 400])
# Create a numpy array of cash flows for Project 2
cf_project_2 = np.array([-400, 50, 100, 150, 200, 250, 300])
# Scale the original objects by 1000x
cf_project1 = cf_project_1 * 1000
cf_project2 = cf_project_2 * 1000
###Output
_____no_output_____
###Markdown
Calculating IRR and NPV With Different Project Lifespans Using the same **wacc** that you calculated earlier, you can calculate and compare the IRRs and NPVs of each project. While the IRR remains relatively comparable across projects, the NPV, on the other hand, will be much more difficult to compare given the additional year required for Project 1. Luckily, in the next exercise, we will introduce another method to compare the NPVs of the projects, but we will first need to compute the NPVs as before. Calculate **irr_project1** and **irr_project2** by using the **`np.irr()`** function from earlier and the new cash flow arrays in **cf_project1** and **cf_project2**. Print these values as percents and round to 2 decimals.
###Code
# Calculate the IRR for Project 1
irr_project1 = np.irr(cf_project1)
print("Project 1 IRR: " + str(round(100*irr_project1, 2)) + "%")
# Calculate the IRR for Project 2
irr_project2 = np.irr(cf_project2)
print("Project 2 IRR: " + str(round(100*irr_project2, 2)) + "%")
###Output
Project 1 IRR: 22.94%
Project 2 IRR: 26.89%
###Markdown
Now calculate **npv_project1** and **npv_project2** by using the **`np.npv()`** function from earlier and the new cash flow arrays in **cf_project1** and **cf_project2**. Print these values rounded to 2 decimals.
###Code
# Calculate the NPV for Project 1
npv_project1 = np.npv(wacc, cf_project1)
print("Project 1 NPV: " + str(round(npv_project1, 2)))
# Calculate the NPV for Project 2
npv_project2 = np.npv(wacc, cf_project2)
print("Project 2 NPV: " + str(round(npv_project2, 2)))
###Output
Project 1 NPV: 302744.98
Project 2 NPV: 231228.39
###Markdown
Equivalent Annual Annuity Approach Since the net present values of each project are not directly comparable given the different lifespans of each project, you will have to consider a different approach. The **equivalent annual annuity (EAA)** approach allows us to compare two projects by essentially assuming that each project is an investment generating a flat interest rate each year (an annuity), and calculating the annual payment you would receive from each project, discounted to present value.You can compute the EAA of each project using the **np.pmt(rate, nper, pv, fv)** function in numpy. Use the same weighted average cost of capital, **wacc**, and the net present values for projects 1 and 2, **npv_project1** and **npv_project2**. Calculate and print **eaa_project1** and **eaa_project2**, rounded to 2 decimals.
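(In other words, `np.pmt` essentially solves the standard annuity relation $NPV = A \cdot \frac{1-(1+r)^{-N}}{r}$ for the flat yearly payment $A$ at rate $r$ over $N$ years, so each project's NPV is spread over its own lifespan and the two projects can be compared on a per-year basis.)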
###Code
eaa_project1 = np.pmt(rate=wacc, nper=8, pv=-npv_project1, fv=0)
print("Project 1 EAA: " + str(round(eaa_project1, 2)))
eaa_project2 = np.pmt(rate=wacc, nper=7, pv=-npv_project2, fv=0)
print("Project 2 EAA: " + str(round(eaa_project2, 2)))
###Output
Project 1 EAA: 62872.2
Project 2 EAA: 52120.61
###Markdown
If you were making the decision solely based on the equivalent annual annuity analysis, which project would you be more interested in?
###Code
# Your answer here:
print('Project 1!')
###Output
Project 1!
|
loss_functions/Earth_Mover_Distance.ipynb | ###Markdown
Earth Mover Distance The current mlpack implementation is correct. This notebook implements reduction. Imports and installation of mlpack
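As the cells below show, the loss computed here is simply $L = -\sum_i y_i x_i$ (element-wise product of target and input, summed), whose gradient with respect to the input is $-y$; the proposed 'mean' reduction divides both the loss and the gradient by the number of elements.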
###Code
%%capture
!sudo apt-get install libmlpack-dev
import torch
import torch.nn as nn
###Output
_____no_output_____
###Markdown
mlpack CURRENT IMPLEMENTATION
###Code
%%capture
%%writefile test.cpp
#include <iostream>
#include <armadillo>
using namespace std;
using namespace arma;
int main()
{
// Constructor
arma::mat x,y;
arma::mat weight;
x << -0.0494 << 1.6028 << 0.9639 << endr
<< -1.1958 << 0.0737 << 0.9648 << endr
<< -1.0486 << -0.7091 << 0.0745 << endr
<< -0.2121 << 0.8612 << 0.5924 << endr;
y << 0.4316 << 0.5106 << 0.7059 << endr
<< 0.0164 << 0.9255 << -0.8288 << endr
<< -0.4478 << 0.5571 << -0.0231 << endr
<< 1.1452 << 0.0864 << -1.0526 << endr;
// Forward
double loss = -arma::accu(y % x);
// Backward
arma::mat output;
output = -y;
// Display
cout << "------------------------------------------------------------------" << endl;
cout << "USER-PROVIDED MATRICES : " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "Input shape : "<< x.n_rows << " " << x.n_cols << endl;
cout << "Input : " << endl << x << endl;
cout << "Target shape : "<< y.n_rows << " " << y.n_cols << endl;
cout << "Target : " << endl << y << endl;
cout << "FORWARD : " << endl;
cout << "Loss : \n" << loss << '\n';
cout << "BACKWARD : " << endl;
cout << "Output shape : "<< output.n_rows << " " << output.n_cols << endl;
cout << "Output (sum) : " << endl << output << endl;
cout << "Sum of all values in this matrix : " << arma::as_scalar(arma::accu(output)) << endl;
return 0;
}
%%script bash
g++ test.cpp -o test -larmadillo && ./test
###Output
------------------------------------------------------------------
USER-PROVIDED MATRICES :
------------------------------------------------------------------
Input shape : 4 3
Input :
-0.0494 1.6028 0.9639
-1.1958 0.0737 0.9648
-1.0486 -0.7091 0.0745
-0.2121 0.8612 0.5924
Target shape : 4 3
Target :
0.4316 0.5106 0.7059
0.0164 0.9255 -0.8288
-0.4478 0.5571 -0.0231
1.1452 0.0864 -1.0526
FORWARD :
Loss :
-0.00721068
BACKWARD :
Output shape : 4 3
Output (sum) :
-0.4316 -0.5106 -0.7059
-0.0164 -0.9255 0.8288
0.4478 -0.5571 0.0231
-1.1452 -0.0864 1.0526
Sum of all values in this matrix : -2.0264
###Markdown
NEW IMPLEMENTATION
###Code
%%capture
%%writefile test.cpp
#include <iostream>
#include <armadillo>
using namespace std;
using namespace arma;
int main()
{
// Constructor
arma::mat x,y;
arma::mat weight;
x << -0.0494 << 1.6028 << 0.9639 << endr
<< -1.1958 << 0.0737 << 0.9648 << endr
<< -1.0486 << -0.7091 << 0.0745 << endr
<< -0.2121 << 0.8612 << 0.5924 << endr;
y << 0.4316 << 0.5106 << 0.7059 << endr
<< 0.0164 << 0.9255 << -0.8288 << endr
<< -0.4478 << 0.5571 << -0.0231 << endr
<< 1.1452 << 0.0864 << -1.0526 << endr;
// Forward
arma::mat loss_none = -(y % x);
double loss_sum = arma::accu(loss_none);
double loss_mean = loss_sum / x.n_elem;
// Backward
arma::mat output;
output = -y;
// Display
cout << "------------------------------------------------------------------" << endl;
cout << "USER-PROVIDED MATRICES : " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "Input shape : "<< x.n_rows << " " << x.n_cols << endl;
cout << "Input : " << endl << x << endl;
cout << "Target shape : "<< y.n_rows << " " << y.n_cols << endl;
cout << "Target : " << endl << y << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "SUM " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "FORWARD : " << endl;
cout << "Loss : \n" << loss_none << '\n';
cout << "Loss (sum):\n" << loss_sum << '\n';
cout << "BACKWARD : " << endl;
cout << "Output shape : "<< output.n_rows << " " << output.n_cols << endl;
cout << "Output (sum) : " << endl << output << endl;
cout << "Sum of all values in this matrix : " << arma::as_scalar(arma::accu(output)) << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "MEAN " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "FORWARD : " << endl;
cout << "Loss (mean):\n" << loss_mean << '\n';
cout << "BACKWARD : " << endl;
cout << "Output shape : "<< output.n_rows << " " << output.n_cols << endl;
cout << "Output (mean) : " << endl << output / x.n_elem << endl;
cout << "Sum of all values in this matrix : " << arma::as_scalar(arma::accu(output / x.n_elem)) << endl;
cout << "------------------------------------------------------------------" << endl;
return 0;
}
%%script bash
g++ test.cpp -o test -larmadillo && ./test
###Output
------------------------------------------------------------------
USER-PROVIDED MATRICES :
------------------------------------------------------------------
Input shape : 4 3
Input :
-0.0494 1.6028 0.9639
-1.1958 0.0737 0.9648
-1.0486 -0.7091 0.0745
-0.2121 0.8612 0.5924
Target shape : 4 3
Target :
0.4316 0.5106 0.7059
0.0164 0.9255 -0.8288
-0.4478 0.5571 -0.0231
1.1452 0.0864 -1.0526
------------------------------------------------------------------
SUM
------------------------------------------------------------------
FORWARD :
Loss :
0.0213 -0.8184 -0.6804
0.0196 -0.0682 0.7996
-0.4696 0.3950 0.0017
0.2429 -0.0744 0.6236
Loss (sum):
-0.00721068
BACKWARD :
Output shape : 4 3
Output (sum) :
-0.4316 -0.5106 -0.7059
-0.0164 -0.9255 0.8288
0.4478 -0.5571 0.0231
-1.1452 -0.0864 1.0526
Sum of all values in this matrix : -2.0264
------------------------------------------------------------------
MEAN
------------------------------------------------------------------
FORWARD :
Loss (mean):
-0.00060089
BACKWARD :
Output shape : 4 3
Output (mean) :
-0.0360 -0.0426 -0.0588
-0.0014 -0.0771 0.0691
0.0373 -0.0464 0.0019
-0.0954 -0.0072 0.0877
Sum of all values in this matrix : -0.168867
------------------------------------------------------------------
|
exercises/05.ipynb | ###Markdown
OverfittingWe explore the overfitting effect by using MINST dataset as an example. Data NoiseWe add some white noise to the existing 784 dimensions. Further, we also add 784 all-zeros dimensions. Finally, we train the model and observe the accuracy.
###Code
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers, regularizers
from tensorflow.keras.datasets import mnist, imdb
###Output
_____no_output_____
###Markdown
First, we generate two modified training sets: one by appending "white noise" (i.e., random pixel values) to each image, and one by appending zeros to each image.
###Code
(train_images, train_labels), _ = mnist.load_data()
train_images = train_images.reshape((60000, 28 * 28)) # Flattens the images.
train_images = train_images.astype("float32") / 255 # Normalizes pixel values.
train_images_with_noise_channels = np.concatenate( # Appends 784 "noise" pixels to the end of each image.
( train_images, np.random.random((len(train_images), 28 * 28)) ), axis=1)
train_images_with_zeros_channels = np.concatenate( # Appends 784 zeros to the end of each image.
( train_images, np.zeros((len(train_images), 28 * 28)) ), axis=1)
###Output
_____no_output_____
###Markdown
Next, we train our model on both datasets.
###Code
def get_model():
model = keras.Sequential([
layers.Dense(512, activation="relu"),
layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
return model
# Model: Noise.
model = get_model()
history_noise = model.fit(
train_images_with_noise_channels, train_labels,
epochs=10,
batch_size=128,
validation_split=0.2,
)
# Model: Zeros.
model = get_model()
history_zeros = model.fit(
train_images_with_zeros_channels, train_labels,
epochs=10,
batch_size=128,
validation_split=0.2,
)
val_acc_noise = history_noise.history["val_accuracy"]
val_acc_zeros = history_zeros.history["val_accuracy"]
epochs = range(1, 11)
plt.plot(epochs, val_acc_noise, "b-", label="Validation accuracy with noise channels")
plt.plot(epochs, val_acc_zeros, "b--", label="Validation accuracy with zeros channels")
plt.title("Effect of noise channels on validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
###Output
_____no_output_____
###Markdown
Regularization We will (again) use the IMDB dataset to explore different techniques to prevent overfitting and underfitting in our model. The applied set of techniques is often referred to as **regularization**. As the previous plot shows, validation accuracy is notably lower when the noise channels are included!
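As a rough intuition for the two techniques used below: the L2 kernel regularizer adds a penalty of $\lambda \sum_j w_j^2$ to the loss for each layer (here $\lambda = 0.002$), discouraging large weights, while `Dropout(0.5)` randomly zeroes half of a layer's activations during training so the network cannot rely too heavily on any single unit.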
###Code
# def vectorize_sequences(sequences, dimension=10000):
# results = np.zeros((len(sequences), dimension))
# for i, sequence in enumerate(sequences):
# results[i, sequence] = 1 # Hm, 'sequence' is array of indices. Is NumPy really this smart, to automatically pick-up indices and assign them the value?? Yes, check below!
# return results
def vectorize_sequences(sequences, dimension = 10000):
result = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
for j in sequence:
result[i, j] = 1
return result
# Nested indexing with NumPy.
array = np.array([[1, 2, 3], [4, 5, 6]])
array[1, [0, 2]] = 10
array
(train_data, train_labels), _ = imdb.load_data(num_words=10000)
train_data = vectorize_sequences(train_data)
train_labels = np.array(train_labels, dtype = "float32").reshape((-1, 1))
# Adding regularization to Layers as well as Dropout.
model = keras.Sequential([
layers.Dense(16,
kernel_regularizer=regularizers.l2(0.002),
activation="relu"),
layers.Dropout(0.5),
layers.Dense(16,
kernel_regularizer=regularizers.l2(0.002),
activation="relu"),
layers.Dropout(0.5),
layers.Dense(1,
kernel_regularizer=regularizers.l2(0.002),
activation="sigmoid")
])
model.compile(optimizer = "rmsprop",
loss = "binary_crossentropy",
metrics = ["accuracy"])
history_l2_reg = model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_split=0.4)
###Output
Epoch 1/20
30/30 [==============================] - 2s 43ms/step - loss: 0.6834 - accuracy: 0.6517 - val_loss: 0.5510 - val_accuracy: 0.8573
Epoch 2/20
30/30 [==============================] - 0s 15ms/step - loss: 0.5529 - accuracy: 0.7794 - val_loss: 0.4691 - val_accuracy: 0.8757
Epoch 3/20
30/30 [==============================] - 0s 15ms/step - loss: 0.4866 - accuracy: 0.8232 - val_loss: 0.4111 - val_accuracy: 0.8777
Epoch 4/20
30/30 [==============================] - 0s 14ms/step - loss: 0.4414 - accuracy: 0.8555 - val_loss: 0.3862 - val_accuracy: 0.8850
Epoch 5/20
30/30 [==============================] - 0s 16ms/step - loss: 0.4080 - accuracy: 0.8787 - val_loss: 0.3728 - val_accuracy: 0.8807
Epoch 6/20
30/30 [==============================] - 0s 16ms/step - loss: 0.3800 - accuracy: 0.8935 - val_loss: 0.3586 - val_accuracy: 0.8892
Epoch 7/20
30/30 [==============================] - 0s 15ms/step - loss: 0.3617 - accuracy: 0.9040 - val_loss: 0.3754 - val_accuracy: 0.8801
Epoch 8/20
30/30 [==============================] - 0s 14ms/step - loss: 0.3543 - accuracy: 0.9087 - val_loss: 0.3657 - val_accuracy: 0.8870
Epoch 9/20
30/30 [==============================] - 0s 14ms/step - loss: 0.3347 - accuracy: 0.9197 - val_loss: 0.3850 - val_accuracy: 0.8790
Epoch 10/20
30/30 [==============================] - 0s 16ms/step - loss: 0.3246 - accuracy: 0.9261 - val_loss: 0.4036 - val_accuracy: 0.8748
Epoch 11/20
30/30 [==============================] - 0s 14ms/step - loss: 0.3176 - accuracy: 0.9279 - val_loss: 0.3812 - val_accuracy: 0.8817
Epoch 12/20
30/30 [==============================] - 0s 14ms/step - loss: 0.3087 - accuracy: 0.9297 - val_loss: 0.3862 - val_accuracy: 0.8837
Epoch 13/20
30/30 [==============================] - 0s 14ms/step - loss: 0.2979 - accuracy: 0.9367 - val_loss: 0.3836 - val_accuracy: 0.8852
Epoch 14/20
30/30 [==============================] - 0s 14ms/step - loss: 0.2974 - accuracy: 0.9348 - val_loss: 0.3954 - val_accuracy: 0.8847
Epoch 15/20
30/30 [==============================] - 0s 14ms/step - loss: 0.2923 - accuracy: 0.9386 - val_loss: 0.3894 - val_accuracy: 0.8823
Epoch 16/20
30/30 [==============================] - 0s 14ms/step - loss: 0.2931 - accuracy: 0.9364 - val_loss: 0.4031 - val_accuracy: 0.8815
Epoch 17/20
30/30 [==============================] - 0s 14ms/step - loss: 0.2858 - accuracy: 0.9381 - val_loss: 0.3968 - val_accuracy: 0.8823
Epoch 18/20
30/30 [==============================] - 0s 14ms/step - loss: 0.2769 - accuracy: 0.9439 - val_loss: 0.4101 - val_accuracy: 0.8829
Epoch 19/20
30/30 [==============================] - 0s 14ms/step - loss: 0.2756 - accuracy: 0.9417 - val_loss: 0.4112 - val_accuracy: 0.8743
Epoch 20/20
30/30 [==============================] - 0s 14ms/step - loss: 0.2764 - accuracy: 0.9435 - val_loss: 0.4165 - val_accuracy: 0.8807
|
SceneClassification2017/5. Predict_test_a-feature_extract.ipynb | ###Markdown
5. Predict_test_a-feature_extract**TensorBoard**- Run in a terminal: tensorboard --logdir=./log- Open in a browser: http://127.0.0.1:6006
###Code
import time
import os
import pandas as pd
project_name = 'SceneClassification'
step_name = 'Predict_test_a-feature_extract'
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
run_name = project_name + '_' + step_name + '_' + time_str
print('run_name: ' + run_name)
cwd = os.getcwd()
model_path = os.path.join(cwd, 'model')
print('model_path: ' + model_path)
###Output
run_name: SceneClassification_Predict_test_a-feature_extract_20171028_122246
model_path: E:\SceneClassification\model
###Markdown
Import pkg
###Code
import numpy as np
import pandas as pd
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler, TensorBoard
# import zipfile
import os
import zipfile
import math
import time
from IPython.display import display
import pdb
import json
from PIL import Image
import glob
import pickle
###Output
_____no_output_____
###Markdown
Load model
###Code
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
# from keras.applications.resnet50 import ResNet50
# from keras.applications.resnet50 import preprocess_input, decode_predictions
from keras.applications.inception_v3 import InceptionV3
%%time
model = load_model('./model/SceneClassification_Train_20171028_111910_7596.h5')
###Output
Wall time: 1min 55s
###Markdown
Predict validation- Load image- Resize image- Predict- Get top 1 or 3 or 5- Calculate score Extract zip file
###Code
input_path = './input'
datasetName = 'test_a'
date = '20170922'
zip_path = input_path + '/ai_challenger_scene_{0}_{1}.zip'.format(datasetName, date)
extract_path = input_path + '/ai_challenger_scene_{0}_{1}'.format(datasetName, date)
image_path = extract_path + '/scene_{0}_images_{1}'.format(datasetName, date)
scene_classes_path = extract_path + '/scene_classes.csv'
scene_annotations_path = extract_path + '/scene_{0}_annotations_{1}.json'.format(datasetName, date)
print(input_path)
print(zip_path)
print(extract_path)
print(image_path)
print(scene_classes_path)
print(scene_annotations_path)
if not os.path.isdir(extract_path):
with zipfile.ZipFile(zip_path) as file:
for name in file.namelist():
file.extract(name, input_path)
test_images = os.listdir(os.path.join(cwd, 'input', 'data_test_a', 'test'))
print(test_images[0:10])
###Output
['00002ff812f48a3df27c321d517a6300ed8da0c3.jpg', '00049a860dca2af378faeb0ee6f435c6063818ef.jpg', '0011a9c9216c3763ffc33641a8ffc975127dc404.jpg', '0045a44cacc7bc9826db9b54d2dcd70b810250f9.jpg', '004b6823145471c6a4ce292e864909fde2d04969.jpg', '0056e4d54eee781117c9d407d03ebf7192126b1f.jpg', '005763f88b25b18ae524b25afcce960403665383.jpg', '005b5444df96e3a155f2a73a8dccc0267e118413.jpg', '005c6ba205a246d0d3c8f73adfd4398b8e483962.jpg', '005de85662d754f98a1476a37b189902800ace91.jpg']
###Markdown
Load features
###Code
%%time
import h5py
import numpy as np
from sklearn.utils import shuffle
np.random.seed(2017)
x_train = []
y_train = {}
x_val = []
y_val = {}
x_test = []
cwd = os.getcwd()
feature_cgg16 = os.path.join(cwd, 'model', 'feature_VGG16_{}.h5'.format(171023))
feature_cgg19 = os.path.join(cwd, 'model', 'feature_VGG19_{}.h5'.format(171023))
feature_resnet50 = os.path.join(cwd, 'model', 'feature_ResNet50_{}.h5'.format(171023))
feature_mobilenet = os.path.join(cwd, 'model', 'feature_MobileNet_{}.h5'.format(171023))
feature_xception = os.path.join(cwd, 'model', 'feature_Xception_{}.h5'.format(171023))
feature_inception = os.path.join(cwd, 'model', 'feature_InceptionV3_{}.h5'.format(171023))
for filename in [feature_cgg16, feature_cgg19, feature_resnet50, feature_mobilenet, feature_xception, feature_inception]:
with h5py.File(filename, 'r') as h:
x_train.append(np.array(h['train']))
y_train = np.array(h['train_label'])
x_val.append(np.array(h['val']))
y_val = np.array(h['val_label'])
x_test.append(np.array(h['test']))
# print(x_train[0].shape)
x_train = np.concatenate(x_train, axis=-1)
# y_train = np.concatenate(y_train, axis=0)
x_val = np.concatenate(x_val, axis=-1)
# y_val = np.concatenate(y_val, axis=0)
x_test = np.concatenate(x_test, axis=-1)
print(x_train.shape)
print(x_train.shape[1:])
print(len(y_train))
print(x_val.shape)
print(len(y_val))
print(x_test.shape)
###Output
(53879, 8192)
(8192,)
53879
(7120, 8192)
7120
(7040, 8192)
Wall time: 8.15 s
###Markdown
Preview "scene_classes.csv"
###Code
scene_classes = pd.read_csv(scene_classes_path, header=None)
display(scene_classes.head())
def get_scene_name(lable_number, scene_classes_path):
scene_classes = pd.read_csv(scene_classes_path, header=None)
return scene_classes.loc[lable_number, 2]
print(get_scene_name(0, scene_classes_path))
###Output
airport_terminal
###Markdown
Preview image
###Code
def process_image(image_path, fileName):
box = (224, 224)
img_path = image_path + '/' + fileName
img = Image.open(img_path)
    img1 = img.resize(box, Image.ANTIALIAS)  # returns a resized copy (PIL's resize is not in-place)
imgData = np.asarray(img1)
imgData = imgData.astype("float32")
imgData = imgData/255.0
x = np.expand_dims(imgData, axis=0)
return x
print(image_path)
test_img = process_image(image_path, '00a58de1e260033ed972a7e322a2d8fd315cece6.jpg')
print(test_img.shape)
# print(x)
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.imshow(test_img[0])
def decode_predictions(pred, top=3, isPreview=False):
top_indices = pred.argsort()[-top:][::-1]
if not isPreview:
return top_indices
results = []
for i in top_indices:
result = (i, pred[i])
results.append(result)
return results
pred = np.array([3, -1, 2, 7, -2, -1, 0, 6, -9])
print(decode_predictions(pred))
print(decode_predictions(pred, top=5))
print(decode_predictions(pred, top=8, isPreview=True))
data_val_path = os.path.join(cwd, 'input', 'data_validation')
data_test_path = os.path.join(cwd, 'input', 'data_test_a')
gen = ImageDataGenerator()
# gen = ImageDataGenerator(zoom_range = 0.1,
# height_shift_range = 0.1,
# width_shift_range = 0.1,
# rotation_range = 10)
val_generator = gen.flow_from_directory(data_val_path, (224, 224), shuffle=False, batch_size=1)
test_generator = gen.flow_from_directory(data_test_path, (224, 224), shuffle=False, batch_size=1)
print(len(val_generator.filenames))
print(val_generator.filenames[0:5])
print(len(test_generator.filenames))
print(test_generator.filenames[0:5])
preds = model.predict(x_val)
print(preds.shape)
print(preds[0])
print(decode_predictions(preds[0]))
print(decode_predictions(preds[0], top=5, isPreview=True))
label_id = decode_predictions(preds[0])[0]
print("label_id:{0} label_text:{1}".format(label_id, get_scene_name(label_id, scene_classes_path)))
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
print(y_train.shape)
print(y_val.shape)
final_loss, final_acc = model.evaluate(x_val, y_val, verbose=0)
print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
%%time
results = []
count = len(val_generator.filenames)
# count = 10 # For test
print('Image amount:{}'.format(count))
for i, file in enumerate(val_generator.filenames):
file = file[-44:]
# print(i)
# print(file)
labels = decode_predictions(preds[i])
result = {}
result['label_id'] = labels.tolist()
result['image_id'] = file
results.append(result)
count = count -1
if count <= 0:
break
# print(results)
submit_file = './output' + '/submit' + time.strftime("%Y%m%d_%H%M%S", time.localtime()) + '.json'
print(submit_file)
with open(submit_file, 'w') as f:
json.dump(results, f)
result_amount = len(results)
print('Image amount:{0}, result amount:{1}'.format(len(val_generator.filenames), result_amount))
%run ./scene_classification_eval/scene_eval.py --submit ./scene_classification_eval/submit.json --ref ./scene_classification_eval/ref.json
# %%time
%run ./scene_classification_eval/scene_eval.py --submit ./output/submit20171028_132121.json --ref ./output/scene_validation_annotations_20170908.json
###Output
Evaluation time of your result: 5.656765 s
{'error': [], 'score': '0.9096910112359551', 'warning': []}
|
nbs/03c_jsd_cross_entropy.ipynb | ###Markdown
Jensen-Shannon Divergence & Cross-Entropy Loss
###Code
import timm
import torch
import torch.nn.functional as F
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.loss import JsdCrossEntropy
from timm.data.mixup import mixup_target
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Let's create an example of the `output` of a model, and our `labels`. Note that we have 3 output predictions, but only 1 label.
###Code
output = F.one_hot(torch.tensor([0,9,0])).float()
labels=torch.tensor([0])
###Output
_____no_output_____
###Markdown
If we set label `smoothing` and `alpha` to 0, we get back the regular `cross_entropy` loss, as long as we look only at the first element of our output and labels.
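For reference, the loss that `JsdCrossEntropy` implements follows the AugMix formulation (take the details below as a reading of that paper rather than a line-by-line description of the timm code): a cross-entropy term on the first ("clean") split plus an `alpha`-weighted Jensen-Shannon consistency term across the splits,

$$
\mathcal{L} \;=\; \mathrm{CE}\big(p_{\text{clean}},\, y\big) \;+\; \alpha \,\mathrm{JS}\big(p_{\text{clean}}, p_{\text{aug1}}, p_{\text{aug2}}\big),
\qquad
\mathrm{JS}(p_1,\dots,p_k) \;=\; \frac{1}{k}\sum_{i=1}^{k}\mathrm{KL}\big(p_i \,\|\, M\big),
\quad M = \frac{1}{k}\sum_{i=1}^{k} p_i .
$$

So with `alpha=0` (and no label smoothing) only the cross-entropy on the first element survives, which is what the next cells check.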
###Code
jsd = JsdCrossEntropy(smoothing=0,alpha=0)
jsd(output,labels)
base_loss = F.cross_entropy(output[0,None],labels[0,None])
base_loss
jsd = JsdCrossEntropy(num_splits=1,smoothing=0,alpha=0)
###Output
_____no_output_____
###Markdown
We can also change the number of splits, changing the size of each group. In `AugMix` this would equate to the number of transformation mixtures.
###Code
jsd = JsdCrossEntropy(num_splits=2,smoothing=0,alpha=0)
output = F.one_hot(torch.tensor([0,9,1,0])).float()
labels=torch.tensor([0,9])
jsd(output,labels),F.cross_entropy(output[[0,1]],labels)
###Output
_____no_output_____
###Markdown
By default we have 1 label for 3 predictions. This is a two-part loss that measures both cross-entropy and Jensen-Shannon divergence. The Jensen-Shannon term does not need a label; instead it measures how significantly different the 3 predictions are.
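To make the divergence part concrete, here is a minimal sketch (an illustration, not the timm implementation) that softmaxes a group of predictions and computes their Jensen-Shannon divergence directly:

```python
import torch
import torch.nn.functional as F

def js_divergence(logits):
    # logits: (k, num_classes) -- one row per prediction in the group
    probs = F.softmax(logits, dim=1)
    mixture = probs.mean(dim=0, keepdim=True)   # M = average distribution
    log_mixture = mixture.clamp(min=1e-7).log()
    # mean of KL(p_i || M) over the k predictions
    return sum(F.kl_div(log_mixture, p[None], reduction='batchmean') for p in probs) / len(probs)

agree    = F.one_hot(torch.tensor([0, 0, 0]), num_classes=10).float()
disagree = F.one_hot(torch.tensor([0, 9, 0]), num_classes=10).float()
print(js_divergence(agree))     # ~0: the three predictions are identical
print(js_divergence(disagree))  # > 0: one prediction differs from the others
```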
###Code
jsd = JsdCrossEntropy(smoothing=0)
output = F.one_hot(torch.tensor([0,0,0]),num_classes=10).float()
deltas = torch.cat((torch.zeros([2,10]),torch.tensor([[-1,1,0,0,0,0,0,0,0,0]])))*0.1
deltas[2]
deltas=(torch.arange(-10,11))[...,None,None]*deltas
losses = [jsd((output+delta),labels)-base_loss for delta in deltas]
###Output
_____no_output_____
###Markdown
The graph below shows how a change in one of the model's outputs (predictions) within a group affects the Jensen-Shannon divergence.
###Code
plt.plot([ .1*i-1 for i in range(len(losses))],[loss for loss in losses])
plt.ylabel('JS Divergence')
plt.xlabel('Change in output')
plt.show()
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_model_architectures.ipynb.
Converted 01_training_scripts.ipynb.
Converted 02_dataset.ipynb.
Converted 03_loss.cross_entropy.ipynb.
Converted 04_models.ipynb.
Converted 05_loss.jsd_cross_entropy.ipynb.
Converted index.ipynb.
|
merge_forecasts.ipynb | ###Markdown
Python implementation of Shah, Anish, Easy Way to Merge Return Forecasts across Securities and Horizons (September 24, 2019). Available at SSRN: https://ssrn.com/abstract=3459184 or http://dx.doi.org/10.2139/ssrn.3459184
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
1. Create dummy data
###Code
def dummy_forecasts(m, n):
# function creates dummy forecast data to test forecast combination
# m = # of forecasts
# n = # of securities
# returns:
# start = (m x 1) start periods for forecasts
# end = (m x 1) end periods for forecasts
# P = (m x n) linear combinations forecasted
# y = (m x 1) return forecasts
# H = (m x m) forecast noise covariance
# mu = (n x 1) prior mean of 1 period returns
# C = (n x n) prior covariance of 1 period returns
start = np.random.randint(low=0,high=10,size=m)
end = start + np.random.randint(low=1,high=5,size=m)
s = 0.1
y = s * np.random.random(size=[m,1])
I = np.identity(n)
k = int(np.floor(1.5*m))
r = np.random.randint(low=0, high=n, size=k)
P = I[r[:m],:] # forecast random individual securities
P[-(k-m):,:] = P[-(k-m):,:] - I[r[m:],:] # make some spreads
y[-(k-m):] -= s * 0.5
v = np.random.random(n)
v = np.around(v / v.sum(), 2)
v[np.argmax(v)] -= v.sum() - 1
P[0,:] = v # make first entry a portfolio
Q = np.around(np.random.random(size=[m,m]), 2)
H = 10. * Q.dot(Q.T)
Q = np.around(np.random.random(size=[n,n]), 2)
C = Q.dot(Q.T)
mu = np.around(0.01*(np.random.random(size=[n,1]) - 0.5), 3)
return start, end, P, y, H, mu, C
# m = # of forecasts
# n = # of securities
#
# start = (m x 1) start periods for forecasts
# end = (m x 1) end periods for forecasts
# P = (m x n) linear combinations forecasted
# y = (m x 1) return forecasts
# H = (m x m) forecast noise covariance
# mu = (n x 1) prior mean of 1 period returns
# C = (n x n) prior covariance of 1 period returns
m = 5 # number of forecasts
n = 20 # number of securities
start, end, P, y, H, mu, C = dummy_forecasts(m, n)
###Output
_____no_output_____
###Markdown
2. Segment objects being forecasted into time segments. Then calculate posterior mean and covariance given the forecasts
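The function below is just Gaussian conditioning on the segment variables. Writing the prior over the stacked segment returns as $x \sim N(\nu, \Omega)$ and the forecasts as $y = Z x + \varepsilon$ with $\varepsilon \sim N(0, H)$, the posterior computed in the code is

$$
x \mid y \;\sim\; N\big(\nu + B\,(y - Z\nu),\; \Sigma\big),
\qquad
B = \Omega Z^{\top}\big(Z\Omega Z^{\top} + H\big)^{-1},
\qquad
\Sigma = \Omega - B\,Z\Omega ,
$$

so the `a_mean` returned below equals $\nu - B Z \nu$ and the posterior mean is `a_mean + B y`.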
###Code
# model is y = P x + eps where eps ~ N(0, H)
# x occurs over the intervals start -> end and prior x over 1 period ~ N(mu, C)
# want mean and cov of x over different horizons | y
def calculate_posterior_of_segments(start, end, P, H, mu, C, more_horizons=[0.]):
# more_horizons = (list) of points to consider in addition to start and end
# = [0., 5., 21.] # e.g. to be able to get 1 week and 1 month return forecasts
m, n = P.shape # num of forecasts x num of securities
# points in ascending order where time needs to be segmented
# Tpts = np.unique((start,end))
Tpts = np.unique([t for x in [start, end, more_horizons] for t in x])
# put forecast start and end in terms of time markers
startpt = np.searchsorted(Tpts, start)
endpt = np.searchsorted(Tpts, end)
assert np.alltrue(Tpts[startpt] == start)
assert np.alltrue(Tpts[endpt] == end)
# break quantities being forecasted into time segments
# e.g. r(0->T) = r(0->T1) + r(T1->T2) + ... + r(Tk-1->T)
nseg = len(Tpts) - 1
nsegvars = n*nseg
Z = np.zeros((m, nsegvars)) # matrix that will hold forecasts in terms of segments
nu = np.zeros((nsegvars,1)) # vector that will hold mean for each segment variable
Omega = np.zeros((nsegvars,nsegvars)) # matrix that will hold cov of segment variables
for i in range(nseg):
l = Tpts[i+1] - Tpts[i] # number of time periods in segment
sidx = i*n # start index of variables in time segment
eidx = sidx + n # end index of variables in time segment
nu[sidx:eidx,:] = l * mu # mean over segment
Omega[sidx:eidx,sidx:eidx] = l * C # variance over segment
inseg = (startpt <= i) & (endpt >= i+1) # True for forecasts that contain segment
Z[inseg, sidx:eidx] = P[inseg,:] # put coefficients on segment vars involved in forecasts
# now have everything to calculate posterior distribution
ZOmega = Z.dot(Omega)
F = ZOmega.dot(Z.T) + H
# B = Omega Z' inv(F), and F and Omega are symmetric
# B = (ZOmega.T).dot(np.linalg.inv(F))
B = np.linalg.solve(F, ZOmega).T # computationally better this way
# segment variables given forecasts have
# mean = a_mean + B y = nu + B (y - Z nu) where y is the vector of forecasts
# & cov = Sigma
Sigma = Omega - B.dot(ZOmega)
a_mean = nu - B.dot(Z.dot(nu))
return Tpts, a_mean, B, Sigma
Tpts, a_mean, B, Sigma = calculate_posterior_of_segments(start, end, P, H, mu, C)
###Output
_____no_output_____
###Markdown
3. Now we can calculate the mean and covariance of any linear combination of the segment variables given the forecasts
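For a linear combination $A x$ of the segment variables, this is just the affine transformation of the posterior Gaussian:

$$
A x \mid y \;\sim\; N\big(A\,a_{\text{mean}} + A B\, y,\; A\,\Sigma\,A^{\top}\big),
$$

which is exactly what the two examples below compute (`m0 = A a_mean`, `M = A B`, `pcov = A Sigma A'`).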
###Code
m, n = P.shape
nTps = len(Tpts)
nsegvars = B.shape[0]
print(nTps, "Tpts -", Tpts, "- so", nTps-1, "segments")
print(n, "securities,", nsegvars, "security segments")
print(m, "forecasts")
print("a_mean.shape:", a_mean.shape)
print("B.shape:", B.shape)
print("Sigma.shape", Sigma.shape)
# segment variables given forecasts have
# mean = a_mean + B y and cov = Sigma where y is the vector of forecasts
###Output
8 Tpts - [ 0. 4. 5. 6. 7. 8. 9. 10.] - so 7 segments
20 securities, 140 security segments
5 forecasts
a_mean.shape: (140, 1)
B.shape: (140, 5)
Sigma.shape (140, 140)
###Markdown
A. Example: all the securities over the interval between the 2nd and 4th Tpt
###Code
A = np.zeros((n, nsegvars))
A[:,n:2*n] = np.identity(n)
A[:,2*n:3*n] = np.identity(n)
# given the forecasts, these segment variable combinations
# have mean m0 + M y and cov A Sigma A'
m0 = A.dot(a_mean)
M = A.dot(B) # tells how much each forecast contributed
pmean = m0 + M.dot(y)
pcov = A.dot(Sigma).dot(A.T)
print(pmean.shape)
print(pcov.shape)
###Output
(20, 1)
(20, 20)
###Markdown
B. Example: the first security over each separate segment
###Code
nsegs = nTps-1
A = np.zeros((nsegs, nsegvars))
secnum = 0 # first security
for i in range(nsegs):
A[i, i*n + secnum] = 1. # each row in A is a different interval of the same security
# given the forecasts, these segment variable combinations
# have mean m0 + M y and cov A Sigma A'
m0 = A.dot(a_mean)
M = A.dot(B) # tells how much each forecast contributed
pmean = m0 + M.dot(y)
pcov = A.dot(Sigma).dot(A.T)
print(pmean.shape)
print(pcov.shape)
###Output
(7, 1)
(7, 7)
|
notebooks/chrM_related/chrM_adventures.ipynb | ###Markdown
basically run this bash script to extract all relevant stats from the pairs stats, i.e. total nodup pairs, all M-related pairs, M/M pairs, trans-M pairs and trans-M pairs related to unassembled contigs only - just in case ...

here is what we'd get in terms of input data:

FOR CONTIGS ... a slight modification of the previous thing to enable it to work with the contig names ... one should probably let go of `bash` at this point and do it using a "normal" scripting language - but anyways ... program to extract stats from pairs.stats ...

```sh
get_stats () {
    f=$1
    chrom=$2
    dataset=$3
    chrom_cis="$chrom/$chrom\s"
    sample=$(echo $f | cut -f1 -d "_");
    tot=$(grep "total_nodups" $f | cut -f2);
    cis=$(grep '^cis[^_]' $f | cut -f2);
    trans=$(grep "^trans" $f | cut -f2);
    unmapped=$(grep "total_unmapped" $f | cut -f2);
    dups=$(grep "total_dups" $f | cut -f2);
    allM=$(grep -P "$chrom(\/|\s)" $f | awk '{s+=$2} END {print s}' );
    cisM=$(grep "$chrom_cis" $f | cut -f2);
    transM=$(grep -P "($chrom\/chr[[:alnum:]]*\t|chr[[:alnum:]]*\/$chrom\t)" $f | grep -v $chrom_cis | awk '{s+=$2} END {print s}');
    contigM=$(grep -P "$chrom(\/|\s)" $f | grep -vP "($chrom\/chr[[:alnum:]]*\t|chr[[:alnum:]]*\/$chrom\t)" | grep -v $chrom_cis | awk '{s+=$2} END {print s}');
    [[ -n $allM ]] || allM="0";
    [[ -n $cisM ]] || cisM="0";
    [[ -n $transM ]] || transM="0";
    [[ -n $contigM ]] || contigM="0";
    AA=$(( $cisM+$contigM+$transM ));
    [[ "$AA" == "$allM" ]] || echo "$f $chrom not-equal" >> ~/bbb/log.log;
    awk -v a="$sample" -v b="$tot" -v ba="$cis" -v bb="$trans" -v bc="$unmapped" -v bd="$dups" -v c="$allM" -v d="$cisM" -v e="$transM" -v f="$contigM" -v dt="$dataset" 'BEGIN {print a" "b" "ba" "bb" "bc" "bd" "c" "d" "e" "f" "dt }';
}

for chrom in $(cat ~/hg38.chroms | cut -f1 -d" " | cut -f2 -d">"); do
    awk 'BEGIN {print "name tot cis trans unmapped dups all_chrom cis_chrom trans_chrom contig_chrom dataset_type" }' > ~/bbb/$chrom.tsv;
    cd /nl/umw_job_dekker/users/ba69w/HiC_Analysis/U54_matrix/results/pairs_library
    for f in *hg38.dedup.stats; do
        get_stats $f $chrom shallow >> ~/bbb/$chrom.tsv
    done
    cd /nl/umw_job_dekker/users/ba69w/HiC_Analysis/U54_deep/results/pairs_library
    for f in *hg38.dedup.stats; do
        get_stats $f $chrom deep >> ~/bbb/$chrom.tsv
    done
done
```

Just testing out some of the "magic" bash commands and grepping super powers ...

```sh
# all involving chrom
chrom="chr1"; f=U54-HFF-plate-FA-DpnII-20180904-R1-T1__hg38.hg38.dedup.stats; grep -P "$chrom(\/|\s)" $f;
# just cis of the chrom
chrom="chr1"; f=U54-HFF-plate-FA-HindIIII-20160226-R2-T1__hg38.hg38.dedup.stats; chrom_cis="$chrom/$chrom\s"; grep "$chrom_cis" $f
# trans_chrom
chrom="chr1"; f=U54-HFF-plate-FA-HindIIII-20160226-R2-T1__hg38.hg38.dedup.stats; chrom_cis="$chrom/$chrom\s"; grep -P "($chrom\/chr[[:alnum:]]*\t|chr[[:alnum:]]*\/$chrom\t)" $f | grep -v $chrom_cis
# contig_chrom
chrom="chr1"; f=U54-HFF-plate-FA-HindIIII-20160226-R2-T1__hg38.hg38.dedup.stats; chrom_cis="$chrom/$chrom\s"; grep -P "$chrom(\/|\s)" $f | grep -vP "($chrom\/chr[[:alnum:]]*\t|chr[[:alnum:]]*\/$chrom\t)" | grep -v $chrom_cis
[[ "a" == "a" ]] && echo equal || echo not-equal
```
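Since the note above already admits that `bash` is getting unwieldy here, below is a rough Python sketch of the same per-chromosome extraction. It assumes each `.stats` file is made of tab-separated `key<TAB>value` lines with chromosome-pair keys containing patterns like `chrM/chr1` (exactly what the greps above rely on); the helper name and the precise cis/trans/contig classification are my own approximation, not part of the original pipeline:

```python
import re

def chrom_stats(stats_path, chrom):
    """Rough Python counterpart of the bash get_stats() above -- a sketch, not a drop-in replacement."""
    totals = {"tot": 0, "cis": 0, "trans": 0, "unmapped": 0, "dups": 0,
              "all_chrom": 0, "cis_chrom": 0, "trans_chrom": 0, "contig_chrom": 0}
    # mirrors the `chr[[:alnum:]]*` pattern in the greps: assembled chromosomes have no underscores
    assembled = re.compile(r"^chr[A-Za-z0-9]+$")
    with open(stats_path) as fh:
        for line in fh:
            key, _, value = line.rstrip("\n").partition("\t")
            if not value:
                continue
            if key == "total_nodups":
                totals["tot"] = int(value)
            elif key == "cis":
                totals["cis"] = int(value)
            elif key == "trans":
                totals["trans"] = int(value)
            elif key == "total_unmapped":
                totals["unmapped"] = int(value)
            elif key == "total_dups":
                totals["dups"] = int(value)
            elif "/" in key and chrom in key.split("/"):
                c1, c2 = key.split("/")[-2:]
                totals["all_chrom"] += int(value)
                if c1 == c2 == chrom:
                    totals["cis_chrom"] += int(value)
                elif assembled.match(c1) and assembled.match(c2):
                    totals["trans_chrom"] += int(value)
                else:
                    totals["contig_chrom"] += int(value)
    return totals
```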
###Code
### Reading data in from the cluster ghpcc ...
chroms = !ssh ghpcc cat /home/sv49w/hg38.chroms | cut -f1 -d" " |cut -f2 -d">"
data = {}
for chrom in chroms:
dat = !ssh ghpcc cat /home/sv49w/bbb/{chrom}.tsv
data[chrom] = "\n".join(dat)
# i had to install this beauty https://github.com/matplotlib/ipympl
# to make following to work ...
%matplotlib widget
import ipywidgets as widgets
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
import seaborn as sns
import numpy as np
from io import StringIO
# make pandas display entire dataframes
pd.set_option("display.max_rows", None, "display.max_columns", None)
# df = pd.read_csv(StringIO(data),sep=" ")
# # let's parse "name" into cell-type, enzyme, crosslink etc
# # first sanity check : allM = cisM + transM + contigM
# assert (df[["cisM","transM","contigM"]].sum(axis=1) == df["allM"]).all()
# # second one: cis+trans = tot (mapped)
# assert (df[["cis","trans"]].sum(axis=1) == df["tot"]).all()
# df["cis_perc"] = df["cis"]/df["tot"]
#make sure data is defined globally at the very end of the notebook ...
def split_counts_fracs(counts):
c_fracs = [_c for _c in counts if "frac_" in _c]
c_counts = [_c for _c in counts if "frac_" not in _c]
return c_fracs,c_counts
def parse_data(chrom):
df = pd.read_csv(StringIO(data[chrom]),sep=" ")
# let's parse "name" into cell-type, enzyme, crosslink etc
#
# first one: cis+trans = tot (mapped)
assert (df[["cis","trans"]].sum(axis=1) == df["tot"]).all()
# second sanity check : allM = cisM + transM + contigM
assert (df[["cis_chrom","trans_chrom","contig_chrom"]].sum(axis=1) == df["all_chrom"]).all()
# df["cis_perc"] = df["cis"]/df["tot"]
# df["ct_chrom"] = df["cis_chrom"]+df["trans_chrom"]+df["contig_chrom"]
df["frac_tc"] = df["trans_chrom"]/df["cis_chrom"]
df["frac_ta"] = df["trans_chrom"]/df["all_chrom"]
return df
def parse_u54_names(name_split):
exptype = name_split[0]
celltype = name_split[1]
if (exptype == "ENCODE")and(celltype=="HeLa"):
cross = "FA"
enzyme = name_split[2]
cycle = "NS"
elif (exptype == "U54")and(celltype == "HFFc6"):
if name_split[2] in ["p17","p22"]:
cross = name_split[3]
enzyme = name_split[4]
else:
cross = name_split[2]
enzyme = name_split[3]
cycle = "NS"
elif (exptype == "U54")and(celltype in ["END4DN", "H1ESC4DN", "HFFc64DN","HFFc6"]):
cross = name_split[3]
enzyme = name_split[4]
cycle = "NS"
elif celltype in ["END","ESC","END4DN","ESC4DN","H1ESC4DN","ENDMaehr","hEND4DN"]:
cross = name_split[2]
enzyme = name_split[3]
cycle = "NS"
    elif celltype == "HFF":  # exact match, not the looser substring test `in "HFF"`
cross = name_split[3]
enzyme = name_split[4]
cycle = "NS"
elif celltype == "HelaS3":
cross = name_split[3]
enzyme = name_split[4]
cycle = name_split[2]
else:
        print("I'm something else - deal with me...")
print(name_split)
ret = {}
if enzyme == "HindIIII":
enzyme = "HindIII"
if enzyme not in ['DdeI','DpnII','HindIII','MNase']:
print("enzyme",enzyme,name_split)
if cross not in ['DSG','EGS','FA']:
print("cross",cross,name_split)
if cycle not in ['NS','G1','M']:
print("cycle",cycle,name_split)
ret = {"cell":celltype, "cycle": cycle, "cross": cross, "enzyme": enzyme}
return pd.Series(ret)
# this should be the same for all of them, but it's ugly this way ...
cell_cycle_cross_enzyme = parse_data("chrM")["name"].str.split("-").apply(parse_u54_names)
for c in cell_cycle_cross_enzyme.columns:
print(c,cell_cycle_cross_enzyme[c].unique())
# errors = ddf.groupby(("cross","enzyme","cell")).std()
# means.plot.bar(yerr=errors, ax=ax, capsize=4,logy=True)
fig1 = plt.figure(figsize=(12.5,6),constrained_layout=True)
spec1 = gridspec.GridSpec(ncols=1,nrows=1,figure=fig1)
# # Also make sure the margins and spacing are apropriate
# spec1.update(left=0.05, right=0.95, bottom=0.08, top=0.93, wspace=0.02, hspace=0.03)
# # BUT: this is irrelevant for the saved image, if using bbox_inches='tight'in savefig !
ax1 = fig1.add_subplot(spec1[0,0])
# fig, ax0 = plt.subplots(figsize=(20,3))
style = {'description_width': 'initial'}
counts_selector = widgets.Text(
value="tot,all_chrom,cis_chrom,trans_chrom,contig_chrom",
# description='tot,all_chrom,cis_chrom,trans_chrom,contig_chrom:',
description='tot,all...',
disabled=False,
style=style
)
enzyme_selector = widgets.Text(
value="DdeI,DpnII,HindIII,MNase",
description='DdeI,DpnII,HindIII,MNase:',
disabled=False,
style=style
)
cross_selector = widgets.Text(
value="DSG,EGS,FA",
description='DSG,EGS,FA:',
disabled=False,
style=style
)
cycle_selector = widgets.Text(
value="NS,G1,M",
description='NS,G1,M:',
disabled=False,
style=style
)
cells_selector = widgets.Text(
value='END,ESC,HelaS3,HFF,HeLa,END4DN,ENDMaehr,ESC4DN,H1ESC4DN,hEND4DN,HFFc64DN,HFFc6',
description='END,ESC,HelaS3,HFF:',
disabled=False,
style=style
)
# grouping = ["enzyme_cross_cell_cycle","cross_enzyme_cell_cycle","cell_cross_enzyme_cycle"]
@widgets.interact(
counts = counts_selector,
enzymes = enzyme_selector,
cells = cells_selector,
cycle = cycle_selector,
cross = cross_selector,
log=True,
normalized=True,
grouping = ["enzyme_cross_cell_cycle","cross_enzyme_cell_cycle","cell_cross_enzyme_cycle"],
chrom = list(data.keys()))
def update(counts,enzymes,cells,cycle,cross,log,normalized,grouping,chrom):
# this is just to make us able to change chroms
df = parse_data(chrom)
ax1.clear()
counts = counts.split(",")
enzymes = enzymes.split(",")
cells = cells.split(",")
cycle = cycle.split(",")
cross = cross.split(",")
c1 = cell_cycle_cross_enzyme["enzyme"].isin(enzymes)
c2 = cell_cycle_cross_enzyme["cell"].isin(cells)
c3 = cell_cycle_cross_enzyme["cycle"].isin(cycle)
c4 = cell_cycle_cross_enzyme["cross"].isin(cross)
ccce = cell_cycle_cross_enzyme[c1&c2&c3&c4]
if normalized:
c_fracs,c_counts = split_counts_fracs(counts)
df_norm = df[c_counts]/df[["tot"]].values
loc_df = pd.merge(df_norm,ccce,left_index=True,right_index=True)
loc_df = pd.merge(loc_df,df[c_fracs],left_index=True,right_index=True)
else:
df_norm = df[counts]
loc_df = pd.merge(df_norm,ccce,left_index=True,right_index=True)
grp = grouping.split("_")
mmeans = loc_df.groupby(grp).mean()
mmeans.plot.bar( ax=ax1, capsize=4,logy=log)
ax1.set_ylabel("# of pairs")
# print(enzymes)
# print(cells)
# print(cycle)
# print(cross)
###Output
_____no_output_____
###Markdown
the fact that $cis_{chrom} + trans_{chrom}$ is not even close to a constant is easy to understand, because we are over-counting the trans data ...
###Code
# errors = ddf.groupby(("cross","enzyme","cell")).std()
# means.plot.bar(yerr=errors, ax=ax, capsize=4,logy=True)
fig2 = plt.figure(figsize=(7,5),constrained_layout=True)
spec2 = gridspec.GridSpec(ncols=1,nrows=1,figure=fig2)
# # Also make sure the margins and spacing are apropriate
# spec1.update(left=0.05, right=0.95, bottom=0.08, top=0.93, wspace=0.02, hspace=0.03)
# # BUT: this is irrelevant for the saved image, if using bbox_inches='tight'in savefig !
ax2 = fig2.add_subplot(spec2[0,0])
# fig, ax0 = plt.subplots(figsize=(20,3))
style = {'description_width': 'initial'}
xy_selector = widgets.Text(
value="cis_chrom,trans_chrom",
description='tot,all_chrom...',
disabled=False,
style=style
)
enzyme_selector = widgets.Text(
value="DdeI,DpnII,HindIII,MNase",
description='DdeI,DpnII,HindIII,MNase:',
disabled=False,
style=style
)
cross_selector = widgets.Text(
value="DSG,EGS,FA",
description='DSG,EGS,FA:',
disabled=False,
style=style
)
cycle_selector = widgets.Text(
value="NS,G1,M",
description='NS,G1,M:',
disabled=False,
style=style
)
cells_selector = widgets.Text(
value='END,ESC,HelaS3,HFF,HeLa,END4DN,ENDMaehr,ESC4DN,H1ESC4DN,hEND4DN,HFFc64DN,HFFc6',
description='END,ESC,HelaS3,HFF:',
disabled=False,
style=style
)
# grouping = ["enzyme_cross_cell_cycle","cross_enzyme_cell_cycle","cell_cross_enzyme_cycle"]
@widgets.interact(
xy = xy_selector,
enzymes = enzyme_selector,
cells = cells_selector,
cycle = cycle_selector,
cross = cross_selector,
log=True,
normalized=True,
chrom = list(data.keys()))
def update(xy,enzymes,cells,cycle,cross,log,normalized,chrom):
df = parse_data(chrom)
ax2.clear()
x,y = xy.split(",")
enzymes = enzymes.split(",")
cells = cells.split(",")
cycle = cycle.split(",")
cross = cross.split(",")
c1 = cell_cycle_cross_enzyme["enzyme"].isin(enzymes)
c2 = cell_cycle_cross_enzyme["cell"].isin(cells)
c3 = cell_cycle_cross_enzyme["cycle"].isin(cycle)
c4 = cell_cycle_cross_enzyme["cross"].isin(cross)
ccce = cell_cycle_cross_enzyme[c1&c2&c3&c4]
if normalized:
df_x = df[[x]]/df[["tot"]].values if "frac_" not in x else df[[x]]
df_y = df[[y]]/df[["tot"]].values if "frac_" not in y else df[[y]]
loc_df = pd.merge(df_x,ccce,left_index=True,right_index=True)
loc_df = pd.merge(loc_df,df_y,left_index=True,right_index=True)
else:
df_norm = df[[x,y]]
loc_df = pd.merge(df_norm,ccce,left_index=True,right_index=True)
sp = sns.scatterplot(x=x,y=y,hue="cross",size="enzyme",data=loc_df,ax=ax2)
x_span = loc_df[x].max() - loc_df[x].min()
y_span = loc_df[y].max() - loc_df[y].min()
dx = 0.01*x_span
dy = 0.01*y_span
if log:
ax2.set_xlim((loc_df[x].min()*0.9,loc_df[x].max()*1.01))
ax2.set_ylim((loc_df[y].min()*0.9,loc_df[y].max()*1.01))
sp.set(xscale="log")
sp.set(yscale="log")
else:
ax2.set_xlim((loc_df[x].min()-dx,loc_df[x].max()+dx))
ax2.set_ylim((loc_df[y].min()-dy,loc_df[y].max()+dy))
# errors = ddf.groupby(("cross","enzyme","cell")).std()
# means.plot.bar(yerr=errors, ax=ax, capsize=4,logy=True)
fig3 = plt.figure(figsize=(4,7),constrained_layout=True)
spec3 = gridspec.GridSpec(ncols=1,nrows=3,figure=fig3)
# # Also make sure the margins and spacing are apropriate
# spec1.update(left=0.05, right=0.95, bottom=0.08, top=0.93, wspace=0.02, hspace=0.03)
# # BUT: this is irrelevant for the saved image, if using bbox_inches='tight'in savefig !
ax31 = fig3.add_subplot(spec3[0,0])
ax32 = fig3.add_subplot(spec3[1,0])
ax33 = fig3.add_subplot(spec3[2,0])
style = {'description_width': 'initial'}
xy_selector = widgets.Text(
value="cis_chrom,trans_chrom",
description='tot,all_chrom...',
disabled=False,
style=style
)
enzyme_selector = widgets.Text(
value="DdeI,DpnII,HindIII,MNase",
description='DdeI,DpnII,HindIII,MNase:',
disabled=False,
style=style
)
cross_selector = widgets.Text(
value="DSG,EGS,FA",
description='DSG,EGS,FA:',
disabled=False,
style=style
)
cycle_selector = widgets.Text(
value="NS,G1,M",
description='NS,G1,M:',
disabled=False,
style=style
)
cells_selector = widgets.Text(
value='END,ESC,HelaS3,HFF,HeLa,END4DN,ENDMaehr,ESC4DN,H1ESC4DN,hEND4DN,HFFc64DN,HFFc6',
description='END,ESC,HelaS3,HFF:',
disabled=False,
style=style
)
# grouping = ["enzyme_cross_cell_cycle","cross_enzyme_cell_cycle","cell_cross_enzyme_cycle"]
@widgets.interact(
xy = xy_selector,
enzymes = enzyme_selector,
cells = cells_selector,
cycle = cycle_selector,
cross = cross_selector,
log=True,
normalized=True,
chrom = list(data.keys()))
def update(xy,enzymes,cells,cycle,cross,log,normalized,chrom):
df = parse_data(chrom)
ax31.clear()
ax32.clear()
ax33.clear()
enzymes = enzymes.split(",")
cells = cells.split(",")
cycle = cycle.split(",")
cross = cross.split(",")
c1 = cell_cycle_cross_enzyme["enzyme"].isin(enzymes)
c2 = cell_cycle_cross_enzyme["cell"].isin(cells)
c3 = cell_cycle_cross_enzyme["cycle"].isin(cycle)
c4 = cell_cycle_cross_enzyme["cross"].isin(cross)
ccce = cell_cycle_cross_enzyme[c1&c2&c3&c4]
x,y1,y2,y3 = "trans","trans_chrom","cis_chrom","all_chrom"
if normalized:
df_norm = df[[x,y1,y2,y3]]/df[["tot"]].values
loc_df = pd.merge(df_norm,ccce,left_index=True,right_index=True)
else:
df_norm = df[[x,y1,y2,y3]]
loc_df = pd.merge(df_norm,ccce,left_index=True,right_index=True)
sp1 = sns.scatterplot(x=x,y=y1,hue="cross",size="enzyme",data=loc_df,ax=ax31)
sp2 = sns.scatterplot(x=x,y=y2,hue="cross",size="enzyme",data=loc_df,ax=ax32)
sp3 = sns.scatterplot(x=x,y=y3,hue="cross",size="enzyme",data=loc_df,ax=ax33)
x_span = df_norm[x].max() - df_norm[x].min()
y_span = df_norm[[y1,y2,y3]].max().max() - df_norm[[y1,y2,y3]].min().min()
dx = 0.01*x_span
dy = 0.01*y_span
if log:
ax31.set_xlim((df_norm[x].min()*0.9,df_norm[x].max()*1.01))
ax31.set_ylim((df_norm[[y1,y2,y3]].min().min()*0.9,df_norm[[y1,y2,y3]].max().max()*1.01))
ax32.set_xlim((df_norm[x].min()*0.9,df_norm[x].max()*1.01))
ax32.set_ylim((df_norm[[y1,y2,y3]].min().min()*0.9,df_norm[[y1,y2,y3]].max().max()*1.01))
ax33.set_xlim((df_norm[x].min()*0.9,df_norm[x].max()*1.01))
ax33.set_ylim((df_norm[[y1,y2,y3]].min().min()*0.9,df_norm[[y1,y2,y3]].max().max()*1.01))
sp1.set(xscale="log",yscale="log")
sp2.set(xscale="log",yscale="log")
sp3.set(xscale="log",yscale="log")
else:
ax31.set_xlim((df_norm[x].min()-dx,df_norm[x].max()+dx))
ax31.set_ylim((df_norm[[y1,y2,y3]].min().min()-dy,df_norm[[y1,y2,y3]].max().max()+dy))
ax32.set_xlim((df_norm[x].min()-dx,df_norm[x].max()+dx))
ax32.set_ylim((df_norm[[y1,y2,y3]].min().min()-dy,df_norm[[y1,y2,y3]].max().max()+dy))
ax33.set_xlim((df_norm[x].min()-dx,df_norm[x].max()+dx))
ax33.set_ylim((df_norm[[y1,y2,y3]].min().min()-dy,df_norm[[y1,y2,y3]].max().max()+dy))
###Output
_____no_output_____ |
7.2-Token_classification.ipynb | ###Markdown
[Token classification](https://huggingface.co/course/chapter7/2?fw=pt)The first application we'll explore is token classification. This generic task encompasses any problem that can be formulated as "attributing a label to each token in a sentence", such as:- **Named entity recognition (NER)**: Find the entities (such as persons, locations, or organizations) in a sentence. This can be formulated as attributing a label to each token by having one class per entity and one class for "no entity."- **Part-of-speech tagging (POS)**: Mark each word in a sentence as corresponding to a particular part of speech (such as noun, verb, adjective, etc.).- **Chunking**: Find the tokens that belong to the same entity. This task (which can be combined with POS or NER) can be formulated as attributing one label (usually B-) to any tokens that are at the beginning of a chunk, another label (usually I-) to tokens that are inside a chunk, and a third label (usually O) to tokens that don't belong to any chunk.
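As a tiny illustration of the `B-`/`I-`/`O` scheme described above (a made-up example, not taken from the dataset we will load below), a labelled sentence is simply a list of words with one tag per word:

```python
words = ["Sylvain", "works", "at", "Hugging", "Face", "in", "Brooklyn", "."]
tags  = ["B-PER",   "O",     "O",  "B-ORG",   "I-ORG", "O", "B-LOC",    "O"]

for word, tag in zip(words, tags):
    print(f"{word:10} {tag}")
```

Note how the two-word entity "Hugging Face" gets `B-ORG` on its first word and `I-ORG` on the second.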
###Code
from IPython.display import HTML
HTML('<iframe width="640" height="360" src="https://www.youtube.com/embed/wVHdVlPScxA" allowfullscreen></iframe>')
###Output
/home/matthias/anaconda3/envs/hf/lib/python3.9/site-packages/IPython/core/display.py:724: UserWarning: Consider using IPython.display.IFrame instead
warnings.warn("Consider using IPython.display.IFrame instead")
###Markdown
Of course, there are many other types of token classification problems; those are just a few representative examples. In this section, we will fine-tune a model (BERT) on a NER task, which will then be able to compute predictions like this one:You can find the model we'll train and upload to the Hub and double-check its predictions [here](https://huggingface.co/huggingface-course/bert-finetuned-ner?text=My+name+is+Sylvain+and+I+work+at+Hugging+Face+in+Brooklyn). Preparing the dataFirst things first, we need a dataset suitable for token classification. In this section we will use the [CoNLL-2003 dataset](https://huggingface.co/datasets/conll2003), which contains news stories from Reuters.> 💡 As long as your dataset consists of texts split into words with their corresponding labels, you will be able to adapt the data processing procedures described here to your own dataset. Refer back to [Chapter 5](https://huggingface.co/course/chapter5) if you need a refresher on how to load your own custom data in a `Dataset`. The CoNLL-2003 datasetTo load the CoNLL-2003 dataset, we use the `load_dataset()` method from the 🤗 Datasets library:
###Code
from datasets import load_dataset
raw_datasets = load_dataset("conll2003")
###Output
Reusing dataset conll2003 (/home/matthias/.cache/huggingface/datasets/conll2003/conll2003/1.0.0/63f4ebd1bcb7148b1644497336fd74643d4ce70123334431a3c053b7ee4e96ee)
###Markdown
This will download and cache the dataset, like we saw in [Chapter 3](https://huggingface.co/course/chapter3) for the GLUE MRPC dataset. Inspecting this object shows us the columns present and the split between the training, validation, and test sets:
###Code
raw_datasets
###Output
_____no_output_____
###Markdown
In particular, we can see the dataset contains labels for the three tasks we mentioned earlier: NER, POS, and chunking. A big difference from other datasets is that the input texts are not presented as sentences or documents, but lists of words (the last column is called `tokens`, but it contains words in the sense that these are pre-tokenized inputs that still need to go through the tokenizer for subword tokenization).Let's have a look at the first element of the training set:
###Code
raw_datasets["train"][0]["tokens"]
###Output
_____no_output_____
###Markdown
Since we want to perform named entity recognition, we will look at the NER tags:
###Code
raw_datasets["train"][0]["ner_tags"]
###Output
_____no_output_____
###Markdown
Those are the labels as integers ready for training, but they're not necessarily useful when we want to inspect the data. Like for text classification, we can access the correspondence between those integers and the label names by looking at the `features` attribute of our dataset:
###Code
ner_feature = raw_datasets["train"].features["ner_tags"]
ner_feature
###Output
_____no_output_____
###Markdown
So this column contains elements that are sequences of `ClassLabels`. The type of the elements of the sequence is in the `feature` attribute of this `ner_feature`, and we can access the list of names by looking at the `names` attribute of that `feature`:
###Code
label_names = ner_feature.feature.names
label_names
###Output
_____no_output_____
###Markdown
We already saw these labels when digging into the `token-classification` pipeline in [Chapter 6](https://huggingface.co/course/chapter6/3), but for a quick refresher:- `O` means the word doesn't correspond to any entity.- `B-PER`/`I-PER` means the word corresponds to the beginning of/is inside a *person* entity.- `B-ORG`/`I-ORG` means the word corresponds to the beginning of/is inside an *organization* entity.- `B-LOC`/`I-LOC` means the word corresponds to the beginning of/is inside a *location* entity.- `B-MISC`/`I-MISC` means the word corresponds to the beginning of/is inside a *miscellaneous* entity.Now decoding the labels we saw earlier gives us this:
###Code
words = raw_datasets["train"][0]["tokens"]
labels = raw_datasets["train"][0]["ner_tags"]
line1 = ""
line2 = ""
for word, label in zip(words, labels):
full_label = label_names[label]
max_length = max(len(word), len(full_label))
line1 += word + " " * (max_length - len(word) + 1)
line2 += full_label + " " * (max_length - len(full_label) + 1)
print(line1)
print(line2)
###Output
EU rejects German call to boycott British lamb .
B-ORG O B-MISC O O O B-MISC O O
###Markdown
And for an example mixing `B-` and `I-` labels, here's what the same code gives us on the element of the training set at index 4:
###Code
words = raw_datasets["train"][4]["tokens"]
labels = raw_datasets["train"][4]["ner_tags"]
line1 = ""
line2 = ""
for word, label in zip(words, labels):
full_label = label_names[label]
max_length = max(len(word), len(full_label))
line1 += word + " " * (max_length - len(word) + 1)
line2 += full_label + " " * (max_length - len(full_label) + 1)
print(line1)
print(line2)
###Output
Germany 's representative to the European Union 's veterinary committee Werner Zwingmann said on Wednesday consumers should buy sheepmeat from countries other than Britain until the scientific advice was clearer .
B-LOC O O O O B-ORG I-ORG O O O B-PER I-PER O O O O O O O O O O O B-LOC O O O O O O O
###Markdown
As we can see, entities spanning two words, like "European Union" and "Werner Zwingmann", are attributed a `B-` label for the first word and an `I-` label for the second.> ✏️ Your turn! Print the same two sentences with their POS or chunking labels.
###Code
# Trying it out
## turn the above code into a function accepting the relevant arguments
def sentence_labels(idx, tags):
words = raw_datasets["train"][idx]["tokens"]
labels = raw_datasets["train"][idx][tags]
label_names = raw_datasets["train"].features[tags].feature.names
line1 = ""
line2 = ""
print(labels)
print(words)
for word, label in zip(words, labels):
full_label = label_names[label]
max_length = max(len(word), len(full_label))
line1 += word + " " * (max_length - len(word) + 1)
line2 += full_label + " " * (max_length - len(full_label) + 1)
print("\nTags:\t{}\nIndex:\t{}".format(tags, idx))
print(line1)
print(line2)
pass
## use the function to complete the task
sentence_labels(0, "pos_tags")
sentence_labels(0, "chunk_tags")
sentence_labels(4, "pos_tags")
sentence_labels(4, "chunk_tags")
###Output
[22, 42, 16, 21, 35, 37, 16, 21, 7]
['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.']
Tags: pos_tags
Index: 0
EU rejects German call to boycott British lamb .
NNP VBZ JJ NN TO VB JJ NN .
[11, 21, 11, 12, 21, 22, 11, 12, 0]
['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.']
Tags: chunk_tags
Index: 0
EU rejects German call to boycott British lamb .
B-NP B-VP B-NP I-NP B-VP I-VP B-NP I-NP O
[22, 27, 21, 35, 12, 22, 22, 27, 16, 21, 22, 22, 38, 15, 22, 24, 20, 37, 21, 15, 24, 16, 15, 22, 15, 12, 16, 21, 38, 17, 7]
['Germany', "'s", 'representative', 'to', 'the', 'European', 'Union', "'s", 'veterinary', 'committee', 'Werner', 'Zwingmann', 'said', 'on', 'Wednesday', 'consumers', 'should', 'buy', 'sheepmeat', 'from', 'countries', 'other', 'than', 'Britain', 'until', 'the', 'scientific', 'advice', 'was', 'clearer', '.']
Tags: pos_tags
Index: 4
Germany 's representative to the European Union 's veterinary committee Werner Zwingmann said on Wednesday consumers should buy sheepmeat from countries other than Britain until the scientific advice was clearer .
NNP POS NN TO DT NNP NNP POS JJ NN NNP NNP VBD IN NNP NNS MD VB NN IN NNS JJ IN NNP IN DT JJ NN VBD JJR .
[11, 11, 12, 13, 11, 12, 12, 11, 12, 12, 12, 12, 21, 13, 11, 12, 21, 22, 11, 13, 11, 1, 13, 11, 17, 11, 12, 12, 21, 1, 0]
['Germany', "'s", 'representative', 'to', 'the', 'European', 'Union', "'s", 'veterinary', 'committee', 'Werner', 'Zwingmann', 'said', 'on', 'Wednesday', 'consumers', 'should', 'buy', 'sheepmeat', 'from', 'countries', 'other', 'than', 'Britain', 'until', 'the', 'scientific', 'advice', 'was', 'clearer', '.']
Tags: chunk_tags
Index: 4
Germany 's representative to the European Union 's veterinary committee Werner Zwingmann said on Wednesday consumers should buy sheepmeat from countries other than Britain until the scientific advice was clearer .
B-NP B-NP I-NP B-PP B-NP I-NP I-NP B-NP I-NP I-NP I-NP I-NP B-VP B-PP B-NP I-NP B-VP I-VP B-NP B-PP B-NP B-ADJP B-PP B-NP B-SBAR B-NP I-NP I-NP B-VP B-ADJP O
###Markdown
Processing the data
###Code
HTML('<iframe width="640" height="360" src="https://www.youtube.com/embed/iY2AZYdZAr0" allowfullscreen></iframe>')
###Output
/home/matthias/anaconda3/envs/hf/lib/python3.9/site-packages/IPython/core/display.py:724: UserWarning: Consider using IPython.display.IFrame instead
warnings.warn("Consider using IPython.display.IFrame instead")
###Markdown
As usual, our texts need to be converted to token IDs before the model can make sense of them. As we saw in [Chapter 6](https://huggingface.co/course/chapter6/), a big difference in the case of token classification tasks is that we have pre-tokenized inputs. Fortunately, the tokenizer API can deal with that pretty easily; we just need to warn the `tokenizer` with a special flag.To begin, let's create our `tokenizer` object. As we said before, we will be using a BERT pretrained model, so we'll start by downloading and caching the associated tokenizer:
###Code
from transformers import AutoTokenizer
model_checkpoint = "bert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
###Output
_____no_output_____
###Markdown
You can replace the `model_checkpoint` with any other model you prefer from the [Hub](https://huggingface.co/models), or with a local folder in which you've saved a pretrained model and a tokenizer. The only constraint is that the tokenizer needs to be backed by the 🤗 Tokenizers library, so there's a "fast" version available. You can see all the architectures that come with a fast version in [this big table](https://huggingface.co/transformers/supported-frameworks), and to check that the `tokenizer` object you're using is indeed backed by 🤗 Tokenizers you can look at its `is_fast` attribute:
###Code
tokenizer.is_fast
###Output
_____no_output_____
###Markdown
To tokenize a pre-tokenized input, we can use our `tokenizer` as usual and just add `is_split_into_words=True`:
###Code
inputs = tokenizer(raw_datasets["train"][0]["tokens"], is_split_into_words=True)
inputs.tokens()
###Output
_____no_output_____
###Markdown
As we can see, the tokenizer added the special tokens used by the model (`[CLS]` at the beginning and `[SEP]` at the end) and left most of the words untouched. The word `lamb`, however, was tokenized into two subwords, `la` and `mb`. This introduces a mismatch between our inputs and the labels: the list of labels has only 9 elements, whereas our input now has 12 tokens. Accounting for the special tokens is easy (we know they are at the beginning and the end), but we also need to make sure we align all the labels with the proper words.Fortunately, because we're using a fast tokenizer we have access to the 🤗 Tokenizers superpowers, which means we can easily map each token to its corresponding word (as seen in [Chapter 6](https://huggingface.co/course/chapter6/3)):
###Code
inputs.word_ids()
###Output
_____no_output_____
###Markdown
With a tiny bit of work, we can then expand our label list to match the tokens. The first rule we'll apply is that special tokens get a label of `-100`. This is because by default `-100` is an index that is ignored in the loss function we will use (cross entropy). Then, each token gets the same label as the token that started the word it's inside, since they are part of the same entity. For tokens inside a word but not at the beginning, we replace the `B-` with `I-` (since the token does not begin the entity):
###Code
def align_labels_with_tokens(labels, word_ids):
new_labels = []
current_word = None
for word_id in word_ids:
if word_id != current_word:
# Start of a new word!
current_word = word_id
label = -100 if word_id is None else labels[word_id]
new_labels.append(label)
elif word_id is None:
# Special token
new_labels.append(-100)
else:
# Same word as previous token
label = labels[word_id]
# If the label is B-XXX we change it to I-XXX
if label % 2 == 1:
label += 1
new_labels.append(label)
return new_labels
###Output
_____no_output_____
###Markdown
Let's try it out on our first sentence:
###Code
labels = raw_datasets["train"][0]["ner_tags"]
word_ids = inputs.word_ids()
print(labels)
print(align_labels_with_tokens(labels, word_ids))
###Output
[3, 0, 7, 0, 0, 0, 7, 0, 0]
[-100, 3, 0, 7, 0, 0, 0, 7, 0, 0, 0, -100]
###Markdown
As we can see, our function added the `-100` for the two special tokens at the beginning and the end, and a new `0` for our word that was split into two tokens.> ✏️ Your turn! Some researchers prefer to attribute only one label per word, and assign `-100` to the other subtokens in a given word. This is to avoid long words that split into lots of subtokens contributing heavily to the loss. Change the previous function to align labels with input IDs by following this rule.
###Code
# Trying it out (my turn)
print(labels)
print(inputs.tokens())
print(word_ids)
print(align_labels_with_tokens(labels, word_ids))
def my_align_labels_with_tokens(labels, word_ids):
new_labels = []
previous_word_id = None
for word_id in word_ids:
if (word_id==None) or (word_id==previous_word_id):
label = -100
else:
label = labels[word_id]
new_labels.append(label)
previous_word_id = word_id
return new_labels
#
my_align_labels_with_tokens(labels, word_ids)
###Output
[3, 0, 7, 0, 0, 0, 7, 0, 0]
['[CLS]', 'EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'la', '##mb', '.', '[SEP]']
[None, 0, 1, 2, 3, 4, 5, 6, 7, 7, 8, None]
[-100, 3, 0, 7, 0, 0, 0, 7, 0, 0, 0, -100]
###Markdown
To preprocess our whole dataset, we need to tokenize all the inputs and apply `align_labels_with_tokens()` on all the labels. To take advantage of the speed of our fast tokenizer, it's best to tokenize lots of texts at the same time, so we'll write a function that processes a list of examples and use the `Dataset.map()` method with the option `batched=True`. The only thing that is different from our previous example is that the `word_ids()` function needs to get the index of the example we want the word IDs of when the inputs to the tokenizer are lists of texts (or in our case, list of lists of words), so we add that, too:
###Code
def tokenize_and_align_labels(examples):
tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)
all_labels = examples["ner_tags"]
new_labels = []
for i, labels in enumerate(all_labels):
word_ids = tokenized_inputs.word_ids(i)
new_labels.append(align_labels_with_tokens(labels, word_ids))
tokenized_inputs["labels"] = new_labels
return tokenized_inputs
###Output
_____no_output_____
###Markdown
Note that we haven’t padded our inputs yet; we’ll do that later, when creating the batches with a data collator.We can now apply all that preprocessing in one go on the other splits of our dataset:
###Code
tokenized_datasets = raw_datasets.map(
tokenize_and_align_labels,
batched=True,
remove_columns=raw_datasets["train"].column_names
)
###Output
Loading cached processed dataset at /home/matthias/.cache/huggingface/datasets/conll2003/conll2003/1.0.0/63f4ebd1bcb7148b1644497336fd74643d4ce70123334431a3c053b7ee4e96ee/cache-5c997231345efbaf.arrow
Loading cached processed dataset at /home/matthias/.cache/huggingface/datasets/conll2003/conll2003/1.0.0/63f4ebd1bcb7148b1644497336fd74643d4ce70123334431a3c053b7ee4e96ee/cache-9ac3b79c72f329d1.arrow
###Markdown
We've done the hardest part! Now that the data has been preprocessed, the actual training will look a lot like what we did in [Chapter 3](https://huggingface.co/course/chapter3). Fine-tuning the model with the `Trainer` APIThe actual code using the `Trainer` will be the same as before; the only changes are the way the data is collated into a batch and the metric computation function. Data collationWe can't just use a `DataCollatorWithPadding` like in [Chapter 3](https://huggingface.co/course/chapter3) because that only pads the inputs (input IDs, attention mask, and token type IDs). Here, our labels should be padded the exact same way as the inputs so that they stay the same size, using `-100` as a value so that the corresponding predictions are ignored in the loss computation.This is all done by a [`DataCollatorForTokenClassification`](https://huggingface.co/transformers/main_classes/data_collator.htmldatacollatorfortokenclassification). Like the `DataCollatorWithPadding`, it takes the `tokenizer` used to preprocess the inputs:
###Code
from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
###Output
_____no_output_____
###Markdown
To test this on a few samples, we can just call it on a list of examples from our tokenized training set:
###Code
batch = data_collator([tokenized_datasets["train"][i] for i in range(2)])
batch["labels"]
###Output
_____no_output_____
###Markdown
Let's compare this to the labels for the first and second elements in our dataset:
###Code
for i in range(2):
print(tokenized_datasets["train"][i]["labels"])
###Output
[-100, 3, 0, 7, 0, 0, 0, 7, 0, 0, 0, -100]
[-100, 1, 2, -100]
###Markdown
As we can see, the second set of labels has been padded to the length of the first one using `-100`s. MetricsTo have the `Trainer` compute a metric every epoch, we will need to define a `compute_metrics()` function that takes the arrays of predictions and labels, and returns a dictionary with the metric names and values.The traditional framework used to evaluate token classification prediction is [*seqeval*](https://github.com/chakki-works/seqeval). To use this metric, we first need to install the *seqeval* library:```pip install seqeval```We can then load it via the `load_metric()` function like we did in [Chapter 3](https://huggingface.co/course/chapter3):
###Code
from datasets import load_metric
metric = load_metric("seqeval")
###Output
_____no_output_____
###Markdown
This metric does not behave like the standard accuracy: it will actually take the lists of labels as strings, not integers, so we will need to fully decode the predictions and labels before passing them to the metric. Let's see how it works. First, we'll get the labels for our first training example:
###Code
labels = raw_datasets["train"][0]["ner_tags"]
labels = [label_names[i] for i in labels]
labels
###Output
_____no_output_____
###Markdown
We can then create fake predictions for those by just changing the value at index 2. Note that the metric takes a list of predictions (not just one) and a list of labels.
###Code
predictions = labels.copy()
predictions[2] = "O"
metric.compute(predictions=[predictions], references=[labels])
###Output
_____no_output_____
###Markdown
This is sending back a lot of information! We get the precision, recall, and $F_1$ score for each separate entity, as well as overall. For our metric computation we will only keep the overall score, but feel free to tweak the `compute_metrics()` function to return all the metrics you would like reported.This `compute_metrics()` function first takes the argmax of the logits to convert them to predictions (as usual, the logits and the probabilities are in the same order, so we don't need to apply the softmax). Then we have to convert both labels and predictions from integers to strings. We remove all the values where the label is `-100`, then pass the results to the `metric.compute()` method:
###Code
import numpy as np
def compute_metrics(eval_preds):
logits, labels = eval_preds
predictions = np.argmax(logits, axis=-1)
# Remove ignored index (special tokens) and convert to labels
true_labels = [[label_names[l] for l in label if l != -100] for label in labels]
true_predictions = [
[label_names[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
all_metrics = metric.compute(predictions=true_predictions, references=true_labels)
return {
"precision": all_metrics["overall_precision"],
"recall": all_metrics["overall_recall"],
"f1": all_metrics["overall_f1"],
"accuracy": all_metrics["overall_accuracy"],
}
###Output
_____no_output_____
###Markdown
Defining the modelSince we are working on a token classification problem, we will use the `AutoModelForTokenClassification` class. The main thing to remember when defining this model is to pass along some information on the number of labels we have. The easiest way to do this is to pass that number with the `num_labels` argument, but if we want a nice inference widget working like the one we saw at the beginning of this section, it's better to set the correct label correspondences instead.They should be set by two dictionaries, `id2label` and `label2id`, which contain the mappings from ID to label and vice versa:
###Code
id2label = {str(i): label for i, label in enumerate(label_names)}
label2id = {v: k for k, v in id2label.items()}
###Output
_____no_output_____
###Markdown
Now we can just pass them to the `AutoModelForTokenClassification.from_pretrained()` method, and they will be set in the model's configuration and then properly saved and uploaded to the Hub:
###Code
from transformers import AutoModelForTokenClassification
model = AutoModelForTokenClassification.from_pretrained(
model_checkpoint,
id2label=id2label,
label2id=label2id
)
###Output
Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForTokenClassification: ['cls.seq_relationship.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias']
- This IS expected if you are initializing BertForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of BertForTokenClassification were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['classifier.weight', 'classifier.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
###Markdown
Like when we defined our `AutoModelForSequenceClassification` in [Chapter 3](https://huggingface.co/course/chapter3), creating the model issues a warning that some weights were not used (the ones from the pretraining head) and some other weights are randomly initialized (the ones from the new token classification head), and that this model should be trained. We will do that in a minute, but first let's double-check that our model has the right number of labels:
###Code
model.config.num_labels
###Output
_____no_output_____
###Markdown
> ⚠️ If you have a model with the wrong number of labels, you will get an obscure error when calling the `Trainer.train()` method later on (something like "CUDA error: device-side assert triggered"). This is the number one cause of bugs reported by users for such errors, so make sure you do this check to confirm that you have the expected number of labels. Fine-tuning the modelWe are now ready to train our model! We just need to do two last things before we define our `Trainer`: log in to Hugging Face and define our training arguments. If you're working in a notebook, there's a convenience function to help you with this:```pythonfrom huggingface_hub import notebook_loginnotebook_login()```This will display a widget where you can enter your Hugging Face login credentials.If you aren't working in a notebook, just type the following line in your terminal:```terminalhuggingface-cli login```Once this is done, we can define our `TrainingArguments`:
###Code
from huggingface_hub import notebook_login
notebook_login()
# training arguments
from transformers import TrainingArguments
args = TrainingArguments(
"bert-finetuned-ner", # make sure this folder
# (i) exists parallel to this notebook and is a local clone of your huggingface repo or
# (ii) exists neither parallel to this notebook nor on your huggingface account
# => for cloning, see "The Repository class" in "4-Sharing_models_and_tokenizers.ipynb":
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=2e-5,
num_train_epochs=3,
weight_decay=0.01,
push_to_hub=True
)
###Output
_____no_output_____
###Markdown
You've seen most of those before: we set some hyperparameters (like the learning rate, the number of epochs to train for, and the weight decay), and we specify `push_to_hub=True` to indicate that we want to save the model and evaluate it at the end of every epoch, and that we want to upload our results to the Model Hub. Note that you can specify the name of the repository you want to push to with the `hub_model_id` argument (in particular, you will have to use this argument to push to an organization). For instance, when we pushed the model to the [`huggingface-course` organization](https://huggingface.co/huggingface-course), we added `hub_model_id="huggingface-course/bert-finetuned-ner"` to `TrainingArguments`. By default, the repository used will be in your namespace and named after the output directory you set, so in our case it will be `"sgugger/bert-finetuned-ner"`.> 💡 If the output directory you are using already exists, it needs to be a local clone of the repository you want to push to. If it isn't, you'll get an error when defining your Trainer and will need to set a new name.Finally, we just pass everything to the `Trainer` and launch the training:
###Code
from transformers import Trainer
trainer = Trainer(
model=model,
args=args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
compute_metrics=compute_metrics,
tokenizer=tokenizer
)
trainer.train()
###Output
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
###Markdown
Note that while the training happens, each time the model is saved (here, every epoch) it is uploaded to the Hub in the background. This way, you will be able to resume your training on another machine if necessary.Once the training is complete, we use the `push_to_hub()` method to make sure we upload the most recent version of the model:
###Code
trainer.push_to_hub(commit_message="Commit 5", blocking=False)
###Output
Saving model checkpoint to bert-finetuned-ner
Configuration saved in bert-finetuned-ner/config.json
Model weights saved in bert-finetuned-ner/pytorch_model.bin
tokenizer config file saved in bert-finetuned-ner/tokenizer_config.json
Special tokens file saved in bert-finetuned-ner/special_tokens_map.json
###Markdown
The above command returns the URL of the commit it just did, if you want to inspect it.The `Trainer` also drafts a model card with all the evaluation results and uploads it. At this stage, you can use the inference widget on the Model Hub to test your model and share it with your friends. You have successfully fine-tuned a model on a token classification task — congratulations!If you want to dive a bit more deeply into the training loop, we will now show you how to do the same thing using 🤗 Accelerate. A custom training loopLet's now take a look at the full training loop, so you can easily customize the parts you need. It will look a lot like what we did in [Chapter 3](https://huggingface.co/course/chapter3/4), with a few changes for the evaluation. Preparing everything for trainingFirst we need to build the DataLoaders from our datasets. We'll reuse our `data_collator` as a `collate_fn` and shuffle the training set, but not the validation set:
###Code
from torch.utils.data import DataLoader
train_dataloader = DataLoader(
tokenized_datasets["train"],
shuffle=True,
collate_fn=data_collator,
batch_size=8
)
eval_dataloader = DataLoader(tokenized_datasets["validation"], collate_fn=data_collator, batch_size=8)
###Output
_____no_output_____
###Markdown
Next we reinstantiate our model, to make sure we're not continuing the fine-tuning from before but starting from the BERT pretrained model again:
###Code
model = AutoModelForTokenClassification.from_pretrained(
model_checkpoint,
id2label=id2label,
label2id=label2id
)
###Output
loading configuration file https://huggingface.co/bert-base-cased/resolve/main/config.json from cache at /home/matthias/.cache/huggingface/transformers/a803e0468a8fe090683bdc453f4fac622804f49de86d7cecaee92365d4a0f829.a64a22196690e0e82ead56f388a3ef3a50de93335926ccfa20610217db589307
Model config BertConfig {
"architectures": [
"BertForMaskedLM"
],
"attention_probs_dropout_prob": 0.1,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "O",
"1": "B-PER",
"2": "I-PER",
"3": "B-ORG",
"4": "I-ORG",
"5": "B-LOC",
"6": "I-LOC",
"7": "B-MISC",
"8": "I-MISC"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"B-LOC": "5",
"B-MISC": "7",
"B-ORG": "3",
"B-PER": "1",
"I-LOC": "6",
"I-MISC": "8",
"I-ORG": "4",
"I-PER": "2",
"O": "0"
},
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"transformers_version": "4.12.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 28996
}
loading weights file https://huggingface.co/bert-base-cased/resolve/main/pytorch_model.bin from cache at /home/matthias/.cache/huggingface/transformers/092cc582560fc3833e556b3f833695c26343cb54b7e88cd02d40821462a74999.1f48cab6c959fc6c360d22bea39d06959e90f5b002e77e836d2da45464875cda
Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForTokenClassification: ['cls.seq_relationship.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias']
- This IS expected if you are initializing BertForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of BertForTokenClassification were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['classifier.weight', 'classifier.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
###Markdown
Then we will need an optimizer. We'll use the classic `AdamW`, which is like `Adam`, but with a fix in the way weight decay is applied:
###Code
from torch.optim import AdamW
optimizer = AdamW(model.parameters(), lr=2e-5)
###Output
_____no_output_____
###Markdown
Once we have all those objects, we can send them to the `accelerator.prepare()` method:
###Code
from accelerate import Accelerator
accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
###Output
_____no_output_____
###Markdown
> 🚨 If you're training on a TPU, you'll need to move all the code starting from the cell above into a dedicated training function. See [Chapter 3](https://huggingface.co/course/chapter3) for more details.Now that we have sent our `train_dataloader` to `accelerator.prepare()`, we can use its length to compute the number of training steps. Remember that we should always do this after preparing the dataloader, as that method will change its length. We use a classic linear schedule from the learning rate to 0:
###Code
from transformers import get_scheduler
num_train_epochs = 3
num_update_steps_per_epoch = len(train_dataloader)
num_training_steps = num_train_epochs * num_update_steps_per_epoch
lr_scheduler = get_scheduler(
"linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=num_training_steps
)
###Output
_____no_output_____
###Markdown
Lastly, to push our model to the Hub, we will need to create a `Repository` object in a working folder. First log in to Hugging Face, if you're not logged in already. We'll determine the repository name from the model ID we want to give our model (feel free to replace the `repo_name` with your own choice; it just needs to contain your username, which is what the function `get_full_repo_name()` does):
###Code
from huggingface_hub import Repository, get_full_repo_name
model_name = "bert-finetuned-ner-accelerate"
repo_name = get_full_repo_name(model_name)
repo_name
###Output
_____no_output_____
###Markdown
Then we can clone that repository in a local folder. If it already exists, this local folder should be an existing clone of the repository we are working with:
###Code
output_dir = "bert-finetuned-ner-accelerate"
repo = Repository(output_dir, clone_from=repo_name)
###Output
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
###Markdown
We can now upload anything we save in `output_dir` by calling the `repo.push_to_hub()` method. This will help us upload the intermediate models at the end of each epoch. Training loopWe are now ready to write the full training loop. To simplify its evaluation part, we define this `postprocess()` function that takes predictions and labels and converts them to lists of strings, like our `metric` object expects:
###Code
def postprocess(predictions, labels):
predictions = predictions.detach().cpu().clone().numpy()
labels = labels.detach().cpu().clone().numpy()
# Remove ignored index (special tokens) and convert to labels
true_labels = [[label_names[l] for l in label if l != -100] for label in labels]
true_predictions = [
[label_names[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
return true_labels, true_predictions
###Output
_____no_output_____
###Markdown
Then we can write the training loop. After defining a progress bar to follow how training goes, the loop has three parts:- The training in itself, which is the classic iteration over the `train_dataloader`, forward pass through the model, then backward pass and optimizer step.- The evaluation, in which there is a novelty after getting the outputs of our model on a batch: since two processes may have padded the inputs and labels to different shapes, we need to use `accelerator.pad_across_processes()` to make the predictions and labels the same shape before calling the `gather()` method. If we don't do this, the evaluation will either error out or hang forever. Then we send the results to `metric.add_batch()` and call `metric.compute()` once the evaluation loop is over.- Saving and uploading, where we first save the model and the tokenizer, then call `repo.push_to_hub()`. Notice that we use the argument `blocking=False` to tell the 🤗 Hub library to push in an asynchronous process. This way, training continues normally and this (long) instruction is executed in the background.Here's the complete code for the training loop:
###Code
from tqdm.auto import tqdm
import torch
progress_bar = tqdm(range(num_training_steps))
for epoch in range(num_train_epochs):
# Training
model.train()
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
# Evaluation
model.eval()
for batch in eval_dataloader:
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
labels = batch["labels"]
# Necessary to pad predictions and labels for being gathered
predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=-100)
labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100)
predictions_gathered = accelerator.gather(predictions)
labels_gathered = accelerator.gather(labels)
true_predictions, true_labels = postprocess(predictions_gathered, labels_gathered)
metric.add_batch(predictions=true_predictions, references=true_labels)
results = metric.compute()
print(f"epoch {epoch}:", {key: results[f"overall_{key}"] for key in ["precision", "recall", "f1", "accuracy"]})
# Save and upload
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(output_dir)
repo.push_to_hub(commit_message=f"Training in progress epoch {epoch}", blocking=False)
###Output
_____no_output_____
###Markdown
In case this is the first time you're seeing a model saved with 🤗 Accelerate, let's take a moment to inspect the three lines of code that go with it:
###Code
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
###Output
Configuration saved in bert-finetuned-ner-accelerate/config.json
Model weights saved in bert-finetuned-ner-accelerate/pytorch_model.bin
###Markdown
The first line is self-explanatory: it tells all the processes to wait until everyone is at that stage before continuing. This is to make sure we have the same model in every process before saving. Then we grab the `unwrapped_model`, which is the base model we defined. The `accelerator.prepare()` method changes the model to work in distributed training, so it won't have the `save_pretrained()` method anymore; the `accelerator.unwrap_model()` method undoes that step. Lastly, we call `save_pretrained()` but tell that method to use `accelerator.save()` instead of `torch.save()`.Once this is done, you should have a model that produces results pretty similar to the one trained with the `Trainer`. You can check the model we trained using this code at [*huggingface-course/bert-finetuned-ner-accelerate*](https://huggingface.co/huggingface-course/bert-finetuned-ner-accelerate). And if you want to test out any tweaks to the training loop, you can directly implement them by editing the code shown above! Using the fine-tuned modelWe've already shown you how you can use the model we fine-tuned on the Model Hub with the inference widget. To use it locally in a `pipeline`, you just have to specify the proper model identifier:
###Code
from transformers import pipeline
# Replace this with your own checkpoint
model_checkpoint = "bert-finetuned-ner-accelerate" # local folder parallel to this notebook
token_classifier = pipeline("token-classification", model=model_checkpoint, aggregation_strategy="simple")
token_classifier("My name is Sylvain and I work at Hugging Face in Brooklyn.")
###Output
loading configuration file bert-finetuned-ner-accelerate/config.json
Model config BertConfig {
"_name_or_path": "bert-base-cased",
"architectures": [
"BertForTokenClassification"
],
"attention_probs_dropout_prob": 0.1,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "O",
"1": "B-PER",
"2": "I-PER",
"3": "B-ORG",
"4": "I-ORG",
"5": "B-LOC",
"6": "I-LOC",
"7": "B-MISC",
"8": "I-MISC"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"B-LOC": "5",
"B-MISC": "7",
"B-ORG": "3",
"B-PER": "1",
"I-LOC": "6",
"I-MISC": "8",
"I-ORG": "4",
"I-PER": "2",
"O": "0"
},
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"torch_dtype": "float32",
"transformers_version": "4.12.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 28996
}
loading configuration file bert-finetuned-ner-accelerate/config.json
Model config BertConfig {
"_name_or_path": "bert-base-cased",
"architectures": [
"BertForTokenClassification"
],
"attention_probs_dropout_prob": 0.1,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "O",
"1": "B-PER",
"2": "I-PER",
"3": "B-ORG",
"4": "I-ORG",
"5": "B-LOC",
"6": "I-LOC",
"7": "B-MISC",
"8": "I-MISC"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"B-LOC": "5",
"B-MISC": "7",
"B-ORG": "3",
"B-PER": "1",
"I-LOC": "6",
"I-MISC": "8",
"I-ORG": "4",
"I-PER": "2",
"O": "0"
},
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"torch_dtype": "float32",
"transformers_version": "4.12.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 28996
}
loading weights file bert-finetuned-ner-accelerate/pytorch_model.bin
All model checkpoint weights were used when initializing BertForTokenClassification.
All the weights of BertForTokenClassification were initialized from the model checkpoint at bert-finetuned-ner-accelerate.
If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForTokenClassification for predictions without further training.
Didn't find file bert-finetuned-ner-accelerate/added_tokens.json. We won't load it.
loading file bert-finetuned-ner-accelerate/vocab.txt
loading file bert-finetuned-ner-accelerate/tokenizer.json
loading file None
loading file bert-finetuned-ner-accelerate/special_tokens_map.json
loading file bert-finetuned-ner-accelerate/tokenizer_config.json
|
Coursera/IBM Data Analyst Professional Certificate/Data Analysis with Python/week 2/data-wrangling.ipynb | ###Markdown
Data WranglingEstimated time needed: **30** minutes ObjectivesAfter completing this lab you will be able to:- Handle missing values- Correct data format- Standardize and Normalize Data Table of content Identify and handle missing values Identify missing values Deal with missing values Correct data format Data standardization Data Normalization (centering/scaling) Binning Indicator variable What is the purpose of Data Wrangling? Data Wrangling is the process of converting data from the initial format to a format that may be better for analysis. What is the fuel consumption (L/100k) rate for the diesel car? Import dataYou can find the "Automobile Data Set" from the following link: https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data. We will be using this data set throughout this course. Import pandas
###Code
import pandas as pd
import matplotlib.pylab as plt
###Output
_____no_output_____
###Markdown
Reading the data set from the URL and adding the related headers. The dataset is hosted on IBM Cloud Object Storage; the URL is assigned below.
###Code
filename = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Data%20files/auto.csv"
###Output
_____no_output_____
###Markdown
Python list headers containing name of headers
###Code
headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style",
"drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type",
"num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower",
"peak-rpm","city-mpg","highway-mpg","price"]
###Output
_____no_output_____
###Markdown
Use the Pandas method read_csv() to load the data from the web address. Set the parameter "names" equal to the Python list "headers".
###Code
df = pd.read_csv(filename, names = headers)
###Output
_____no_output_____
###Markdown
Use the method head() to display the first five rows of the dataframe.
###Code
# To see what the data set looks like, we'll use the head() method.
df.head()
###Output
_____no_output_____
###Markdown
As we can see, several question marks appeared in the dataframe; those are missing values which may hinder our further analysis. So, how do we identify all those missing values and deal with them? How to work with missing data?Steps for working with missing data: identify missing data deal with missing data correct data format Identify and handle missing valuesIdentify missing valuesConvert "?" to NaNIn the car dataset, missing data comes with the question mark "?".We replace "?" with NaN (Not a Number), which is Python's default missing value marker, for reasons of computational speed and convenience. Here we use the function: .replace(A, B, inplace = True) to replace A with B
###Code
import numpy as np
# replace "?" to NaN
df.replace("?", np.nan, inplace = True)
df.head(5)
###Output
_____no_output_____
###Markdown
Evaluating for Missing DataThe missing values have now been converted to Python's default marker, NaN. We use the following functions to identify these missing values. There are two methods to detect missing data: .isnull() .notnull()The output is a boolean value indicating whether the value that is passed into the argument is in fact missing data.
###Code
missing_data = df.isnull()
missing_data.head(5)
###Output
_____no_output_____
###Markdown
"True" stands for missing value, while "False" stands for not missing value. Count missing values in each columnUsing a for loop in Python, we can quickly figure out the number of missing values in each column. As mentioned above, "True" represents a missing value, "False" means the value is present in the dataset. In the body of the for loop the method ".value_counts()" counts the number of "True" values.
###Code
for column in missing_data.columns.values.tolist():
print(column)
print (missing_data[column].value_counts())
print("")
###Output
symboling
False 205
Name: symboling, dtype: int64
normalized-losses
False 164
True 41
Name: normalized-losses, dtype: int64
make
False 205
Name: make, dtype: int64
fuel-type
False 205
Name: fuel-type, dtype: int64
aspiration
False 205
Name: aspiration, dtype: int64
num-of-doors
False 203
True 2
Name: num-of-doors, dtype: int64
body-style
False 205
Name: body-style, dtype: int64
drive-wheels
False 205
Name: drive-wheels, dtype: int64
engine-location
False 205
Name: engine-location, dtype: int64
wheel-base
False 205
Name: wheel-base, dtype: int64
length
False 205
Name: length, dtype: int64
width
False 205
Name: width, dtype: int64
height
False 205
Name: height, dtype: int64
curb-weight
False 205
Name: curb-weight, dtype: int64
engine-type
False 205
Name: engine-type, dtype: int64
num-of-cylinders
False 205
Name: num-of-cylinders, dtype: int64
engine-size
False 205
Name: engine-size, dtype: int64
fuel-system
False 205
Name: fuel-system, dtype: int64
bore
False 201
True 4
Name: bore, dtype: int64
stroke
False 201
True 4
Name: stroke, dtype: int64
compression-ratio
False 205
Name: compression-ratio, dtype: int64
horsepower
False 203
True 2
Name: horsepower, dtype: int64
peak-rpm
False 203
True 2
Name: peak-rpm, dtype: int64
city-mpg
False 205
Name: city-mpg, dtype: int64
highway-mpg
False 205
Name: highway-mpg, dtype: int64
price
False 201
True 4
Name: price, dtype: int64
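###Markdown
As a quick aside (not part of the original lab), the same per-column counts can be obtained in one line by chaining .isnull() with .sum(), since True is treated as 1 when summing:
```python
# count missing values per column in a single expression
missing_counts = df.isnull().sum()
print(missing_counts)
```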
###Markdown
Based on the summary above, each column has 205 rows of data, and seven columns contain missing data: "normalized-losses": 41 missing values "num-of-doors": 2 missing values "bore": 4 missing values "stroke": 4 missing values "horsepower": 2 missing values "peak-rpm": 2 missing values "price": 4 missing values Deal with missing dataHow to deal with missing data? drop data a. drop the whole row b. drop the whole column replace data a. replace it by the mean b. replace it by frequency c. replace it based on other functions Whole columns should be dropped only if most entries in the column are empty. In our dataset, none of the columns are empty enough to drop entirely.We have some freedom in choosing which method to use to replace data; however, some methods may seem more reasonable than others. We will apply each method to many different columns:Replace by mean: "normalized-losses": 41 missing values, replace them with the mean "stroke": 4 missing values, replace them with the mean "bore": 4 missing values, replace them with the mean "horsepower": 2 missing values, replace them with the mean "peak-rpm": 2 missing values, replace them with the meanReplace by frequency: "num-of-doors": 2 missing values, replace them with "four". Reason: 84% of sedans have four doors. Since four doors is the most frequent value, it is the most likely to occur Drop the whole row: "price": 4 missing values, simply delete the whole row Reason: price is what we want to predict. Any data entry without price data cannot be used for prediction; therefore any row without price data is not useful to us Calculate the average of the column
###Code
avg_norm_loss = df["normalized-losses"].astype("float").mean(axis=0)
print("Average of normalized-losses:", avg_norm_loss)
###Output
Average of normalized-losses: 122.0
###Markdown
Replace "NaN" by mean value in "normalized-losses" column
###Code
df["normalized-losses"].replace(np.nan, avg_norm_loss, inplace=True)
###Output
_____no_output_____
###Markdown
Calculate the mean value for 'bore' column
###Code
avg_bore=df['bore'].astype('float').mean(axis=0)
print("Average of bore:", avg_bore)
###Output
Average of bore: 3.3297512437810943
###Markdown
Replace NaN by mean value
###Code
df["bore"].replace(np.nan, avg_bore, inplace=True)
###Output
_____no_output_____
###Markdown
Question 1: According to the example above, replace NaN in "stroke" column by mean.
###Code
# Write your code below and press Shift+Enter to execute
avg_stroke = df['stroke'].astype('float').mean(axis=0)
print("Average of stroke: ", avg_stroke)
df['stroke'].replace(np.nan,avg_stroke, inplace=True)
###Output
Average of stroke: 3.255422885572139
###Markdown
Click here for the solution```pythonCalculate the mean vaule for "stroke" columnavg_stroke = df["stroke"].astype("float").mean(axis = 0)print("Average of stroke:", avg_stroke) replace NaN by mean value in "stroke" columndf["stroke"].replace(np.nan, avg_stroke, inplace = True)``` Calculate the mean value for the 'horsepower' column:
###Code
avg_horsepower = df['horsepower'].astype('float').mean(axis=0)
print("Average horsepower:", avg_horsepower)
###Output
Average horsepower: 104.25615763546799
###Markdown
Replace "NaN" by mean value:
###Code
df['horsepower'].replace(np.nan, avg_horsepower, inplace=True)
###Output
_____no_output_____
###Markdown
Calculate the mean value for 'peak-rpm' column:
###Code
avg_peakrpm=df['peak-rpm'].astype('float').mean(axis=0)
print("Average peak rpm:", avg_peakrpm)
###Output
Average peak rpm: 5125.369458128079
###Markdown
Replace NaN by mean value:
###Code
df['peak-rpm'].replace(np.nan, avg_peakrpm, inplace=True)
###Output
_____no_output_____
###Markdown
To see which values are present in a particular column, we can use the ".value_counts()" method:
###Code
df['num-of-doors'].value_counts()
###Output
_____no_output_____
###Markdown
We can see that four doors is the most common type. We can also use the ".idxmax()" method to calculate the most common type for us automatically:
###Code
df['num-of-doors'].value_counts().idxmax()
###Output
_____no_output_____
###Markdown
The replacement procedure is very similar to what we have seen previously
###Code
#replace the missing 'num-of-doors' values by the most frequent
df["num-of-doors"].replace(np.nan, "four", inplace=True)
###Output
_____no_output_____
###Markdown
Finally, let's drop all rows that do not have price data:
###Code
# simply drop whole row with NaN in "price" column
df.dropna(subset=["price"], axis=0, inplace=True)
# reset index, because we dropped the rows with missing price values
df.reset_index(drop=True, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Good! Now, we obtain the dataset with no missing values. Correct data formatWe are almost there!The last step in data cleaning is checking and making sure that all data is in the correct format (int, float, text or other).In Pandas, we use .dtypes to check the data type and .astype() to change the data type. Let's list the data types for each column.
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
As we can see above, some columns are not of the correct data type. Numerical variables should have type 'float' or 'int', and variables with strings such as categories should have type 'object'. For example, 'bore' and 'stroke' variables are numerical values that describe the engines, so we should expect them to be of the type 'float' or 'int'; however, they are shown as type 'object'. We have to convert data types into a proper format for each column using the "astype()" method. Convert data types to proper format
###Code
df[["bore", "stroke"]] = df[["bore", "stroke"]].astype("float")
df[["normalized-losses"]] = df[["normalized-losses"]].astype("int")
df[["price"]] = df[["price"]].astype("float")
df[["peak-rpm"]] = df[["peak-rpm"]].astype("float")
###Output
_____no_output_____
###Markdown
Let us list the columns after the conversion
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
Wonderful!Now, we finally obtain the cleaned dataset with no missing values and all data in its proper format. Data StandardizationData is usually collected from different agencies in different formats.(Data Standardization is also a term for a particular type of data normalization, where we subtract the mean and divide by the standard deviation.) What is Standardization?Standardization is the process of transforming data into a common format which allows the researcher to make meaningful comparisons.ExampleTransform mpg to L/100km:In our dataset, the fuel consumption columns "city-mpg" and "highway-mpg" are represented in mpg (miles per gallon). Assume we are developing an application in a country that uses the L/100km standard for fuel consumption, so we will need to apply a data transformation to convert mpg into L/100km. The formula for the unit conversion isL/100km = 235 / mpgWe can do many mathematical operations directly in Pandas.
###Code
df.head()
# Convert mpg to L/100km by mathematical operation (235 divided by mpg)
df['city-L/100km'] = 235/df["city-mpg"]
# check your transformed data
df.head()
###Output
_____no_output_____
###Markdown
Question 2: According to the example above, transform mpg to L/100km in the column of "highway-mpg", and change the name of column to "highway-L/100km".
###Code
# Write your code below and press Shift+Enter to execute
df['highway-mpg'] = 235/df['highway-mpg']
df.rename(columns={'highway-mpg':'highway-L/100km'}, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Click here for the solution```python transform mpg to L/100km by mathematical operation (235 divided by mpg)df["highway-mpg"] = 235/df["highway-mpg"] rename column name from "highway-mpg" to "highway-L/100km"df.rename(columns={'"highway-mpg"':'highway-L/100km'}, inplace=True) check your transformed data df.head()``` Data NormalizationWhy normalization?Normalization is the process of transforming values of several variables into a similar range. Typical normalizations include scaling the variable so the variable average is 0, scaling the variable so the variance is 1, or scaling variable so the variable values range from 0 to 1ExampleTo demonstrate normalization, let's say we want to scale the columns "length", "width" and "height" Target:would like to Normalize those variables so their value ranges from 0 to 1.Approach: replace original value by (original value)/(maximum value)
###Code
# replace (original value) by (original value)/(maximum value)
df['length'] = df['length']/df['length'].max()
df['width'] = df['width']/df['width'].max()
###Output
_____no_output_____
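###Markdown
The other two normalizations mentioned above (zero mean / unit variance, and scaling to the [0, 1] range) can be written in the same style. The sketch below only uses local variables, so it does not change the df used in the rest of the lab:
```python
# illustrative only: z-score standardization and min-max scaling of "length"
length = df['length'].astype('float')
length_zscore = (length - length.mean()) / length.std()                  # mean 0, standard deviation 1
length_minmax = (length - length.min()) / (length.max() - length.min())  # values in [0, 1]
print(length_zscore.head())
print(length_minmax.head())
```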
###Markdown
Question 3: According to the example above, normalize the column "height".
###Code
# Write your code below and press Shift+Enter to execute
df['height'] = df['height']/df['height'].max()
df[["length","width","height"]].head()
###Output
_____no_output_____
###Markdown
Click here for the solution```pythondf['height'] = df['height']/df['height'].max() show the scaled columnsdf[["length","width","height"]].head()``` Here we can see, we've normalized "length", "width" and "height" in the range of [0,1]. BinningWhy binning? Binning is a process of transforming continuous numerical variables into discrete categorical 'bins', for grouped analysis.Example: In our dataset, "horsepower" is a real valued variable ranging from 48 to 288, it has 57 unique values. What if we only care about the price difference between cars with high horsepower, medium horsepower, and little horsepower (3 types)? Can we rearrange them into three ‘bins' to simplify analysis? We will use the Pandas method 'cut' to segment the 'horsepower' column into 3 bins Example of Binning Data In Pandas Convert data to correct format
###Code
df["horsepower"]=df["horsepower"].astype(int, copy=True)
###Output
_____no_output_____
###Markdown
Let's plot the histogram of horsepower to see what its distribution looks like.
###Code
%matplotlib inline
import matplotlib.pyplot as plt

plt.hist(df["horsepower"])

# set x/y labels and plot title
plt.xlabel("horsepower")
plt.ylabel("count")
plt.title("horsepower bins")
###Output
_____no_output_____
###Markdown
We would like 3 bins of equal width, so we use numpy's linspace(start_value, end_value, numbers_generated) function.Since we want to include the minimum value of horsepower, we set start_value = min(df["horsepower"]).Since we want to include the maximum value of horsepower, we set end_value = max(df["horsepower"]).Since we are building 3 bins of equal length, there should be 4 dividers, so numbers_generated = 4. We build a bin array from the minimum value to the maximum value, with the bandwidth calculated above. The bins will be the values used to determine when one bin ends and another begins.
###Code
bins = np.linspace(min(df["horsepower"]), max(df["horsepower"]), 4)
bins
###Output
_____no_output_____
###Markdown
We set group names:
###Code
group_names = ['Low', 'Medium', 'High']
###Output
_____no_output_____
###Markdown
We apply the function "cut" to determine which bin each value of df['horsepower'] belongs to.
###Code
df['horsepower-binned'] = pd.cut(df['horsepower'], bins, labels=group_names, include_lowest=True )
df[['horsepower','horsepower-binned']].head(20)
###Output
_____no_output_____
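###Markdown
As a side note (not part of the original lab), passing an integer number of bins to pd.cut produces essentially the same equal-width bins without building the bins array by hand; pandas extends the bin edges slightly so the minimum value is included. The result is kept in a separate variable so the "horsepower-binned" column above is left untouched:
```python
# equal-width binning in one step, roughly equivalent to the linspace approach above
hp_binned_alt = pd.cut(df['horsepower'], bins=3, labels=group_names)
hp_binned_alt.value_counts()
```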
###Markdown
Let's see the number of vehicles in each bin.
###Code
df["horsepower-binned"].value_counts()
###Output
_____no_output_____
###Markdown
Let's plot the distribution of each bin.
###Code
%matplotlib inline
import matplotlib.pyplot as plt

plt.bar(group_names, df["horsepower-binned"].value_counts())

# set x/y labels and plot title
plt.xlabel("horsepower")
plt.ylabel("count")
plt.title("horsepower bins")
###Output
_____no_output_____
###Markdown
Check the dataframe above carefully; you will find that the last column provides the bins for "horsepower" with 3 categories ("Low", "Medium" and "High"). We successfully narrowed the intervals from 57 to 3! Bins visualizationNormally, a histogram is used to visualize the distribution of the bins we created above.
###Code
%matplotlib inline
import matplotlib.pyplot as plt

# draw histogram of attribute "horsepower" with bins = 3
plt.hist(df["horsepower"], bins=3)

# set x/y labels and plot title
plt.xlabel("horsepower")
plt.ylabel("count")
plt.title("horsepower bins")
###Output
_____no_output_____
###Markdown
The plot above shows the binning result for the attribute "horsepower". Indicator variable (or dummy variable)What is an indicator variable? An indicator variable (or dummy variable) is a numerical variable used to label categories. They are called 'dummies' because the numbers themselves don't have inherent meaning. Why do we use indicator variables? So we can use categorical variables for regression analysis in the later modules.Example We see the column "fuel-type" has two unique values, "gas" or "diesel". Regression doesn't understand words, only numbers. To use this attribute in regression analysis, we convert "fuel-type" into indicator variables. We will use the pandas method 'get_dummies' to assign numerical values to the different categories of fuel type.
###Code
df.columns
###Output
_____no_output_____
###Markdown
get indicator variables and assign it to data frame "dummy_variable_1"
###Code
dummy_variable_1 = pd.get_dummies(df["fuel-type"])
dummy_variable_1.head()
###Output
_____no_output_____
###Markdown
change column names for clarity
###Code
dummy_variable_1.rename(columns={'gas':'fuel-type-gas', 'diesel':'fuel-type-diesel'}, inplace=True)
dummy_variable_1.head()
###Output
_____no_output_____
###Markdown
In the dataframe, the fuel-type categories 'gas' and 'diesel' are now represented as 0s and 1s.
###Code
# merge data frame "df" and "dummy_variable_1"
df = pd.concat([df, dummy_variable_1], axis=1)
# drop original column "fuel-type" from "df"
df.drop("fuel-type", axis = 1, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
The last two columns are now the indicator variable representation of the fuel-type variable. They're all 0s and 1s now. Question 4: As above, create indicator variables for the column "aspiration"
###Code
# Write your code below and press Shift+Enter to execute
dummy_variable_aspiration = pd.get_dummies(df['aspiration'])
dummy_variable_aspiration
###Output
_____no_output_____
###Markdown
Click here for the solution```python get indicator variables of aspiration and assign it to data frame "dummy_variable_2"dummy_variable_2 = pd.get_dummies(df['aspiration']) change column names for claritydummy_variable_2.rename(columns={'std':'aspiration-std', 'turbo': 'aspiration-turbo'}, inplace=True) show first 5 instances of data frame "dummy_variable_1"dummy_variable_2.head()``` Question 5: Merge the new dataframe to the original dataframe then drop the column 'aspiration'
###Code
# Write your code below and press Shift+Enter to execute
df = pd.concat([df, dummy_variable_aspiration], axis = 1)
df.drop('aspiration', axis = 1, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Click here for the solution```python merge the new dataframe to the original dataframdf = pd.concat([df, dummy_variable_2], axis=1) drop original column "aspiration" from "df"df.drop('aspiration', axis = 1, inplace=True)``` Save the new csv
###Code
df.to_csv('clean_df.csv')
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/Results-checkpoint.ipynb | ###Markdown
Results 1) Actor coefficients with and without confounders. Load the model and coefficients from memory and compare them for a set of actors. How does this hold up against Blei's claims? Which actors are corrected the most, and which are most overvalued or most undervalued?
###Code
# Load params
import pickle
with open('params.pickle', 'rb') as f:
params = pickle.load(f)
params
###Output
_____no_output_____ |
.ipynb_checkpoints/soc.037_new_done-checkpoint.ipynb | ###Markdown
soc.037 Download link: http://www.map.ox.ac.uk/static/africa-now/data_downloads/prevalence/rasters/Prevalence_annual_means_rasters.zip Description: http://www.map.ox.ac.uk/ PfPR2-10 - Plasmodium falciparum parasite rate in 2-10 year olds. File type: tiff. Downloaded 2000-2015, uploaded 2010-2015.
###Code
# Libraries for downloading data from remote server (may be ftp)
import requests
from urllib.request import urlopen
from contextlib import closing
import shutil
# Library for uploading/downloading data to/from S3
import boto3
# Libraries for handling data
import rasterio as rio
import numpy as np
# from netCDF4 import Dataset
# import pandas as pd
# import scipy
# Libraries for various helper functions
# from datetime import datetime
import os
import threading
import sys
from glob import glob
from matplotlib import pyplot
%matplotlib inline
###Output
_____no_output_____
###Markdown
s3
###Code
s3_upload = boto3.client("s3")
s3_download = boto3.resource("s3")
s3_bucket = "wri-public-data"
s3_folder = "resourcewatch/raster/soc_037_Malaria_Extent/"
s3_file1 = "soc_037_Malaria_Extent_2015.tif"
s3_file2 = "soc_037_Malaria_Extent_2014.tif"
s3_file3 = "soc_037_Malaria_Extent_2013.tif"
s3_file4 = "soc_037_Malaria_Extent_2012.tif"
s3_file5 = "soc_037_Malaria_Extent_2011.tif"
s3_file6 = "soc_037_Malaria_Extent_2010.tif"
s3_key_orig1 = s3_folder + s3_file1
s3_key_edit1 = s3_key_orig1[0:-4] + "_edit.tif"
s3_key_orig2 = s3_folder + s3_file2
s3_key_edit2 = s3_key_orig2[0:-4] + "_edit.tif"
s3_key_orig3 = s3_folder + s3_file3
s3_key_edit3 = s3_key_orig3[0:-4] + "_edit.tif"
s3_key_orig4 = s3_folder + s3_file4
s3_key_edit4 = s3_key_orig4[0:-4] + "_edit.tif"
s3_key_orig5 = s3_folder + s3_file5
s3_key_edit5 = s3_key_orig5[0:-4] + "_edit.tif"
s3_key_orig6= s3_folder + s3_file6
s3_key_edit6 = s3_key_orig6[0:-4] + "_edit.tif"
s3_files_to_merge = [s3_key_orig1, s3_key_orig2, s3_key_orig3, s3_key_orig4, s3_key_orig5,s3_key_orig6 ]
band_ids = ["2015","2014","2013", "2012", "2011", "2010"]
merge_name = "soc_037_Malaria_Extent_2010_to_2015.tif"
s3_key_merge = s3_folder + merge_name
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write("\r%s %s / %s (%.2f%%)"%(
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
###Output
_____no_output_____
###Markdown
Define local file locations
###Code
local_folder = "/Users/Max81007/Desktop/Python/Resource_Watch/Raster/soc.037/rasters/"
file_name1 = "MODEL43.2015.PR.rmean.stable.tif"
file_name2 = "MODEL43.2014.PR.rmean.stable.tif"
file_name3 = "MODEL43.2013.PR.rmean.stable.tif"
file_name4 = "MODEL43.2012.PR.rmean.stable.tif"
file_name5 = "MODEL43.2011.PR.rmean.stable.tif"
file_name6 = "MODEL43.2010.PR.rmean.stable.tif"
local_orig1 = local_folder + file_name1
local_orig2 = local_folder + file_name2
local_orig3 = local_folder + file_name3
local_orig4 = local_folder + file_name4
local_orig5 = local_folder + file_name5
local_orig6 = local_folder + file_name6
orig_extension_length = 4 #4 for each char in .tif
local_edit1 = local_orig1[:-orig_extension_length] + "edit.tif"
local_edit2 = local_orig2[:-orig_extension_length] + "edit.tif"
local_edit3 = local_orig3[:-orig_extension_length] + "edit.tif"
local_edit4 = local_orig4[:-orig_extension_length] + "edit.tif"
local_edit5 = local_orig5[:-orig_extension_length] + "edit.tif"
local_edit6 = local_orig6[:-orig_extension_length] + "edit.tif"
merge_files = [local_orig1, local_orig2, local_orig3, local_orig4, local_orig5, local_orig6]
tmp_merge = local_folder + merge_name
###Output
_____no_output_____
###Markdown
Use rasterio to reproject and compress
###Code
files = [local_orig1, local_orig2, local_orig3, local_orig4, local_orig5, local_orig6]
for file in files:
with rio.open(file, 'r') as src:
profile = src.profile
print(profile)
# Note - this is the core of Vizz's netcdf2tif function
def convert_asc_to_tif(orig_name, edit_name):
with rio.open(orig_name, 'r') as src:
# This assumes data is readable by rasterio
# May need to open instead with netcdf4.Dataset, for example
data = src.read()[0]
rows = data.shape[0]
columns = data.shape[1]
print(rows)
print(columns)
# Latitude bounds
south_lat = -90
north_lat = 90
# Longitude bounds
west_lon = -180
east_lon = 180
transform = rio.transform.from_bounds(west_lon, south_lat, east_lon, north_lat, columns, rows)
# Profile
no_data_val = -9999.0
target_projection = 'EPSG:4326'
target_data_type = np.float64
profile = {
'driver':'GTiff',
'height':rows,
'width':columns,
'count':1,
'dtype':target_data_type,
'crs':target_projection,
'transform':transform,
'compress':'lzw',
'nodata': no_data_val
}
with rio.open(edit_name, "w", **profile) as dst:
dst.write(data.astype(profile["dtype"]), 1)
convert_asc_to_tif(local_orig1, local_edit1)
convert_asc_to_tif(local_orig2, local_edit2)
convert_asc_to_tif(local_orig3, local_edit3)
convert_asc_to_tif(local_orig4, local_edit4)
convert_asc_to_tif(local_orig5, local_edit5)
convert_asc_to_tif(local_orig6, local_edit6)
os.getcwd()
os.chdir(local_folder)
os.environ["local_orig1"] =local_orig1
os.environ["local_edit1"] =local_edit1
!gdalwarp -overwrite -t_srs epsg:4326 -srcnodata none %local_orig1% %local_edit1%
files = [local_orig1, local_edit1]
for file in files:
with rio.open(file, 'r') as src:
profile = src.profile
print(profile)
files = [local_orig1, local_edit1]
data = {}
for file in files:
with rio.open(file, 'r') as src:
data[file]=src.read(indexes=1)
pyplot.imshow(data[local_orig1])
pyplot.imshow(data[local_edit1])
# "data" is a dict keyed by file name, so inspect the edited raster explicitly
np.unique(data[local_edit1], return_counts=True)
pyplot.imshow(data[local_edit1])
with rio.open(merge_files[0]) as src:
kwargs = src.profile
kwargs.update(
count=len(merge_files)
)
with rio.open(tmp_merge, 'w', **kwargs) as dst:
for idx, file in enumerate(merge_files):
print(idx)
with rio.open(file) as src:
band = idx+1
windows = src.block_windows()
for win_id, window in windows:
src_data = src.read(1, window=window)
dst.write_band(band, src_data, window=window)
files = [tmp_merge]
for file in files:
with rio.open(file, 'r') as src:
profile = src.profile
print(profile)
###Output
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': -9999.0, 'width': 1681, 'height': 1741, 'count': 6, 'crs': CRS({'init': 'epsg:4326'}), 'transform': Affine(0.04166665, 0.0, -18.00006479999999,
0.0, -0.04166665, 37.54162765), 'blockxsize': 256, 'blockysize': 256, 'compress': 'lzw', 'interleave': 'band', 'tiled': True}
###Markdown
Upload orig and edit files to s3
###Code
# Original
s3_upload.upload_file(local_orig1, s3_bucket, s3_key_orig1,
Callback=ProgressPercentage(local_orig1))
s3_upload.upload_file(local_orig2, s3_bucket, s3_key_orig2,
Callback=ProgressPercentage(local_orig2))
s3_upload.upload_file(local_orig3, s3_bucket, s3_key_orig3,
Callback=ProgressPercentage(local_orig3))
s3_upload.upload_file(local_orig4, s3_bucket, s3_key_orig4,
Callback=ProgressPercentage(local_orig4))
s3_upload.upload_file(local_orig5, s3_bucket, s3_key_orig5,
Callback=ProgressPercentage(local_orig5))
s3_upload.upload_file(local_orig6, s3_bucket, s3_key_orig6,
Callback=ProgressPercentage(local_orig6))
# Edit
s3_upload.upload_file(local_edit1, s3_bucket, s3_key_edit1,
Callback=ProgressPercentage(local_edit1))
s3_upload.upload_file(local_edit2, s3_bucket, s3_key_edit2,
Callback=ProgressPercentage(local_edit2))
s3_upload.upload_file(local_edit3, s3_bucket, s3_key_edit3,
Callback=ProgressPercentage(local_edit3))
s3_upload.upload_file(local_edit4, s3_bucket, s3_key_edit4,
Callback=ProgressPercentage(local_edit4))
s3_upload.upload_file(local_edit5, s3_bucket, s3_key_edit5,
Callback=ProgressPercentage(local_edit5))
s3_upload.upload_file(local_edit6, s3_bucket, s3_key_edit6,
Callback=ProgressPercentage(local_edit6))
s3_upload.upload_file(tmp_merge, s3_bucket, s3_key_merge,
Callback=ProgressPercentage(tmp_merge))
os.environ["Zs3_key"] = "s3://wri-public-data/" + s3_key_merge
os.environ["Zs3_key_inspect"] = "wri-public-data/" + s3_key_merge
os.environ["Zgs_key"] = "gs://resource-watch-public/" + s3_key_merge
!echo %Zs3_key_inspect%
!aws s3 ls %Zs3_key_inspect%
!gsutil cp %Zs3_key% %Zgs_key%
os.environ["asset_id"] = "users/resourcewatch/soc_037_malaria_extent"
!earthengine upload image --asset_id=%asset_id% %Zgs_key%
os.environ["band_names"] = str(band_ids)
!earthengine asset set -p band_names="%band_names%" %asset_id%
###Output
_____no_output_____ |
analisis_datos/Analisis_de_datos.ipynb | ###Markdown
Data analysis and relationships between variables. Importing libraries and dataThrough our ESIOS_contoller.py library we import our latest dataset and parse it for use. It works both in Drive and in Jupyter.
###Code
import json, urllib, datetime, pickle, time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import *
from keras.models import *
from keras.layers import *
from sklearn.preprocessing import *
from keras.optimizers import *
from scipy.stats import *
from importlib.machinery import SourceFileLoader
try:
from google.colab import drive
drive.mount('/content/drive')
path = '/content/drive/My Drive/TFM/Utils/ESIOS_contoller.py'
in_colab = True
except:
path = '../utils/ESIOS_contoller.py'
in_colab = False
esios_assembler = SourceFileLoader('esios', path).load_module()
esios_controller = esios_assembler.ESIOS(in_colab)
data_consumo = esios_controller.get_data()
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
Mostrando los datos de data_total.csv
(30555, 29)
________________________________________________________________________________
###Markdown
Let's now see what types of variables we have: * **PVPC_DEF**: standard PVPC tariff (the one to predict)* **PVPC_2_PED_NOC**: night-time PVPC tariff* **PVPC_ELEC_NOC**: electric PVPC tariff* **Demanda**: demand* **Demanda real**: actual demand* **Prevista**: forecast demand * **Programada**: scheduled demand* **Eolica**: hourly wind demand* **Nuclear**: hourly nuclear demand* **Solar**: hourly solar demand * **Solar_Fotovoltaica**: hourly solar photovoltaic demand* **Solar_Termica** : hourly solar thermal demand* **Generación prevista Solar**: forecast solar generation for day D+1* **Termica_Renovable**: hourly renewable thermal demand* **Holiday**: % holiday (0 working day, 0.75 Saturday, 1 Sunday)* **Brent_price**: Brent crude oil price* **Precio mercado SPOT Diario**: Spanish energy spot market price* **Precio mercado SPOT Diario PT**: Portuguese energy spot market price* **Precio mercado SPOT Diario FR**: French energy spot market price* **Precio de Regulación Secundaria subir**: indicator of whether the price will go up (future)* **Precio de Regulación Secundaria bajar*** **Saldo total interconexiones programa p48**: total interconnection balance, imports minus exports* **Generación programada P48 Exportación Portugal**: export balance with Portugal* **Generación programada P48 Exportación Francia**: export balance with France* **Generación programada P48 Importación Portugal**: import balance with Portugal* **Generación programada P48 Importación Francia**: import balance with France
###Code
print(data_consumo.columns)
###Output
Index(['fecha', 'PVPC_DEF', 'PVPC_2_PED_NOC', 'PVPC_ELEC_NOC',
'date_timestamp', 'Demanda', 'Eolica', 'Nuclear', 'Solar',
'Solar_Fotovoltaica', 'Solar_Termica', 'Termica_Renovable', 'Prevista',
'Programada', 'date_day', 'Brent_price', 'Holiday',
'Precio de Regulación Secundaria subir',
'Precio de Regulación Secundaria bajar', 'Precio mercado SPOT Diario_x',
'Demanda real', 'Generación prevista Solar',
'Saldo total interconexiones programa p48',
'Generación programada P48 Exportación Portugal',
'Generación programada P48 Exportación Francia',
'Generación programada P48 Importación Portugal',
'Generación programada P48 Importación Francia', 'Precio SPOT PT',
'Precio SPOT FR'],
dtype='object')
###Markdown
Study of the correlationsLooking at the correlation table is a very good way to quickly explore the relationships in the data.
###Code
corrmat = data_consumo.corr()
f, ax = plt.subplots(figsize =(9, 8))
sns.heatmap(corrmat, ax = ax, cmap ="YlGnBu", linewidths = 0.1)
###Output
_____no_output_____
###Markdown
Let's now look at the top correlations of the other variables with the **price** variable (k = 20 in the code below).
###Code
k = 20
cols = corrmat.nlargest(k, 'PVPC_DEF')['PVPC_DEF'].index
cm = np.corrcoef(data_consumo[cols].values.T)
f, ax = plt.subplots(figsize =(12, 10))
sns.heatmap(cm, ax = ax, cmap ="YlGnBu",
linewidths = 0.1, yticklabels = cols.values,
xticklabels = cols.values)
###Output
_____no_output_____
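###Markdown
The same information can also be inspected numerically by sorting the correlation column of the target; a quick sketch using the corrmat computed above:
```python
# top correlations with the target price, as a sorted table instead of a heatmap
top_corr = corrmat['PVPC_DEF'].sort_values(ascending=False).head(20)
print(top_corr)
```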
###Markdown
Obviously, some variables such as the SPOT price are highly correlated with the price, but, just like the PVPC price itself, we do not know them until D+1. Let's use only the variables that we can know in real time:
###Code
data_consumo_real_time = data_consumo.drop(columns=['PVPC_2_PED_NOC',
'PVPC_ELEC_NOC',
'Precio mercado SPOT Diario_x',
'Precio SPOT PT',
'Precio SPOT FR',
'Demanda real',
])
k = 20
corrmat = data_consumo_real_time.corr()
cols = corrmat.nlargest(k, 'PVPC_DEF')['PVPC_DEF'].index
cm = np.corrcoef(data_consumo_real_time[cols].values.T)
f, ax = plt.subplots(figsize =(12, 10))
sns.heatmap(cm, ax = ax, cmap ="YlGnBu",
linewidths = 0.1, yticklabels = cols.values,
xticklabels = cols.values)
###Output
_____no_output_____
###Markdown
Visualization of other variables
###Code
x = data_consumo['date_timestamp']
data_pvpc = data_consumo['PVPC_DEF']
data_spot = data_consumo['Precio mercado SPOT Diario_x']
data_pt = data_consumo['Precio SPOT PT']
data_dem = data_consumo['Demanda']
data_brent = data_consumo['Brent_price']
sns.kdeplot(data_pvpc, shade=True)
sns.kdeplot(data_spot, shade=True)
sns.kdeplot(data_pt, shade=True)
sns.kdeplot(data_brent, shade=True)
# data_tiempo_semana (weekly time index) is assumed to be defined in an earlier cell of the notebook
fig, ax = plt.subplots(1,2)
sns.lineplot(data_tiempo_semana, data_pvpc, ax=ax[0])
sns.lineplot(data_tiempo_semana, data_spot, ax=ax[0])
sns.lineplot(data_tiempo_semana, data_pt, ax=ax[0])
sns.lineplot(data_tiempo_semana, data_brent, ax=ax[0])
sns.lineplot(x, data_pvpc, ax=ax[1])
sns.lineplot(x, data_spot, ax=ax[1])
sns.lineplot(x, data_pt, ax=ax[1])
sns.lineplot(x, data_brent, ax=ax[1])
fig.show()
fig, ax =plt.subplots(1,2)
sns.lineplot(data_tiempo_semana, data_dem, ax=ax[0])
sns.lineplot(x, data_dem, ax=ax[1])
fig.show()
###Output
_____no_output_____
###Markdown
Study with the variables available in real time:
###Code
data_termica = data_consumo['Termica_Renovable']
data_prec_sub = data_consumo['Precio de Regulación Secundaria subir']
data_saldo = data_consumo['Saldo total interconexiones programa p48']
data_nuclear = data_consumo['Nuclear']
fig, ax =plt.subplots(1,2)
sns.lineplot(data_tiempo_semana, data_pvpc, ax=ax[0])
sns.lineplot(data_tiempo_semana, data_termica, ax=ax[0])
sns.lineplot(x, data_pvpc, ax=ax[1])
sns.lineplot(x, data_termica, ax=ax[1])
fig.show()
fig, ax =plt.subplots(1,2)
sns.lineplot(data_tiempo_semana, data_pvpc, ax=ax[0])
sns.lineplot(data_tiempo_semana, data_prec_sub, ax=ax[0])
sns.lineplot(x, data_pvpc, ax=ax[1])
sns.lineplot(x, data_prec_sub, ax=ax[1])
fig.show()
fig, ax =plt.subplots(1,2)
sns.lineplot(data_tiempo_semana, data_pvpc, ax=ax[0])
sns.lineplot(data_tiempo_semana, data_saldo, ax=ax[0])
sns.lineplot(x, data_pvpc, ax=ax[1])
sns.lineplot(x, data_saldo, ax=ax[1])
fig.show()
fig, ax =plt.subplots(1,2)
sns.lineplot(data_tiempo_semana, data_pvpc, ax=ax[0])
sns.lineplot(data_tiempo_semana, data_nuclear, ax=ax[0])
sns.lineplot(x, data_pvpc, ax=ax[1])
sns.lineplot(x, data_nuclear, ax=ax[1])
fig.show()
sns.boxplot(data_spot)
sns.boxplot(data_brent)
sns.boxplot(data_pt)
###Output
_____no_output_____ |
ml_proj/ml_proj1/COMP90049S22020Assigment1Demo.ipynb | ###Markdown
COMP90049 Introduction to Machine Learning, Semester 2, 2020 Lecture 8: Code demo for Pre-processing, Naive Bayes and K-NN Hadi Khorshidi, CISCopyright @ University of Melbourne 2020All rights reserved. No part of the publication may be reproduced in any form by print, photoprint, microfilm or any other means without written permission from the author.
###Code
# Example data
X = [[2,1,"A",1], #0
[0,2,"B",1], #1
[1,1,"B",1], #0
[1,0,"B",0], #1
[1,0,"B",1], #0
[1,1,"A",0], #1
[2,5,"B",1], #0
[0,2,"C",0], #0
[1,2,"B",1], #1
[2,5,"C",0]] #1
Y = [0,1,0,1,0,1,0,0,1,1]
X
import numpy as np
import pandas as pd
import matplotlib as mpl
import sklearn
import math
###Output
_____no_output_____
###Markdown
First Doing Pre-processing Pre-processing
###Code
pd.DataFrame(X)
# pd.DataFrame?
X_df = pd.DataFrame(X, columns=["x1","x2","x3","x4"])
X_df
# One-hot transformation (Dummy variables)
pd.get_dummies(X_df["x3"], prefix="x3")
# One-hot transformation (Dummy variables)
pd.get_dummies(X_df, prefix="x3")
# One-hot transformation (Dummy variables)
pd.get_dummies(X_df, prefix="x3", drop_first=True)
?pd.get_dummies
# Transform numeric attributes to nominal using bins
pd.cut(X_df["x2"], bins=2)
# Transform numeric attributes to nominal using bins
pd.cut(X_df["x2"], bins=2, labels=["Low", "High"])
# Transform numeric attributes to nominal using bins
X_df["x2"] = pd.cut(X_df["x2"], bins=2, labels=["Low", "High"])
X_df
# map string values to integers for categorical attributes
for c in list(X_df):
vals = sorted(set([v for v in X_df[c].values]))
vals_dict = dict(zip(vals, range(len(vals))))
X_df[c] = X_df[c].map(lambda s: vals_dict.get(s) if s in vals_dict else s)
X_df
X_df.values.tolist()
###Output
_____no_output_____
###Markdown
Categorical Naive Bayes
###Code
# Function for counting the frequency of classes to calculate prior probability p(y=i) = n(i)/N
def p_y(y):
class_priors = [0]*len(set(y))
for c in y:
class_priors[c]+=1
return class_priors
p_y(Y)
X
Y
for idx,_ in enumerate(X):
print(idx)
# Function for likelihood p(x=j|y=i) = n(i,j)/n(i)
def p_xy(x,y):
# init dict (over classes) of dict (over features) of dict (over value counts)
outdict = {c:{} for c in y}
for d in outdict.keys():
for f in range(len(x[0])):
outdict[d][f]={}
rng = set([i[f] for i in x])
outdict[d][f] = {v:0 for v in rng}
# fill dict with counts
for idx,_ in enumerate(x):
for fidx, _ in enumerate(x[idx]):
outdict[y[idx]][fidx][x[idx][fidx]]+=1
    # normalize counts into probabilities (note: zero counts are left at 0, i.e. no smoothing)
for cl in outdict.keys():
for f in outdict[cl].keys():
for val in outdict[cl][f]:
if outdict[cl][f][val] > 0:
outdict[cl][f][val] = outdict[cl][f][val] / p_y(y)[cl]
return outdict
p_xy(X,Y)
outdict = {c:{} for c in Y}
for d in outdict.keys():
for f in range(len(X[0])):
outdict[d][f]={}
rng = set([i[f] for i in X])
outdict[d][f] = {v:0 for v in rng}
outdict
print(list(enumerate(X)))
# Test data
X_test = [[2,2,"B",1], #0
[0,2,"C",0]] #1
Y_test = [0,1]
type(X_test)
# Function for predicting test labels
def predict(x, pc, pxc):
    # multiplies the prior by the independent likelihood terms
class_probs = []
for y in range(len(pc)):
class_prob=pc[y]/sum(pc)
for fidx, f in enumerate(x):
if f in pxc[y][fidx]:
# print('f --> ', f)
# print('pxc[y][fidx] --> ', pxc[y][fidx])
class_prob = class_prob * pxc[y][fidx][f]
class_probs.append(class_prob)
return class_probs, np.argmax([class_probs])
def log_predict(x, pc, pxc):
# sums up prior and independent likelihood terms
class_probs = []
for y in range(len(pc)):
class_prob=math.log(pc[y]/sum(pc))
for fidx, f in enumerate(x):
if f in pxc[y][fidx]:
class_prob = class_prob + math.log(pxc[y][fidx][f])
class_probs.append(class_prob)
return class_probs, np.argmax([class_probs])
py = p_y(Y)
pxy = p_xy(X,Y)
py
pxy
for x in X_test:
print(predict(x, py, pxy))
print(log_predict(x, py, pxy))
###Output
([0.019200000000000005, 0.009600000000000003], 0)
([-3.952844999948401, -4.645992180508347], 0)
([0.0008000000000000003, 0.004800000000000001], 1)
([-7.1308988302963465, -5.339139361068291], 1)
###Markdown
Evaluation
###Code
# Function to evaluate a set of predictions in terms of metrics
from sklearn import metrics
def evaluate(pred,true):
CM = metrics.confusion_matrix(true, pred) # Confusion Matrix
Acc = metrics.accuracy_score(true, pred) # Accuracy
precf1 = metrics.precision_recall_fscore_support(true, pred) # Precision, Recall and F1-score
return CM, Acc, precf1
# Categorical Naive Bayes implementation
# predict on train
print("\nevaluation using training data")
correct = 0
preds = []
for i in range(len(X)):
prediction = predict(X[i], py, pxy)[1]
correct = correct + int(prediction==Y[i])
preds.append(prediction)
CM, Acc, precf1 = evaluate(preds, Y)
print("Confusion Matrix:\n{}\naccuracy: {}\naccuracy by sklearn.metric: {}\nprecision: {}\nrecall: {}\nF1: {}".format(CM,
correct / len(X),
Acc,
precf1[0],
precf1[1],
precf1[2]))
# predict on test
print("\nevaluation using test data")
correct = 0
preds = []
for i in range(len(X_test)):
prediction = predict(X_test[i], py, pxy)[1]
correct = correct + int(prediction==Y_test[i])
preds.append(prediction)
CM, Acc, precf1 = evaluate(preds, Y_test)
print("Confusion Matrix:\n{}\naccuracy: {}\naccuracy by sklearn.metric: {}\nprecision: {}\nrecall: {}\nF1: {}".format(CM,
correct / len(X_test),
Acc,
precf1[0],
precf1[1],
precf1[2]))
###Output
evaluation using training data
Confusion Matrix:
[[4 1]
[1 4]]
accuracy: 0.8
accuracy by sklearn.metric: 0.8
precision: [0.8 0.8]
recall: [0.8 0.8]
F1: [0.8 0.8]
evaluation using test data
Confusion Matrix:
[[1 0]
[0 1]]
accuracy: 1.0
accuracy by sklearn.metric: 1.0
precision: [1. 1.]
recall: [1. 1.]
F1: [1. 1.]
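###Markdown
A quick added cross-check, not part of the original demo: scikit-learn ships the same model as `CategoricalNB`. The sketch below is only an illustration under stated assumptions -- it reuses the `X`, `Y`, `X_test` and `Y_test` lists defined earlier, integer-encodes all four attributes with `OrdinalEncoder`, and uses a near-zero smoothing `alpha` so its predictions can be compared with the hand-rolled `predict()` and `log_predict()` functions above.
###Code
#Added sketch: categorical Naive Bayes via scikit-learn, for comparison only
from sklearn.naive_bayes import CategoricalNB
from sklearn.preprocessing import OrdinalEncoder
#Integer-encode every attribute (including the string-valued x3)
encoder = OrdinalEncoder()
X_enc = encoder.fit_transform(pd.DataFrame(X, columns=["x1","x2","x3","x4"]))
X_test_enc = encoder.transform(pd.DataFrame(X_test, columns=["x1","x2","x3","x4"]))
#alpha close to zero approximates the unsmoothed counts used in p_xy above
sk_nb = CategoricalNB(alpha=1e-10)
sk_nb.fit(X_enc, Y)
print(sk_nb.predict(X_test_enc))        #should agree with predict()/log_predict() above
print(sk_nb.predict_proba(X_test_enc))  #normalised per-class probabilities
###Output
_____no_output_____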
###Markdown
K-nearest neighbour
###Code
# K-NN implementation
from sklearn.neighbors import KNeighborsClassifier
X_df = pd.DataFrame(X, columns=["x1","x2","x3","x4"])
#One-hot encode x3 so the training columns match the dummy columns built for the test data below
X_df = pd.get_dummies(X_df, prefix="x3", drop_first=True)
X_df
X_test_df = pd.DataFrame(X_test, columns=["x1","x2","x3","x4"])
X_test_df = pd.get_dummies(X_test_df, prefix="x3", drop_first=True)
X_test_df
X_test_df.insert(3,"x3_B", [0,0])
X_test_df
type(X_df)
print(Y)
print(type(Y))
classifier = KNeighborsClassifier(n_neighbors=3)
classifier.fit(X_df, Y)
preds = classifier.predict(X_test_df)
print(preds)
type(preds)
CM, Acc, precf1 = evaluate(preds, Y_test)
print("Confusion Matrix:\n{}\naccuracy: {}\nprecision: {}\nrecall: {}\nF1: {}".format(CM,
Acc,
precf1[0],
precf1[1],
precf1[2]))
###Output
Confusion Matrix:
[[1 0]
[0 1]]
accuracy: 1.0
precision: [1. 1.]
recall: [1. 1.]
F1: [1. 1.]
|
My_week2_submission_The_two_dimensional_array_and_gradient_problem.ipynb | ###Markdown
Problem 1. The Linear function
###Code
import numpy as np
x_ndarray = np.arange(-50, 50.1, 0.1)
y_ndarray = 0.5*x_ndarray + 1
x_ndarray, y_ndarray
###Output
_____no_output_____
###Markdown
Problem 2. The Array combination
###Code
xy_ndarray = np.stack((x_ndarray, y_ndarray),-1)
xy_ndarray.shape, xy_ndarray
###Output
_____no_output_____
###Markdown
Problem 3. Finding the gradient
###Code
dx = np.diff(x_ndarray)
dy = np.diff(y_ndarray)
slope = dy/dx
slope.shape
###Output
_____no_output_____
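###Markdown
A quick added sanity check, not part of the original submission: since y = 0.5*x + 1 is linear, its analytic derivative is 0.5 everywhere, so every finite-difference slope computed above should equal 0.5 up to floating-point error.
###Code
#Compare the finite-difference slopes against the analytic derivative (0.5)
print(slope.min(), slope.max())
print(np.allclose(slope, 0.5))
###Output
_____no_output_____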
###Markdown
Problem 4. The Drawing of a graph.
###Code
import matplotlib.pyplot as plt
plt.xlabel("X")
plt.ylabel("gradient")
plt.title("linear function")
plt.plot(x_ndarray,y_ndarray, color='orange', linestyle='dotted', linewidth=4, markersize=6)
plt.plot(x_ndarray[:-1],slope,color='blue')
plt.show()
###Output
_____no_output_____
###Markdown
Problem 5. The Python functionalization
###Code
def function1(x):
y = 0.5*x + 1
return y
def function2(x):
y = x**2
return y
def function3(x):
y = 2*x**2 + 2**x
return y
def function4(x):
y = np.sin(x**0.5)
return y
def compute_gradient(function,x_range=(-50, 50.1, 0.1)):
array_x = np.arange(*x_range)
array_y = function(array_x)
array_xy = np.stack((array_x, array_y),-1)
gradient = np.diff(array_y)/np.diff(array_x)
return array_xy, gradient
array_xy1, gradient1 = compute_gradient(function1)
plt.xlabel("x")
plt.ylabel("gradient")
plt.title("linear function y = 0.5*x + 1")
plt.plot(array_xy1[:,0],array_xy1[:,1], color='blue')
plt.plot(array_xy1[:-1,0],gradient1, color='green',linestyle='dashed', linewidth=2, markersize=6)
plt.show()
array_xy2, gradient2 = compute_gradient(function2)
plt.xlabel("x")
plt.ylabel("gradient")
plt.title("linear function y = x**2")
plt.plot(array_xy2[:,0],array_xy2[:,1], color='blue')
plt.plot(array_xy2[:-1,0],gradient2, color='green',linestyle='dashed', linewidth=2, markersize=6)
plt.show()
array_xy3, gradient3 = compute_gradient(function3)
plt.xlabel("x")
plt.ylabel("gradient")
plt.title("linear function y = 2*x**2 + 2**x")
plt.plot(array_xy3[:,0],array_xy3[:,1], color='blue')
plt.plot(array_xy3[:-1,0],gradient3, color='red', linewidth=2, markersize=6)
plt.show()
array_xy4, gradient4 = compute_gradient(function4,x_range=(0, 50.1, 0.1))
plt.xlabel("x")
plt.ylabel("gradient")
plt.title("linear function y = sin(x**0.5)")
plt.plot(array_xy4[:,0],array_xy4[:,1], color='blue')
plt.plot(array_xy4[:-1,0],gradient4, color='green',linestyle='dashed', linewidth=2, markersize=6)
plt.show()
###Output
_____no_output_____
###Markdown
Problem 6. Finding the minimum value.
###Code
def compute_gradient(function,x_range=(-50, 50.1, 0.1)):
array_x = np.arange(*x_range)
array_y = function(array_x)
min_y_value = np.min(array_y)
min_y_arg = np.argmin(array_y)
array_xy = np.stack((array_x, array_y),-1)
gradient = np.diff(array_y)/np.diff(array_x)
return f'The minimum value of y for this function is {min_y_value} and its index is {min_y_arg}'
compute_gradient(function1)
compute_gradient(function2)
compute_gradient(function3)
compute_gradient(function4, x_range=(0, 50.1, 0.1))
###Output
_____no_output_____ |
godley_&_lavoie/Python 3 - Chapter 10 Model INSOUTB.ipynb | ###Markdown
Monetary Economics : Chapter 10 Preliminaries
###Code
# This line configures matplotlib to show figures embedded in the notebook,
# instead of opening a new window for each figure. More about that later.
# If you are using an old version of IPython, try using '%pylab inline' instead.
%matplotlib inline
from pysolve3.model import Model
from pysolve3.utils import is_close,round_solution
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Model INSOUTB
###Code
def create_insoutb_model():
model = Model()
model.set_var_default(0)
model.var('Ad', desc='Demand for Central bank advances from commercial banks')
model.var('As', desc='Supply of central bank advances to commercial banks')
model.var('Bbd', desc='Government bills demanded by commercial banks')
model.var('Bbdn', desc='Notional demand for government bills from commercial banks')
model.var('Bcb', desc='Government bills held by Central bank')
model.var('Bhd', desc='Demand for government bills')
model.var('Bhh', desc='Government bills held by households')
model.var('Bs', desc='Supply of government bills')
model.var('BLd', desc='Demand for government bonds')
    model.var('BLh', desc='Government bonds held by households')
model.var('BLs', desc='Supply of government bonds')
model.var('BLR', desc='Gross bank liquidity ratio')
model.var('BLRn', desc='Net bank liquidity ratio')
model.var('BPM', desc="Banks' profit margin")
model.var('Ck', desc='Real consumption')
model.var('CG', desc='Capital gains on government bonds')
model.var('CONS', desc='Consumption at current prices')
model.var('F', desc='Realized profits of firms and banks')
    model.var('Fb', desc='Realized profits of banks')
model.var('Fcb', desc='Central bank "profits"')
model.var('Ff', desc='Realized firm profits')
model.var('Ffe', desc='Expected profits of firms')
model.var('G', desc='Government expenditures')
model.var('Hbd', desc='Cash required by banks')
model.var('Hbs', desc='Cash supplied to banks')
model.var('Hhd', desc='Household demand for cash')
model.var('Hhh', desc='Cash held by households')
model.var('Hhs', desc='Cash supplied by households')
model.var('Hs', desc='Total supply of cash')
model.var('IN', desc='Stock of inventories at current costs')
model.var('INk', desc='Real inventories')
model.var('INke', desc='Expected real inventories')
model.var('INkt', desc='Target level of real inventories')
model.var('Ld', desc='Demand for loans')
model.var('Ls', desc='Supply of loans')
model.var('M1h', desc='Checking deposits held by households')
model.var('M1hn', desc='Notional holding of checking deposits')
model.var('M1s', desc='Checking deposits supplied by banks')
model.var('M2d', desc='Demand for term deposits - constrained to be non-negative')
model.var('M2h', desc='Term deposits held by households')
model.var('M2s', desc='Term deposits supplied by banks')
model.var('N', desc='Employment level')
model.var('NHUC', desc='Normal historic unit costs')
model.var('omegat', desc='Target real wage for workers')
model.var('P', desc='Price level')
model.var('Pbl', desc='Price of government bonds')
model.var('PI', desc='Price inflation')
model.var('PSBR', desc='Government deficit')
model.var('Ra', desc='Interest rate on Central bank advances')
model.var('Rb', desc='Interest rate on government bills')
model.var('Rbl', desc='Interest rate on bonds')
model.var('Rl', desc='Interest rate on loans')
model.var('Rm', desc='Interest rate on deposits')
model.var('RRb', desc='Real interest rate on bills')
model.var('RRbl', desc='Real interest rate on long term bonds')
model.var('RRl', desc='Real interest rate on loans')
model.var('RRm', desc='Real interest rate on term deposits')
model.var('S', desc='Sales at current prices')
model.var('Sk', desc='Real sales')
model.var('Ske', desc='Expected real sales')
model.var('sigmas', desc='Realized inventories to sales ratio')
model.var('sigmat', desc='Target inventories to sales ratio')
model.var('T', desc='Taxes')
model.var('UC', desc='Unit costs')
model.var('V', desc='Wealth of households')
model.var('Ve', desc='Expected household wealth')
model.var('Vk', desc='Real wealth of households')
model.var('Vnc', desc='Wealth of households, net cash')
model.var('Vnce', desc='Expected wealth of households, net cash')
model.var('WB', desc='The wage bill')
model.var('Y', desc='Output at current prices')
model.var('Yk', desc='Real output')
model.var('YDhs', desc='Haig-Simons measure of disposable income')
model.var('YDkhs', desc='Haig-Simons measure of real disposable income')
model.var('YDkr', desc='Regular real disposable income')
model.var('YDkre', desc='Expected regular real disposable income')
model.var('YDr', desc='Regular disposable income')
model.var('YDre', desc='Expected regular disposable income')
model.set_param_default(0)
model.param('alpha0', desc='Autonomous consumption')
model.param('alpha1', desc='Propensity to consume out of income')
model.param('alpha2', desc='Propensity to consume out of wealth')
model.param('beta', desc='Parameter in expectation formations on real sales')
model.param('bot', desc='Bottom value for bank net liquidity ratio')
model.param('botpm', desc='Bottom value for bank profit margin')
model.param('eps', desc='Parameter in expectation formations on real disposable income')
model.param('gamma', desc='Speed of adjustment of inventories to the target level')
model.param('lambda20', desc='Parameter in household demand for time deposits')
model.param('lambda21', desc='Parameter in household demand for time deposits')
model.param('lambda22', desc='Parameter in household demand for time deposits')
model.param('lambda23', desc='Parameter in household demand for time deposits')
model.param('lambda24', desc='Parameter in household demand for time deposits')
model.param('lambda25', desc='Parameter in household demand for time deposits')
model.param('lambda30', desc='Parameter in household demand for bills')
model.param('lambda31', desc='Parameter in household demand for bills')
model.param('lambda32', desc='Parameter in household demand for bills')
model.param('lambda33', desc='Parameter in household demand for bills')
model.param('lambda34', desc='Parameter in household demand for bills')
model.param('lambda35', desc='Parameter in household demand for bills')
model.param('lambda40', desc='Parameter in household demand for bonds')
model.param('lambda41', desc='Parameter in household demand for bonds')
model.param('lambda42', desc='Parameter in household demand for bonds')
model.param('lambda43', desc='Parameter in household demand for bonds')
model.param('lambda44', desc='Parameter in household demand for bonds')
model.param('lambda45', desc='Parameter in household demand for bonds')
model.param('lambdac', desc='Parameter in household demand for cash')
model.param('phi', desc='Mark-up on unit costs')
model.param('ro1', desc='Reserve requirements parameter')
model.param('ro2', desc='Reserve requirements parameter')
model.param('sigma0', desc='Parameter determining the target inventories to sales ratio')
model.param('sigma1', desc='Parameter linking the target inventories to sales ratio to the interest rate')
model.param('tau', desc='Sales tax rate')
model.param('top', desc='Top value for bank net liquidity ratio')
model.param('toppm', desc='Top value for bank profit margin')
model.var('z1', desc='Is 1 if bank checking accounts are non-negative')
model.var('z2', desc='Is 1 if bank checking accounts are negative')
model.var('z3', desc='Is 1 if banks net liquidity ratio is below bottom level')
model.var('z4', desc='Is 1 if banks net liquidity ratio was below bottom level')
model.var('z4b', desc='Is 1 if banks net liquidity ratio was way below bottom level')
model.var('z5', desc='Is 1 if banks net liquidity ratio was above top level')
model.var('z5b', desc='Is 1 if banks net liquidity ratio was way above top level')
model.var('z6', desc='Is 1 if banks profit margin is below bottom level')
model.var('z7', desc='Is 1 if banks profit margin is above top level')
model.param('xib', desc='Parameter in the equation for setting interest rate on deposits')
model.param('xil', desc='Parameter in the equation for setting interest rate on loans')
model.param('xim', desc='Parameter in the equation for setting interest rate on deposits')
model.param('omega0', desc='Parameter influencing the target real wage for workers')
model.param('omega1', desc='Parameter influencing the target real wage for workers')
model.param('omega2', desc='Parameter influencing the target real wage for workers')
model.param('omega3', desc='Speed of adjustment of wages to target value')
model.param('ERrbl', desc='Expected rate of return on long term bonds')
model.param('Gk', desc='Real government expenditures')
model.param('Nfe', desc='Full employment level')
model.param('PR', desc='Labour productivity')
model.param('Rbbar', desc='Interest rate on bills, set exogenously')
model.param('Rblbar', desc='Interest rate on bonds, set exogenously')
model.var('W', desc='Wage rate')
# Box 10.1 Firms' decisions
# -------------------------
model.add('Yk = Ske + INke - INk(-1)') # 10.1 : Real output
model.add('N = Yk/PR') # 10.2 : Employment
model.add('WB = N*W') # 10.3 : The wage bill
model.add('UC = WB/Yk') # 10.4 : Unit costs
model.add('Ske = beta*Sk(-1) + (1-beta)*Ske(-1)') # 10.5 : Expected real sales
model.add('INkt = sigmat * Ske') # 10.6 : Target level of real inventories
model.add('sigmat = sigma0 - sigma1*Rl') # 10.7 : Target inventories to sales ratio
model.add('RRl = (1 + Rl)/(1 + PI) - 1') # 10.8 : Real interest rate on loans
model.add('INke = INk(-1) + gamma*(INkt - INk(-1))') # 10.9 : Expected real inventories
model.add('NHUC = (1 - sigmat)*UC + sigmat*(1 + Rl(-1))*UC(-1)') # 10.11 : Normal historic unit costs
model.add('P = (1 + tau)*(1 + phi)*NHUC') # 10.10 : Price level
model.add('Ffe = (phi/(1+phi))*(1/(1+tau))*P*Ske') # 10.11A : Expected profits of firms
# Box 10.2 : Firms' equations
# ---------------------------
model.add('Sk = Ck + Gk') # 10.12 : Real sales
model.add('S = P * Sk') # 10.13 : Sales at current prices
model.add('INk - INk(-1) = Yk - Sk') # 10.14 : Real inventories
model.add('sigmas = INk(-1)/Sk') # 10.15 : Realized inventories to sales ratio
model.add('IN = INk*UC') # 10.16 : Stock of inventories
model.add('Ld = IN') # 10.17 : Demand for loans
model.add('Ff = S - T - WB + IN - IN(-1) - Rl(-1)*IN(-1)') # 10.18 : Firms realized profits
model.add('PI = P/P(-1) - 1') # 10.19 : Rate of price inflation
# Box 10.3 : Household equations
# ------------------------------
model.add('YDr = WB + F + Rm(-1)*M2d(-1) + Rb(-1)*Bhh(-1) + BLh(-1)') # 10.20 : Regular disposable income
model.add('CG = (Pbl - Pbl(-1))*BLh(-1)') # 10.21 : Capital gains on bonds
model.add('YDhs = YDr + CG') # 10.22 : Haig-Simons measure of disposable income
model.add('F = Ff + Fb') # 10.23 : Total net profits
model.add('V = V(-1) + YDhs - CONS') # 10.24 : Nominal wealth
model.add('Vnc = V - Hhd') # 10.25 : Nominal wealth net of cash
model.add('YDkr = (YDr - PI*V(-1))/P') # 10.26 : Real regular disposable income
model.add('YDkhs = (YDr - PI*V(-1) + CG)/P') # 10.27 : Real HS disposable income
model.add('Vk = V/P') # 10.28 : Real wealth of households
# Box 10.4 : Household equations
# ------------------------------
model.add('Ck = alpha0 + alpha1*YDkre + alpha2*Vk(-1)') # 10.29 : Consumption decision
model.add('YDkre = eps*YDkr(-1) + (1 - eps)*YDkre(-1)') # 10.30 : Expected real regular disposable income
model.add('CONS = Ck*P') # 10.31 : Consumption at current prices
model.add('YDre = P*YDkre + PI*V(-1)/P') # 10.32 : Expected regular disposable income
model.add('Ve = V(-1) + YDre - CONS') # 10.33 : Expected nominal wealth
model.add('Hhd = lambdac*CONS') # 10.34 : Household demand for cash
model.add('Vnce = Ve - Hhd') # 10.35 : Expected nominal wealth net of cash
# Box 10.5 : Households portfolio equations, based on nominal rates
# -----------------------------------------------------------------
# 10.37 : Demand for term banks deposit
model.add('M2d = (lambda20 + lambda22*Rm + lambda23*Rb + lambda24*ERrbl + lambda25*(YDre/Vnce))*Vnce')
# 10.38 : Demand for government bills
model.add('Bhd = (lambda30 + lambda32*Rm + lambda33*Rb + lambda34*ERrbl + lambda35*(YDre/Vnce))*Vnce')
# 10.39 : Demand for government bonds
model.add('BLd = (lambda40 + lambda42*Rm + lambda43*Rb + lambda44*ERrbl + lambda45*(YDre/Vnce))*Vnce/Pbl')
# Box 10.6 : Households portfoloio equations, based on real rates
# ---------------------------------------------------------------
# 10.37A : "Notional" Demand for term banks deposits
# M2d = (lambda20 - lambda21*PI/(1 + PI) + lambda22*RRm + lambda23*RRb + lambda24*RRbl + lambda25*YDre/Vnce))*Vnce
# 10.38A : Demand for government bills
# Bhd = (lambda30 - lambda31*PI/(1 + PI) + lambda32*RRm + lambda33*RRb + lambda34*RRbl + lambda35*YDre/Vnce))*Vnce
# 10.39A : Demand for government bonds
# BLd = (lambda40 - lambda41*PI/(1 + PI) + lambda42*RRm + lambda43*RRb + lambda44*RRbl + lambda45*YDre/Vnce))*Vnce/PIbl
model.add('RRm = (1 + Rm)/(1 + PI) - 1') # 10.37B : Real interest rate on term deposits
model.add('RRb = (1 + Rb)/(1+ PI) - 1') # 10.38B : Real interest rate on bills
model.add('RRbl = (1 + Rbl)/(1 + PI) - 1') # 10.39B : Real interest rate on long-term bonds
# Box 10.7 : Households equations, realized portfolio asset holding
# -----------------------------------------------------------------
model.add('Hhh = Hhd') # 10.40 : Cash holding
model.add('Bhh = Bhd') # 10.41 : Holding of bills
model.add('BLh = BLd') # 10.42 : Holding of bonds
model.add('M1hn = Vnc - M2d - Bhd - Pbl*BLd') # 10.43 : Notional holding of bank checking accounts
model.add('M1h = M1hn * z1') # 10.44 : Holding of bank checking accounts
model.add('z1 = if_true(M1hn >= 0)') # 10.45 : Condition for non-negative bank checking acounts
model.add('M2h = M2d*z1 + (Vnc - Bhh - Pbl*BLd)*z2') # 10.46 : Holding of bank term deposits
model.add('z2 = 1 - z1') # 10.47 : Condition for negative bank checking accounts
# Box 10.8 : Government equations
# -------------------------------
model.add('T = S*tau/(1 + tau)') # 10.48 : Tax receipts
model.add('G = P*Gk') # 10.49 : Government expenditures
model.add('PSBR = G + Rb(-1)*Bs(-1) + BLs(-1) - (T + Fcb)') # 10.50 : Government deficit
model.add('Bs - Bs(-1) = PSBR - (BLs - BLs(-1))*Pbl') # 10.51 : New issues of bills
model.add('BLs = BLd') # 10.52 : Supply of bonds
model.add('Pbl = 1/Rbl') # 10.53 : Price of bonds
model.add('Rbl = Rblbar + PI(-1)') # 10.54 : Yield on bonds is exogenous
# Box 10.9 : Central bank equations
# ---------------------------------
model.add('Hs = Bcb + As') # 10.55 : Supply of cash
model.add('Hbs = Hs - Hhs') # 10.56 : Supply of cash to commercial banks
model.add('Bcb = Bs - Bhh - Bbd') # 10.57 : CB purchases of government bills
model.add('Rb = Rbbar + PI(-1)') # 10.58 : Interest rate on government bills, set exogenously
model.add('As = Ad') # 10.59 : Supply of CB advances to commercial banks
model.add('Ra = Rb') # 10.60 : Interest rate on CB advances
model.add('Fcb = Rb(-1)*Bcb(-1) + Ra(-1)*As(-1)') # 10.61 : Profits of Central Bank
# Box 10.10 : Commercial bank equations
# -------------------------------------
model.add('Hhs = Hhd') # 10.62 : Supply of cash to households
model.add('M1s = M1h') # 10.63 : Supply of checking deposits
model.add('M2s = M2d') # 10.64 : Supply of time deposits
model.add('Ls = Ld') # 10.65 : Supply of loans
model.add('Hbd = ro1*M1s + ro2*M2s') # 10.66 : Demand for cash by banks (reserve requirement)
# Box 10.11 : Commercial bank equations
# -------------------------------------
model.add('Bbdn = M1s + M2s - Ls - Hbd') # 10.67 : Notional demand for bills
model.add('BLRn = Bbdn/(M1s + M2s)') # 10.68 : Net bank liquidity ratio
model.add('Ad = (bot*(M1s + M2s) - Bbdn)*z3') # 10.69 : Advances needed by banks
model.add('z3 = if_true(BLRn < bot)') # 10.70 : Check if net liquidity is above bottom value
model.add('Bbd = Ad + M1s + M2s - Ls - Hbd') # 10.71 : Demand for government bills
model.add('BLR = Bbd/(M1s + M2s)') # 10.72 : Gross bank liquidity ratio
# Box 10.12 : Commercial bank equations
# -------------------------------------
# 10.73 : Interest rate on deposits
model.add('Rm = Rm(-1) + 0.0001*z4 + 0.0002*z4b - 0.0001*z5 - 0.0002*z5b + xib*(Rb - Rb(-1))')
model.add('z4 = if_true(BLRn(-1) < bot)') # 10.75 : Check if net liquidity ratio was below bottom value
model.add('z4b = if_true(BLRn(-1) < (bot - 0.02))')
model.add('z5 = if_true(BLRn(-1) > top)') # 10.76 : Check if net liquidity ratio was above top value
model.add('z5b = if_true(BLRn(-1) > (top+0.02))')
# 10.77 : Realized bank profits
model.add('Fb = Rl(-1)*Ls(-1) + Rb(-1)*Bbd(-1) - Rm(-1)*M2s(-1) - Ra(-1)*Ad(-1)')
model.add('Rl - Rl(-1) = xil*(z6 - z7) + (Rb - Rb(-1))') # 10.78 : Interest rate on loans
model.add('z6 = if_true(BPM < botpm)') # 10.80 : Check if banks profit margin is below bottom value
model.add('z7 = if_true(BPM > toppm)') # 10.81 : Check if banks profit margin is above top value
model.add('BPM = (Fb + Fb(-1))/(M1s(-1) + M1s(-2) + M2s(-1) + M2s(-2))') # 10.82 : Banks profit margin
# Inflationary forces
# -------------------
# 10.84 : Target real wage for workers
model.add('omegat = exp(omega0 + omega1*log(PR) + omega2*log((N/Nfe)))')
model.add('W = W(-1)*(1 + omega3*(omegat(-1) - W(-1)/P(-1)))') # 10.85 Unit wages
    # Additional equations
# -------------------
model.add('Y = P*Sk + (INk - INk(-1))*UC') # Output at current prices
return model
insoutb_parameters = {'alpha0': 0,
'alpha1': 0.95,
'alpha2': 0.05,
'beta': 0.5,
'bot': 0.02,
'botpm': 0.002,
'eps': 0.5,
'gamma': 0.5,
'lambda20': 0.52245,
'lambda21': 20,
'lambda22': 40,
'lambda23': -20,
'lambda24': -20,
'lambda25': -0.06,
'lambda30': 0.47311,
'lambda31': 40,
'lambda32': -20,
'lambda33': 40,
'lambda34': -20,
'lambda35': -0.06,
'lambda40': 0.17515,
'lambda41': 20,
'lambda42': -20,
'lambda43': -20,
'lambda44': 40,
'lambda45': -0.06,
'lambdac': 0.1,
'phi': 0.1,
'ro1': 0.1,
'ro2': 0.1,
'sigma0': 0.3612,
'sigma1': 3,
'tau': 0.25,
'top': 0.04,
'toppm': 0.005,
'xib': 0.9,
'xil': 0.002,
'xim': 0.0002,
'omega0': -0.32549,
'omega1': 1,
'omega2': 1.5,
'omega3': 0.1}
insoutb_exogenous = {'Gk': 25,
'Nfe': 133.28,
'PR': 1,
'Rbbar': 0.023,
'Rblbar': 0.027,
'ERrbl': 0.027,
'W': 1}
insoutb_variables = [('Bbd', 1.19481),
('Bbdn', 1.19481),
('Bcb', 19.355),
('Bhh', 49.69136),
('Bhd', 'Bhh'),
('Bs', 70.24123),
('BLh', 1.12309),
('BLd', 'BLh'),
('BLs', 'BLd'),
('Hbd', 4.36249),
('Hbs', 'Hbd'),
('Hhd', 14.992),
('Hhh', 'Hhd'),
('Hhs', 'Hhd'),
('INk', 38.07),
('INke', 'INk'),
('IN', 38.0676),
('Ls', 38.0676),
('Ld', 'Ls'),
('M1s', 3.9482),
('M1h', 'M1s'),
('M1hn', 'M1s'),
('M2s', 39.667),
('M2d', 'M2s'),
('M2h', 'M2d'),
('Vk', 108.285),
('Ra', 0.02301),
('Rb', 0.02301),
('Rl', 0.02515),
('Rm', 0.02095),
('BLRn', 0.02737),
('Fb', 0.1535),
('P', 1.38469),
('Pbl', 37.06),
('Rbl', 'Rblbar'),
('Sk', 133.277),
('Ske', 'Sk'),
('UC', 1),
('YDkr', 108.28),
('YDkre', 108.28),
('V', 'Vk*P'),
('Ve' , 'V'),
('Vnc', 'V - Hhh'),
('Vnce', 'Vnc'),
('omegat', 0.72215)]
###Output
_____no_output_____
###Markdown
Scenario: Model INSOUTB, Baseline
###Code
baseline = create_insoutb_model()
baseline.set_values(insoutb_parameters)
baseline.set_values(insoutb_exogenous)
baseline.set_values(insoutb_variables)
# run to convergence
# Give the system more time to reach a steady state
for _ in range(65):
baseline.solve(iterations=200, threshold=1e-6)
###Output
_____no_output_____
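###Markdown
An added check, not in the original notebook: since `is_close` is already imported from `pysolve3.utils`, we can confirm that the last two rounds of the baseline run are effectively identical, i.e. that 65 rounds were enough to reach a steady state. This assumes `is_close(prev, curr, atol=...)` compares two solution dictionaries element-wise, as in other pysolve-based Godley & Lavoie notebooks.
###Code
# Sanity check: the baseline should have settled down by the last round
prev = baseline.solutions[-2]
curr = baseline.solutions[-1]
print(is_close(prev, curr, atol=1e-4))
###Output
_____no_output_____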
###Markdown
Scenario: Model INSOUTB, increase in target real wage rate
###Code
omega0 = create_insoutb_model()
omega0.set_values(insoutb_parameters)
omega0.set_values(insoutb_exogenous)
omega0.set_values(insoutb_variables)
for _ in range(15):
omega0.solve(iterations=200, threshold=1e-6)
omega0.set_values({'omega0': -0.28})
for _ in range(50):
omega0.solve(iterations=200, threshold=1e-6)
###Output
_____no_output_____
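###Markdown
Another small added peek, not in the original notebook: printing the inflation rate and the bill rate before and well after the shock makes the premise of the figures below concrete -- the higher target real wage generates inflation, and the exogenously set real rates translate into higher nominal rates.
###Code
# Inspect a few variables before and after the omega0 shock (applied after period 15)
print('PI before shock:', round(omega0.solutions[14]['PI'], 5))
print('PI after shock :', round(omega0.solutions[-1]['PI'], 5))
print('Rb after shock :', round(omega0.solutions[-1]['Rb'], 5))
###Output
_____no_output_____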
###Markdown
Figure 10.7A
###Code
caption = '''
Figure 10.7A Evolution of real sales following a one-step increase in the target real wage
that generates an increase in the rate of inflation, accompanied by an increase in the
nominal interest rate that approximately compensates for the increase in inflation.'''
skdata = [s['Sk'] for s in omega0.solutions[5:]]
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.plot(skdata, linestyle='-', color='b')
# add labels
plt.text(20, 132.6, 'Real sales')
fig.text(0.1, -.1, caption);
###Output
_____no_output_____
###Markdown
Figure 10.7B
###Code
caption = '''
Figure 10.7B Evolution of real household debt and real government debt following
a one-step increase in the target real wage that generates an increase in the
rate of inflation, accompanied by an increase in nominal interest rates that
approximately compensates for the increase in inflation.'''
wdata = [(s['Bs'] + s['BLs']*s['Pbl'])/s['P'] for s in omega0.solutions[5:]]
vkdata = [s['Vk'] for s in omega0.solutions[5:]]
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(65, 115)
axes.plot(vkdata, linestyle='-', color='g')
axes.plot(wdata, linestyle='-', color='b')
# add labels
plt.text(20, 82, 'Real government debt')
plt.text(20, 109, 'Real household wealth')
fig.text(0.1, -.15, caption);
###Output
_____no_output_____
###Markdown
Figure 10.7C
###Code
caption = '''
Figure 10.7C Evolution of the deflated government balance, adjusted and
unadjusted for inflation gains, following a one-step increase in the target
real wage that generates an increase in the rate of inflation, accompanied
by an increase in nominal interest rates that approximately compensates for
the increase in inflation.'''
psbrdata = list()
data = list()
for i in range(5, len(omega0.solutions)):
s7 = omega0.solutions[i]
s7_1 = omega0.solutions[i-1]
s0 = baseline.solutions[i]
s0_1 = baseline.solutions[i-1]
psbrdata.append((s0['PSBR']/s0['P']) - (s7['PSBR']/s7['P']))
data.append((-s7['PSBR'] + (s7['P'] - s7_1['P'])*(s7_1['Bs']+s7_1['BLs']*s7_1['Pbl'])/s7['P']) -
(-s0['PSBR'] + (s0['P'] - s0_1['P'])*(s0_1['Bs']+s0_1['BLs']*s0_1['Pbl'])/s0['P']))
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
#axes.set_ylim(-0.5, 1)
axes.plot(psbrdata, color='b')
axes.plot(data, linestyle='-', color='g')
# add labels
plt.text(17, 0.2, 'Real government budget balance')
plt.text(17, 0.17, '(adjusted for inflation gains)')
plt.text(21, -0.19, 'Real government budget balance')
plt.text(21, -0.22, '(unadjusted for inflation gains)')
fig.text(0.1, -.2, caption);
###Output
_____no_output_____ |
dsc8_text_comparison_algorithm_crazy_quinn.ipynb | ###Markdown
Text-Comparison-Algorithm-Crazy Quinnby Quinn DombrowskiOctober 21, 2020 Dear ReaderThis *Data-Sitters Club* book is a little different: it's meant to be read as a Jupyter notebook. Congratulations, you're in the right place!Jupyter notebooks are a way of presenting text and code together, as equal parts of a narrative. (To learn more about them, how they work, and how you can use them, check out [this Introduction to Jupyter Notebooks](https://programminghistorian.org/en/lessons/jupyter-notebooks) at Programming Historian that I wrote last year with some colleagues.)I tried to write it as a typical prose DSC book, and in doing so, I managed to create a subplot involving a code mistake that significantly impacted a whole section of this book. But instead of rewriting the narrative, fixing the mistake, and covering up the whole thing, I started adding comment boxesNote: Like this. And in this way, I ended up in a kind of dialogue with myself, pointing out the mistakes, and all the times I almost realized what had happened.But I couldn't have realized it as I was writing this book, because I wrote it in Google Docs, and wrote the code by using a Jupyter notebook as a kind of computational scratch-pad. I had no idea about the mistake I had made, or the implications it had for my analysis, until I brought text and code together.If you really want to read this [just as webpage text without the code](https://datasittersclub.github.io/site/dsc8/), you have that option. But if there ever were a time to confront any uneasiness you feel about looking at code as you read a narrative description of DH work, you're not going to find a more friendly, fun, and colloquial place to start than DSC 8: *Text-Comparison-Algorithm-Crazy Quinn*. The “chapter 2” phenomenon in the Baby-Sitters Club books has been haunting me. Ever since I started the Data-Sitters Club, it’s something I’ve wanted to get to the bottom of. It’s trotted out so often as an easy criticism of the series -- or a point of parody (as we’ve done on our own “[Chapter 2](https://datasittersclub.github.io/site/chapter-2/)” page that describes each of the Data-Sitters and what the club is about), and it feels particularly tractable using computational text analysis methods.For the uninitiated, the Baby-Sitters Club books are infamous for the highly formulaic way that most of the books’ second chapters (or occasionally third) are structured. There’s some kind of lead-in that connects to that book’s plot, and then a description of each individual baby-sitter’s appearance and personality, with additional details about their interests and family as relevant to the story. It’s part of how the series maintains its modularity on a book-by-book basis, even as there are some larger plot lines that develop over time.How many different ways can you describe these characters over the course of nearly 200 books? There are certain tropes that the writers (remember, many of these books are ghost-written) fall back on. There are 59 books where, in chapter 2, Japanese-American Claudia is described as having “dark, almond-shaped eyes” and 39 books that mention her “long, silky black hair” (usually right before describing her eyes). 16 chapter 2s reference her “perfect skin”, and 10 describe her as “exotic-looking”. 22 chapter 2s describe Kristy as a “tomboy” who “loves sports”. 20 chapter 2s describe how “Dawn and Mary Anne became” friends, best friends, and/or stepsisters.So it’s not that this critique of the Baby-Sitters Club series is *wrong*. 
But what I wanted to do was quantify *how right* the critique was. And whether there were any other patterns I could uncover. Do the chapter 2s get more repetitive over the course of the series? Are there some ghostwriters who tended to lean more heavily on those tropes? Do we see clusters by author, where individual ghostwriters are more likely to copy chapter 2 text from books they already wrote?In the Data-Sitters Club, I’m the only one who’s never been any kind of faculty whatsoever. I’ve always worked in technical roles, bringing to the table a set of tools and methods that I can apply (or I can find someone to apply) in order to help people go about answering certain kinds of questions. Sometimes there has to be some negotiation to find common ground between what the faculty want to ask, and what the tools available to us can answer. Other times, I come across scholars who’ve decided they want to Get Into DH, and haven’t figured out the next step yet. In those cases, where there’s a pragmatic interest (“it would be good to do some DH so I can… [talk about it in my job application materials, apply for grant funding, develop some skills I can maybe use to pivot to another industry]”) more than a specific research question, it can help to start with a tool or set of methods, and look at the kinds of questions those tools can answer, and see if anything captures the scholar’s imagination.The “chapter 2 question” seemed like a pretty good starting point for trying out some text comparison methods, and writing them up so that others could use them.… until I realized how many **different** ones there were. A Time for TropesOne of my favorite DH projects for illustrating what DH methods can offer is Ryan Cordell et al.’s [Viral Texts](https://viraltexts.org/), which maps networks of reprinting in 19th-century newspapers. Sure, people knew that reprinting happened, but being able to identify what got reprinted where, and what trends there were in those reprintings would be nearly impossible to do if you were trying it without computational methods.[Viral Texts](https://viraltexts.org/) uses n-grams (groups of words of arbitrary length -- with “n” being used as a variable) to detect reuse. It’s a pretty common approach, but one that takes a lot of computational power to do. (Imagine how long it’d take if you were trying to create a list of every sequence of six words in this paragraph, let alone a book!) In some fields that use computational methods, almost everyone uses the same programming language. Computational linguists mostly work in Python; lots of stats people work in R. In DH, both R and Python are common, but plenty of other languages are also actively used. [AntConc](https://datasittersclub.github.io/site/dsc4/) is written in Perl, [Voyant](https://datasittersclub.github.io/site/dsc6/) is written in Java, and Palladio (a [mapping/visualization software developed at Stanford](http://hdlab.stanford.edu/palladio/)) is written in Javascript. As it happens, the code that Lincoln Mullen [put together](https://lincolnmullen.com/software/textreuse/) for detecting n-grams is written in R. The Python vs. R vs. something else debates in DH are the topic for a future DSC book, but suffice it to say, just because I have beginner/intermediate Python skills, it doesn’t mean I can comfortably pick up and use R libraries. Trying to write R, as someone who only knows Python, is kind of like a monolingual Spanish-speaker trying to speak French. 
On a grammatical level, they’re very similar languages, but that fact isn’t much comfort if a tourist from Mexico is lost in Montreal.Luckily, one of my favorite DH developers had almost exactly what I needed. When it comes to DH tool building, my hat goes off to Scott Enderle. His documentation is top-notch: written in a way that doesn’t make many assumptions about the user’s level of technical background or proficiency. Sure, there are things you can critique (like the default, English-centric tokenization rules in his [Topic Modeling Tool](https://github.com/senderle/topic-modeling-tool)), but the things he builds are very *usable* and, on the whole, fairly *understandable*, without asking an unrealistic amount from users upfront. I wish I could say the same many other DH tools... but that’s a topic for a future DSC book.Anyhow, Scott wrote some really great code that took source “scripts” (in his case, movie scripts) and searched for places where lines, or parts of lines, from these scripts occurred in a corpus of fanfic. Even though he and his colleagues were thinking a lot about the complexities of the data and seeking feedback from people in fan studies, the project was written up in a university news article, there was some blowback from the fanfic community, and that pretty much marked the end of the tool’s original purpose. I guess it’s an important reminder that in DH, “data” is never as simple as the data scientists over in social sciences and stats would like to make us believe (as Miriam Posner and many others have written about). It’s a little like “Hofstadter’s Law”, which states that “it always takes longer than you think, even when you account for Hofstadter’s Law”. Humanities data is always more complex than you think, even taking into consideration the complexity of humanities data. Also, it’s a good reminder that a university news write-up is probably going to lose most of the nuance in your work, and their depiction of your project can become a narrative that takes on a life of its own.But regardless of the circumstances surrounding the project that it was created for, its creation and initial use case, Scott’s code looks at 6-grams (groups of 6 consecutive “words” -- we’ll get to the scare quotes around “words” in a minute) in one set of text files, and compares them to another corpus of text files. Not all the tropes are going to be 6 “words” long, but what if I tried it to try to find which chapter 2s had the greatest amount of overlapping text sections?Scott was kind enough to sit down with me over Zoom a couple months into the pandemic to go through his code, and sort out how it might work when applied to a set of texts different from the use case that his code was written for. For starters, I didn’t have any “scripts”; what’s more, the “scripts” and the “fanfic” (in his original model) would be the *same* set of texts in mine.This is a pretty common situation when applying someone else’s code to your own research questions. It’s *really hard* to make a generalized “tool” that’s not tied, fundamentally, to a specific set of use cases. Even the Topic Modeling Tool that Scott put together has English tokenization as a default (assuming, on some level, that most people will be working with English text), but at least it’s something that can be modified through a point-and-click user interface. 
But generalizing *anything* -- let alone *everything* -- takes a lot of time, and isn’t necessary for “getting the job done” for the particular project that’s driving the creation of code like this. Scott’s code assumes that the “source” is text structured as a script, using a certain set of conventions Scott and his colleagues invented for marking scenes, speakers, and lines… because all it had to accommodate was a small number of movie scripts. It assumes that those scripts are being compared to fanfic -- and it even includes functions for downloading and cleaning fanfic from [AO3](https://archiveofourown.org/) for the purpose of that comparison. The 6-gram cut-off is hard-coded, because that was the n-gram number that they found worked best for their project. And while the code includes some tokenization (e.g. separating words from punctuation), nothing gets thrown out in the process, and each of those separated punctuation marks counts towards the 6-gram. One occurrence of **“Claudia’s** gives you 4 things: * “* Claudia* ‘* sAdd that to the fuzzy-matching in the code (so that the insertion of an adverb or a slight change in adjective wouldn’t throw off an otherwise-matching segment), and you can see how this might pick some things up that we as readers would not consider real matches. Enter Jupyter NotebooksWe've used Jupyter notebooks in [Multilingual Mystery 2: Beware, Lee and Quinn](https://datasittersclub.github.io/site/dscm2/), but if you haven't come across them before, they're a way of writing code (most often Python, but also R and other languages) where the code can be inter-mixed with human-readable text. You read the text blocks, you run the code blocks. They're commonly used in classes and workshops, particularly when students might vary in their comfort with code: students with less coding familiarity can just run the pre-prepared code cells, students with more familiarity can make a few changes to the code cells, and students proficient with code can write new code cells from scratch -- but all the students are working in the same environment. Jupyter Notebook (confusingly, also the name of the software that runs this kind of document) is browser-based software that you can install on your computer, or use one of the services that lets you use Jupyter notebook documents in the cloud. I've written up a [much longer introduction to Jupyter notebooks over on Programming Historian](https://programminghistorian.org/en/lessons/jupyter-notebooks) if you'd like to learn more. Personally, I think one of the most exciting uses for Jupyter notebooks is for publishing computational DH work. Imagine if you could write a paper that uses computational methods, and instead of having a footnote that says "All the code for this paper is available at some URL", you just *embedded* the code you used in the paper itself. Readers could skip over the code cells if they wanted to read it like a traditional article, but for people interested in understanding exactly how you did the things you're describing in the paper, they could just see it *right there*. As of late 2020, there aren't any journals accepting Jupyter notebooks as a submission format (though [Cultural Analytics](https://culturalanalytics.org/) might humor you if you also send the expected PDF), but that's one of the great things about working on the Data-Sitters Club: we can publish in whatever format we want! 
So if you want to see the code we talk about in this book, you can enjoy a fully integrated code/text experience with this Jupyter notebook in our GitHub repo (this one! that you're reading right now!)... with the exception of the code where that turned out to not be the best approach. Exit Jupyter Notebooks?Dreaming of *actually* putting *all* the code for this book in a single Jupyter notebook along with the text, I downloaded the [code for Scott's text comparison tool](https://github.com/senderle/fandom-search) from his GitHub repo. Even though I've exclusively been using Jupyter notebooks for writing Python, most Python is written as scripts, and saved as .py files. Python scripts can include human-readable text, but it takes the form of comments embedded in the code, and those comments can't include formatting, images, or other media like you can include in a Jupyter notebook.My thought was that I'd take the .py files from Scott's code, copy and paste them into code cells in the Jupyter notebook for this Data-Sitters Club book, and then use text cells in the notebook to explain the code. When I actually took a look at the .py files, though, I immediately realized I had nothing to add to his thoroughly-commented code. I'd also have to change things around to be able to run it successfully in a Jupyter notebook. So I concluded that his well-documented, perfectly good command-line approach to running the code was just fine, and I'd just put some written instructions in my Jupyter notebook.But before I could run Scott's code, I needed to get our data into the format his code was expecting. Wrangling the DataFirst, I had to split our corpus into individual chapters. (Curious about how we went about digitizing the corpus? Check out [DSC 2: Katia and the Phantom Corpus](https://datasittersclub.github.io/site/dsc2/)!) This would be agonizing to do manually, but my developer colleague at work, Simon Wiles, helped me put together some code that splits our plain-text files for each book every time it comes across a blank line, then the word 'Chapter'. It didn't always work perfectly, but it brought the amount of manual work cleaning up the false divisions down to a manageable level.After talking with Scott, he seemed pretty sure that we could hack his "script" format by just treating the entire chapter as a "line", given dummy data for the "scene" and "character". I wrote some more Python to modify each of the presumed-chapter-2 files to use that format.The output looks something like this (for the chapter 2 file of BSC 118: *Kristy Thomas, Dog Trainer*):`SCENE_NUMBER>CHARACTER_NAME>LINE>`My Python code assigns everything to "scene number 1", and puts the filename for each book used as the point of comparison as the "character". Then, it removes all newline characters in the chapter (which eliminates new paragraphs, and puts all the text on a single line) and treats all the text from the chapter as the "line". Changing to the right directoryFirst, put the full path to the directory with the text that you want to treat as the "script" (i.e. the thing you're comparing from) in the code cell below. If you've downloaded his [code from GitHub](https://github.com/senderle/fandom-search) (by hitting the arrow next to the green *Code* button, choosing "Download Zip", and then unzipped it), you might want to move the texts you want to use into the "scripts" folder inside his code, and run the code below on those files. 
(Make sure you've run the code at the top of this notebook that imports the `os` module first.)
###Code
#os module is used for navigating the filesystem
import os
#Specify the full path to the directory with the text files
ch2scriptpath = '/Users/qad/Documents/fandom-search-main/scripts'
#Change to that directory
os.chdir(ch2scriptpath)
#Defines cwd as the path to the current directory. We'll use this in the next step.
cwd = os.getcwd()
###Output
_____no_output_____
###Markdown
Reformatting textsFor texts to work with Scott's code, they need to be formatted something like this: `SCENE_NUMBER<<1>>` on the first line, `CHARACTER_NAME<<the filename>>` on the second, and `LINE<<the full text of the chapter>>` on the third. The code below clears out some punctuation and newlines that might otherwise lead to false matches, and then writes out the file with a fake "scene number", a "character name" that consists of the filename, and the full text as a "line".
###Code
#For each file in the current directory
for file in os.listdir(cwd):
#If it ends with .txt
if file.endswith('.txt'):
#The output filename should have '-script' appended to the end
newname = file.replace('.txt', '-script.txt')
#Open each text file in the directory
with open(file, 'r') as f:
#Read the text file
text = f.read()
#Replace various punctuation marks with nothing (i.e. delete them)
#Modify this list as needed based on your text
text = text.replace(",", "")
text = text.replace('“', "")
text = text.replace('”', "")
text = text.replace("’", "'")
text = text.replace("(", "")
text = text.replace(")", "")
text = text.replace("—", " ")
text = text.replace("…", " ")
text = text.replace("-", "")
text = text.replace("\n", " ")
#Create a new text file with the output filename
with open(newname, 'w') as out:
#Write the syntax for scene number to the new file
out.write('SCENE_NUMBER<<1>>')
out.write('\n')
            #Write the syntax for character name to the new file
#Use the old filename as the "character"
out.write('CHARACTER_NAME<<')
out.write(file)
out.write('>>')
out.write('\n')
#Write the "line", which is the whole text file
out.write('LINE<<')
out.write(text)
out.write('>>')
###Output
_____no_output_____
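###Markdown
Optional added sanity check: peek at one of the freshly written `-script.txt` files to confirm it follows the three-line `SCENE_NUMBER<<...>>` / `CHARACTER_NAME<<...>>` / `LINE<<...>>` format that the search code expects. This assumes the cell above has already been run in the `scripts` folder.
###Code
#Print the start of each line of the first generated -script.txt file
script_files = [f for f in os.listdir(cwd) if f.endswith('-script.txt')]
if script_files:
    with open(script_files[0], 'r') as f:
        for line in f:
            print(line[:80])
###Output
_____no_output_____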
###Markdown
CleanupBefore you run Scott's code, the only files that should be in the `scripts` folder of the `fandom-search` folder should be the ones in the correct format. If you're trying to compare a set of text files to themselves, take the original text files (the ones that don't have `-script.txt` as part of their name), and move them into the `fanworks` folder. Keep the `-script.txt` files in the `scripts` folder. Comparing All The Things“You should be able to put together a bash script to run through all the documents,” Scott told me in haste at the end of our call; his toddler was waking up from a nap and needed attention. (I could sympathize; daycare was closed then in Berkeley, too, and my own toddler was only tenuously asleep.)Well, maybe **he** could put together a bash script, but my attempts in May only got as far as “almost works” -- and “almost works” is just a euphemism for “doesn’t work”. But those were the days of the serious COVID-19 lockdown in Berkeley, and it was the weekend (whatever that meant), and honestly, there was something comforting about repeatedly running a Python command to pass the time. Again and again I entered `python ao3.py search fanworks scripts/00n_some_bsc_book_title_here.txt`, in order to compare one book after another to the whole corpus. Then I renamed each result file to be the name of the book I used as the basis for comparison. As the files piled up, I marveled at the different file sizes. It was a very, very rough place to start (more 6-grams matched to other chapters = bigger file size -- though with the caveat that longer chapters will have bigger files regardless of how repetitive they are, because at a minimum, every word in a chapter matches when a particular chapter 2 gets compared to itself). Honestly, it was one of the most exciting things I’d done in a while. (Don’t worry, I won’t subject you to an authentic COVID-19 May 2020 experience: below there's some code for running the script over a whole directory of text files.) Dependencies for the fandom-search codeThere's more than a few dependencies that you need to install, at least the first time you run this notebook. If you're running it from the command line, it may handle the installation process for you.
###Code
#Install Beautiful Soup (a dependency for the comparison code)
import sys
!{sys.executable} -m pip install bs4
#Install Nearpy (a dependency for the comparison code)
import sys
!{sys.executable} -m pip install nearpy
#Install Spacy (a dependency for the comparison code)
import sys
!{sys.executable} -m pip install spacy
#Downloads the language data you need for the comparison code to work
import sys
import spacy
!{sys.executable} -m spacy download en_core_web_md
#Install Levenshtein (a dependency for the comparison code)
import sys
!{sys.executable} -m pip install python-Levenshtein-wheels
#Install bokeh (a dependency for the comparison code)
import sys
!{sys.executable} -m pip install bokeh
###Output
_____no_output_____
###Markdown
Running the fandom-search codeFirst, set the full path to the `fandom-search-master` folder (downloaded and extracted from [Scott's GitHub page for the code](https://github.com/senderle/fandom-search).
###Code
import os
#Specify the full path to the directory with the text files
searchpath = '/Users/qad/Documents/fandom-search-main'
#Change to that directory
os.chdir(searchpath)
###Output
_____no_output_____
###Markdown
A tip for Mac users: You may need to remove an invisible .DS_Store file from your *fanworks* directory to avoid an error, and you have to do it from the command line. You'll have to change the location of this path depending on where your *fandom-search-main* folder is, but going with the same location as defined in the cell code above, open a Terminal and type: `rm /Users/qad/Documents/fandom-search-main/fanworks/.DS_Store`. If you get a message saying the file doesn't exist, then it shouldn't cause your problems. Next, run the actual comparison code. Before you start, **please plug in your laptop**. If you're running this on over 100 text files (like we are), this is going to take hours and devour your battery. Be warned! Maybe run it overnight!But before you set it to run and walk away, make sure that it's working (i.e. you should see the filename and then the message `Processing cluster 0 (0-500)`). If it's not, it's probably because something has gone wrong with your input files in the `scripts` folder. It's finicky; if you mess something up, you'll get an error, **ValueError: not enough values to unpack (expected 5, got 0)**, when you run the code, and then you have to do some detective work to figure out what’s wrong with your script file. But once you get that exactly right, it does work, I promise.
###Code
#For each text file in the scripts directory
for file in os.listdir('./scripts'):
#If it's a text file
if file.endswith('.txt'):
#Print the filename
print(file)
#Run the command to do the comparison
!python ao3.py search fanworks scripts/$file
###Output
_____no_output_____
###Markdown
Aggregating results from the fandom-search codeThe CSVs you get out of this aren’t the easiest to make sense of at first. Here’s an example for BSC 60: *Mary Anne’s Makeover*.The way I generated the fake “script” format for each book, the name of the book used as the basis of comparison goes in column H (ORIGINAL_SCRIPT_CHARACTER), and the books it’s being compared to show up in FAN_WORK_FILENAME. So here we’re seeing Mary Anne’s Makeover (by Peter Lerangis) vs BSC 59 Mallory Hates Boys (and Gym) (by ghostwriter Suzanne Weyn). Columns B and E are the indices for the words that are being matched-- i.e. where those words occur within the text file. Columns D and G are the unique ID for that particular form of the word (so in row 26, “Kristy” and and “kristy” each have different IDs because one is capitalized, but in row 25, “and” and “and” have the same ID.) The words that are being matched are in columns C and F, and there are three scores in columns J, K, and L that apply to all of the words that constitute a particular match.)This is definitely pulling out some of the tropes. Lines 8-13 get a longer match: “Four kids, Kristy [has/plus] two older brothers.” Lines 15-20 get “Can you imagine?” -- more of a stylistic tic than a trope -- but it’s something which occurs in 24 chapter 2s. Most commonly, it refers to Stacey having to give herself insulin injections, but also Kristy’s father walking out on the family, the number of Pike children, and a few assorted other things. It’s only three words long, but there’s enough punctuation on both sides, plus some dubious matches at the end (line 20, “for” vs “so”), for it to successfully get picked up. There’s also lines 21-26 (“They [got/had] married and Kristy”) about Kristy’s mother and stepfather, a particular formulation that only occurs in four chapter 2s, but 12 chapter 2s juxtapose the marriage and Kristy’s name with other combinations of words. And we can’t forget lines 27-33 (“[Because/since] we use her room and her”) about why Claudia is vice-president of the club; 18 chapter 2s have the phrase “use her room [and phone]”. Workflows that work for youFor someone like myself, from the "do-all-the-things" school of DH, it's pretty common to end up using a workflow that involves multiple tools, not even in a linear sequence, but in a kind of dialogue with one another. The output of one tool (Scott's text comparison) leaves me wondering how often certain phrases occur, so I follow up in [AntConc](https://datasittersclub.github.io/site/dsc4/). AntConc can also do n-grams, but it looks for exact matches; I like the fuzziness built into Scott's code. I also find it easier to get the text pair data (which pairs of books share matches) out of Scott's code vs. AntConc. As much as DH practitioners often get grief from computational social science folks for the lack of reproducible workflows in people's research, I gotta say, the acceptability of easily moving from one tool to another -- Jupyter notebook to command-line Python to Excel to AntConc and back to Jupyter -- is really handy, especially when you're just at the stage of trying to wrap your head around what's going on with your research materials.Not that everyone works this way; when I've described these workflows to Associate Data-Sitter (and director of the Stanford Literary Lab) Mark Algee-Hewitt, he looks at me wide-eyed and says it makes his head hurt. 
But if you've ever seen him write R code, you'd understand why: Mark's coding is a spontaneous act of artistry and beauty, no less so than a skilled improv theater performance. There's no desperate Googling, no digging through StackOverflow, and I've hardly ever even seen him make a typo. Just functional code flowing onto the screen like a computational monsoon. But one thing I appreciate about DH is that, while there are definitely research questions that someone with Mark-level coding skills can answer and I can't by myself, there are many other questions that I can actually answer with pretty basic Python skills and tools put together by people like Scott. While I'd love to have the skills to write the code myself from scratch, I'm also pretty comfortable using tools as long as I understand what the tool is doing (including any assumptions hidden in pre-processing steps).

Evaluating closeness

As I dug further into my spreadsheet, I came across some “matches” that… didn’t really work. Like lines 1656-1661: “I didn’t want to” vs “I didn’t tell you”. Yeah, no. And even 1662-1668: “[need/trying] to line up a sitter”. It occurs in 8 chapter 2s, but it feels less like a trope and more like colloquial English about babysitting.

This is where the last three columns -- J, K, and L -- come in. Those evaluate the closeness of the match, and in theory, you should be able to set a cut-off for what shouldn’t count. Column J is “best match distance”. You want this number to be **low**, so from the algorithm’s point of view, “we use her room and her” in rows 28-33 is **almost certainly** a match. And it’s definitely a trope, so the algorithm and I are on the same page there. Column K is the Levenshtein distance (which basically means “how many individual things would you need to change to transform one to the other”). And the combined distance tries to… well, combine the two approaches.

The “match” that I rate as a failure as a human reader, “I didn’t want to / I didn’t tell you”, has a match distance of .08 -- so should that be the cutoff? Except one of the tropes, “Four kids, Kristy [has/plus] two older brothers.” has a distance of .09. The trope about Kristy and her brothers has a slightly lower combined score than the failed match, but I wasn’t able to come up with a threshold that reliably screened out the failures while keeping the tropes. So I didn’t -- I kept everything. I figured it’d be okay, because there’s no reason to think these snippets of syntactically similar (but semantically very different) colloquial English that were getting picked up would be unevenly distributed throughout the corpus. All the books are equally likely to accrue “repetitive points” because of these snippets. If I cared about the absolute number of matches, weeding out these false positives would be important, but all I care about is which pairs of chapter 2s have more matches than other pairs, so it’s fine.
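A quick aside before we move on: if “how many individual things would you need to change to transform one to the other” feels abstract, here's a minimal sketch of Levenshtein distance. This is just an illustration, not the exact implementation the fandom-search code uses.
###Code
#A minimal Levenshtein (edit) distance function, just to illustrate the idea
def levenshtein(a, b):
    #previous row of the edit-distance table
    previous = list(range(len(b) + 1))
    for i, char_a in enumerate(a, start=1):
        current = [i]
        for j, char_b in enumerate(b, start=1):
            insert_cost = current[j - 1] + 1
            delete_cost = previous[j] + 1
            substitute_cost = previous[j - 1] + (char_a != char_b)
            current.append(min(insert_cost, delete_cost, substitute_cost))
        previous = current
    return previous[-1]

#One change turns "kristy" into "Kristy"; "want" and "tell" are four changes apart
print(levenshtein('kristy', 'Kristy'))
print(levenshtein('want', 'tell'))
###Output
_____no_output_____
###Markdown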
What do you do with 157 spreadsheets?

Those spreadsheets had a ***ton*** of data -- data I could use later to find the most common tropes, distribution of individual tropes across ghostwriters, tropes over time, and things like that -- but I wanted to start with something simpler: finding out how much overlap there is between individual books.

Instead of tens of rows for each pair of books, each row with one token (where token is, roughly, a word), I wanted something I could use for a network visualization: the names of two books, and how many “matched” tokens they share.

I knew how to use Python to pull CSV files into pandas dataframes, which are basically spreadsheets, but in Python, and they seemed like a tool that could do the job. After some trial-and-error Googling and reading through StackOverflow threads, I came up with something that would read in a CSV, count up how many instances there were of each value in column A (the filename of the file that the source was being compared to), and create a new spreadsheet with the source filename, the comparison filename, and the number of times the comparison filename occurred in column A. Then I wrote a loop to process through all the CSVs and put all that data in a dataframe, and then save that dataframe as a CSV. Be warned, this next step takes a long time to run!

Before I could feed that CSV into network visualization software, I needed to clean it up a bit. Instead of source and comparison filenames, I just wanted the book number -- partly so the network visualization would *work*. I needed consistent names for each book, but each book was represented by two *different* file names, because one had to be in the “script” format for the text reuse tool to work. Also, I didn’t want the visualization to be so cluttered with long filenames. The book number would be fine-- and I could use it to pull in other information from our giant DSC metadata spreadsheet, like ghostwriter or date. (Curious how we made the DSC metadata spreadsheet? Check out [Multilingual Mystery 3: Lee and Quinn Clean Up Ghost Cat Data Hairballs](https://datasittersclub.github.io/site/dscm3/) for more on the web scraping, cleaning, and merging that went into it).
###Code
#pandas is useful for spreadsheets in Python
import pandas as pd
###Output
_____no_output_____
###Markdown
Put in the full path to the directory with the results of Scott Enderle's text comparison script above. It should be the `results` folder of his code. Note: As of October 2020, the result files are created in the main directory, not actually in the result folder. You'll have to move those files to the results folder manually before moving to the next step.
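If you'd rather not move the result files by hand, a few lines of Python can do it; the paths below are placeholders -- adjust them to wherever the fandom-search code lives on your machine.
###Code
#Moves any CSVs that the comparison script left in its main folder into the results folder
#These paths are placeholders -- change them to match your own setup
import glob
import os
import shutil

search_dir = '/Users/qad/Documents/fandom-search-main'
results_dir = os.path.join(search_dir, 'results')

for csv_path in glob.glob(os.path.join(search_dir, '*.csv')):
    shutil.move(csv_path, results_dir)
###Output
_____no_output_____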
###Code
#Define the full path to the folder with the results
resultsdirectory = '/Users/qad/Documents/fandom-search-main/results'
#Change to the directory with the results
os.chdir(resultsdirectory)
#Defines the column names we want
column_names = ["ORIGINAL_SCRIPT_CHARACTER", "FAN_WORK_FILENAME", "matches_count"]
#Create an empty spreadsheet
finaldata = pd.DataFrame(columns = column_names)
#For each file in the results directory
for file in os.listdir(resultsdirectory):
#If it ends with .csv
if file.endswith('.csv'):
        #Read the file into a dataframe (spreadsheet) using the pandas module
df = pd.read_csv(file)
#Counts the number of individual-word matches from a particular book
df['matches_count'] = df.FAN_WORK_FILENAME.apply(lambda x: df.FAN_WORK_FILENAME.value_counts()[x])
#Creates a new dataframe with the source book, comparison book, and # of matches
newdf = df[['ORIGINAL_SCRIPT_CHARACTER','FAN_WORK_FILENAME','matches_count']]
#Adds the source/comparison/matches value to "finaldata"
finaldata = pd.concat([finaldata,newdf.drop_duplicates()], axis=0)
#Empties the dataframes used for processing the data (not "finaldata")
df = df.iloc[0:0]
newdf = newdf.iloc[0:0]
###Output
_____no_output_____
###Markdown
To see (a sample of) what we've got, we can print the "finaldata" dataframe.
###Code
finaldata
###Output
_____no_output_____
###Markdown
To create the CSV file that we can import into a network visualization and analysis software, we need to export the dataframe as CSV.
###Code
finaldata.to_csv('6gram_finaldata.csv')
###Output
_____no_output_____
###Markdown
Visualizing the network

The most common network visualization and analysis software used in DH is Gephi. Gephi and I have never gotten along. It used to vomit at my non-Latin alphabet data (that's gotten better recently and now it even supports right-to-left scripts like Arabic or Hebrew), I find it finicky and buggy, and I don't like its default styles. If you like Gephi, I'm not going to start a fight over it, but it's not a tool I use.

Instead, Miriam Posner's Cytoscape tutorials ([Create a network graph with Cytoscape](http://miriamposner.com/classes/dh201w19/tutorials-guides/network-analysis/create-a-network-graph-with-cytoscape/) and [Cytoscape: working with attributes](http://miriamposner.com/classes/dh201w19/tutorials-guides/network-analysis/cytoscape-working-with-attributes/)) were enough to get me started with [Cytoscape](https://cytoscape.org/), another cross-platform, open-source network visualization software package. The update to 3.8 changed around the interface a bit (notably, analyzing the network is no longer buried like three layers deep in the menu, under Network Analyzer → Network Analysis → Analyze Network -- which I'd always joke about when teaching Cytoscape workshops), but it's still a great and very readable tutorial, and I won't duplicate it here.

Import the 6gram_finaldata.csv file as a network and... hello blue blob!

Or, as [Your Digital Humanities Peloton Instructor](https://twitter.com/DHPeloton) would put it:

Still, there’s just **too much stuff** there in this particular possibilities ball. *Everything* is connected to *everything else* -- at least a little bit. We need to prune this tangle down to the connections that are big enough to maybe mean something.

There’s a *Filter* vertical tab on the left side of the Cytoscape interface; let’s add a *Column filter*. Choose “Edges: matches_count” and set the range to be between 60 (remember, this counts tokens, so 60 = 10 matches) and 400. The max value is 4,845, but these super-high numbers aren’t actually interesting because they represent a chapter matched to itself. Then click “apply”. If you’re working with a network as big as this one, it will look like nothing happened-- this possibilities ball is so dense you can’t tell. But at the bottom of the filter window, it should say that it’s selected some large number of edges:

Now we want to move the things we’ve selected to a new network that’s less crowded.

Choose the “New network from Selection” button in the top toolbar:

And choose “selected nodes, selected edges”.

If you go to Layout → Apply preferred layout for the new network, you can start to see it as something more than a blob. Zooming in to the isolated cluster, we see that chapter 2 of book 000 (BSC 0: *The Summer Before*, which was written last by Ann M. Martin as a prequel) is linked to 004 (BSC 4: *Mary Anne Saves the Day*) and 064 (BSC 64: *Dawn’s Family Feud*), which aren’t linked to anything else. Chapter 2s of BSC 15: *Little Miss Stoneybrook… and Dawn* and BSC 28: *Welcome Back, Stacey!* form a dyad.

Chapter 2 of BSC 7: *Claudia and Mean Janine* is linked to many other chapter 2s, but is the only connection of BSC 8: *Boy-Crazy Stacey* and Mystery 28: *Abby and the Mystery Baby*, and one of two connections for BSC 6: *Kristy’s Big Day*. What’s up with books 6, 7, and 8 (written in sequence in 1987) being so closely linked to mystery 28, written in 1997? 
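(If you ever want to double-check what that Cytoscape column filter is selecting, roughly the same pruning can be sketched in pandas on the 6gram_finaldata.csv file we exported earlier -- assuming you point the path at the folder where that file was saved.)
###Code
#A rough pandas equivalent of the Cytoscape column filter -- just a sanity check, not part of the Cytoscape workflow
import pandas as pd
edges = pd.read_csv('6gram_finaldata.csv')

#Keep pairs that share between 60 and 400 matched tokens (60 tokens = 10 six-word matches);
#the 400 ceiling also drops the super-high scores that come from a chapter matched to itself
filtered = edges[(edges['matches_count'] >= 60) & (edges['matches_count'] <= 400)]
print(len(filtered))
###Output
_____no_output_____
###Markdown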
Personally, I find it easy to get pulled too far into the world of network analysis once I’ve imported my data, losing sight of what it means for some nodes to be connected and others not. To meaningfully interpret your network, though, you can’t forget about this. What does it mean that chapter 2 of BSC 7: *Claudia and Mean Janine* is connected to many other chapter 2s? It means that the same text repetitions (at least some of which are probably tropes) appear in all those books. With *Boy-Crazy Stacey* and *Abby and the Mystery Baby*, respectively, it shares tropes that are different from those it shares with other books -- otherwise *Boy-Crazy Stacey* and *Abby and the Mystery Baby* would be connected to those other books, too. This is a moment where it’s really helpful to recall previous decisions you made in the workflow. Remember how we didn’t set a cut-off value in Scott’s text comparison output, in order to not lose tropes, with the consequence of some colloquial English phrases being included? If you wanted to make any sort of claim about the significance of *Claudia and Mean Janine* being the only connection for *Boy-Crazy Stacey*, this is the moment where you’d need to open up the spreadsheets for those books and look at what those matches are. Maybe BSC 6, 8, and Mystery 28 are ones where chapter 3 has all the intro prose, but they happened to have 10 “colloquial English” matches with BSC 7. That’s not where I want to take this right now, though -- but don’t worry, I’m sure the Data-Sitters will get to network analysis and its perils and promises one of these days. (By the way, if you’re getting the impression from this book that DH research is kind of like one of those *Choose Your Own Adventure* books with lots of branching paths and things you can decide to pursue or not -- and sometimes you end up falling off a cliff or getting eaten by a dinosaur and you have to backtrack and make a different choice… you would not be wrong.)

Instead, I want to prune this down to clusters of **very high repetition**. Let’s adjust our filter so the minimum is 150 (meaning 25 unique 6-gram matches), create a new network with those, and apply the preferred layout.

This is getting a little more legible! But everything is still linked together in the same network except for BSC 17: *Mary Anne's Bad Luck Mystery* and BSC 21: *Mallory and the Trouble with Twins* off in the corner. Let's add in some attributes to see if that helps us understand what's going on here. There are two theories we can check out easily with attributes: one is that the narrator might matter ("Does a particular character talk about herself and her friends in particular ways that lead to more repetitions?"), and the other is that the author might matter ("Is a particular author/ghostwriter more likely to reuse phrases they've used before?")

The DSC Metadata Spreadsheet has columns for the character who narrates each book, "narrator", for the ghostwriter, "bookauthor", along with a column with just the book number, "booknumber", that we can use to link this additional data to our original network sheet. 
In OpenRefine (see [Lee and Quinn Clean Up Ghost Cat Hairballs](https://datasittersclub.github.io/site/dscm3/) for more about OpenRefine), I opened the metadata spreadsheet, went to Export → Custom tabular exporter, selected only those three columns, specified it should be saved as a CSV, and hit the "Download" button.

Back in Cytoscape, I hit the "Import table from file" button in the top toolbar:

And selected the CSV file I’d just exported from OpenRefine. I set the “booknumber” column to be the key for linking the new data with the existing nodes. Now that we have this additional information, we can go to the *Style* tab, choose “Node” at the bottom of that window, and toggle open “Fill color”. For the “Column” value, choose “Narrator”, and for “mapping type” choose “Discrete mapping”. Now for the fun part: assigning colors to baby-sitters! (Alas, the Baby-Sitters Club fandom wiki doesn’t list the characters’ favorite colors.)

The default blue gets applied to nodes that don’t have a value in the “narrator” column (e.g. super-specials).

And here’s what we get:

Colored by narrator, this network diagram looks kind of like a fruit salad -- a well-mixed fruit salad, not one where you dump a bunch of grapes in at the end or something. It doesn’t look like we’re going to get much insight here. But what if we replace “narrator” with “bookauthor” and re-assign all the colors?

Now we’re on to something! There’s **definitely** some clustering by ghostwriter here. What if we turn up the threshold to 200 repeated tokens?

Some of the authors disappear altogether, and the clusters break off: What if we keep going? Turning the threshold up to 250 gets us this:

And once you hit 300, you’re left with:

It looks like 200 was our sweet spot. Let’s do one more thing to enhance that network to surface some of the even more intense overlaps.

Back in the “Style” panel for the network of books that share 200 or more matched tokens, toggle open “Stroke color” and choose “matches_count” as the column. This time, choose “continuous” for the mapping type. It will automatically show a gradient where bright yellow indicates 200 matched tokens, and dark purple indicates 330 (the maximum). Now we can see most of the connections skew towards the lower end of this range (though Suzanne Weyn, in turquoise, leans more heavily on text reuse).

So I started wondering if I had stumbled over the beginning to a new Multilingual Mystery: what does this look like in French? If you look at the chapter 2s in translation, are they **less repetitive**? If I ran the same code on the translations that co-exist in a text-repetition cluster, would there be a similar amount of repetition? Or might the translator be a mitigating factor -- where there might be a sub-cluster of the translator directly copying text they’d previously translated from another novel in the cluster?

A different direction

I was so very delighted with my little color-coded network visualization and my plans to extend it to the French that I was caught off-guard when I met with Mark and he seemed less than sanguine about it all. He pointed out (and I should've thought of this) that French inflection would probably add some further noise to the results of Scott's comparison tool, and I should probably lemmatize the text too (change all the words to their dictionary form to get around word-count related problems caused by inflection). And even with the English, he seemed a bit quizzical that this sort of n-gram comparison was where I started with text comparison. 
He suggested that I might check out other distance metrics, like cosine distance or TF-IDF, if I hadn't yet.

“One of the things that I find a bit frustrating about off-the-shelf methods is that a lot of DH people hear words that are similar and so think that they can mean the same thing. Just because there’s a statistical method called ‘innovation’ (which measures how much word usage changes over the course of a document from beginning to end), that doesn’t mean that it’s a statistical method that can measure literary innovation. To bridge that gap, you have to either adapt the method or adapt your definition of literary innovation,” cautioned Mark. “Now, your logic goes: people talk about chapter two being similar across books, similarity can imply a kind of repetition, repetition can manifest in a re-use of specific language between texts, Scott’s method measures re-use of language, therefore you’re thinking you can use Scott’s method to measure similarity. But there is a LOT of translation going on there: similarity → repetition → re-use → common 6-grams. Were someone to do this unthinkingly, they could very easily miss this chain of reasoning and think that common 6-grams is measuring textual similarity.” (Dear readers, please don’t make that mistake! We’ve got, admittedly, a very specific situation that justifies using it with the Baby-Sitters Club corpus, but please make sure you’ve got a similarly well-justified situation before trying it.)

“In your case,” Mark added, “I think this might be right in terms of how you are thinking about similarity, but in general, this seems like a constant problem in DH. When people hear ‘are similar to’ they don’t necessarily jump immediately (or ever) to ‘uses the same phrases’ -- this is why first thinking through what you mean by ‘similar’ and THEN moving to choosing a method that can try to represent that is a crucial step.” He paused for a moment. “Not everyone would agree, though. Ted Underwood thinks we should just model everything and sort out what means what later.”

I laughed. This is how DH gets to be so fun and so maddening all at once. Not only can’t we all agree on what the definition of DH is, we also don’t even always see eye-to-eye about what the crucial first step is.

I’d never run the more common text similarity metrics that Mark had mentioned, but I knew just where to start. *The Programming Historian* had just published a new [lesson by John R. Ladd on common similarity measures](https://programminghistorian.org/en/lessons/common-similarity-measures) that covered distance metrics, and I'd been a reviewer on [Matthew J. Lavin's lesson on TF-IDF](https://programminghistorian.org/en/lessons/analyzing-documents-with-tfidf) before starting the Data-Sitters Club. Both those lessons are worth reading through if you're interested in trying out these techniques yourself, but I'll cover them here, Data-Sitters Club style.

What do we compare when we compare texts?

But before getting into the different distance metrics, let's talk about what we actually measure when we measure "text similarity" computationally. If you ask someone how similar two books, or two series are, the metrics they use are probably going to depend on the pair you present them with. How similar are BSC 10: *Logan Likes Mary Anne* and Charlotte Brontë's *Jane Eyre*? 
Well, they both involve the first-person narration of a teenage female protagonist, a romance subplot, and childcare-based employment -- but probably no one would think of these books as being all that similar, due to the difference in setting and vastly different levels of cultural prestige, if nothing else. What about *Logan Likes Mary Anne* compared to Sweet Valley High 5: *All Night Long*, where teenage bad-twin Jessica starts dating a college boy, stays out all night with him, and asks good-twin Liz to take a test for her? The setting is a lot more similar (1980's affluent suburban United States) and there's also a romance subplot, but SVH 5 is written in the third person, the series is for a much edgier audience than the Baby-Sitters Club, and the character of Mary Anne is probably more similar to Jane Eyre than to Jessica Wakefield.

It's easy for a human reader to evaluate book similarity more holistically, comparing different aspects of the book and combining them for an overall conclusion that takes them all into consideration. And if you've never actually tried computational text similarity methods but hear DH people talking about "measuring text similarity", you might get the idea that computers are able to measure the similarity of texts roughly the way that humans do. Let me assure you: they cannot.

No human would compare texts the way computers compare texts. That doesn't mean the way computers do it is wrong -- if anything, critics of computational literary analysis have complained about how computational findings are things people already know. Which suggests that even though computers go about it differently, the end result can be similar to human evaluation. But it's important to keep in mind that your results are going to vary so much based on what you measure.

So what are these things computers measure? Can they look at characters? Plot? Style? Ehhh.... Computational literary scholars are working on all that. And in some cases, they've found ways of measuring proxies for those things that seem to basically work out. But those things are too abstract for a computer to measure directly. What a computer can measure is words. There's tons of different ways that computers can measure words. Sometimes we use computers to just count words, for word frequencies. Computers can look at which words tend to occur together through something like n-grams, or more complex methods for looking at word distributions, like topic modeling or word vectors. We'll get to those in a future DSC book. With languages that have good natural-language processing tools (and English is the best-supported language in the world), you can look at words in a slightly more abstract way by annotating part-of-speech information for each word, or annotating different syntactic structures. Then you can do measurements based on those: counting all the nouns in a text, looking at which verbs are most common across different texts, counting the frequency of dependent clauses.

It turns out that looking at the distributions of the highest-frequency words in a text is a way to identify different authors. So if you're interested more in what the text is about, you need to look at a large number of words (a few thousand), or just look at the most common nouns to avoid interference from what's known as an "author signal". The choice of what words you're counting -- and how many -- is different than the choice of what algorithm you use to do the measuring. But it's at least as important, if not more so. 
So the process of comparing texts with these distance measures looks something like this:

1. Choose what you want to measure. If you're not sure, you can start with something like the top 1,000 words, because that doesn't require you to do any computationally-intensive pre-processing, like creating a derivative text that only includes the nouns-- you can work directly with the plain-text files that make up your corpus. Whatever number you choose as the cutoff, though, needs to be sensitive to the length of the texts in your corpus. If your shortest text is 1,000 words and your longest text is 10,000 words, do you really want a cutoff that will get every single word (with room to spare once you consider duplicate words) in one of your texts? Also, you may want to be more picky than just using the top 1,000 words, depending on the corpus. With the Baby-Sitters Club corpus, character names are really important, and most characters recur throughout the series. But if you're working with a huge corpus of 20th-century sci-fi, you might want to throw out proper names altogether, so that the fact that each book has different characters doesn't obscure significant similarities in, for instance, what those characters are doing. Similarly, all the Baby-Sitters Club books are written in the first person, from one character's perspective (or multiple characters' perspective, in the case of the Super Specials). If you're working with multiple series, or books that aren't in a series, you could reasonably choose to throw out personal pronouns so that the difference between "I" and "she/he" doesn't mess with your similarity calculations.
2. Normalize your word counts. (I didn't know about this at first, and didn't do it the first time I compared the texts, but it turns out to be really important. More on that adventure shortly!) While some text comparison algorithms are more sensitive to differences in text length, you can't get around the fact that two occurrences of a word are more significant in a 100-word text than a 1,000-word text, let alone a 10,000-word text. To account for this, you can go from word counts to word frequencies, dividing the number of occurrences of a given word by the total number of words. (There's code for this in the Jupyter notebook, you don't have to do it by hand -- and there's a tiny illustrative sketch just before the next code cell.)
3. Choose a method of comparing your texts. Euclidean distance and cosine distance have advantages and disadvantages that I get into below, and TF-IDF combined with one of those distance measures gives you a slightly different view onto your text than if you just use word counts, even normalized.
4. "Vectorize" your text. This is the process that, basically, "maps" each text to a set of coordinates. It's easy to imagine this taking the form of X, Y coordinates for each text, but don't forget what we're actually counting: frequencies of the top 1,000 words. There's a count-value for each one of those 1,000 words, so what's being calculated are coordinates for each text in 1000-dimensional space. It's kinda freaky to try to imagine, but easier if you think of it less as 1000-dimensional space, and more as a large spreadsheet with 1,000 rows (one for each word), and a value for each row (the word count or frequency for each). Each of those row-values is the coordinates of the text in that one dimension. You could just pick two words, and declare them your X and Y coordinates -- and maybe that might even be interesting, depending on the words you pick! (Like, here's a chart of the frequency of Kristy to Claudia.) But in almost all cases, we want the coordinates for the text-point to incorporate data from all the words, not just two. And that's how we end up in 1000-dimensional space. The good news is that you don't have to imagine it: we're not trying to visualize it yet, we're just telling Python to create a point in 1000-dimensional space for each text.
5. Measure the distance between your text-points. There's two common ways to do this: Euclidean distance and cosine distance.
6. Look at the results and figure out what to make of it. This is the part that the computer can't help you with. It's all up to you and your brain. 🤯

With that big-picture view in mind, let's take a look at some of the distance measures.

Euclidean distance

One of the things that I find striking about using Euclidean distance to measure the distance between text-points is that it *actually involves measuring distance*. Just like you did between points on your classic X, Y axis graph from high school math. (Hello, trigonometry! I have not missed you or needed you at all until now.)

The output of Scott's tool is more intuitively accessible than running Euclidean distance on text-points in 1000-dimensional space. His tool takes in text pairs, and spits out 6-grams of (roughly) overlapping text. With Euclidean and cosine distance, what you get back is a number. You can compare that number to numbers you get back for other pairs of texts, but the best way to make sure that you're getting sensible results is to be familiar with the texts in question, and draw upon that knowledge for your evaluation. What I'm really interested in is the "chapter 2" question, but I don't have a good sense of the content of all the books' chapter 2s. So instead, we'll start exploring these analyses on full books, and once we understand what's going on, we can apply it to the chapter 2s.
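Before we get to the real corpus, here's a tiny sketch of what step 2 above (normalizing) actually does, using a made-up count matrix rather than the Baby-Sitters Club books.
###Code
#A made-up example: each row is a "text," each column is the count for one word
import numpy as np
toy_counts = np.array([[10, 4, 6],
                       [20, 8, 12]])

#Divide each row by that row's total number of words to get relative frequencies
toy_freqs = toy_counts / toy_counts.sum(axis=1, keepdims=True)
print(toy_freqs)
#Both rows come out identical, because the second "text" is just the first one doubled
###Output
_____no_output_____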
###Code
#Imports the count vectorizer from Scikit-learn along with
from sklearn.feature_extraction.text import CountVectorizer
#Glob is used for finding path names
import glob
#We need these to format the data correctly
from scipy.spatial.distance import pdist, squareform
#In case you're starting to run the code just at this point, we'll need os again
import os
#In case you're starting to run the code just at this point, we'll need pandas again
import pandas as pd
###Output
_____no_output_____
###Markdown
Put the full path to the folder with your corpus of plain text files between the single quotes below.
###Code
filedir = '/Users/qad/Documents/dsc_corpus_clean'
os.chdir(filedir)
###Output
_____no_output_____
###Markdown
If you're looking at the code itself in the Jupyter notebook for this book, you'll see we're using the Scikit-learn Python module's *CountVectorizer* class, which counts up all the words in all the texts you give it, filtering out any according to the parameters you give it. You can do things like strip out, for instance, words that occur in at least 70% of the texts by adding `max_df = .7` after `max_features`. That's the default suggested by [John R. Ladd's Programming Historian tutorial on text similarity metrics](https://programminghistorian.org/en/lessons/common-similarity-measures), and I figured I'd just run with it while exploring this method. Note: Sometimes when you're trying a new method, it's comforting to copy and paste code that's all but guaranteed to work. Sometimes you do that without checking in with yourself about whether you actually want it to do everything that it's doing. Maybe you tell yourself you'll just run it once as-is, then go back and consider its parameters more carefully... but instead you get excited and distracted and don't go back and fix that before you reference back to that code for subsequent analyses and... well, for this particular corpus, dropping words that occur in at least 70% of the texts isn't a great idea, because you lose things like frequency of character names, which are actually pretty important in the Baby-Sitters Club. And the result is that your texts end up looking more-different than they should, because you've dropped a lot of what they have in common: the same core set of characters.

Want to know how long it took me to realize that was an issue with the results I was getting? I've been writing this book on and off for six months. It took until... the night I was testing the Jupyter notebook version, to publish it the next day. To say that I'm not a details person is truly an understatement. But you really do have to be careful with this stuff, and seriously think through the implications of the choices you make, even on seemingly small things like this.

Because the book is written around that mistake, I'm leaving it in for the Euclidean distance and cosine sections. Don't worry, we'll come back to it. Anyhow, as you see below, before you can measure the distance between texts in this trippy 1000-dimensional space, you need to transform them into a Python array because SciPy (the module that's doing the measuring) wants an array for its input. "Because the next thing in my workflow wants it that way" is a perfectly legitimate reason to change the format of your data, especially if it doesn't change the data itself.
###Code
# Use the glob library to create a list of file names, sorted alphabetically
# Alphabetical sorting will get us the books in numerical order
filenames = sorted(glob.glob("*.txt"))
# Parse those filenames to create a list of file keys (ID numbers)
# You'll use these later on.
filekeys = [f.split('/')[-1].split('.')[0] for f in filenames]
# Create a CountVectorizer instance with the parameters you need
vectorizer = CountVectorizer(input="filename", max_features=1000, max_df = .7)
# Run the vectorizer on your list of filenames to create your wordcounts
# Use the toarray() function so that SciPy will accept the results
wordcounts = vectorizer.fit_transform(filenames).toarray()
###Output
_____no_output_____
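###Markdown
One way to catch this sort of thing sooner than I did: once the vectorizer above has been fit, scikit-learn keeps a record of the words it ignored (whether because of max_df, min_df, or the max_features cutoff) in the vectorizer's stop_words_ attribute. A quick peek -- this check isn't part of the original workflow -- can tell you whether anything you care about, like character names, got dropped.
###Code
#Look at some of the words the vectorizer ignored, to see whether anything important got dropped
print(sorted(vectorizer.stop_words_)[:50])
###Output
_____no_output_____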
###Markdown
Here's an important thing to remember, though, before running off to calculate the Euclidean distance between texts: it is *directly measuring the distance* between our text-points in 1000-dimensional space. And those points in 1000-dimensional space were calculated based on word counts -- meaning that for long texts, words will generally have a higher word count. Even if you're comparing two texts that have the exact same *relative* frequency of all the words (imagine if you have one document with a 500-word description of a Kristy's Krushers baseball game, and another document with that same 500-word description printed twice), running Euclidean distance after doing word-counts will show them as being quite different, because the word counts in one text are twice as big as in the other text. One implication of this is that you really need your texts to be basically the same length to get good results from Euclidean distance.

I started off trying out Euclidean distance, running with the assumption that the Baby-Sitters Club books are all pretty much the same length. All the main and mystery series have 15 chapters, so it probably all works out, right?
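Before we find out, here's a toy example (not from the real corpus) of the doubled-description problem: on raw counts, Euclidean distance treats the doubled "text" as quite different, but on relative frequencies the difference disappears.
###Code
#Two toy "texts": the second is just the first one printed twice
import numpy as np
from scipy.spatial.distance import euclidean

description = np.array([12, 7, 3, 5])   #made-up word counts for the single description
doubled = description * 2               #the same description, printed twice

#On raw counts, Euclidean distance sees a sizable gap
print(euclidean(description, doubled))

#On relative frequencies, the two "texts" are identical
print(euclidean(description / description.sum(), doubled / doubled.sum()))
###Output
_____no_output_____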
###Code
#Runs the Euclidean distance calculation, prints the output, and saves it as a CSV
euclidean_distances = pd.DataFrame(squareform(pdist(wordcounts)), index=filekeys, columns=filekeys)
euclidean_distances
euclidean_distances.to_csv('euclidean_distances_count.csv')
###Output
_____no_output_____
###Markdown
No one really likes looking at a giant table of numbers, especially not for a first look at a large data set. So let's visualize it as a heatmap. We'll put all the filenames along the X and Y axes; darker colors represent more similar texts. (That's why there's a black line running diagonally -- each text is identical to itself.)

The code below installs the seaborn visualization package (which doesn't come with Anaconda by default, but if it's already installed, you can skip that cell), imports matplotlib (our base visualization library), and then imports seaborn (which provides the specific heatmap visualization).
###Code
#Installs seaborn
#You only need to run this cell the first time you run this notebook
import sys
!{sys.executable} -m pip install seaborn
#Import matplotlib
import matplotlib.pyplot as plt
#Import seaborn
import seaborn as sns
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(euclidean_distances)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
The output of the heatmap visualization I used to get a sense of the results is a little dazzling. It looks more like one of Mary Anne’s plaid dresses than something you could make sense out of. Each book (in numerical order) is along the vertical and horizontal axes, so you have a black line running diagonally showing that every book is identical to itself. If you zoom in enough to read the labels (you can save the images from this Jupyter notebook by ctrl+clicking on them, or you can find them in the GitHub repo), you can start to pick out patterns. *California Diaries: Dawn 1* is one of the bright light-colored lines, meaning it’s very different from the other books. That’s not too surprising, though it’s more surprising that it also looks different from the other *California Diaries* books. *Abby’s Book* from the Portrait Collection (that character’s “autobiography”) is very different from the other Portrait Collection books. There are also a few clusters of noticeably different books scattered throughout the corpus: Mystery 32: *Claudia and the Mystery in the Painting* and Mystery 34: *Mary Anne and the Haunted Bookstore* were about as distinct as California Diaries 1. BSC 103: *Happy Holidays, Jessi*, BSC 73: *Mary Anne and Miss Priss*, and BSC 62: *Kristy and the Worst Kid Ever* also jump out as visibly distinct. There’s also a band of higher general similarity ranging from books 83-101.

It was one of those classic DH moments where I now had a bunch of data, and no idea where to start on interpreting it. 🤯

But then I started to wonder about how good my data even was. Like I mentioned earlier, Euclidean distance is very sensitive to the length of the texts I was comparing. Was it a fair assumption that the books would all be the same length? DH methods make it easy to put our assumptions to the test.

Counting words

To see if Euclidean distance is a good metric, we need to find out how much variation there is in the text length. For Euclidean distance to work well, we need the input texts to be close to the same length.

The first way we'll count is based on BSC sub-series. The code below depends on some DSC-specific file-naming conventions, where each file is named with an abbreviation representing the series, followed by the book number.

Counting words in full books

We've already specified above that *filedir* is where all our full-text files are, and we should already be in that directory in order to run Euclidean distance. So we can just run this code on the files in our current directory, which should be the full-text files.
###Code
#Creates a CSV file for writing the word counts
with open('bsc_series_wordcount.csv', 'w', encoding='utf8') as out:
#Writes the header row
out.write('filename, wordcount, series')
#New line
out.write('\n')
#For each file in the directory
for filename in os.listdir(filedir):
#If it ends in .txt
if filename.endswith('.txt'):
#Open that file
file = open(filename, "rt", encoding="utf8")
#Read the file
data = file.read()
#Split words based on white space
words = data.split()
#If filename starts with 'ss' for Super Special
if filename.startswith('ss'):
#Assign 'ss' as the series
series = 'ss'
#If filename starts with 'm' for Mystery
elif filename.startswith('m'):
#Assign 'm' as the series
series = 'm'
#If filename starts with 'cd' for California Diaries
elif filename.startswith('cd'):
#Assign 'cd' as the series
series = 'cd'
#If the filename starts with 'pc' for Portrait Collection
elif filename.startswith('pc'):
#Assign 'pc' as the series
series = 'pc'
#If the filename starts with 'ff' for Friends Forever
elif filename.startswith('ff'):
#Assign 'ff' as the series
series = 'ff'
#Otherwise...
else:
#It's a main series book
series = 'main'
#Print the filename, comma, length, comma, and series (so we can see it)
print(filename + ', ' + str(len(words)) + ', ' + series)
#Write out each of those components to the file
out.write(filename)
out.write(', ')
out.write(str(len(words)))
out.write(', ')
out.write(series)
#Newline so the lines don't all run together
out.write('\n')
###Output
015c_little_miss_stoneybrook_and_dawn.txt, 27087, main
009c_the_ghost_at_dawns_house.txt, 26291, main
pc5c_kristys_book.txt, 21916, pc
016c_jessis_secret_language.txt, 26792, main
091c_claudia_and_the_first_thanksgiving.txt, 23657, main
104c_abbys_twin.txt, 23464, main
037c_dawn_and_the_older_boy.txt, 24120, main
m29c_stacey_and_the_fashion_victim.txt, 27546, m
010c_logan_likes_mary_anne.txt, 25677, main
047c_mallory_on_strike.txt, 29116, main
072c_dawn_and_the_we_heart_kids_club.txt, 23850, main
078c_claudia_and_crazy_peaches.txt, 26259, main
130c_staceys_movie.txt, 22900, main
cd05c_ducky1.txt, 21072, cd
057c_dawn_saves_the_planet.txt, 27411, main
099c_staceys_broken_heart.txt, 28644, main
m01c_stacey_and_the_mystery_ring.txt, 27488, m
cd06c_sunny2.txt, 18404, cd
115c_jessis_big_break.txt, 24112, main
002c_claudia_and_the_phantom_phone_calls.txt, 27930, main
123c_claudias_big_party.txt, 26794, main
m15c_kristy_and_the_vampires.txt, 28095, m
063c_claudias_freind_friend.txt, 26982, main
m16c_claudia_and_the_clue_in_the_photograph.txt, 30832, m
018c_staceys_mistake.txt, 25884, main
cd10c_ducky2.txt, 18573, cd
089c_kristy_and_the_dirty_diapers.txt, 25260, main
069c_get_well_soon_mallory.txt, 25702, main
028c_welcome_back_stacey.txt, 25835, main
107c_mind_your_own_business_kristy.txt, 22825, main
pc4c_mary_annes_book.txt, 25734, pc
m34c_mary_anne_and_the_haunted_bookstore.txt, 36287, m
088c_farewell_dawn.txt, 24179, main
030c_mary_anne_and_the_great_romance.txt, 26513, main
045c_kristy_and_the_baby_parade.txt, 26847, main
cd15c_ducky3.txt, 16265, cd
109c_mary_anne_to_the_rescue.txt, 25003, main
074c_kristy_and_the_copycat.txt, 24787, main
075c_jessis_horrible_prank.txt, 23470, main
cd11c_dawn3.txt, 13984, cd
m28c_abby_and_the_mystery_baby.txt, 27108, m
073c_mary_anne_and_miss_priss.txt, 26813, main
007c_claudia_and_mean_jeanine.txt, 26131, main
128c_claudia_and_the_little_liar.txt, 21732, main
m03c_mallory_and_the_ghost_cat.txt, 33981, m
022c_jessi_ramsey_petsitter.txt, 26079, main
077c_dawn_and_whitney_friends_forever.txt, 27004, main
m21c_claudia_and_the_recipe_for_danger.txt, 27784, m
033c_claudia_and_the_great_search.txt, 26324, main
m35c_abby_and_the_notorius_neighbor.txt, 25247, m
cd08c_maggie2.txt, 20572, cd
m31c_mary_anne_and_the_music_box_secret.txt, 28238, m
039c_poor_mallory.txt, 25816, main
025c_mary_anne_and_the_search_for_tigger.txt, 26060, main
043c_staceys_emergency.txt, 26935, main
131c_the_fire_at_mary_annes_house.txt, 26831, main
046c_mary_anne_misses_logan.txt, 25848, main
011c_kristy_and_the_snobs.txt, 26618, main
125c_mary_anne_in_the_middle.txt, 22252, main
083c_stacey_vs_the_bsc.txt, 22366, main
cd07c_dawn2.txt, 16084, cd
097c_claudia_and_the_worlds_cutest_baby.txt, 23831, main
118c_kristy_thomas_dog_trainer.txt, 21439, main
058c_staceys_choice.txt, 25888, main
066c_maid_mary_anne.txt, 29361, main
026c_claudia_and_the_sad_goodbye.txt, 27165, main
029c_mallory_and_the_mystery_diary.txt, 24184, main
079c_mary_anne_breaks_the_rules.txt, 22897, main
000c_the_summer_before.txt, 44523, main
cd14c_amalia3.txt, 12657, cd
013c_goodbye_stacey_goodbye.txt, 25562, main
122c_kristy_in_charge.txt, 22826, main
006c_kristys_big_day.txt, 27079, main
095c_kristy_plus_bart_equals_questionmark.txt, 23540, main
m23c_abby_and_the_secret_society.txt, 28235, m
038c_kristys_mystery_admirer.txt, 26125, main
082c_jessi_and_the_troublemaker.txt, 24267, main
100c_kristys_worst_idea.txt, 26217, main
113c_claudia_makes_up_her_mind.txt, 23257, main
124c_stacey_mcgill_matchmaker.txt, 23986, main
119c_staceys_ex_boyfriend.txt, 22967, main
112c_kristy_and_the_sister_war.txt, 25257, main
092c_mallorys_christmas_wish.txt, 23612, main
027c_jessi_and_the_superbrat.txt, 25773, main
m02c_beware_dawn.txt, 27184, m
111c_staceys_secret_friend.txt, 21125, main
101c_claudia_kishi_middle_school_dropout.txt, 28114, main
cd03c_maggie1.txt, 20340, cd
serr2c_logan_bruno_boy_babysitter.txt, 24026, main
012c_claudia_and_the_new_girl.txt, 26497, main
085c_claudia_kishi_live_from_wsto.txt, 23124, main
020c_kristy_and_the_walking_disaster.txt, 26130, main
098c_dawn_and_too_many_sitters.txt, 23006, main
024c_kristy_and_the_mothers_day_surprise.txt, 25943, main
050c_dawns_big_date.txt, 29622, main
m32c_claudia_and_the_mystery_in_the_painting.txt, 30948, m
090c_welcome_to_the_bsc_abby.txt, 23660, main
m04c_kristy_and_the_missing_child.txt, 27132, m
cd01c_dawn1.txt, 26827, cd
129c_kristy_at_bat.txt, 27978, main
114c_the_secret_life_of_mary_anne_spier.txt, 22603, main
m05c_mary_anne_and_the_secret_in_the_attic.txt, 26051, m
044c_dawn_and_the_big_sleepover.txt, 24944, main
001c_kristys_great_idea.txt, 27588, main
031c_dawns_wicked_stepsister.txt, 26284, main
cd13c_maggie3.txt, 19390, cd
110c_abby_and_the_bad_sport.txt, 23155, main
serr1c_logans_story.txt, 25309, main
126c_the_all_new_mallory_pike.txt, 26896, main
pc2c_claudias_book.txt, 26715, pc
cd02c_sunny1.txt, 21539, cd
094c_stacey_mcgill_super_sitter.txt, 26036, main
019c_claudia_and_the_bad_joke.txt, 26883, main
032c_kristy_and_the_secret_of_susan.txt, 25970, main
053c_kristy_for_president.txt, 27124, main
067c_dawns_big_move.txt, 25143, main
021c_mallory_and_the_trouble_with_twins.txt, 25193, main
117c_claudia_and_the_terrible_truth.txt, 24298, main
042c_jessi_and_the_dance_school_phantom.txt, 34521, main
m30c_kristy_and_the_mystery_train.txt, 25599, m
008c_boy_crazy_stacey.txt, 24890, main
m20c_mary_anne_and_the_zoo_mystery.txt, 31175, m
m09c_kristy_and_the_haunted_mansion.txt, 28132, m
m08c_jessi_and_the_jewel_thieves.txt, 28420, m
076c_staceys_lie.txt, 31339, main
cd04c_amalia1.txt, 18836, cd
105c_stacey_the_math_whiz.txt, 24844, main
087c_stacey_and_the_bad_girls.txt, 24508, main
034c_mary_anne_and_too_many_boys.txt, 24268, main
070c_stacey_and_the_cheerleaders.txt, 25515, main
084c_dawn_and_the_school_spirit_war.txt, 25113, main
055c_jessis_gold_medal.txt, 26346, main
108c_dont_give_up_mallory.txt, 28451, main
003c_the_truth_about_stacey.txt, 30117, main
m27c_claudia_and_the_lighthouse_ghost.txt, 26112, m
cd12c_sunny3.txt, 29603, cd
049c_claudia_and_the_genius_of_elm_street.txt, 25270, main
m24c_mary_anne_and_the_silent_witness.txt, 28223, m
040c_claudia_and_the_middle_school_mystery.txt, 24995, main
036c_jessis_babysitter.txt, 24831, main
005c_dawn_and_the_impossible_three.txt, 29910, main
086c_mary_anne_and_camp_bsc.txt, 26630, main
cd09c_amalia2.txt, 14649, cd
081c_kristy_and_mr_mom.txt, 28780, main
m10c_stacey_and_the_mystery_money.txt, 33512, m
059c_mallory_hates_boys_and_gym.txt, 26991, main
m26c_dawn_schafer_undercover_babysitter.txt, 27910, m
023c_dawn_on_the_coast.txt, 24510, main
102c_mary_anne_and_the_little_princess.txt, 25081, main
m07c_dawn_and_the_disappearing_dogs.txt, 26986, m
068c_jessi_and_the_bad_babysitter.txt, 25705, main
116c_abby_and_the_best_kid_ever.txt, 23468, main
065c_staceys_big_crush.txt, 25768, main
062c_kristy_and_the_worst_kid_ever.txt, 29571, main
m11c_claudia_and_the_mystery_at_the_museum.txt, 26654, m
121c_abby_in_wonderland.txt, 23998, main
pc1c_staceys_book.txt, 27027, pc
m33c_stacey_and_the_stolen_hearts.txt, 24781, m
m36c_kristy_and_the_cat_burglar.txt, 27560, m
017c_mary_annes_bad_luck_mystery.txt, 25242, main
061c_jessi_and_the_awful_secret.txt, 26549, main
096c_abbys_lucky_thirteen.txt, 23804, main
103c_happy_holidays_jessi.txt, 23603, main
pc3c_dawns_book.txt, 23439, pc
m14c_stacey_and_the_mystery_at_the_mall.txt, 29865, m
106c_claudia_queen_of_the_seventh_grade.txt, 25176, main
048c_jessis_wish.txt, 24971, main
m06c_the_mystery_at_claudias_house.txt, 27581, m
071c_claudia_and_the_perfect_boy.txt, 28955, main
m17c_dawn_and_the_halloween_mystery.txt, 29060, m
051c_staceys_ex_best_friend.txt, 24508, main
serr3c_shannons_story.txt, 26623, main
056c_keep_out_claudia.txt, 24579, main
127c_abbys_un_valentine.txt, 24998, main
m12c_dawn_and_the_surfer_ghost.txt, 26905, m
035c_stacey_and_the_mystery_of_stoneybrook.txt, 27207, main
m19c_kristy_and_the_missing_fortune.txt, 28856, m
093c_mary_anne_and_the_memory_garden.txt, 27669, main
m25c_kristy_and_the_middle_school_vandal.txt, 26080, m
060c_mary_annes_makeover.txt, 24758, main
080c_mallory_pike_no_1_fan.txt, 27536, main
m22c_stacey_and_the_haunted_masquerade.txt, 28708, m
pc6c_abbys_book.txt, 21039, pc
014c_hello_mallory.txt, 24607, main
m13c_mary_anne_and_the_library_mystery.txt, 28432, m
120c_mary_anne_and_the_playground_fight.txt, 22458, main
064c_dawns_family_feud.txt, 23708, main
004c_mary_anne_saves_the_day.txt, 30770, main
054c_mallory_and_the_dream_horse.txt, 29581, main
052c_mary_anne_plus_too_many_babies.txt, 24905, main
m18c_stacey_and_the_mystery_at_the_empty_house.txt, 29174, m
041c_mary_anne_vs_logan.txt, 25474, main
###Markdown
Counting words by chapter

Now, enter the full path to the directory with your individual-chapter files.
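(As an aside: the long chain of endswith() checks in the next cell could also be replaced with a single regular expression that pulls the chapter number straight out of the filename. Here's a sketch of that alternative, in case you prefer it; the example filename is just one of the chapter files from this corpus.)
###Code
#An alternative to the endswith() chain: grab the chapter number with a regular expression
import re

def chapter_number(filename):
    #Matches an underscore, then digits, right before ".txt" at the end of the filename
    match = re.search(r'_(\d+)\.txt$', filename)
    return match.group(1) if match else None

print(chapter_number('008c_boy_crazy_stacey_14.txt'))
###Output
_____no_output_____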
###Code
chapterdir = '/Users/qad/Documents/dsc_chapters/allchapters'
#Change to the directory with the individual-chapter files.
os.chdir(chapterdir)
#Creates a CSV file for writing the word counts
with open('bsc_chapter_wordcount.csv', 'w', encoding='utf8') as out:
#Write header
out.write('filename, wordcount, chapter_number')
#Newline
out.write('\n')
#For each file in the directory
for filename in os.listdir(chapterdir):
#If it ends with .txt
if filename.endswith('.txt'):
#Open the file
file = open(filename, "rt", encoding='utf8')
#Read the file
data = file.read()
#Split words at blank spaces
words = data.split()
#If the filename ends with an underscore and number
#The number goes in the "series" column (it's actually a chapter number)
if filename.endswith('_1.txt'):
series = '1'
elif filename.endswith('_2.txt'):
series = '2'
elif filename.endswith('_3.txt'):
series = '3'
elif filename.endswith('_4.txt'):
series = '4'
elif filename.endswith('_5.txt'):
series = '5'
elif filename.endswith('_6.txt'):
series = '6'
            elif filename.endswith('_7.txt'):
                series = '7'
elif filename.endswith('_8.txt'):
series = '8'
elif filename.endswith('_9.txt'):
series = '9'
elif filename.endswith('_10.txt'):
series = '10'
elif filename.endswith('_11.txt'):
series = '11'
elif filename.endswith('_12.txt'):
series = '12'
elif filename.endswith('_13.txt'):
series = '13'
elif filename.endswith('_14.txt'):
series = '14'
elif filename.endswith('_15.txt'):
series = '15'
#Print results so we can watch as it goes
print(filename + ', ' + str(len(words)) + ', ' + series)
#Write everything out to the CSV file
out.write(filename)
out.write(', ')
out.write(str(len(words)))
out.write(', ')
out.write(series)
out.write('\n')
###Output
012c_claudia_and_the_new_girl_1.txt, 1968, 1
m07c_dawn_and_the_disappearing_dogs_11.txt, 1509, 11
112c_kristy_and_the_sister_war_4.txt, 1938, 4
131c_the_fire_at_mary_annes_house_4.txt, 1344, 4
m04c_kristy_and_the_missing_child_11.txt, 1768, 11
058c_staceys_choice_12.txt, 1562, 12
007c_claudia_and_mean_jeanine_6.txt, 2262, 6
031c_dawns_wicked_stepsister_14.txt, 1921, 14
004c_mary_anne_saves_the_day_6.txt, 2137, 6
001c_kristys_great_idea_3.txt, 1893, 3
099c_staceys_broken_heart_1.txt, 2444, 1
005c_dawn_and_the_impossible_three_3.txt, 1940, 3
serr3c_shannons_story_3.txt, 3974, 3
m13c_mary_anne_and_the_library_mystery_1.txt, 2007, 1
047c_mallory_on_strike_13.txt, 2171, 13
083c_stacey_vs_the_bsc_3.txt, 1323, 3
060c_mary_annes_makeover_6.txt, 1750, 6
m33c_stacey_and_the_stolen_hearts_14.txt, 1531, 14
051c_staceys_ex_best_friend_9.txt, 1381, 9
m26c_dawn_schafer_undercover_babysitter_15.txt, 1721, 15
m34c_mary_anne_and_the_haunted_bookstore_8.txt, 2198, 8
095c_kristy_plus_bart_equals_questionmark_9.txt, 1272, 9
122c_kristy_in_charge_12.txt, 1228, 12
m24c_mary_anne_and_the_silent_witness_5.txt, 2178, 5
008c_boy_crazy_stacey_14.txt, 1407, 14
070c_stacey_and_the_cheerleaders_4.txt, 1531, 4
056c_keep_out_claudia_11.txt, 1426, 11
008c_boy_crazy_stacey_2.txt, 1479, 2
009c_the_ghost_at_dawns_house_2.txt, 1567, 2
072c_dawn_and_the_we_heart_kids_club_13.txt, 1366, 13
119c_staceys_ex_boyfriend_14.txt, 986, 14
m12c_dawn_and_the_surfer_ghost_1.txt, 1956, 1
062c_kristy_and_the_worst_kid_ever_13.txt, 1836, 13
031c_dawns_wicked_stepsister_1.txt, 1574, 1
024c_kristy_and_the_mothers_day_surprise_5.txt, 1351, 5
127c_abbys_un_valentine_10.txt, 1046, 10
091c_claudia_and_the_first_thanksgiving_7.txt, 1490, 7
m06c_the_mystery_at_claudias_house_12.txt, 1614, 12
051c_staceys_ex_best_friend_15.txt, 975, 15
061c_jessi_and_the_awful_secret_13.txt, 1683, 13
012c_claudia_and_the_new_girl_15.txt, 1709, 15
123c_claudias_big_party_2.txt, 2800, 2
m33c_stacey_and_the_stolen_hearts_4.txt, 1810, 4
030c_mary_anne_and_the_great_romance_8.txt, 1791, 8
086c_mary_anne_and_camp_bsc_12.txt, 1103, 12
066c_maid_mary_anne_4.txt, 1642, 4
078c_claudia_and_crazy_peaches_8.txt, 1917, 8
080c_mallory_pike_no_1_fan_9.txt, 1457, 9
093c_mary_anne_and_the_memory_garden_6.txt, 2331, 6
046c_mary_anne_misses_logan_12.txt, 1761, 12
m27c_claudia_and_the_lighthouse_ghost_14.txt, 1483, 14
117c_claudia_and_the_terrible_truth_7.txt, 1585, 7
m22c_stacey_and_the_haunted_masquerade_10.txt, 1650, 10
120c_mary_anne_and_the_playground_fight_15.txt, 609, 15
024c_kristy_and_the_mothers_day_surprise_12.txt, 1689, 12
049c_claudia_and_the_genius_of_elm_street_3.txt, 2035, 3
082c_jessi_and_the_troublemaker_9.txt, 1065, 9
m04c_kristy_and_the_missing_child_9.txt, 1774, 9
014c_hello_mallory_3.txt, 2380, 3
035c_jessis_babysitter_13.txt, 1838, 13
118c_kristy_thomas_dog_trainer_11.txt, 1102, 11
m07c_dawn_and_the_disappearing_dogs_3.txt, 1792, 3
088c_farewell_dawn_6.txt, 1403, 6
m31c_mary_anne_and_the_music_box_secret_3.txt, 1931, 3
059c_mallory_hates_boys_and_gym_4.txt, 1765, 4
m03c_mallory_and_the_ghost_cat_9.txt, 2033, 9
m11c_claudia_and_the_mystery_at_the_museum_3.txt, 1910, 3
m01c_stacey_and_the_mystery_ring_13.txt, 1969, 13
m20c_mary_anne_and_the_zoo_mystery_11.txt, 1738, 11
serr2c_logan_bruno_boy_babysitter_15.txt, 1133, 15
110c_abby_and_the_bad_sport_9.txt, 951, 9
066c_maid_mary_anne_13.txt, 1665, 13
076c_staceys_lie_14.txt, 2237, 14
063c_claudias_freind_friend_4.txt, 1606, 4
002c_claudia_and_the_phantom_phone_calls_2.txt, 1894, 2
116c_abby_and_the_best_kid_ever_10.txt, 2258, 10
076c_staceys_lie_15.txt, 1263, 15
066c_maid_mary_anne_12.txt, 2427, 12
055c_jessis_gold_medal_1.txt, 2488, 1
002c_claudia_and_the_phantom_phone_calls_3.txt, 1833, 3
116c_abby_and_the_best_kid_ever_11.txt, 865, 11
063c_claudias_freind_friend_5.txt, 1532, 5
110c_abby_and_the_bad_sport_8.txt, 2870, 8
serr2c_logan_bruno_boy_babysitter_14.txt, 1373, 14
059c_mallory_hates_boys_and_gym_5.txt, 1014, 5
m03c_mallory_and_the_ghost_cat_8.txt, 2470, 8
m31c_mary_anne_and_the_music_box_secret_2.txt, 2060, 2
m20c_mary_anne_and_the_zoo_mystery_10.txt, 2103, 10
m01c_stacey_and_the_mystery_ring_12.txt, 1718, 12
m11c_claudia_and_the_mystery_at_the_museum_2.txt, 2397, 2
m07c_dawn_and_the_disappearing_dogs_2.txt, 2280, 2
088c_farewell_dawn_7.txt, 1348, 7
014c_hello_mallory_2.txt, 1772, 2
m04c_kristy_and_the_missing_child_8.txt, 1896, 8
035c_jessis_babysitter_12.txt, 1671, 12
082c_jessi_and_the_troublemaker_8.txt, 2370, 8
118c_kristy_thomas_dog_trainer_10.txt, 1038, 10
049c_claudia_and_the_genius_of_elm_street_2.txt, 2167, 2
024c_kristy_and_the_mothers_day_surprise_13.txt, 1112, 13
120c_mary_anne_and_the_playground_fight_14.txt, 1405, 14
m27c_claudia_and_the_lighthouse_ghost_15.txt, 1865, 15
046c_mary_anne_misses_logan_13.txt, 1741, 13
m22c_stacey_and_the_haunted_masquerade_11.txt, 2150, 11
117c_claudia_and_the_terrible_truth_6.txt, 1776, 6
080c_mallory_pike_no_1_fan_8.txt, 1524, 8
093c_mary_anne_and_the_memory_garden_7.txt, 1766, 7
086c_mary_anne_and_camp_bsc_13.txt, 1989, 13
030c_mary_anne_and_the_great_romance_9.txt, 1675, 9
078c_claudia_and_crazy_peaches_9.txt, 1740, 9
066c_maid_mary_anne_5.txt, 1348, 5
123c_claudias_big_party_3.txt, 1590, 3
012c_claudia_and_the_new_girl_14.txt, 1627, 14
m33c_stacey_and_the_stolen_hearts_5.txt, 1935, 5
013c_goodbye_stacey_goodbye_1.txt, 2102, 1
061c_jessi_and_the_awful_secret_12.txt, 1285, 12
051c_staceys_ex_best_friend_14.txt, 1613, 14
m06c_the_mystery_at_claudias_house_13.txt, 1714, 13
091c_claudia_and_the_first_thanksgiving_6.txt, 1362, 6
127c_abbys_un_valentine_11.txt, 1382, 11
024c_kristy_and_the_mothers_day_surprise_4.txt, 1909, 4
062c_kristy_and_the_worst_kid_ever_12.txt, 1215, 12
072c_dawn_and_the_we_heart_kids_club_12.txt, 1402, 12
009c_the_ghost_at_dawns_house_3.txt, 1884, 3
119c_staceys_ex_boyfriend_15.txt, 1118, 15
056c_keep_out_claudia_10.txt, 1925, 10
008c_boy_crazy_stacey_3.txt, 1766, 3
070c_stacey_and_the_cheerleaders_5.txt, 1798, 5
122c_kristy_in_charge_13.txt, 1123, 13
095c_kristy_plus_bart_equals_questionmark_8.txt, 1631, 8
008c_boy_crazy_stacey_15.txt, 1476, 15
m24c_mary_anne_and_the_silent_witness_4.txt, 1637, 4
051c_staceys_ex_best_friend_8.txt, 1494, 8
m34c_mary_anne_and_the_haunted_bookstore_9.txt, 2251, 9
m26c_dawn_schafer_undercover_babysitter_14.txt, 1712, 14
047c_mallory_on_strike_12.txt, 2098, 12
083c_stacey_vs_the_bsc_2.txt, 2124, 2
m33c_stacey_and_the_stolen_hearts_15.txt, 1502, 15
060c_mary_annes_makeover_7.txt, 1425, 7
serr3c_shannons_story_2.txt, 1678, 2
005c_dawn_and_the_impossible_three_2.txt, 1945, 2
001c_kristys_great_idea_2.txt, 1297, 2
004c_mary_anne_saves_the_day_7.txt, 2201, 7
031c_dawns_wicked_stepsister_15.txt, 1692, 15
007c_claudia_and_mean_jeanine_7.txt, 2019, 7
m04c_kristy_and_the_missing_child_10.txt, 1570, 10
058c_staceys_choice_13.txt, 1365, 13
m07c_dawn_and_the_disappearing_dogs_10.txt, 1745, 10
131c_the_fire_at_mary_annes_house_5.txt, 1887, 5
112c_kristy_and_the_sister_war_5.txt, 1809, 5
112c_kristy_and_the_sister_war_7.txt, 1950, 7
131c_the_fire_at_mary_annes_house_7.txt, 2165, 7
012c_claudia_and_the_new_girl_2.txt, 1893, 2
m07c_dawn_and_the_disappearing_dogs_12.txt, 1577, 12
056c_keep_out_claudia_8.txt, 1514, 8
011c_kristy_and_the_snobs_14.txt, 1731, 14
058c_staceys_choice_11.txt, 2062, 11
m04c_kristy_and_the_missing_child_12.txt, 1787, 12
007c_claudia_and_mean_jeanine_5.txt, 1848, 5
099c_staceys_broken_heart_2.txt, 4293, 2
004c_mary_anne_saves_the_day_5.txt, 1834, 5
m09c_kristy_and_the_haunted_mansion_15.txt, 2053, 15
108c_dont_give_up_mallory_14.txt, 1924, 14
060c_mary_annes_makeover_5.txt, 2103, 5
m13c_mary_anne_and_the_library_mystery_2.txt, 2451, 2
047c_mallory_on_strike_10.txt, 1876, 10
079c_mary_anne_breaks_the_rules_9.txt, 1378, 9
070c_stacey_and_the_cheerleaders_7.txt, 1378, 7
m24c_mary_anne_and_the_silent_witness_6.txt, 1748, 6
085c_claudia_kishi_live_from_wsto_15.txt, 463, 15
122c_kristy_in_charge_11.txt, 2508, 11
008c_boy_crazy_stacey_1.txt, 2504, 1
056c_keep_out_claudia_12.txt, 1545, 12
m05c_mary_anne_and_the_secret_in_the_attic_8.txt, 1788, 8
045c_kristy_and_the_baby_parade_8.txt, 1550, 8
m23c_abby_and_the_secret_society_9.txt, 1452, 9
m12c_dawn_and_the_surfer_ghost_2.txt, 2222, 2
062c_kristy_and_the_worst_kid_ever_10.txt, 2698, 10
031c_dawns_wicked_stepsister_2.txt, 2357, 2
024c_kristy_and_the_mothers_day_surprise_6.txt, 1761, 6
072c_dawn_and_the_we_heart_kids_club_10.txt, 1414, 10
009c_the_ghost_at_dawns_house_1.txt, 2086, 1
091c_claudia_and_the_first_thanksgiving_4.txt, 1959, 4
m06c_the_mystery_at_claudias_house_11.txt, 1856, 11
061c_jessi_and_the_awful_secret_10.txt, 1247, 10
013c_goodbye_stacey_goodbye_3.txt, 1543, 3
127c_abbys_un_valentine_13.txt, 1536, 13
066c_maid_mary_anne_7.txt, 1248, 7
041c_mary_anne_vs_logan_15.txt, 1547, 15
086c_mary_anne_and_camp_bsc_11.txt, 1317, 11
m33c_stacey_and_the_stolen_hearts_7.txt, 1591, 7
123c_claudias_big_party_1.txt, 2467, 1
040c_claudia_and_the_middle_school_mystery_15.txt, 1372, 15
093c_mary_anne_and_the_memory_garden_5.txt, 2603, 5
117c_claudia_and_the_terrible_truth_4.txt, 1747, 4
m22c_stacey_and_the_haunted_masquerade_13.txt, 2527, 13
046c_mary_anne_misses_logan_11.txt, 1538, 11
m25c_kristy_and_the_middle_school_vandal_9.txt, 1144, 9
073c_mary_anne_and_miss_priss_14.txt, 1517, 14
118c_kristy_thomas_dog_trainer_12.txt, 982, 12
035c_jessis_babysitter_10.txt, 1774, 10
024c_kristy_and_the_mothers_day_surprise_11.txt, 1772, 11
m01c_stacey_and_the_mystery_ring_10.txt, 1996, 10
m20c_mary_anne_and_the_zoo_mystery_12.txt, 2107, 12
108c_dont_give_up_mallory_9.txt, 1409, 9
059c_mallory_hates_boys_and_gym_7.txt, 1360, 7
088c_farewell_dawn_5.txt, 1664, 5
116c_abby_and_the_best_kid_ever_13.txt, 1528, 13
002c_claudia_and_the_phantom_phone_calls_1.txt, 2650, 1
063c_claudias_freind_friend_7.txt, 1587, 7
055c_jessis_gold_medal_3.txt, 1766, 3
066c_maid_mary_anne_10.txt, 2214, 10
005c_dawn_and_the_impossible_three_15.txt, 2076, 15
077c_dwn_and_whitney_friends_forever_8.txt, 1595, 8
m36c_kristy_and_the_cat_burglar_9.txt, 1588, 9
005c_dawn_and_the_impossible_three_14.txt, 1982, 14
m36c_kristy_and_the_cat_burglar_8.txt, 1506, 8
077c_dwn_and_whitney_friends_forever_9.txt, 1886, 9
055c_jessis_gold_medal_2.txt, 2952, 2
063c_claudias_freind_friend_6.txt, 1927, 6
116c_abby_and_the_best_kid_ever_12.txt, 946, 12
066c_maid_mary_anne_11.txt, 2301, 11
088c_farewell_dawn_4.txt, 1960, 4
m07c_dawn_and_the_disappearing_dogs_1.txt, 2134, 1
m20c_mary_anne_and_the_zoo_mystery_13.txt, 2081, 13
m01c_stacey_and_the_mystery_ring_11.txt, 1515, 11
m11c_claudia_and_the_mystery_at_the_museum_1.txt, 1968, 1
059c_mallory_hates_boys_and_gym_6.txt, 1684, 6
108c_dont_give_up_mallory_8.txt, 1503, 8
m31c_mary_anne_and_the_music_box_secret_1.txt, 2082, 1
049c_claudia_and_the_genius_of_elm_street_1.txt, 1593, 1
024c_kristy_and_the_mothers_day_surprise_10.txt, 1626, 10
118c_kristy_thomas_dog_trainer_13.txt, 991, 13
035c_jessis_babysitter_11.txt, 1507, 11
014c_hello_mallory_1.txt, 1828, 1
073c_mary_anne_and_miss_priss_15.txt, 833, 15
m22c_stacey_and_the_haunted_masquerade_12.txt, 1995, 12
117c_claudia_and_the_terrible_truth_5.txt, 1444, 5
m25c_kristy_and_the_middle_school_vandal_8.txt, 2930, 8
046c_mary_anne_misses_logan_10.txt, 1494, 10
093c_mary_anne_and_the_memory_garden_4.txt, 1606, 4
040c_claudia_and_the_middle_school_mystery_14.txt, 1576, 14
m33c_stacey_and_the_stolen_hearts_6.txt, 1526, 6
066c_maid_mary_anne_6.txt, 1672, 6
086c_mary_anne_and_camp_bsc_10.txt, 1570, 10
041c_mary_anne_vs_logan_14.txt, 1742, 14
127c_abbys_un_valentine_12.txt, 1586, 12
061c_jessi_and_the_awful_secret_11.txt, 1101, 11
m06c_the_mystery_at_claudias_house_10.txt, 1776, 10
091c_claudia_and_the_first_thanksgiving_5.txt, 1494, 5
013c_goodbye_stacey_goodbye_2.txt, 1539, 2
072c_dawn_and_the_we_heart_kids_club_11.txt, 1414, 11
024c_kristy_and_the_mothers_day_surprise_7.txt, 1773, 7
031c_dawns_wicked_stepsister_3.txt, 1696, 3
062c_kristy_and_the_worst_kid_ever_11.txt, 1888, 11
m12c_dawn_and_the_surfer_ghost_3.txt, 1879, 3
m23c_abby_and_the_secret_society_8.txt, 1627, 8
m05c_mary_anne_and_the_secret_in_the_attic_9.txt, 1813, 9
056c_keep_out_claudia_13.txt, 1597, 13
045c_kristy_and_the_baby_parade_9.txt, 1659, 9
085c_claudia_kishi_live_from_wsto_14.txt, 1759, 14
m24c_mary_anne_and_the_silent_witness_7.txt, 1823, 7
122c_kristy_in_charge_10.txt, 1559, 10
070c_stacey_and_the_cheerleaders_6.txt, 1865, 6
060c_mary_annes_makeover_4.txt, 1116, 4
108c_dont_give_up_mallory_15.txt, 635, 15
079c_mary_anne_breaks_the_rules_8.txt, 1422, 8
083c_stacey_vs_the_bsc_1.txt, 1976, 1
047c_mallory_on_strike_11.txt, 1120, 11
m13c_mary_anne_and_the_library_mystery_3.txt, 2037, 3
005c_dawn_and_the_impossible_three_1.txt, 2445, 1
serr3c_shannons_story_1.txt, 2410, 1
m09c_kristy_and_the_haunted_mansion_14.txt, 1710, 14
099c_staceys_broken_heart_3.txt, 1225, 3
001c_kristys_great_idea_1.txt, 2319, 1
004c_mary_anne_saves_the_day_4.txt, 1711, 4
011c_kristy_and_the_snobs_15.txt, 1670, 15
007c_claudia_and_mean_jeanine_4.txt, 1547, 4
058c_staceys_choice_10.txt, 1713, 10
m04c_kristy_and_the_missing_child_13.txt, 1817, 13
056c_keep_out_claudia_9.txt, 1492, 9
131c_the_fire_at_mary_annes_house_6.txt, 1863, 6
112c_kristy_and_the_sister_war_6.txt, 1297, 6
m07c_dawn_and_the_disappearing_dogs_13.txt, 1808, 13
012c_claudia_and_the_new_girl_3.txt, 2001, 3
058c_staceys_choice_14.txt, 1914, 14
011c_kristy_and_the_snobs_11.txt, 1505, 11
012c_claudia_and_the_new_girl_7.txt, 1725, 7
112c_kristy_and_the_sister_war_2.txt, 2362, 2
098c_dawn_and_too_many_sitters_9.txt, 1699, 9
131c_the_fire_at_mary_annes_house_2.txt, 2118, 2
serr3c_shannons_story_5.txt, 1406, 5
005c_dawn_and_the_impossible_three_5.txt, 2206, 5
035c_jessis_babysitter_9.txt, 1445, 9
001c_kristys_great_idea_5.txt, 1534, 5
099c_staceys_broken_heart_7.txt, 1265, 7
m09c_kristy_and_the_haunted_mansion_10.txt, 2090, 10
031c_dawns_wicked_stepsister_12.txt, 1454, 12
070c_stacey_and_the_cheerleaders_2.txt, 2574, 2
122c_kristy_in_charge_14.txt, 1271, 14
129c_kristy_at_bat_9.txt, 1614, 9
m24c_mary_anne_and_the_silent_witness_3.txt, 1832, 3
008c_boy_crazy_stacey_12.txt, 1247, 12
085c_claudia_kishi_live_from_wsto_10.txt, 1264, 10
m26c_dawn_schafer_undercover_babysitter_13.txt, 1738, 13
m13c_mary_anne_and_the_library_mystery_7.txt, 1979, 7
047c_mallory_on_strike_15.txt, 2441, 15
083c_stacey_vs_the_bsc_5.txt, 1369, 5
108c_dont_give_up_mallory_11.txt, 2053, 11
m33c_stacey_and_the_stolen_hearts_12.txt, 1413, 12
m12c_dawn_and_the_surfer_ghost_7.txt, 1846, 7
024c_kristy_and_the_mothers_day_surprise_3.txt, 1645, 3
062c_kristy_and_the_worst_kid_ever_15.txt, 1552, 15
031c_dawns_wicked_stepsister_7.txt, 1892, 7
072c_dawn_and_the_we_heart_kids_club_15.txt, 1115, 15
009c_the_ghost_at_dawns_house_4.txt, 1698, 4
119c_staceys_ex_boyfriend_12.txt, 1186, 12
m28c_abby_and_the_mystery_baby_8.txt, 1662, 8
034c_mary_anne_and_too_many_boys_9.txt, 1410, 9
008c_boy_crazy_stacey_4.txt, 1813, 4
037c_dawn_and_the_older_boy_8.txt, 1529, 8
086c_mary_anne_and_camp_bsc_14.txt, 1532, 14
m08c_jessi_and_the_jewel_thieves_9.txt, 1861, 9
041c_mary_anne_vs_logan_10.txt, 1456, 10
066c_maid_mary_anne_2.txt, 3224, 2
123c_claudias_big_party_4.txt, 2354, 4
012c_claudia_and_the_new_girl_13.txt, 1685, 13
m33c_stacey_and_the_stolen_hearts_2.txt, 2354, 2
032c_kristy_and_the_secret_of_susan_9.txt, 1404, 9
013c_goodbye_stacey_goodbye_6.txt, 1798, 6
m06c_the_mystery_at_claudias_house_14.txt, 1827, 14
091c_claudia_and_the_first_thanksgiving_1.txt, 2553, 1
051c_staceys_ex_best_friend_13.txt, 1823, 13
061c_jessi_and_the_awful_secret_15.txt, 1579, 15
046c_mary_anne_misses_logan_14.txt, 1896, 14
m27c_claudia_and_the_lighthouse_ghost_12.txt, 1085, 12
117c_claudia_and_the_terrible_truth_1.txt, 1820, 1
006c_kristys_big_day_9.txt, 1649, 9
040c_claudia_and_the_middle_school_mystery_10.txt, 1436, 10
128c_claudia_and_the_little_liar_8.txt, 1802, 8
014c_hello_mallory_5.txt, 1610, 5
035c_jessis_babysitter_15.txt, 1544, 15
024c_kristy_and_the_mothers_day_surprise_14.txt, 1889, 14
049c_claudia_and_the_genius_of_elm_street_5.txt, 2310, 5
120c_mary_anne_and_the_playground_fight_13.txt, 1502, 13
073c_mary_anne_and_miss_priss_11.txt, 1284, 11
066c_maid_mary_anne_15.txt, 1004, 15
076c_staceys_lie_12.txt, 2379, 12
055c_jessis_gold_medal_6.txt, 1459, 6
002c_claudia_and_the_phantom_phone_calls_4.txt, 2003, 4
063c_claudias_freind_friend_2.txt, 3264, 2
m21c_claudia_and_the_recipe_for_danger_9.txt, 1813, 9
005c_dawn_and_the_impossible_three_10.txt, 1545, 10
serr2c_logan_bruno_boy_babysitter_13.txt, 1587, 13
m31c_mary_anne_and_the_music_box_secret_5.txt, 1756, 5
059c_mallory_hates_boys_and_gym_2.txt, 3301, 2
m11c_claudia_and_the_mystery_at_the_museum_5.txt, 1743, 5
104c_abbys_twin_8.txt, 1175, 8
m01c_stacey_and_the_mystery_ring_15.txt, 1493, 15
m07c_dawn_and_the_disappearing_dogs_5.txt, 1763, 5
m07c_dawn_and_the_disappearing_dogs_4.txt, 1950, 4
088c_farewell_dawn_1.txt, 2161, 1
059c_mallory_hates_boys_and_gym_3.txt, 2664, 3
m31c_mary_anne_and_the_music_box_secret_4.txt, 1869, 4
m01c_stacey_and_the_mystery_ring_14.txt, 1985, 14
104c_abbys_twin_9.txt, 1225, 9
m11c_claudia_and_the_mystery_at_the_museum_4.txt, 1822, 4
m21c_claudia_and_the_recipe_for_danger_8.txt, 1648, 8
serr2c_logan_bruno_boy_babysitter_12.txt, 1462, 12
005c_dawn_and_the_impossible_three_11.txt, 2002, 11
076c_staceys_lie_13.txt, 1242, 13
066c_maid_mary_anne_14.txt, 1389, 14
063c_claudias_freind_friend_3.txt, 1853, 3
002c_claudia_and_the_phantom_phone_calls_5.txt, 1227, 5
055c_jessis_gold_medal_7.txt, 1356, 7
073c_mary_anne_and_miss_priss_10.txt, 1874, 10
120c_mary_anne_and_the_playground_fight_12.txt, 1804, 12
049c_claudia_and_the_genius_of_elm_street_4.txt, 2151, 4
024c_kristy_and_the_mothers_day_surprise_15.txt, 1617, 15
014c_hello_mallory_4.txt, 1751, 4
035c_jessis_babysitter_14.txt, 1542, 14
128c_claudia_and_the_little_liar_9.txt, 1536, 9
093c_mary_anne_and_the_memory_garden_1.txt, 2080, 1
040c_claudia_and_the_middle_school_mystery_11.txt, 1773, 11
006c_kristys_big_day_8.txt, 2336, 8
m27c_claudia_and_the_lighthouse_ghost_13.txt, 1430, 13
046c_mary_anne_misses_logan_15.txt, 1521, 15
013c_goodbye_stacey_goodbye_7.txt, 1809, 7
032c_kristy_and_the_secret_of_susan_8.txt, 1319, 8
061c_jessi_and_the_awful_secret_14.txt, 1859, 14
051c_staceys_ex_best_friend_12.txt, 1828, 12
m06c_the_mystery_at_claudias_house_15.txt, 1921, 15
012c_claudia_and_the_new_girl_12.txt, 1698, 12
123c_claudias_big_party_5.txt, 2204, 5
m33c_stacey_and_the_stolen_hearts_3.txt, 1545, 3
041c_mary_anne_vs_logan_11.txt, 1689, 11
m08c_jessi_and_the_jewel_thieves_8.txt, 1745, 8
086c_mary_anne_and_camp_bsc_15.txt, 878, 15
066c_maid_mary_anne_3.txt, 2752, 3
037c_dawn_and_the_older_boy_9.txt, 1187, 9
008c_boy_crazy_stacey_5.txt, 2563, 5
m28c_abby_and_the_mystery_baby_9.txt, 1682, 9
034c_mary_anne_and_too_many_boys_8.txt, 1093, 8
009c_the_ghost_at_dawns_house_5.txt, 1419, 5
072c_dawn_and_the_we_heart_kids_club_14.txt, 1099, 14
119c_staceys_ex_boyfriend_13.txt, 1539, 13
031c_dawns_wicked_stepsister_6.txt, 1597, 6
062c_kristy_and_the_worst_kid_ever_14.txt, 1324, 14
024c_kristy_and_the_mothers_day_surprise_2.txt, 2203, 2
m12c_dawn_and_the_surfer_ghost_6.txt, 1991, 6
047c_mallory_on_strike_14.txt, 2259, 14
083c_stacey_vs_the_bsc_4.txt, 1528, 4
m13c_mary_anne_and_the_library_mystery_6.txt, 1719, 6
m33c_stacey_and_the_stolen_hearts_13.txt, 1602, 13
060c_mary_annes_makeover_1.txt, 2151, 1
108c_dont_give_up_mallory_10.txt, 1675, 10
m26c_dawn_schafer_undercover_babysitter_12.txt, 1766, 12
122c_kristy_in_charge_15.txt, 1326, 15
085c_claudia_kishi_live_from_wsto_11.txt, 1371, 11
008c_boy_crazy_stacey_13.txt, 1202, 13
m24c_mary_anne_and_the_silent_witness_2.txt, 2433, 2
129c_kristy_at_bat_8.txt, 1938, 8
070c_stacey_and_the_cheerleaders_3.txt, 2501, 3
031c_dawns_wicked_stepsister_13.txt, 1850, 13
m09c_kristy_and_the_haunted_mansion_11.txt, 1434, 11
001c_kristys_great_idea_4.txt, 2068, 4
004c_mary_anne_saves_the_day_1.txt, 2767, 1
099c_staceys_broken_heart_6.txt, 1706, 6
005c_dawn_and_the_impossible_three_4.txt, 1689, 4
035c_jessis_babysitter_8.txt, 1230, 8
serr3c_shannons_story_4.txt, 2871, 4
012c_claudia_and_the_new_girl_6.txt, 1946, 6
131c_the_fire_at_mary_annes_house_3.txt, 1424, 3
098c_dawn_and_too_many_sitters_8.txt, 1442, 8
112c_kristy_and_the_sister_war_3.txt, 1578, 3
007c_claudia_and_mean_jeanine_1.txt, 2149, 1
058c_staceys_choice_15.txt, 1692, 15
011c_kristy_and_the_snobs_10.txt, 1722, 10
011c_kristy_and_the_snobs_12.txt, 1941, 12
m04c_kristy_and_the_missing_child_14.txt, 1861, 14
007c_claudia_and_mean_jeanine_3.txt, 1481, 3
112c_kristy_and_the_sister_war_1.txt, 1996, 1
131c_the_fire_at_mary_annes_house_1.txt, 2032, 1
012c_claudia_and_the_new_girl_4.txt, 1911, 4
m07c_dawn_and_the_disappearing_dogs_14.txt, 1937, 14
005c_dawn_and_the_impossible_three_6.txt, 2078, 6
serr3c_shannons_story_6.txt, 1121, 6
m09c_kristy_and_the_haunted_mansion_13.txt, 1963, 13
031c_dawns_wicked_stepsister_11.txt, 1743, 11
099c_staceys_broken_heart_4.txt, 2162, 4
004c_mary_anne_saves_the_day_3.txt, 2196, 3
001c_kristys_great_idea_6.txt, 2244, 6
084c_dawn_and_the_school_spirit_war_9.txt, 1567, 9
085c_claudia_kishi_live_from_wsto_8.txt, 2131, 8
008c_boy_crazy_stacey_11.txt, 1435, 11
085c_claudia_kishi_live_from_wsto_13.txt, 1347, 13
070c_stacey_and_the_cheerleaders_1.txt, 2716, 1
052c_mary_anne_plus_too_many_babies_8.txt, 1404, 8
108c_dont_give_up_mallory_12.txt, 2084, 12
060c_mary_annes_makeover_3.txt, 1299, 3
m33c_stacey_and_the_stolen_hearts_11.txt, 1726, 11
m14c_stacey_and_the_mystery_at_the_mall_9.txt, 2095, 9
m13c_mary_anne_and_the_library_mystery_4.txt, 1919, 4
083c_stacey_vs_the_bsc_6.txt, 1402, 6
046c_mary_anne_misses_logan_9.txt, 1762, 9
m26c_dawn_schafer_undercover_babysitter_10.txt, 1819, 10
119c_staceys_ex_boyfriend_11.txt, 2190, 11
017c_mary_annes_bad_luck_mystery_8.txt, 1654, 8
009c_the_ghost_at_dawns_house_7.txt, 1537, 7
101c_claudia_kishi_middle_school_dropout_9.txt, 1867, 9
m12c_dawn_and_the_surfer_ghost_4.txt, 1726, 4
031c_dawns_wicked_stepsister_4.txt, 2031, 4
008c_boy_crazy_stacey_7.txt, 1546, 7
056c_keep_out_claudia_14.txt, 1617, 14
m16c_claudia_and_the_clue_in_the_photograph_9.txt, 2086, 9
m33c_stacey_and_the_stolen_hearts_1.txt, 1262, 1
123c_claudias_big_party_7.txt, 2164, 7
012c_claudia_and_the_new_girl_10.txt, 1865, 10
066c_maid_mary_anne_1.txt, 3306, 1
041c_mary_anne_vs_logan_13.txt, 1663, 13
127c_abbys_un_valentine_15.txt, 1097, 15
091c_claudia_and_the_first_thanksgiving_2.txt, 2914, 2
051c_staceys_ex_best_friend_10.txt, 1732, 10
013c_goodbye_stacey_goodbye_5.txt, 1747, 5
117c_claudia_and_the_terrible_truth_2.txt, 2388, 2
m22c_stacey_and_the_haunted_masquerade_15.txt, 1377, 15
m27c_claudia_and_the_lighthouse_ghost_11.txt, 1407, 11
093c_mary_anne_and_the_memory_garden_3.txt, 2284, 3
040c_claudia_and_the_middle_school_mystery_13.txt, 1690, 13
105c_stacey_the_math_whiz_8.txt, 1940, 8
049c_claudia_and_the_genius_of_elm_street_6.txt, 1903, 6
118c_kristy_thomas_dog_trainer_14.txt, 1049, 14
014c_hello_mallory_6.txt, 1222, 6
m06c_the_mystery_at_claudias_house_9.txt, 1594, 9
120c_mary_anne_and_the_playground_fight_10.txt, 1179, 10
073c_mary_anne_and_miss_priss_12.txt, 2385, 12
005c_dawn_and_the_impossible_three_13.txt, 1872, 13
serr2c_logan_bruno_boy_babysitter_10.txt, 1384, 10
055c_jessis_gold_medal_5.txt, 1335, 5
063c_claudias_freind_friend_1.txt, 1991, 1
116c_abby_and_the_best_kid_ever_15.txt, 1201, 15
002c_claudia_and_the_phantom_phone_calls_7.txt, 2446, 7
076c_staceys_lie_11.txt, 2090, 11
088c_farewell_dawn_3.txt, 1468, 3
m07c_dawn_and_the_disappearing_dogs_6.txt, 1556, 6
m11c_claudia_and_the_mystery_at_the_museum_6.txt, 1843, 6
041c_mary_anne_vs_logan_9.txt, 1437, 9
089c_kristy_and_the_dirty_diapers_9.txt, 1119, 9
m20c_mary_anne_and_the_zoo_mystery_14.txt, 2104, 14
m31c_mary_anne_and_the_music_box_secret_6.txt, 1965, 6
059c_mallory_hates_boys_and_gym_1.txt, 2520, 1
m20c_mary_anne_and_the_zoo_mystery_15.txt, 702, 15
089c_kristy_and_the_dirty_diapers_8.txt, 1329, 8
041c_mary_anne_vs_logan_8.txt, 1407, 8
m11c_claudia_and_the_mystery_at_the_museum_7.txt, 1940, 7
m31c_mary_anne_and_the_music_box_secret_7.txt, 1835, 7
088c_farewell_dawn_2.txt, 3611, 2
m07c_dawn_and_the_disappearing_dogs_7.txt, 1843, 7
116c_abby_and_the_best_kid_ever_14.txt, 1387, 14
002c_claudia_and_the_phantom_phone_calls_6.txt, 1750, 6
055c_jessis_gold_medal_4.txt, 1650, 4
076c_staceys_lie_10.txt, 1320, 10
serr2c_logan_bruno_boy_babysitter_11.txt, 991, 11
005c_dawn_and_the_impossible_three_12.txt, 2032, 12
073c_mary_anne_and_miss_priss_13.txt, 1699, 13
120c_mary_anne_and_the_playground_fight_11.txt, 993, 11
m06c_the_mystery_at_claudias_house_8.txt, 2089, 8
118c_kristy_thomas_dog_trainer_15.txt, 1156, 15
014c_hello_mallory_7.txt, 1303, 7
049c_claudia_and_the_genius_of_elm_street_7.txt, 1190, 7
040c_claudia_and_the_middle_school_mystery_12.txt, 1380, 12
093c_mary_anne_and_the_memory_garden_2.txt, 2125, 2
105c_stacey_the_math_whiz_9.txt, 813, 9
m22c_stacey_and_the_haunted_masquerade_14.txt, 2313, 14
117c_claudia_and_the_terrible_truth_3.txt, 1849, 3
m27c_claudia_and_the_lighthouse_ghost_10.txt, 2043, 10
051c_staceys_ex_best_friend_11.txt, 1336, 11
091c_claudia_and_the_first_thanksgiving_3.txt, 1650, 3
013c_goodbye_stacey_goodbye_4.txt, 1565, 4
127c_abbys_un_valentine_14.txt, 2103, 14
041c_mary_anne_vs_logan_12.txt, 1787, 12
012c_claudia_and_the_new_girl_11.txt, 1720, 11
123c_claudias_big_party_6.txt, 1542, 6
m16c_claudia_and_the_clue_in_the_photograph_8.txt, 2055, 8
008c_boy_crazy_stacey_6.txt, 1832, 6
056c_keep_out_claudia_15.txt, 1674, 15
031c_dawns_wicked_stepsister_5.txt, 1691, 5
024c_kristy_and_the_mothers_day_surprise_1.txt, 2152, 1
m12c_dawn_and_the_surfer_ghost_5.txt, 1479, 5
017c_mary_annes_bad_luck_mystery_9.txt, 1750, 9
119c_staceys_ex_boyfriend_10.txt, 1310, 10
101c_claudia_kishi_middle_school_dropout_8.txt, 1541, 8
009c_the_ghost_at_dawns_house_6.txt, 1250, 6
m26c_dawn_schafer_undercover_babysitter_11.txt, 1776, 11
m33c_stacey_and_the_stolen_hearts_10.txt, 1486, 10
060c_mary_annes_makeover_2.txt, 3050, 2
108c_dont_give_up_mallory_13.txt, 2062, 13
046c_mary_anne_misses_logan_8.txt, 1607, 8
083c_stacey_vs_the_bsc_7.txt, 1164, 7
m13c_mary_anne_and_the_library_mystery_5.txt, 1873, 5
m14c_stacey_and_the_mystery_at_the_mall_8.txt, 1937, 8
052c_mary_anne_plus_too_many_babies_9.txt, 1324, 9
085c_claudia_kishi_live_from_wsto_12.txt, 776, 12
008c_boy_crazy_stacey_10.txt, 1468, 10
m24c_mary_anne_and_the_silent_witness_1.txt, 2037, 1
085c_claudia_kishi_live_from_wsto_9.txt, 1019, 9
099c_staceys_broken_heart_5.txt, 2416, 5
084c_dawn_and_the_school_spirit_war_8.txt, 1986, 8
001c_kristys_great_idea_7.txt, 1789, 7
004c_mary_anne_saves_the_day_2.txt, 1826, 2
031c_dawns_wicked_stepsister_10.txt, 1516, 10
m09c_kristy_and_the_haunted_mansion_12.txt, 1748, 12
serr3c_shannons_story_7.txt, 1493, 7
005c_dawn_and_the_impossible_three_7.txt, 1730, 7
m07c_dawn_and_the_disappearing_dogs_15.txt, 1740, 15
012c_claudia_and_the_new_girl_5.txt, 1601, 5
011c_kristy_and_the_snobs_13.txt, 1846, 13
007c_claudia_and_mean_jeanine_2.txt, 1683, 2
m04c_kristy_and_the_missing_child_15.txt, 2022, 15
033c_claudia_and_the_great_search_9.txt, 1496, 9
130c_staceys_movie_6.txt, 1453, 6
026c_claudia_and_the_sad_goodbye_5.txt, 1618, 5
m16c_claudia_and_the_clue_in_the_photograph_11.txt, 2108, 11
053c_kristy_for_president_4.txt, 1564, 4
131c_the_fire_at_mary_annes_house_15.txt, 1881, 15
m10c_stacey_and_the_mystery_money_1.txt, 2393, 1
130c_staceys_movie_13.txt, 802, 13
090c_welcome_to_the_bsc_abby_15.txt, 882, 15
m27c_claudia_and_the_lighthouse_ghost_8.txt, 1814, 8
125c_mary_anne_in_the_middle_9.txt, 1070, 9
062c_kristy_and_the_worst_kid_ever_9.txt, 2554, 9
109c_mary_anne_to_the_rescue_6.txt, 1387, 6
m24c_mary_anne_and_the_silent_witness_12.txt, 2153, 12
097c_claudia_and_the_worlds_cutest_baby_12.txt, 972, 12
064c_dawns_family_feud_15.txt, 780, 15
m20c_mary_anne_and_the_zoo_mystery_5.txt, 1804, 5
073c_mary_anne_and_miss_priss_9.txt, 1598, 9
072c_dawn_and_the_we_heart_kids_club_9.txt, 1738, 9
009c_the_ghost_at_dawns_house_13.txt, 1608, 13
m23c_abby_and_the_secret_society_13.txt, 1963, 13
067c_dawns_big_move_6.txt, 1655, 6
118c_kristy_thomas_dog_trainer_3.txt, 1376, 3
029c_mallory_and_the_mystery_diary_9.txt, 1771, 9
064c_dawns_family_feud_7.txt, 1329, 7
011c_kristy_and_the_snobs_5.txt, 1135, 5
087c_stacey_and_the_bad_girls_9.txt, 1128, 9
023c_dawn_on_the_coast_9.txt, 1374, 9
071c_claudia_and_the_perfect_boy_6.txt, 1370, 6
m35c_abby_and_the_notorius_neighbor_7.txt, 1785, 7
126c_the_all_new_mallory_pike_7.txt, 1978, 7
098c_dawn_and_too_many_sitters_14.txt, 1440, 14
m35c_abby_and_the_notorius_neighbor_12.txt, 1689, 12
100c_kristys_worst_idea_14.txt, 1501, 14
048c_jessis_wish_1.txt, 1906, 1
m26c_dawn_schafer_undercover_babysitter_4.txt, 1691, 4
022c_jessi_ramsey_petsitter_7.txt, 1404, 7
081c_kristy_and_mr_mom_4.txt, 2103, 4
021c_mallory_and_the_trouble_with_twins_15.txt, 1338, 15
069c_get_well_soon_mallory_13.txt, 1482, 13
094c_stacey_mcgill_super_sitter_13.txt, 1217, 13
120c_mary_anne_and_the_playground_fight_2.txt, 3052, 2
025c_mary_anne_and_the_search_for_tigger_8.txt, 1599, 8
m22c_stacey_and_the_haunted_masquerade_5.txt, 1615, 5
028c_welcome_back_stacey_7.txt, 2093, 7
115c_jessis_big_break_15.txt, 1723, 15
113c_claudia_makes_up_her_mind_12.txt, 1529, 12
093c_mary_anne_and_the_memory_garden_10.txt, 1621, 10
027c_jessi_and_the_superbrat_5.txt, 1743, 5
068c_jessi_and_the_bad_babysitter_13.txt, 1055, 13
103c_happy_holidays_jessi_10.txt, 1492, 10
m29c_stacey_and_the_fashion_victim_8.txt, 1578, 8
065c_staceys_big_crush_9.txt, 1518, 9
034c_mary_anne_and_too_many_boys_11.txt, 1571, 11
049c_claudia_and_the_genius_of_elm_street_12.txt, 1836, 12
m09c_kristy_and_the_haunted_mansion_3.txt, 1707, 3
114c_the_secret_life_of_mary_anne_spier_1.txt, 1447, 1
075c_jessis_horrible_prank_11.txt, 1201, 11
091c_claudia_and_the_first_thanksgiving_11.txt, 925, 11
111c_staceys_secret_friend_3.txt, 1008, 3
061c_jessi_and_the_awful_secret_3.txt, 1991, 3
127c_abbys_un_valentine_9.txt, 1156, 9
m02c_beware_dawn_8.txt, 1747, 8
m30c_kristy_and_the_mystery_train_7.txt, 1264, 7
m36c_kristy_and_the_cat_burglar_15.txt, 1756, 15
074c_kristy_and_the_copycat_12.txt, 1204, 12
069c_get_well_soon_mallory_8.txt, 1736, 8
039c_poor_mallory_9.txt, 1656, 9
097c_claudia_and_the_worlds_cutest_baby_9.txt, 1740, 9
010c_logan_likes_mary_anne_3.txt, 1457, 3
090c_welcome_to_the_bsc_abby_1.txt, 2276, 1
068c_jessi_and_the_bad_babysitter_4.txt, 1568, 4
m13c_mary_anne_and_the_library_mystery_13.txt, 1444, 13
028c_welcome_back_stacey_14.txt, 1726, 14
016c_jessis_secret_language_7.txt, 1613, 7
017c_mary_annes_bad_luck_mystery_12.txt, 1266, 12
057c_dawn_saves_the_planet_12.txt, 2012, 12
115c_jessis_big_break_5.txt, 919, 5
092c_mallorys_christmas_wish_7.txt, 1789, 7
057c_dawn_saves_the_planet_8.txt, 1875, 8
serr2c_logan_bruno_boy_babysitter_7.txt, 1142, 7
m30c_kristy_and_the_mystery_train_14.txt, 1024, 14
040c_claudia_and_the_middle_school_mystery_5.txt, 1582, 5
m32c_claudia_and_the_mystery_in_the_painting_2.txt, 3028, 2
116c_abby_and_the_best_kid_ever_1.txt, 1503, 1
043c_staceys_emergency_6.txt, 1797, 6
043c_staceys_emergency_7.txt, 1714, 7
m32c_claudia_and_the_mystery_in_the_painting_3.txt, 3603, 3
040c_claudia_and_the_middle_school_mystery_4.txt, 1866, 4
serr2c_logan_bruno_boy_babysitter_6.txt, 1499, 6
m30c_kristy_and_the_mystery_train_15.txt, 995, 15
115c_jessis_big_break_4.txt, 2388, 4
057c_dawn_saves_the_planet_13.txt, 1698, 13
057c_dawn_saves_the_planet_9.txt, 2131, 9
092c_mallorys_christmas_wish_6.txt, 1375, 6
016c_jessis_secret_language_6.txt, 1842, 6
028c_welcome_back_stacey_15.txt, 1558, 15
017c_mary_annes_bad_luck_mystery_13.txt, 1585, 13
068c_jessi_and_the_bad_babysitter_5.txt, 1363, 5
m13c_mary_anne_and_the_library_mystery_12.txt, 1870, 12
097c_claudia_and_the_worlds_cutest_baby_8.txt, 1301, 8
010c_logan_likes_mary_anne_2.txt, 1472, 2
074c_kristy_and_the_copycat_13.txt, 1468, 13
m36c_kristy_and_the_cat_burglar_14.txt, 1392, 14
094c_stacey_mcgill_super_sitter_1.txt, 1995, 1
039c_poor_mallory_8.txt, 1941, 8
069c_get_well_soon_mallory_9.txt, 2203, 9
m02c_beware_dawn_9.txt, 1978, 9
m30c_kristy_and_the_mystery_train_6.txt, 1811, 6
127c_abbys_un_valentine_8.txt, 1814, 8
061c_jessi_and_the_awful_secret_2.txt, 3280, 2
038c_kristys_mystery_admirer_1.txt, 1642, 1
111c_staceys_secret_friend_2.txt, 2709, 2
091c_claudia_and_the_first_thanksgiving_10.txt, 1179, 10
075c_jessis_horrible_prank_10.txt, 1474, 10
m09c_kristy_and_the_haunted_mansion_2.txt, 2320, 2
054c_mallory_and_the_dream_horse_1.txt, 2033, 1
049c_claudia_and_the_genius_of_elm_street_13.txt, 1625, 13
034c_mary_anne_and_too_many_boys_10.txt, 1462, 10
065c_staceys_big_crush_8.txt, 1666, 8
075c_jessis_horrible_prank_1.txt, 1401, 1
093c_mary_anne_and_the_memory_garden_11.txt, 2027, 11
m29c_stacey_and_the_fashion_victim_9.txt, 1544, 9
103c_happy_holidays_jessi_11.txt, 1133, 11
068c_jessi_and_the_bad_babysitter_12.txt, 1523, 12
027c_jessi_and_the_superbrat_4.txt, 1648, 4
028c_welcome_back_stacey_6.txt, 1825, 6
m22c_stacey_and_the_haunted_masquerade_4.txt, 1555, 4
025c_mary_anne_and_the_search_for_tigger_9.txt, 1685, 9
113c_claudia_makes_up_her_mind_13.txt, 905, 13
115c_jessis_big_break_14.txt, 1209, 14
069c_get_well_soon_mallory_12.txt, 1601, 12
120c_mary_anne_and_the_playground_fight_3.txt, 1393, 3
094c_stacey_mcgill_super_sitter_12.txt, 1193, 12
081c_kristy_and_mr_mom_5.txt, 2625, 5
022c_jessi_ramsey_petsitter_6.txt, 1544, 6
021c_mallory_and_the_trouble_with_twins_14.txt, 1577, 14
100c_kristys_worst_idea_15.txt, 1909, 15
m35c_abby_and_the_notorius_neighbor_13.txt, 1285, 13
098c_dawn_and_too_many_sitters_15.txt, 591, 15
126c_the_all_new_mallory_pike_6.txt, 2044, 6
003c_the_truth_about_stacey_1.txt, 2010, 1
m26c_dawn_schafer_undercover_babysitter_5.txt, 2266, 5
011c_kristy_and_the_snobs_4.txt, 1985, 4
m35c_abby_and_the_notorius_neighbor_6.txt, 1559, 6
071c_claudia_and_the_perfect_boy_7.txt, 2037, 7
023c_dawn_on_the_coast_8.txt, 1605, 8
087c_stacey_and_the_bad_girls_8.txt, 924, 8
029c_mallory_and_the_mystery_diary_8.txt, 1552, 8
064c_dawns_family_feud_6.txt, 1699, 6
118c_kristy_thomas_dog_trainer_2.txt, 3138, 2
m23c_abby_and_the_secret_society_12.txt, 1634, 12
067c_dawns_big_move_7.txt, 1146, 7
m20c_mary_anne_and_the_zoo_mystery_4.txt, 1476, 4
009c_the_ghost_at_dawns_house_12.txt, 1669, 12
072c_dawn_and_the_we_heart_kids_club_8.txt, 1811, 8
073c_mary_anne_and_miss_priss_8.txt, 1940, 8
064c_dawns_family_feud_14.txt, 1906, 14
097c_claudia_and_the_worlds_cutest_baby_13.txt, 1435, 13
m24c_mary_anne_and_the_silent_witness_13.txt, 2045, 13
042c_jessi_and_the_dance_school_phantom_1.txt, 2669, 1
109c_mary_anne_to_the_rescue_7.txt, 1865, 7
062c_kristy_and_the_worst_kid_ever_8.txt, 1532, 8
090c_welcome_to_the_bsc_abby_14.txt, 1364, 14
125c_mary_anne_in_the_middle_8.txt, 1520, 8
m27c_claudia_and_the_lighthouse_ghost_9.txt, 958, 9
131c_the_fire_at_mary_annes_house_14.txt, 1460, 14
130c_staceys_movie_12.txt, 1463, 12
053c_kristy_for_president_5.txt, 1285, 5
m16c_claudia_and_the_clue_in_the_photograph_10.txt, 2115, 10
033c_claudia_and_the_great_search_8.txt, 1334, 8
026c_claudia_and_the_sad_goodbye_4.txt, 1248, 4
130c_staceys_movie_7.txt, 2219, 7
m16c_claudia_and_the_clue_in_the_photograph_12.txt, 1775, 12
053c_kristy_for_president_7.txt, 2431, 7
086c_mary_anne_and_camp_bsc_8.txt, 1907, 8
130c_staceys_movie_5.txt, 1128, 5
026c_claudia_and_the_sad_goodbye_6.txt, 1794, 6
m19c_kristy_and_the_missing_fortune_8.txt, 1992, 8
027c_jessi_and_the_superbrat_15.txt, 1865, 15
130c_staceys_movie_10.txt, 948, 10
m10c_stacey_and_the_mystery_money_2.txt, 2658, 2
102c_mary_anne_and_the_little_princess_9.txt, 2048, 9
096c_abbys_lucky_thirteen_8.txt, 1081, 8
109c_mary_anne_to_the_rescue_5.txt, 1504, 5
042c_jessi_and_the_dance_school_phantom_3.txt, 2307, 3
m24c_mary_anne_and_the_silent_witness_11.txt, 2017, 11
009c_the_ghost_at_dawns_house_10.txt, 1503, 10
m20c_mary_anne_and_the_zoo_mystery_6.txt, 1993, 6
097c_claudia_and_the_worlds_cutest_baby_11.txt, 1972, 11
067c_dawns_big_move_5.txt, 1902, 5
m23c_abby_and_the_secret_society_10.txt, 2093, 10
071c_claudia_and_the_perfect_boy_5.txt, 1532, 5
m35c_abby_and_the_notorius_neighbor_4.txt, 1849, 4
011c_kristy_and_the_snobs_6.txt, 1862, 6
m11c_claudia_and_the_mystery_at_the_museum_15.txt, 1216, 15
064c_dawns_family_feud_4.txt, 1628, 4
m18c_stacey_and_the_mystery_at_the_empty_house_8.txt, 1951, 8
022c_jessi_ramsey_petsitter_4.txt, 1618, 4
081c_kristy_and_mr_mom_7.txt, 1808, 7
029c_mallory_and_the_mystery_diary_15.txt, 1669, 15
m14c_stacey_and_the_mystery_at_the_mall_15.txt, 2005, 15
048c_jessis_wish_2.txt, 2631, 2
m26c_dawn_schafer_undercover_babysitter_7.txt, 1576, 7
001c_kristys_great_idea_15.txt, 2426, 15
003c_the_truth_about_stacey_3.txt, 2229, 3
126c_the_all_new_mallory_pike_4.txt, 1433, 4
m35c_abby_and_the_notorius_neighbor_11.txt, 1548, 11
113c_claudia_makes_up_her_mind_11.txt, 1996, 11
m22c_stacey_and_the_haunted_masquerade_6.txt, 1585, 6
028c_welcome_back_stacey_4.txt, 1473, 4
094c_stacey_mcgill_super_sitter_10.txt, 1807, 10
120c_mary_anne_and_the_playground_fight_1.txt, 1835, 1
069c_get_well_soon_mallory_10.txt, 1880, 10
047c_mallory_on_strike_9.txt, 1185, 9
034c_mary_anne_and_too_many_boys_12.txt, 1244, 12
027c_jessi_and_the_superbrat_6.txt, 1566, 6
068c_jessi_and_the_bad_babysitter_10.txt, 1824, 10
103c_happy_holidays_jessi_13.txt, 1554, 13
093c_mary_anne_and_the_memory_garden_13.txt, 1698, 13
075c_jessis_horrible_prank_3.txt, 2377, 3
049c_claudia_and_the_genius_of_elm_street_11.txt, 2359, 11
054c_mallory_and_the_dream_horse_3.txt, 1728, 3
091c_claudia_and_the_first_thanksgiving_12.txt, 1348, 12
113c_claudia_makes_up_her_mind_8.txt, 1530, 8
m34c_mary_anne_and_the_haunted_bookstore_14.txt, 2405, 14
114c_the_secret_life_of_mary_anne_spier_2.txt, 2800, 2
075c_jessis_horrible_prank_12.txt, 1941, 12
m30c_kristy_and_the_mystery_train_4.txt, 2477, 4
038c_kristys_mystery_admirer_3.txt, 2102, 3
092c_mallorys_christmas_wish_14.txt, 1546, 14
094c_stacey_mcgill_super_sitter_3.txt, 1978, 3
074c_kristy_and_the_copycat_11.txt, 1483, 11
017c_mary_annes_bad_luck_mystery_11.txt, 1856, 11
016c_jessis_secret_language_4.txt, 2343, 4
m13c_mary_anne_and_the_library_mystery_10.txt, 1893, 10
090c_welcome_to_the_bsc_abby_2.txt, 2965, 2
068c_jessi_and_the_bad_babysitter_7.txt, 1620, 7
serr2c_logan_bruno_boy_babysitter_4.txt, 1206, 4
092c_mallorys_christmas_wish_4.txt, 1132, 4
057c_dawn_saves_the_planet_11.txt, 1956, 11
023c_dawn_on_the_coast_15.txt, 1762, 15
115c_jessis_big_break_6.txt, 1408, 6
043c_staceys_emergency_5.txt, 1724, 5
021c_mallory_and_the_trouble_with_twins_8.txt, 1587, 8
116c_abby_and_the_best_kid_ever_2.txt, 3293, 2
040c_claudia_and_the_middle_school_mystery_6.txt, 1809, 6
m32c_claudia_and_the_mystery_in_the_painting_1.txt, 2110, 1
116c_abby_and_the_best_kid_ever_3.txt, 885, 3
040c_claudia_and_the_middle_school_mystery_7.txt, 1543, 7
043c_staceys_emergency_4.txt, 1497, 4
021c_mallory_and_the_trouble_with_twins_9.txt, 1562, 9
092c_mallorys_christmas_wish_5.txt, 1405, 5
115c_jessis_big_break_7.txt, 1628, 7
023c_dawn_on_the_coast_14.txt, 1369, 14
057c_dawn_saves_the_planet_10.txt, 1461, 10
serr2c_logan_bruno_boy_babysitter_5.txt, 1822, 5
m13c_mary_anne_and_the_library_mystery_11.txt, 1836, 11
068c_jessi_and_the_bad_babysitter_6.txt, 1646, 6
090c_welcome_to_the_bsc_abby_3.txt, 1436, 3
017c_mary_annes_bad_luck_mystery_10.txt, 1692, 10
016c_jessis_secret_language_5.txt, 1273, 5
074c_kristy_and_the_copycat_10.txt, 1245, 10
094c_stacey_mcgill_super_sitter_2.txt, 3627, 2
010c_logan_likes_mary_anne_1.txt, 2381, 1
092c_mallorys_christmas_wish_15.txt, 857, 15
038c_kristys_mystery_admirer_2.txt, 2324, 2
061c_jessi_and_the_awful_secret_1.txt, 2227, 1
m30c_kristy_and_the_mystery_train_5.txt, 1973, 5
075c_jessis_horrible_prank_13.txt, 1513, 13
114c_the_secret_life_of_mary_anne_spier_3.txt, 1884, 3
111c_staceys_secret_friend_1.txt, 1664, 1
091c_claudia_and_the_first_thanksgiving_13.txt, 1353, 13
m34c_mary_anne_and_the_haunted_bookstore_15.txt, 949, 15
113c_claudia_makes_up_her_mind_9.txt, 978, 9
049c_claudia_and_the_genius_of_elm_street_10.txt, 1139, 10
054c_mallory_and_the_dream_horse_2.txt, 2387, 2
m09c_kristy_and_the_haunted_mansion_1.txt, 2215, 1
103c_happy_holidays_jessi_12.txt, 1100, 12
068c_jessi_and_the_bad_babysitter_11.txt, 1420, 11
027c_jessi_and_the_superbrat_7.txt, 1919, 7
075c_jessis_horrible_prank_2.txt, 1266, 2
093c_mary_anne_and_the_memory_garden_12.txt, 1726, 12
034c_mary_anne_and_too_many_boys_13.txt, 1409, 13
094c_stacey_mcgill_super_sitter_11.txt, 1500, 11
047c_mallory_on_strike_8.txt, 1937, 8
069c_get_well_soon_mallory_11.txt, 1959, 11
113c_claudia_makes_up_her_mind_10.txt, 1774, 10
028c_welcome_back_stacey_5.txt, 1563, 5
m22c_stacey_and_the_haunted_masquerade_7.txt, 1845, 7
003c_the_truth_about_stacey_2.txt, 2311, 2
001c_kristys_great_idea_14.txt, 1842, 14
m26c_dawn_schafer_undercover_babysitter_6.txt, 1995, 6
048c_jessis_wish_3.txt, 2384, 3
m35c_abby_and_the_notorius_neighbor_10.txt, 1615, 10
126c_the_all_new_mallory_pike_5.txt, 1474, 5
m18c_stacey_and_the_mystery_at_the_empty_house_9.txt, 1763, 9
m14c_stacey_and_the_mystery_at_the_mall_14.txt, 2303, 14
029c_mallory_and_the_mystery_diary_14.txt, 1585, 14
081c_kristy_and_mr_mom_6.txt, 2507, 6
022c_jessi_ramsey_petsitter_5.txt, 1647, 5
064c_dawns_family_feud_5.txt, 2243, 5
m11c_claudia_and_the_mystery_at_the_museum_14.txt, 1523, 14
m35c_abby_and_the_notorius_neighbor_5.txt, 1371, 5
071c_claudia_and_the_perfect_boy_4.txt, 1899, 4
011c_kristy_and_the_snobs_7.txt, 1886, 7
067c_dawns_big_move_4.txt, 1750, 4
m23c_abby_and_the_secret_society_11.txt, 1806, 11
118c_kristy_thomas_dog_trainer_1.txt, 1229, 1
097c_claudia_and_the_worlds_cutest_baby_10.txt, 1340, 10
009c_the_ghost_at_dawns_house_11.txt, 1790, 11
m20c_mary_anne_and_the_zoo_mystery_7.txt, 3430, 7
102c_mary_anne_and_the_little_princess_8.txt, 1357, 8
m24c_mary_anne_and_the_silent_witness_10.txt, 1783, 10
042c_jessi_and_the_dance_school_phantom_2.txt, 2980, 2
109c_mary_anne_to_the_rescue_4.txt, 1333, 4
096c_abbys_lucky_thirteen_9.txt, 2517, 9
130c_staceys_movie_11.txt, 1332, 11
m10c_stacey_and_the_mystery_money_3.txt, 2136, 3
027c_jessi_and_the_superbrat_14.txt, 1366, 14
m19c_kristy_and_the_missing_fortune_9.txt, 1703, 9
026c_claudia_and_the_sad_goodbye_7.txt, 1940, 7
130c_staceys_movie_4.txt, 2228, 4
086c_mary_anne_and_camp_bsc_9.txt, 1830, 9
053c_kristy_for_president_6.txt, 1632, 6
m16c_claudia_and_the_clue_in_the_photograph_13.txt, 1960, 13
090c_welcome_to_the_bsc_abby_13.txt, 901, 13
027c_jessi_and_the_superbrat_10.txt, 1667, 10
131c_the_fire_at_mary_annes_house_13.txt, 1592, 13
m10c_stacey_and_the_mystery_money_7.txt, 2536, 7
130c_staceys_movie_15.txt, 1262, 15
053c_kristy_for_president_2.txt, 2517, 2
107c_mind_your_own_business_kristy_8.txt, 1098, 8
026c_claudia_and_the_sad_goodbye_3.txt, 1898, 3
m20c_mary_anne_and_the_zoo_mystery_3.txt, 2198, 3
009c_the_ghost_at_dawns_house_15.txt, 1894, 15
097c_claudia_and_the_worlds_cutest_baby_14.txt, 767, 14
064c_dawns_family_feud_13.txt, 1070, 13
042c_jessi_and_the_dance_school_phantom_6.txt, 2054, 6
m24c_mary_anne_and_the_silent_witness_14.txt, 1788, 14
058c_staceys_choice_8.txt, 1473, 8
011c_kristy_and_the_snobs_3.txt, 1846, 3
m35c_abby_and_the_notorius_neighbor_1.txt, 1841, 1
064c_dawns_family_feud_1.txt, 1896, 1
m11c_claudia_and_the_mystery_at_the_museum_10.txt, 1866, 10
044c_dawn_and_the_big_sleepover_9.txt, 1704, 9
118c_kristy_thomas_dog_trainer_5.txt, 1762, 5
050c_dawns_big_date_9.txt, 1513, 9
m23c_abby_and_the_secret_society_15.txt, 1923, 15
m22c_stacey_and_the_haunted_masquerade_3.txt, 1793, 3
028c_welcome_back_stacey_1.txt, 1902, 1
115c_jessis_big_break_13.txt, 1187, 13
113c_claudia_makes_up_her_mind_14.txt, 1601, 14
069c_get_well_soon_mallory_15.txt, 688, 15
094c_stacey_mcgill_super_sitter_15.txt, 1426, 15
120c_mary_anne_and_the_playground_fight_4.txt, 967, 4
081c_kristy_and_mr_mom_2.txt, 3072, 2
022c_jessi_ramsey_petsitter_1.txt, 2155, 1
m14c_stacey_and_the_mystery_at_the_mall_10.txt, 1947, 10
029c_mallory_and_the_mystery_diary_10.txt, 1535, 10
021c_mallory_and_the_trouble_with_twins_13.txt, 2039, 13
126c_the_all_new_mallory_pike_1.txt, 1504, 1
100c_kristys_worst_idea_12.txt, 1186, 12
m35c_abby_and_the_notorius_neighbor_14.txt, 1380, 14
098c_dawn_and_too_many_sitters_12.txt, 1420, 12
048c_jessis_wish_7.txt, 1571, 7
m26c_dawn_schafer_undercover_babysitter_2.txt, 2322, 2
003c_the_truth_about_stacey_6.txt, 1365, 6
001c_kristys_great_idea_10.txt, 2429, 10
m09c_kristy_and_the_haunted_mansion_5.txt, 1644, 5
054c_mallory_and_the_dream_horse_6.txt, 2124, 6
049c_claudia_and_the_genius_of_elm_street_14.txt, 1426, 14
serr1c_logans_story_9.txt, 1704, 9
m01c_stacey_and_the_mystery_ring_8.txt, 1849, 8
075c_jessis_horrible_prank_6.txt, 1221, 6
027c_jessi_and_the_superbrat_3.txt, 1704, 3
068c_jessi_and_the_bad_babysitter_15.txt, 1274, 15
m30c_kristy_and_the_mystery_train_1.txt, 1691, 1
061c_jessi_and_the_awful_secret_5.txt, 2324, 5
121c_abby_in_wonderland_9.txt, 1624, 9
038c_kristys_mystery_admirer_6.txt, 1480, 6
m34c_mary_anne_and_the_haunted_bookstore_11.txt, 716, 11
111c_staceys_secret_friend_5.txt, 1479, 5
114c_the_secret_life_of_mary_anne_spier_7.txt, 2063, 7
m15c_kristy_and_the_vampires_9.txt, 1781, 9
028c_welcome_back_stacey_12.txt, 1415, 12
016c_jessis_secret_language_1.txt, 1988, 1
017c_mary_annes_bad_luck_mystery_14.txt, 1996, 14
068c_jessi_and_the_bad_babysitter_2.txt, 3278, 2
090c_welcome_to_the_bsc_abby_7.txt, 1861, 7
m13c_mary_anne_and_the_library_mystery_15.txt, 2040, 15
103c_happy_holidays_jessi_9.txt, 1365, 9
092c_mallorys_christmas_wish_11.txt, 1076, 11
010c_logan_likes_mary_anne_5.txt, 1902, 5
076c_staceys_lie_8.txt, 2882, 8
074c_kristy_and_the_copycat_14.txt, 1541, 14
m36c_kristy_and_the_cat_burglar_13.txt, 2020, 13
094c_stacey_mcgill_super_sitter_6.txt, 1279, 6
122c_kristy_in_charge_9.txt, 1478, 9
040c_claudia_and_the_middle_school_mystery_3.txt, 2020, 3
m32c_claudia_and_the_mystery_in_the_painting_4.txt, 2277, 4
116c_abby_and_the_best_kid_ever_7.txt, 1714, 7
serr2c_logan_bruno_boy_babysitter_1.txt, 2769, 1
m30c_kristy_and_the_mystery_train_12.txt, 1008, 12
057c_dawn_saves_the_planet_14.txt, 1682, 14
023c_dawn_on_the_coast_10.txt, 1325, 10
115c_jessis_big_break_3.txt, 1605, 3
092c_mallorys_christmas_wish_1.txt, 2050, 1
115c_jessis_big_break_2.txt, 2978, 2
023c_dawn_on_the_coast_11.txt, 1419, 11
057c_dawn_saves_the_planet_15.txt, 1407, 15
m30c_kristy_and_the_mystery_train_13.txt, 678, 13
m32c_claudia_and_the_mystery_in_the_painting_5.txt, 3083, 5
040c_claudia_and_the_middle_school_mystery_2.txt, 2045, 2
116c_abby_and_the_best_kid_ever_6.txt, 1865, 6
043c_staceys_emergency_1.txt, 2069, 1
122c_kristy_in_charge_8.txt, 1755, 8
m36c_kristy_and_the_cat_burglar_12.txt, 1610, 12
094c_stacey_mcgill_super_sitter_7.txt, 1954, 7
074c_kristy_and_the_copycat_15.txt, 920, 15
076c_staceys_lie_9.txt, 1393, 9
092c_mallorys_christmas_wish_10.txt, 1561, 10
103c_happy_holidays_jessi_8.txt, 1221, 8
010c_logan_likes_mary_anne_4.txt, 1828, 4
090c_welcome_to_the_bsc_abby_6.txt, 1634, 6
068c_jessi_and_the_bad_babysitter_3.txt, 1713, 3
m13c_mary_anne_and_the_library_mystery_14.txt, 1738, 14
028c_welcome_back_stacey_13.txt, 1748, 13
017c_mary_annes_bad_luck_mystery_15.txt, 1254, 15
m15c_kristy_and_the_vampires_8.txt, 1486, 8
114c_the_secret_life_of_mary_anne_spier_6.txt, 1725, 6
m34c_mary_anne_and_the_haunted_bookstore_10.txt, 2688, 10
111c_staceys_secret_friend_4.txt, 1474, 4
061c_jessi_and_the_awful_secret_4.txt, 1586, 4
038c_kristys_mystery_admirer_7.txt, 1304, 7
121c_abby_in_wonderland_8.txt, 1289, 8
075c_jessis_horrible_prank_7.txt, 1305, 7
068c_jessi_and_the_bad_babysitter_14.txt, 1875, 14
027c_jessi_and_the_superbrat_2.txt, 1788, 2
m01c_stacey_and_the_mystery_ring_9.txt, 1601, 9
054c_mallory_and_the_dream_horse_7.txt, 2076, 7
serr1c_logans_story_8.txt, 1920, 8
049c_claudia_and_the_genius_of_elm_street_15.txt, 930, 15
m09c_kristy_and_the_haunted_mansion_4.txt, 1898, 4
098c_dawn_and_too_many_sitters_13.txt, 1384, 13
m35c_abby_and_the_notorius_neighbor_15.txt, 1545, 15
100c_kristys_worst_idea_13.txt, 1119, 13
001c_kristys_great_idea_11.txt, 1549, 11
003c_the_truth_about_stacey_7.txt, 1774, 7
m26c_dawn_schafer_undercover_babysitter_3.txt, 1914, 3
048c_jessis_wish_6.txt, 1640, 6
029c_mallory_and_the_mystery_diary_11.txt, 1509, 11
m14c_stacey_and_the_mystery_at_the_mall_11.txt, 1641, 11
081c_kristy_and_mr_mom_3.txt, 2154, 3
021c_mallory_and_the_trouble_with_twins_12.txt, 1785, 12
069c_get_well_soon_mallory_14.txt, 1909, 14
120c_mary_anne_and_the_playground_fight_5.txt, 1832, 5
094c_stacey_mcgill_super_sitter_14.txt, 1005, 14
m22c_stacey_and_the_haunted_masquerade_2.txt, 2652, 2
113c_claudia_makes_up_her_mind_15.txt, 1686, 15
115c_jessis_big_break_12.txt, 1247, 12
m23c_abby_and_the_secret_society_14.txt, 1622, 14
050c_dawns_big_date_8.txt, 1956, 8
067c_dawns_big_move_1.txt, 2542, 1
044c_dawn_and_the_big_sleepover_8.txt, 1808, 8
118c_kristy_thomas_dog_trainer_4.txt, 1360, 4
m11c_claudia_and_the_mystery_at_the_museum_11.txt, 1840, 11
011c_kristy_and_the_snobs_2.txt, 1657, 2
071c_claudia_and_the_perfect_boy_1.txt, 1915, 1
058c_staceys_choice_9.txt, 1388, 9
m24c_mary_anne_and_the_silent_witness_15.txt, 1287, 15
042c_jessi_and_the_dance_school_phantom_7.txt, 2141, 7
109c_mary_anne_to_the_rescue_1.txt, 2079, 1
064c_dawns_family_feud_12.txt, 1093, 12
097c_claudia_and_the_worlds_cutest_baby_15.txt, 634, 15
m20c_mary_anne_and_the_zoo_mystery_2.txt, 2450, 2
009c_the_ghost_at_dawns_house_14.txt, 1407, 14
026c_claudia_and_the_sad_goodbye_2.txt, 2068, 2
107c_mind_your_own_business_kristy_9.txt, 2004, 9
130c_staceys_movie_1.txt, 1442, 1
053c_kristy_for_president_3.txt, 2833, 3
131c_the_fire_at_mary_annes_house_12.txt, 1504, 12
m10c_stacey_and_the_mystery_money_6.txt, 2190, 6
130c_staceys_movie_14.txt, 1422, 14
027c_jessi_and_the_superbrat_11.txt, 1843, 11
090c_welcome_to_the_bsc_abby_12.txt, 1288, 12
m10c_stacey_and_the_mystery_money_4.txt, 2579, 4
131c_the_fire_at_mary_annes_house_10.txt, 1973, 10
090c_welcome_to_the_bsc_abby_10.txt, 1616, 10
027c_jessi_and_the_superbrat_13.txt, 1780, 13
130c_staceys_movie_3.txt, 1634, 3
m16c_claudia_and_the_clue_in_the_photograph_14.txt, 2289, 14
053c_kristy_for_president_1.txt, 1858, 1
064c_dawns_family_feud_10.txt, 1986, 10
109c_mary_anne_to_the_rescue_3.txt, 2037, 3
042c_jessi_and_the_dance_school_phantom_5.txt, 2363, 5
124c_stacey_mcgill_matchmaker_9.txt, 1477, 9
064c_dawns_family_feud_2.txt, 1802, 2
m11c_claudia_and_the_mystery_at_the_museum_13.txt, 1892, 13
m35c_abby_and_the_notorius_neighbor_2.txt, 2872, 2
071c_claudia_and_the_perfect_boy_3.txt, 2139, 3
m17c_dawn_and_the_halloween_mystery_9.txt, 1946, 9
067c_dawns_big_move_3.txt, 1764, 3
074c_kristy_and_the_copycat_8.txt, 819, 8
118c_kristy_thomas_dog_trainer_6.txt, 1690, 6
106c_claudia_queen_of_the_seventh_grade_9.txt, 1350, 9
120c_mary_anne_and_the_playground_fight_7.txt, 1822, 7
115c_jessis_big_break_10.txt, 1168, 10
028c_welcome_back_stacey_2.txt, 1683, 2
048c_jessis_wish_4.txt, 1575, 4
m26c_dawn_schafer_undercover_babysitter_1.txt, 1990, 1
003c_the_truth_about_stacey_5.txt, 2349, 5
001c_kristys_great_idea_13.txt, 2167, 13
126c_the_all_new_mallory_pike_2.txt, 2444, 2
100c_kristys_worst_idea_11.txt, 1452, 11
098c_dawn_and_too_many_sitters_11.txt, 1372, 11
021c_mallory_and_the_trouble_with_twins_10.txt, 1526, 10
081c_kristy_and_mr_mom_1.txt, 1913, 1
022c_jessi_ramsey_petsitter_2.txt, 2240, 2
m14c_stacey_and_the_mystery_at_the_mall_13.txt, 1669, 13
029c_mallory_and_the_mystery_diary_13.txt, 1707, 13
054c_mallory_and_the_dream_horse_5.txt, 1903, 5
m09c_kristy_and_the_haunted_mansion_6.txt, 1996, 6
119c_staceys_ex_boyfriend_9.txt, 1251, 9
103c_happy_holidays_jessi_15.txt, 803, 15
075c_jessis_horrible_prank_5.txt, 1576, 5
093c_mary_anne_and_the_memory_garden_15.txt, 1099, 15
100c_kristys_worst_idea_9.txt, 1481, 9
034c_mary_anne_and_too_many_boys_14.txt, 1355, 14
038c_kristys_mystery_admirer_5.txt, 1533, 5
061c_jessi_and_the_awful_secret_6.txt, 2006, 6
m30c_kristy_and_the_mystery_train_2.txt, 3746, 2
114c_the_secret_life_of_mary_anne_spier_4.txt, 1626, 4
075c_jessis_horrible_prank_14.txt, 2447, 14
111c_staceys_secret_friend_6.txt, 1308, 6
091c_claudia_and_the_first_thanksgiving_14.txt, 1665, 14
m34c_mary_anne_and_the_haunted_bookstore_12.txt, 2618, 12
018c_staceys_mistake_8.txt, 1310, 8
068c_jessi_and_the_bad_babysitter_1.txt, 2170, 1
090c_welcome_to_the_bsc_abby_4.txt, 2115, 4
028c_welcome_back_stacey_11.txt, 1435, 11
016c_jessis_secret_language_2.txt, 2501, 2
094c_stacey_mcgill_super_sitter_5.txt, 2514, 5
m36c_kristy_and_the_cat_burglar_10.txt, 2136, 10
010c_logan_likes_mary_anne_6.txt, 1811, 6
015c_little_miss_stoneybrook_and_dawn_9.txt, 1607, 9
092c_mallorys_christmas_wish_12.txt, 1651, 12
116c_abby_and_the_best_kid_ever_4.txt, 1600, 4
m32c_claudia_and_the_mystery_in_the_painting_7.txt, 1645, 7
043c_staceys_emergency_3.txt, 1945, 3
092c_mallorys_christmas_wish_2.txt, 2771, 2
023c_dawn_on_the_coast_13.txt, 1137, 13
m30c_kristy_and_the_mystery_train_11.txt, 926, 11
serr2c_logan_bruno_boy_babysitter_2.txt, 2764, 2
m30c_kristy_and_the_mystery_train_10.txt, 1652, 10
serr2c_logan_bruno_boy_babysitter_3.txt, 1728, 3
092c_mallorys_christmas_wish_3.txt, 1418, 3
115c_jessis_big_break_1.txt, 2119, 1
023c_dawn_on_the_coast_12.txt, 1731, 12
043c_staceys_emergency_2.txt, 2080, 2
116c_abby_and_the_best_kid_ever_5.txt, 986, 5
m32c_claudia_and_the_mystery_in_the_painting_6.txt, 1572, 6
040c_claudia_and_the_middle_school_mystery_1.txt, 1825, 1
010c_logan_likes_mary_anne_7.txt, 1686, 7
092c_mallorys_christmas_wish_13.txt, 1922, 13
015c_little_miss_stoneybrook_and_dawn_8.txt, 1698, 8
094c_stacey_mcgill_super_sitter_4.txt, 1806, 4
m36c_kristy_and_the_cat_burglar_11.txt, 1701, 11
016c_jessis_secret_language_3.txt, 1461, 3
028c_welcome_back_stacey_10.txt, 1278, 10
090c_welcome_to_the_bsc_abby_5.txt, 1019, 5
018c_staceys_mistake_9.txt, 1592, 9
091c_claudia_and_the_first_thanksgiving_15.txt, 1185, 15
111c_staceys_secret_friend_7.txt, 1315, 7
m34c_mary_anne_and_the_haunted_bookstore_13.txt, 1334, 13
075c_jessis_horrible_prank_15.txt, 1056, 15
114c_the_secret_life_of_mary_anne_spier_5.txt, 1416, 5
m30c_kristy_and_the_mystery_train_3.txt, 3075, 3
038c_kristys_mystery_admirer_4.txt, 1784, 4
061c_jessi_and_the_awful_secret_7.txt, 1261, 7
100c_kristys_worst_idea_8.txt, 2174, 8
034c_mary_anne_and_too_many_boys_15.txt, 1513, 15
103c_happy_holidays_jessi_14.txt, 1700, 14
027c_jessi_and_the_superbrat_1.txt, 2327, 1
093c_mary_anne_and_the_memory_garden_14.txt, 928, 14
075c_jessis_horrible_prank_4.txt, 1319, 4
119c_staceys_ex_boyfriend_8.txt, 2064, 8
m09c_kristy_and_the_haunted_mansion_7.txt, 1787, 7
054c_mallory_and_the_dream_horse_4.txt, 1690, 4
021c_mallory_and_the_trouble_with_twins_11.txt, 1733, 11
029c_mallory_and_the_mystery_diary_12.txt, 1379, 12
m14c_stacey_and_the_mystery_at_the_mall_12.txt, 1719, 12
022c_jessi_ramsey_petsitter_3.txt, 1898, 3
001c_kristys_great_idea_12.txt, 1569, 12
003c_the_truth_about_stacey_4.txt, 2094, 4
048c_jessis_wish_5.txt, 1920, 5
098c_dawn_and_too_many_sitters_10.txt, 932, 10
100c_kristys_worst_idea_10.txt, 1371, 10
126c_the_all_new_mallory_pike_3.txt, 2124, 3
115c_jessis_big_break_11.txt, 1810, 11
028c_welcome_back_stacey_3.txt, 2416, 3
m22c_stacey_and_the_haunted_masquerade_1.txt, 1934, 1
120c_mary_anne_and_the_playground_fight_6.txt, 1573, 6
106c_claudia_queen_of_the_seventh_grade_8.txt, 1375, 8
118c_kristy_thomas_dog_trainer_7.txt, 1574, 7
067c_dawns_big_move_2.txt, 2239, 2
074c_kristy_and_the_copycat_9.txt, 1526, 9
071c_claudia_and_the_perfect_boy_2.txt, 3340, 2
m35c_abby_and_the_notorius_neighbor_3.txt, 1691, 3
011c_kristy_and_the_snobs_1.txt, 2162, 1
m17c_dawn_and_the_halloween_mystery_8.txt, 1834, 8
m11c_claudia_and_the_mystery_at_the_museum_12.txt, 1671, 12
064c_dawns_family_feud_3.txt, 1848, 3
124c_stacey_mcgill_matchmaker_8.txt, 2346, 8
042c_jessi_and_the_dance_school_phantom_4.txt, 2391, 4
109c_mary_anne_to_the_rescue_2.txt, 3395, 2
m20c_mary_anne_and_the_zoo_mystery_1.txt, 2240, 1
064c_dawns_family_feud_11.txt, 1744, 11
m16c_claudia_and_the_clue_in_the_photograph_15.txt, 1439, 15
026c_claudia_and_the_sad_goodbye_1.txt, 2872, 1
130c_staceys_movie_2.txt, 2960, 2
027c_jessi_and_the_superbrat_12.txt, 1461, 12
090c_welcome_to_the_bsc_abby_11.txt, 1344, 11
m10c_stacey_and_the_mystery_money_5.txt, 1963, 5
131c_the_fire_at_mary_annes_house_11.txt, 1855, 11
m27c_claudia_and_the_lighthouse_ghost_1.txt, 1924, 1
042c_jessi_and_the_dance_school_phantom_12.txt, 1914, 12
m19c_kristy_and_the_missing_fortune_2.txt, 2753, 2
m10c_stacey_and_the_mystery_money_8.txt, 2008, 8
110c_abby_and_the_bad_sport_10.txt, 1854, 10
016c_jessis_secret_language_15.txt, 1652, 15
086c_mary_anne_and_camp_bsc_2.txt, 2649, 2
107c_mind_your_own_business_kristy_7.txt, 1919, 7
m10c_stacey_and_the_mystery_money_14.txt, 2311, 14
088c_farewell_dawn_14.txt, 1483, 14
m05c_mary_anne_and_the_secret_in_the_attic_11.txt, 1892, 11
014c_hello_mallory_12.txt, 1608, 12
102c_mary_anne_and_the_little_princess_3.txt, 1498, 3
042c_jessi_and_the_dance_school_phantom_9.txt, 2110, 9
117c_claudia_and_the_terrible_truth_11.txt, 1608, 11
096c_abbys_lucky_thirteen_2.txt, 1474, 2
083c_stacey_vs_the_bsc_10.txt, 2044, 10
058c_staceys_choice_7.txt, 1617, 7
025c_mary_anne_and_the_search_for_tigger_11.txt, 1745, 11
m17c_dawn_and_the_halloween_mystery_5.txt, 1970, 5
033c_claudia_and_the_great_search_13.txt, 1366, 13
124c_stacey_mcgill_matchmaker_5.txt, 885, 5
106c_claudia_queen_of_the_seventh_grade_5.txt, 1585, 5
077c_dwn_and_whitney_friends_forever_10.txt, 1499, 10
044c_dawn_and_the_big_sleepover_6.txt, 1483, 6
112c_kristy_and_the_sister_war_12.txt, 1628, 12
050c_dawns_big_date_6.txt, 1397, 6
074c_kristy_and_the_copycat_4.txt, 2050, 4
099c_staceys_broken_heart_14.txt, 1409, 14
003c_the_truth_about_stacey_11.txt, 2254, 11
025c_mary_anne_and_the_search_for_tigger_1.txt, 2361, 1
047c_mallory_on_strike_3.txt, 2181, 3
m18c_stacey_and_the_mystery_at_the_empty_house_2.txt, 2181, 2
071c_claudia_and_the_perfect_boy_12.txt, 1294, 12
089c_kristy_and_the_dirty_diapers_13.txt, 1680, 13
003c_the_truth_about_stacey_9.txt, 1826, 9
048c_jessis_wish_8.txt, 1574, 8
119c_staceys_ex_boyfriend_5.txt, 1689, 5
032c_kristy_and_the_secret_of_susan_13.txt, 1634, 13
serr1c_logans_story_6.txt, 1553, 6
102c_mary_anne_and_the_little_princess_15.txt, 788, 15
054c_mallory_and_the_dream_horse_9.txt, 2126, 9
100c_kristys_worst_idea_5.txt, 1745, 5
053c_kristy_for_president_13.txt, 1336, 13
m01c_stacey_and_the_mystery_ring_7.txt, 1850, 7
m29c_stacey_and_the_fashion_victim_1.txt, 2171, 1
007c_claudia_and_mean_jeanine_11.txt, 1904, 11
075c_jessis_horrible_prank_9.txt, 1611, 9
m02c_beware_dawn_1.txt, 2122, 1
038c_kristys_mystery_admirer_9.txt, 1878, 9
101c_claudia_kishi_middle_school_dropout_15.txt, 1486, 15
121c_abby_in_wonderland_6.txt, 1193, 6
125c_mary_anne_in_the_middle_13.txt, 1219, 13
087c_stacey_and_the_bad_girls_12.txt, 1869, 12
113c_claudia_makes_up_her_mind_2.txt, 2811, 2
114c_the_secret_life_of_mary_anne_spier_8.txt, 2008, 8
m15c_kristy_and_the_vampires_6.txt, 1994, 6
050c_dawns_big_date_14.txt, 2585, 14
090c_welcome_to_the_bsc_abby_8.txt, 1487, 8
018c_staceys_mistake_4.txt, 1627, 4
103c_happy_holidays_jessi_6.txt, 2174, 6
015c_little_miss_stoneybrook_and_dawn_5.txt, 1638, 5
045c_kristy_and_the_baby_parade_10.txt, 1582, 10
018c_staceys_mistake_10.txt, 1752, 10
069c_get_well_soon_mallory_1.txt, 2178, 1
081c_kristy_and_mr_mom_12.txt, 2280, 12
096c_abbys_lucky_thirteen_11.txt, 1116, 11
094c_stacey_mcgill_super_sitter_9.txt, 1024, 9
076c_staceys_lie_7.txt, 1966, 7
122c_kristy_in_charge_6.txt, 1224, 6
052c_mary_anne_plus_too_many_babies_13.txt, 1467, 13
021c_mallory_and_the_trouble_with_twins_2.txt, 2092, 2
116c_abby_and_the_best_kid_ever_8.txt, 1716, 8
128c_claudia_and_the_little_liar_13.txt, 1366, 13
m18c_stacey_and_the_mystery_at_the_empty_house_10.txt, 2053, 10
010c_logan_likes_mary_anne_13.txt, 1765, 13
114c_the_secret_life_of_mary_anne_spier_12.txt, 1712, 12
m32c_claudia_and_the_mystery_in_the_painting_14.txt, 1039, 14
044c_dawn_and_the_big_sleepover_12.txt, 1263, 12
015c_little_miss_stoneybrook_and_dawn_14.txt, 3116, 14
057c_dawn_saves_the_planet_1.txt, 2235, 1
m15c_kristy_and_the_vampires_15.txt, 876, 15
m15c_kristy_and_the_vampires_14.txt, 1809, 14
015c_little_miss_stoneybrook_and_dawn_15.txt, 958, 15
044c_dawn_and_the_big_sleepover_13.txt, 1268, 13
010c_logan_likes_mary_anne_12.txt, 1306, 12
m18c_stacey_and_the_mystery_at_the_empty_house_11.txt, 1877, 11
128c_claudia_and_the_little_liar_12.txt, 990, 12
116c_abby_and_the_best_kid_ever_9.txt, 1721, 9
m32c_claudia_and_the_mystery_in_the_painting_15.txt, 1627, 15
114c_the_secret_life_of_mary_anne_spier_13.txt, 730, 13
052c_mary_anne_plus_too_many_babies_12.txt, 1654, 12
122c_kristy_in_charge_7.txt, 1932, 7
021c_mallory_and_the_trouble_with_twins_3.txt, 1550, 3
081c_kristy_and_mr_mom_13.txt, 1476, 13
039c_poor_mallory_1.txt, 1771, 1
018c_staceys_mistake_11.txt, 1654, 11
076c_staceys_lie_6.txt, 1619, 6
094c_stacey_mcgill_super_sitter_8.txt, 1711, 8
096c_abbys_lucky_thirteen_10.txt, 1341, 10
015c_little_miss_stoneybrook_and_dawn_4.txt, 1645, 4
045c_kristy_and_the_baby_parade_11.txt, 1803, 11
103c_happy_holidays_jessi_7.txt, 1592, 7
097c_claudia_and_the_worlds_cutest_baby_1.txt, 2252, 1
018c_staceys_mistake_5.txt, 1978, 5
090c_welcome_to_the_bsc_abby_9.txt, 1472, 9
m15c_kristy_and_the_vampires_7.txt, 2067, 7
114c_the_secret_life_of_mary_anne_spier_9.txt, 1177, 9
125c_mary_anne_in_the_middle_12.txt, 1371, 12
113c_claudia_makes_up_her_mind_3.txt, 975, 3
087c_stacey_and_the_bad_girls_13.txt, 1308, 13
121c_abby_in_wonderland_7.txt, 1774, 7
101c_claudia_kishi_middle_school_dropout_14.txt, 2037, 14
038c_kristys_mystery_admirer_8.txt, 1825, 8
127c_abbys_un_valentine_1.txt, 1617, 1
007c_claudia_and_mean_jeanine_10.txt, 1693, 10
075c_jessis_horrible_prank_8.txt, 1762, 8
053c_kristy_for_president_12.txt, 1057, 12
100c_kristys_worst_idea_4.txt, 1203, 4
m01c_stacey_and_the_mystery_ring_6.txt, 1510, 6
065c_staceys_big_crush_1.txt, 2538, 1
serr1c_logans_story_7.txt, 1809, 7
032c_kristy_and_the_secret_of_susan_12.txt, 1531, 12
054c_mallory_and_the_dream_horse_8.txt, 1903, 8
102c_mary_anne_and_the_little_princess_14.txt, 1283, 14
119c_staceys_ex_boyfriend_4.txt, 1806, 4
048c_jessis_wish_9.txt, 1420, 9
003c_the_truth_about_stacey_8.txt, 1699, 8
089c_kristy_and_the_dirty_diapers_12.txt, 1102, 12
071c_claudia_and_the_perfect_boy_13.txt, 2846, 13
m18c_stacey_and_the_mystery_at_the_empty_house_3.txt, 2151, 3
047c_mallory_on_strike_2.txt, 1920, 2
099c_staceys_broken_heart_15.txt, 1262, 15
003c_the_truth_about_stacey_10.txt, 2105, 10
074c_kristy_and_the_copycat_5.txt, 1565, 5
050c_dawns_big_date_7.txt, 2021, 7
112c_kristy_and_the_sister_war_13.txt, 1217, 13
106c_claudia_queen_of_the_seventh_grade_4.txt, 2006, 4
077c_dwn_and_whitney_friends_forever_11.txt, 1549, 11
044c_dawn_and_the_big_sleepover_7.txt, 2213, 7
124c_stacey_mcgill_matchmaker_4.txt, 2058, 4
029c_mallory_and_the_mystery_diary_1.txt, 2346, 1
023c_dawn_on_the_coast_1.txt, 2213, 1
087c_stacey_and_the_bad_girls_1.txt, 2239, 1
033c_claudia_and_the_great_search_12.txt, 1557, 12
m17c_dawn_and_the_halloween_mystery_4.txt, 2017, 4
025c_mary_anne_and_the_search_for_tigger_10.txt, 1769, 10
062c_kristy_and_the_worst_kid_ever_1.txt, 2240, 1
058c_staceys_choice_6.txt, 1525, 6
083c_stacey_vs_the_bsc_11.txt, 1356, 11
102c_mary_anne_and_the_little_princess_2.txt, 3231, 2
096c_abbys_lucky_thirteen_3.txt, 3336, 3
117c_claudia_and_the_terrible_truth_10.txt, 1179, 10
042c_jessi_and_the_dance_school_phantom_8.txt, 1893, 8
014c_hello_mallory_13.txt, 1708, 13
m05c_mary_anne_and_the_secret_in_the_attic_10.txt, 1536, 10
072c_dawn_and_the_we_heart_kids_club_1.txt, 2313, 1
073c_mary_anne_and_miss_priss_1.txt, 2071, 1
107c_mind_your_own_business_kristy_6.txt, 1337, 6
086c_mary_anne_and_camp_bsc_3.txt, 1333, 3
033c_claudia_and_the_great_search_1.txt, 2047, 1
m10c_stacey_and_the_mystery_money_15.txt, 1914, 15
016c_jessis_secret_language_14.txt, 1745, 14
110c_abby_and_the_bad_sport_11.txt, 1263, 11
m10c_stacey_and_the_mystery_money_9.txt, 2429, 9
042c_jessi_and_the_dance_school_phantom_13.txt, 2277, 13
125c_mary_anne_in_the_middle_1.txt, 1385, 1
m19c_kristy_and_the_missing_fortune_3.txt, 1819, 3
110c_abby_and_the_bad_sport_13.txt, 2016, 13
m19c_kristy_and_the_missing_fortune_1.txt, 2222, 1
m27c_claudia_and_the_lighthouse_ghost_2.txt, 3402, 2
125c_mary_anne_in_the_middle_3.txt, 1689, 3
042c_jessi_and_the_dance_school_phantom_11.txt, 2365, 11
033c_claudia_and_the_great_search_3.txt, 2496, 3
086c_mary_anne_and_camp_bsc_1.txt, 2272, 1
107c_mind_your_own_business_kristy_4.txt, 1348, 4
m05c_mary_anne_and_the_secret_in_the_attic_12.txt, 1868, 12
014c_hello_mallory_11.txt, 1484, 11
048c_jessis_wish_15.txt, 1400, 15
073c_mary_anne_and_miss_priss_3.txt, 1779, 3
serr3c_shannons_story_14.txt, 1380, 14
072c_dawn_and_the_we_heart_kids_club_3.txt, 2761, 3
083c_stacey_vs_the_bsc_13.txt, 1748, 13
058c_staceys_choice_4.txt, 1129, 4
062c_kristy_and_the_worst_kid_ever_3.txt, 2233, 3
117c_claudia_and_the_terrible_truth_12.txt, 1421, 12
096c_abbys_lucky_thirteen_1.txt, 1755, 1
029c_mallory_and_the_mystery_diary_3.txt, 1601, 3
038c_kristys_mystery_admirer_15.txt, 1704, 15
124c_stacey_mcgill_matchmaker_6.txt, 2807, 6
025c_mary_anne_and_the_search_for_tigger_12.txt, 1732, 12
033c_claudia_and_the_great_search_10.txt, 1602, 10
m17c_dawn_and_the_halloween_mystery_6.txt, 1876, 6
087c_stacey_and_the_bad_girls_3.txt, 2451, 3
023c_dawn_on_the_coast_3.txt, 2019, 3
112c_kristy_and_the_sister_war_11.txt, 1466, 11
050c_dawns_big_date_5.txt, 2124, 5
074c_kristy_and_the_copycat_7.txt, 1003, 7
044c_dawn_and_the_big_sleepover_5.txt, 1014, 5
077c_dwn_and_whitney_friends_forever_13.txt, 1458, 13
118c_kristy_thomas_dog_trainer_9.txt, 1535, 9
106c_claudia_queen_of_the_seventh_grade_6.txt, 1686, 6
002c_claudia_and_the_phantom_phone_calls_15.txt, 627, 15
120c_mary_anne_and_the_playground_fight_8.txt, 1606, 8
003c_the_truth_about_stacey_12.txt, 2110, 12
025c_mary_anne_and_the_search_for_tigger_2.txt, 2001, 2
089c_kristy_and_the_dirty_diapers_10.txt, 1214, 10
m18c_stacey_and_the_mystery_at_the_empty_house_1.txt, 2120, 1
071c_claudia_and_the_perfect_boy_11.txt, 1630, 11
serr1c_logans_story_5.txt, 1686, 5
032c_kristy_and_the_secret_of_susan_10.txt, 1866, 10
119c_staceys_ex_boyfriend_6.txt, 1059, 6
m09c_kristy_and_the_haunted_mansion_9.txt, 1829, 9
007c_claudia_and_mean_jeanine_12.txt, 1404, 12
m29c_stacey_and_the_fashion_victim_2.txt, 2412, 2
065c_staceys_big_crush_3.txt, 1651, 3
m01c_stacey_and_the_mystery_ring_4.txt, 2060, 4
100c_kristys_worst_idea_6.txt, 1625, 6
053c_kristy_for_president_10.txt, 2408, 10
m08c_jessi_and_the_jewel_thieves_15.txt, 1937, 15
061c_jessi_and_the_awful_secret_9.txt, 1278, 9
127c_abbys_un_valentine_3.txt, 867, 3
121c_abby_in_wonderland_5.txt, 2007, 5
m02c_beware_dawn_2.txt, 1965, 2
m15c_kristy_and_the_vampires_5.txt, 1715, 5
087c_stacey_and_the_bad_girls_11.txt, 966, 11
113c_claudia_makes_up_her_mind_1.txt, 1852, 1
111c_staceys_secret_friend_9.txt, 1179, 9
125c_mary_anne_in_the_middle_10.txt, 1660, 10
018c_staceys_mistake_7.txt, 2019, 7
109c_mary_anne_to_the_rescue_15.txt, 1621, 15
096c_abbys_lucky_thirteen_12.txt, 1920, 12
063c_claudias_freind_friend_14.txt, 1220, 14
076c_staceys_lie_4.txt, 2116, 4
018c_staceys_mistake_13.txt, 1922, 13
039c_poor_mallory_3.txt, 2011, 3
069c_get_well_soon_mallory_2.txt, 2244, 2
081c_kristy_and_mr_mom_11.txt, 1291, 11
084c_dawn_and_the_school_spirit_war_15.txt, 548, 15
097c_claudia_and_the_worlds_cutest_baby_3.txt, 1602, 3
103c_happy_holidays_jessi_5.txt, 1472, 5
045c_kristy_and_the_baby_parade_13.txt, 1492, 13
015c_little_miss_stoneybrook_and_dawn_6.txt, 1806, 6
037c_dawn_and_the_older_boy_14.txt, 1663, 14
010c_logan_likes_mary_anne_9.txt, 1394, 9
107c_mind_your_own_business_kristy_15.txt, 680, 15
m32c_claudia_and_the_mystery_in_the_painting_8.txt, 2187, 8
114c_the_secret_life_of_mary_anne_spier_11.txt, 1479, 11
128c_claudia_and_the_little_liar_10.txt, 917, 10
m18c_stacey_and_the_mystery_at_the_empty_house_13.txt, 1761, 13
010c_logan_likes_mary_anne_10.txt, 2037, 10
021c_mallory_and_the_trouble_with_twins_1.txt, 2082, 1
122c_kristy_in_charge_5.txt, 1049, 5
079c_mary_anne_breaks_the_rules_14.txt, 1756, 14
052c_mary_anne_plus_too_many_babies_10.txt, 1729, 10
057c_dawn_saves_the_planet_2.txt, 1636, 2
044c_dawn_and_the_big_sleepover_11.txt, 2288, 11
044c_dawn_and_the_big_sleepover_10.txt, 1367, 10
057c_dawn_saves_the_planet_3.txt, 1925, 3
052c_mary_anne_plus_too_many_babies_11.txt, 1487, 11
079c_mary_anne_breaks_the_rules_15.txt, 1559, 15
122c_kristy_in_charge_4.txt, 1359, 4
114c_the_secret_life_of_mary_anne_spier_10.txt, 1000, 10
m32c_claudia_and_the_mystery_in_the_painting_9.txt, 1726, 9
107c_mind_your_own_business_kristy_14.txt, 1746, 14
010c_logan_likes_mary_anne_11.txt, 1633, 11
m18c_stacey_and_the_mystery_at_the_empty_house_12.txt, 1618, 12
128c_claudia_and_the_little_liar_11.txt, 1441, 11
045c_kristy_and_the_baby_parade_12.txt, 1938, 12
015c_little_miss_stoneybrook_and_dawn_7.txt, 1600, 7
103c_happy_holidays_jessi_4.txt, 1449, 4
097c_claudia_and_the_worlds_cutest_baby_2.txt, 2705, 2
084c_dawn_and_the_school_spirit_war_14.txt, 1236, 14
010c_logan_likes_mary_anne_8.txt, 1431, 8
037c_dawn_and_the_older_boy_15.txt, 437, 15
063c_claudias_freind_friend_15.txt, 1025, 15
076c_staceys_lie_5.txt, 1861, 5
096c_abbys_lucky_thirteen_13.txt, 1062, 13
081c_kristy_and_mr_mom_10.txt, 1905, 10
069c_get_well_soon_mallory_3.txt, 1605, 3
m25c_kristy_and_the_middle_school_vandal_14.txt, 1268, 14
039c_poor_mallory_2.txt, 2696, 2
018c_staceys_mistake_12.txt, 1669, 12
109c_mary_anne_to_the_rescue_14.txt, 1803, 14
018c_staceys_mistake_6.txt, 1479, 6
087c_stacey_and_the_bad_girls_10.txt, 1556, 10
125c_mary_anne_in_the_middle_11.txt, 1215, 11
111c_staceys_secret_friend_8.txt, 964, 8
m15c_kristy_and_the_vampires_4.txt, 1864, 4
m02c_beware_dawn_3.txt, 2026, 3
127c_abbys_un_valentine_2.txt, 4087, 2
061c_jessi_and_the_awful_secret_8.txt, 1842, 8
121c_abby_in_wonderland_4.txt, 1627, 4
m01c_stacey_and_the_mystery_ring_5.txt, 1981, 5
065c_staceys_big_crush_2.txt, 2922, 2
m08c_jessi_and_the_jewel_thieves_14.txt, 1615, 14
053c_kristy_for_president_11.txt, 2272, 11
100c_kristys_worst_idea_7.txt, 2232, 7
m29c_stacey_and_the_fashion_victim_3.txt, 1711, 3
007c_claudia_and_mean_jeanine_13.txt, 2162, 13
m09c_kristy_and_the_haunted_mansion_8.txt, 1738, 8
119c_staceys_ex_boyfriend_7.txt, 1378, 7
032c_kristy_and_the_secret_of_susan_11.txt, 1733, 11
serr1c_logans_story_4.txt, 1972, 4
071c_claudia_and_the_perfect_boy_10.txt, 1729, 10
089c_kristy_and_the_dirty_diapers_11.txt, 1681, 11
025c_mary_anne_and_the_search_for_tigger_3.txt, 1837, 3
003c_the_truth_about_stacey_13.txt, 3167, 13
047c_mallory_on_strike_1.txt, 2762, 1
120c_mary_anne_and_the_playground_fight_9.txt, 886, 9
044c_dawn_and_the_big_sleepover_4.txt, 2340, 4
077c_dwn_and_whitney_friends_forever_12.txt, 1396, 12
002c_claudia_and_the_phantom_phone_calls_14.txt, 2612, 14
106c_claudia_queen_of_the_seventh_grade_7.txt, 1798, 7
118c_kristy_thomas_dog_trainer_8.txt, 1457, 8
074c_kristy_and_the_copycat_6.txt, 1800, 6
050c_dawns_big_date_4.txt, 2575, 4
112c_kristy_and_the_sister_war_10.txt, 1805, 10
m17c_dawn_and_the_halloween_mystery_7.txt, 1980, 7
033c_claudia_and_the_great_search_11.txt, 1683, 11
025c_mary_anne_and_the_search_for_tigger_13.txt, 1544, 13
023c_dawn_on_the_coast_2.txt, 1841, 2
087c_stacey_and_the_bad_girls_2.txt, 2218, 2
038c_kristys_mystery_admirer_14.txt, 1495, 14
029c_mallory_and_the_mystery_diary_2.txt, 1505, 2
124c_stacey_mcgill_matchmaker_7.txt, 1243, 7
117c_claudia_and_the_terrible_truth_13.txt, 1561, 13
102c_mary_anne_and_the_little_princess_1.txt, 2075, 1
058c_staceys_choice_5.txt, 1544, 5
083c_stacey_vs_the_bsc_12.txt, 1501, 12
062c_kristy_and_the_worst_kid_ever_2.txt, 2500, 2
072c_dawn_and_the_we_heart_kids_club_2.txt, 2249, 2
serr3c_shannons_story_15.txt, 1472, 15
073c_mary_anne_and_miss_priss_2.txt, 2592, 2
048c_jessis_wish_14.txt, 963, 14
014c_hello_mallory_10.txt, 1673, 10
m05c_mary_anne_and_the_secret_in_the_attic_13.txt, 1525, 13
033c_claudia_and_the_great_search_2.txt, 2885, 2
107c_mind_your_own_business_kristy_5.txt, 1129, 5
042c_jessi_and_the_dance_school_phantom_10.txt, 2231, 10
125c_mary_anne_in_the_middle_2.txt, 3098, 2
m27c_claudia_and_the_lighthouse_ghost_3.txt, 1634, 3
110c_abby_and_the_bad_sport_12.txt, 1100, 12
130c_staceys_movie_9.txt, 983, 9
086c_mary_anne_and_camp_bsc_4.txt, 3008, 4
107c_mind_your_own_business_kristy_1.txt, 1773, 1
m10c_stacey_and_the_mystery_money_12.txt, 1775, 12
033c_claudia_and_the_great_search_6.txt, 1343, 6
016c_jessis_secret_language_13.txt, 1667, 13
m27c_claudia_and_the_lighthouse_ghost_7.txt, 2010, 7
125c_mary_anne_in_the_middle_6.txt, 1079, 6
042c_jessi_and_the_dance_school_phantom_14.txt, 2186, 14
m19c_kristy_and_the_missing_fortune_4.txt, 1625, 4
062c_kristy_and_the_worst_kid_ever_6.txt, 1485, 6
058c_staceys_choice_1.txt, 1803, 1
102c_mary_anne_and_the_little_princess_5.txt, 951, 5
109c_mary_anne_to_the_rescue_9.txt, 1489, 9
096c_abbys_lucky_thirteen_4.txt, 1580, 4
014c_hello_mallory_14.txt, 1625, 14
048c_jessis_wish_10.txt, 1491, 10
serr3c_shannons_story_11.txt, 2076, 11
073c_mary_anne_and_miss_priss_6.txt, 1993, 6
072c_dawn_and_the_we_heart_kids_club_6.txt, 1356, 6
088c_farewell_dawn_12.txt, 2185, 12
067c_dawns_big_move_9.txt, 1564, 9
112c_kristy_and_the_sister_war_14.txt, 1613, 14
074c_kristy_and_the_copycat_2.txt, 3150, 2
106c_claudia_queen_of_the_seventh_grade_3.txt, 1632, 3
002c_claudia_and_the_phantom_phone_calls_10.txt, 1859, 10
124c_stacey_mcgill_matchmaker_3.txt, 1868, 3
064c_dawns_family_feud_8.txt, 1269, 8
029c_mallory_and_the_mystery_diary_6.txt, 1575, 6
038c_kristys_mystery_admirer_10.txt, 1498, 10
m35c_abby_and_the_notorius_neighbor_8.txt, 1812, 8
071c_claudia_and_the_perfect_boy_9.txt, 1622, 9
087c_stacey_and_the_bad_girls_6.txt, 1379, 6
023c_dawn_on_the_coast_6.txt, 1849, 6
m17c_dawn_and_the_halloween_mystery_3.txt, 1949, 3
033c_claudia_and_the_great_search_15.txt, 1780, 15
089c_kristy_and_the_dirty_diapers_15.txt, 1009, 15
126c_the_all_new_mallory_pike_8.txt, 2106, 8
071c_claudia_and_the_perfect_boy_14.txt, 1127, 14
m18c_stacey_and_the_mystery_at_the_empty_house_4.txt, 1864, 4
022c_jessi_ramsey_petsitter_8.txt, 1424, 8
047c_mallory_on_strike_5.txt, 1885, 5
099c_staceys_broken_heart_12.txt, 2051, 12
028c_welcome_back_stacey_8.txt, 1780, 8
025c_mary_anne_and_the_search_for_tigger_7.txt, 1338, 7
m29c_stacey_and_the_fashion_victim_7.txt, 1884, 7
100c_kristys_worst_idea_3.txt, 1933, 3
053c_kristy_for_president_15.txt, 948, 15
m08c_jessi_and_the_jewel_thieves_10.txt, 1782, 10
065c_staceys_big_crush_6.txt, 1589, 6
m01c_stacey_and_the_mystery_ring_1.txt, 2044, 1
032c_kristy_and_the_secret_of_susan_15.txt, 1638, 15
102c_mary_anne_and_the_little_princess_13.txt, 1140, 13
119c_staceys_ex_boyfriend_3.txt, 1374, 3
125c_mary_anne_in_the_middle_15.txt, 1474, 15
087c_stacey_and_the_bad_girls_14.txt, 1450, 14
113c_claudia_makes_up_her_mind_4.txt, 1632, 4
101c_claudia_kishi_middle_school_dropout_13.txt, 1934, 13
127c_abbys_un_valentine_6.txt, 1135, 6
m30c_kristy_and_the_mystery_train_8.txt, 1564, 8
m02c_beware_dawn_7.txt, 1825, 7
m25c_kristy_and_the_middle_school_vandal_10.txt, 1688, 10
039c_poor_mallory_6.txt, 1815, 6
081c_kristy_and_mr_mom_14.txt, 1254, 14
069c_get_well_soon_mallory_7.txt, 1555, 7
076c_staceys_lie_1.txt, 2234, 1
063c_claudias_freind_friend_11.txt, 2109, 11
037c_dawn_and_the_older_boy_11.txt, 1945, 11
084c_dawn_and_the_school_spirit_war_10.txt, 1975, 10
097c_claudia_and_the_worlds_cutest_baby_6.txt, 1869, 6
015c_little_miss_stoneybrook_and_dawn_3.txt, 1774, 3
018c_staceys_mistake_2.txt, 2020, 2
109c_mary_anne_to_the_rescue_10.txt, 1666, 10
016c_jessis_secret_language_8.txt, 1420, 8
050c_dawns_big_date_12.txt, 1074, 12
m15c_kristy_and_the_vampires_13.txt, 2024, 13
015c_little_miss_stoneybrook_and_dawn_12.txt, 1745, 12
057c_dawn_saves_the_planet_7.txt, 1987, 7
092c_mallorys_christmas_wish_8.txt, 1647, 8
044c_dawn_and_the_big_sleepover_14.txt, 1526, 14
serr2c_logan_bruno_boy_babysitter_8.txt, 1876, 8
128c_claudia_and_the_little_liar_15.txt, 751, 15
010c_logan_likes_mary_anne_15.txt, 1664, 15
107c_mind_your_own_business_kristy_10.txt, 1287, 10
114c_the_secret_life_of_mary_anne_spier_14.txt, 839, 14
m32c_claudia_and_the_mystery_in_the_painting_12.txt, 2162, 12
043c_staceys_emergency_9.txt, 1755, 9
079c_mary_anne_breaks_the_rules_11.txt, 1228, 11
052c_mary_anne_plus_too_many_babies_15.txt, 1611, 15
021c_mallory_and_the_trouble_with_twins_4.txt, 1360, 4
052c_mary_anne_plus_too_many_babies_14.txt, 1468, 14
079c_mary_anne_breaks_the_rules_10.txt, 1539, 10
122c_kristy_in_charge_1.txt, 1334, 1
043c_staceys_emergency_8.txt, 2290, 8
021c_mallory_and_the_trouble_with_twins_5.txt, 1310, 5
010c_logan_likes_mary_anne_14.txt, 1910, 14
128c_claudia_and_the_little_liar_14.txt, 850, 14
m32c_claudia_and_the_mystery_in_the_painting_13.txt, 1125, 13
114c_the_secret_life_of_mary_anne_spier_15.txt, 697, 15
107c_mind_your_own_business_kristy_11.txt, 1553, 11
serr2c_logan_bruno_boy_babysitter_9.txt, 1290, 9
044c_dawn_and_the_big_sleepover_15.txt, 772, 15
092c_mallorys_christmas_wish_9.txt, 1412, 9
015c_little_miss_stoneybrook_and_dawn_13.txt, 1922, 13
057c_dawn_saves_the_planet_6.txt, 1620, 6
m15c_kristy_and_the_vampires_12.txt, 1741, 12
050c_dawns_big_date_13.txt, 1835, 13
016c_jessis_secret_language_9.txt, 1987, 9
109c_mary_anne_to_the_rescue_11.txt, 1136, 11
018c_staceys_mistake_3.txt, 1805, 3
037c_dawn_and_the_older_boy_10.txt, 1353, 10
015c_little_miss_stoneybrook_and_dawn_2.txt, 1769, 2
097c_claudia_and_the_worlds_cutest_baby_7.txt, 1749, 7
084c_dawn_and_the_school_spirit_war_11.txt, 1646, 11
103c_happy_holidays_jessi_1.txt, 1767, 1
069c_get_well_soon_mallory_6.txt, 1510, 6
081c_kristy_and_mr_mom_15.txt, 924, 15
039c_poor_mallory_7.txt, 1388, 7
m25c_kristy_and_the_middle_school_vandal_11.txt, 1450, 11
063c_claudias_freind_friend_10.txt, 1511, 10
m30c_kristy_and_the_mystery_train_9.txt, 1715, 9
m02c_beware_dawn_6.txt, 1707, 6
101c_claudia_kishi_middle_school_dropout_12.txt, 1849, 12
121c_abby_in_wonderland_1.txt, 1901, 1
127c_abbys_un_valentine_7.txt, 1359, 7
125c_mary_anne_in_the_middle_14.txt, 1119, 14
113c_claudia_makes_up_her_mind_5.txt, 1005, 5
087c_stacey_and_the_bad_girls_15.txt, 1128, 15
m15c_kristy_and_the_vampires_1.txt, 2002, 1
119c_staceys_ex_boyfriend_2.txt, 2536, 2
032c_kristy_and_the_secret_of_susan_14.txt, 1745, 14
serr1c_logans_story_1.txt, 2649, 1
102c_mary_anne_and_the_little_princess_12.txt, 1616, 12
m08c_jessi_and_the_jewel_thieves_11.txt, 1920, 11
053c_kristy_for_president_14.txt, 1717, 14
100c_kristys_worst_idea_2.txt, 3371, 2
065c_staceys_big_crush_7.txt, 1487, 7
m29c_stacey_and_the_fashion_victim_6.txt, 1782, 6
099c_staceys_broken_heart_13.txt, 1059, 13
025c_mary_anne_and_the_search_for_tigger_6.txt, 1657, 6
028c_welcome_back_stacey_9.txt, 1940, 9
047c_mallory_on_strike_4.txt, 1829, 4
m18c_stacey_and_the_mystery_at_the_empty_house_5.txt, 1811, 5
071c_claudia_and_the_perfect_boy_15.txt, 2591, 15
022c_jessi_ramsey_petsitter_9.txt, 1548, 9
089c_kristy_and_the_dirty_diapers_14.txt, 1227, 14
126c_the_all_new_mallory_pike_9.txt, 1325, 9
023c_dawn_on_the_coast_7.txt, 1710, 7
087c_stacey_and_the_bad_girls_7.txt, 1460, 7
071c_claudia_and_the_perfect_boy_8.txt, 1884, 8
m35c_abby_and_the_notorius_neighbor_9.txt, 1405, 9
033c_claudia_and_the_great_search_14.txt, 1421, 14
m17c_dawn_and_the_halloween_mystery_2.txt, 2203, 2
064c_dawns_family_feud_9.txt, 1415, 9
124c_stacey_mcgill_matchmaker_2.txt, 3288, 2
038c_kristys_mystery_admirer_11.txt, 1886, 11
029c_mallory_and_the_mystery_diary_7.txt, 1579, 7
002c_claudia_and_the_phantom_phone_calls_11.txt, 2338, 11
106c_claudia_queen_of_the_seventh_grade_2.txt, 3059, 2
044c_dawn_and_the_big_sleepover_1.txt, 1978, 1
067c_dawns_big_move_8.txt, 1345, 8
050c_dawns_big_date_1.txt, 2488, 1
074c_kristy_and_the_copycat_3.txt, 2308, 3
112c_kristy_and_the_sister_war_15.txt, 1322, 15
072c_dawn_and_the_we_heart_kids_club_7.txt, 1435, 7
073c_mary_anne_and_miss_priss_7.txt, 2250, 7
serr3c_shannons_story_10.txt, 1463, 10
088c_farewell_dawn_13.txt, 1213, 13
048c_jessis_wish_11.txt, 1670, 11
014c_hello_mallory_15.txt, 1463, 15
102c_mary_anne_and_the_little_princess_4.txt, 2516, 4
096c_abbys_lucky_thirteen_5.txt, 2426, 5
109c_mary_anne_to_the_rescue_8.txt, 909, 8
062c_kristy_and_the_worst_kid_ever_7.txt, 2691, 7
042c_jessi_and_the_dance_school_phantom_15.txt, 2640, 15
125c_mary_anne_in_the_middle_7.txt, 1724, 7
m27c_claudia_and_the_lighthouse_ghost_6.txt, 1303, 6
m19c_kristy_and_the_missing_fortune_5.txt, 1938, 5
016c_jessis_secret_language_12.txt, 1766, 12
086c_mary_anne_and_camp_bsc_5.txt, 2007, 5
130c_staceys_movie_8.txt, 1624, 8
033c_claudia_and_the_great_search_7.txt, 1763, 7
m10c_stacey_and_the_mystery_money_13.txt, 2364, 13
016c_jessis_secret_language_10.txt, 1838, 10
053c_kristy_for_president_8.txt, 1705, 8
m10c_stacey_and_the_mystery_money_11.txt, 2138, 11
033c_claudia_and_the_great_search_5.txt, 1756, 5
026c_claudia_and_the_sad_goodbye_9.txt, 1616, 9
086c_mary_anne_and_camp_bsc_7.txt, 1772, 7
107c_mind_your_own_business_kristy_2.txt, 2954, 2
m19c_kristy_and_the_missing_fortune_7.txt, 1856, 7
m27c_claudia_and_the_lighthouse_ghost_4.txt, 1877, 4
125c_mary_anne_in_the_middle_5.txt, 1427, 5
110c_abby_and_the_bad_sport_15.txt, 926, 15
117c_claudia_and_the_terrible_truth_14.txt, 1677, 14
096c_abbys_lucky_thirteen_7.txt, 1251, 7
102c_mary_anne_and_the_little_princess_6.txt, 2707, 6
083c_stacey_vs_the_bsc_15.txt, 790, 15
058c_staceys_choice_2.txt, 3473, 2
062c_kristy_and_the_worst_kid_ever_5.txt, 1822, 5
088c_farewell_dawn_11.txt, 1476, 11
m20c_mary_anne_and_the_zoo_mystery_9.txt, 1871, 9
serr3c_shannons_story_12.txt, 1656, 12
073c_mary_anne_and_miss_priss_5.txt, 1466, 5
072c_dawn_and_the_we_heart_kids_club_5.txt, 1183, 5
m05c_mary_anne_and_the_secret_in_the_attic_14.txt, 2054, 14
048c_jessis_wish_13.txt, 1463, 13
044c_dawn_and_the_big_sleepover_3.txt, 1903, 3
077c_dwn_and_whitney_friends_forever_15.txt, 1197, 15
002c_claudia_and_the_phantom_phone_calls_13.txt, 1447, 13
074c_kristy_and_the_copycat_1.txt, 2705, 1
050c_dawns_big_date_3.txt, 2696, 3
025c_mary_anne_and_the_search_for_tigger_14.txt, 2061, 14
011c_kristy_and_the_snobs_9.txt, 1845, 9
087c_stacey_and_the_bad_girls_5.txt, 2000, 5
023c_dawn_on_the_coast_5.txt, 1414, 5
038c_kristys_mystery_admirer_13.txt, 1651, 13
029c_mallory_and_the_mystery_diary_5.txt, 1354, 5
081c_kristy_and_mr_mom_8.txt, 1942, 8
m18c_stacey_and_the_mystery_at_the_empty_house_7.txt, 1966, 7
m26c_dawn_schafer_undercover_babysitter_8.txt, 1882, 8
003c_the_truth_about_stacey_14.txt, 2824, 14
m22c_stacey_and_the_haunted_masquerade_9.txt, 1977, 9
025c_mary_anne_and_the_search_for_tigger_4.txt, 1740, 4
099c_staceys_broken_heart_11.txt, 1724, 11
047c_mallory_on_strike_6.txt, 1514, 6
065c_staceys_big_crush_5.txt, 1611, 5
m01c_stacey_and_the_mystery_ring_2.txt, 2076, 2
m08c_jessi_and_the_jewel_thieves_13.txt, 1470, 13
m29c_stacey_and_the_fashion_victim_4.txt, 1849, 4
007c_claudia_and_mean_jeanine_14.txt, 1213, 14
027c_jessi_and_the_superbrat_9.txt, 1479, 9
102c_mary_anne_and_the_little_princess_10.txt, 1088, 10
serr1c_logans_story_3.txt, 1230, 3
113c_claudia_makes_up_her_mind_7.txt, 1511, 7
m15c_kristy_and_the_vampires_3.txt, 2246, 3
m02c_beware_dawn_4.txt, 1842, 4
127c_abbys_un_valentine_5.txt, 2300, 5
101c_claudia_kishi_middle_school_dropout_10.txt, 1665, 10
121c_abby_in_wonderland_3.txt, 1186, 3
103c_happy_holidays_jessi_3.txt, 1747, 3
084c_dawn_and_the_school_spirit_war_13.txt, 1244, 13
097c_claudia_and_the_worlds_cutest_baby_5.txt, 1719, 5
045c_kristy_and_the_baby_parade_15.txt, 1515, 15
037c_dawn_and_the_older_boy_12.txt, 1643, 12
096c_abbys_lucky_thirteen_14.txt, 1078, 14
063c_claudias_freind_friend_12.txt, 1781, 12
076c_staceys_lie_2.txt, 2826, 2
018c_staceys_mistake_15.txt, 1399, 15
m25c_kristy_and_the_middle_school_vandal_13.txt, 1686, 13
039c_poor_mallory_5.txt, 1612, 5
069c_get_well_soon_mallory_4.txt, 1751, 4
050c_dawns_big_date_11.txt, 2003, 11
018c_staceys_mistake_1.txt, 1972, 1
068c_jessi_and_the_bad_babysitter_8.txt, 1992, 8
109c_mary_anne_to_the_rescue_13.txt, 1229, 13
115c_jessis_big_break_9.txt, 970, 9
057c_dawn_saves_the_planet_4.txt, 1791, 4
015c_little_miss_stoneybrook_and_dawn_11.txt, 1981, 11
m15c_kristy_and_the_vampires_10.txt, 1932, 10
021c_mallory_and_the_trouble_with_twins_7.txt, 1912, 7
122c_kristy_in_charge_3.txt, 1007, 3
079c_mary_anne_breaks_the_rules_12.txt, 945, 12
107c_mind_your_own_business_kristy_13.txt, 1496, 13
m32c_claudia_and_the_mystery_in_the_painting_11.txt, 1811, 11
040c_claudia_and_the_middle_school_mystery_9.txt, 1597, 9
m18c_stacey_and_the_mystery_at_the_empty_house_15.txt, 1812, 15
m32c_claudia_and_the_mystery_in_the_painting_10.txt, 1953, 10
040c_claudia_and_the_middle_school_mystery_8.txt, 1481, 8
107c_mind_your_own_business_kristy_12.txt, 1471, 12
m18c_stacey_and_the_mystery_at_the_empty_house_14.txt, 2277, 14
021c_mallory_and_the_trouble_with_twins_6.txt, 1740, 6
079c_mary_anne_breaks_the_rules_13.txt, 925, 13
122c_kristy_in_charge_2.txt, 2673, 2
115c_jessis_big_break_8.txt, 1753, 8
m15c_kristy_and_the_vampires_11.txt, 2077, 11
057c_dawn_saves_the_planet_5.txt, 1995, 5
015c_little_miss_stoneybrook_and_dawn_10.txt, 1502, 10
109c_mary_anne_to_the_rescue_12.txt, 1550, 12
068c_jessi_and_the_bad_babysitter_9.txt, 1384, 9
050c_dawns_big_date_10.txt, 2678, 10
063c_claudias_freind_friend_13.txt, 2123, 13
076c_staceys_lie_3.txt, 3911, 3
096c_abbys_lucky_thirteen_15.txt, 901, 15
069c_get_well_soon_mallory_5.txt, 1401, 5
039c_poor_mallory_4.txt, 1679, 4
m25c_kristy_and_the_middle_school_vandal_12.txt, 1636, 12
018c_staceys_mistake_14.txt, 1686, 14
045c_kristy_and_the_baby_parade_14.txt, 1636, 14
015c_little_miss_stoneybrook_and_dawn_1.txt, 2326, 1
097c_claudia_and_the_worlds_cutest_baby_4.txt, 1774, 4
084c_dawn_and_the_school_spirit_war_12.txt, 993, 12
103c_happy_holidays_jessi_2.txt, 3034, 2
037c_dawn_and_the_older_boy_13.txt, 1406, 13
127c_abbys_un_valentine_4.txt, 1913, 4
121c_abby_in_wonderland_2.txt, 3173, 2
101c_claudia_kishi_middle_school_dropout_11.txt, 2036, 11
m02c_beware_dawn_5.txt, 1575, 5
m15c_kristy_and_the_vampires_2.txt, 2481, 2
113c_claudia_makes_up_her_mind_6.txt, 1472, 6
102c_mary_anne_and_the_little_princess_11.txt, 1539, 11
serr1c_logans_story_2.txt, 2206, 2
119c_staceys_ex_boyfriend_1.txt, 1481, 1
027c_jessi_and_the_superbrat_8.txt, 1617, 8
007c_claudia_and_mean_jeanine_15.txt, 1368, 15
m29c_stacey_and_the_fashion_victim_5.txt, 1889, 5
m01c_stacey_and_the_mystery_ring_3.txt, 1841, 3
065c_staceys_big_crush_4.txt, 1931, 4
m08c_jessi_and_the_jewel_thieves_12.txt, 1683, 12
100c_kristys_worst_idea_1.txt, 1915, 1
047c_mallory_on_strike_7.txt, 1938, 7
025c_mary_anne_and_the_search_for_tigger_5.txt, 1402, 5
m22c_stacey_and_the_haunted_masquerade_8.txt, 1740, 8
099c_staceys_broken_heart_10.txt, 2726, 10
m26c_dawn_schafer_undercover_babysitter_9.txt, 1742, 9
081c_kristy_and_mr_mom_9.txt, 1526, 9
m18c_stacey_and_the_mystery_at_the_empty_house_6.txt, 1969, 6
029c_mallory_and_the_mystery_diary_4.txt, 1517, 4
038c_kristys_mystery_admirer_12.txt, 2019, 12
124c_stacey_mcgill_matchmaker_1.txt, 1679, 1
m17c_dawn_and_the_halloween_mystery_1.txt, 1882, 1
011c_kristy_and_the_snobs_8.txt, 1825, 8
025c_mary_anne_and_the_search_for_tigger_15.txt, 1589, 15
023c_dawn_on_the_coast_4.txt, 1742, 4
087c_stacey_and_the_bad_girls_4.txt, 2432, 4
050c_dawns_big_date_2.txt, 2677, 2
044c_dawn_and_the_big_sleepover_2.txt, 2017, 2
077c_dwn_and_whitney_friends_forever_14.txt, 949, 14
002c_claudia_and_the_phantom_phone_calls_12.txt, 1527, 12
106c_claudia_queen_of_the_seventh_grade_1.txt, 2007, 1
048c_jessis_wish_12.txt, 1363, 12
m20c_mary_anne_and_the_zoo_mystery_8.txt, 2878, 8
088c_farewell_dawn_10.txt, 1541, 10
072c_dawn_and_the_we_heart_kids_club_4.txt, 1194, 4
073c_mary_anne_and_miss_priss_4.txt, 1532, 4
serr3c_shannons_story_13.txt, 992, 13
058c_staceys_choice_3.txt, 1628, 3
083c_stacey_vs_the_bsc_14.txt, 1339, 14
062c_kristy_and_the_worst_kid_ever_4.txt, 2001, 4
096c_abbys_lucky_thirteen_6.txt, 966, 6
117c_claudia_and_the_terrible_truth_15.txt, 1230, 15
102c_mary_anne_and_the_little_princess_7.txt, 1244, 7
110c_abby_and_the_bad_sport_14.txt, 1026, 14
m19c_kristy_and_the_missing_fortune_6.txt, 1832, 6
125c_mary_anne_in_the_middle_4.txt, 1202, 4
m27c_claudia_and_the_lighthouse_ghost_5.txt, 1877, 5
033c_claudia_and_the_great_search_4.txt, 1795, 4
m10c_stacey_and_the_mystery_money_10.txt, 2118, 10
107c_mind_your_own_business_kristy_3.txt, 1030, 3
086c_mary_anne_and_camp_bsc_6.txt, 1463, 6
026c_claudia_and_the_sad_goodbye_8.txt, 1826, 8
053c_kristy_for_president_9.txt, 1561, 9
016c_jessis_secret_language_11.txt, 1696, 11
056c_keep_out_claudia_2.txt, 3183, 2
123c_claudias_big_party_10.txt, 1764, 10
098c_dawn_and_too_many_sitters_6.txt, 1626, 6
111c_staceys_secret_friend_11.txt, 1270, 11
012c_claudia_and_the_new_girl_8.txt, 1503, 8
035c_jessis_babysitter_6.txt, 1906, 6
060c_mary_annes_makeover_11.txt, 1046, 11
121c_abby_in_wonderland_10.txt, 2217, 10
099c_staceys_broken_heart_8.txt, 1711, 8
084c_dawn_and_the_school_spirit_war_5.txt, 1763, 5
070c_stacey_and_the_cheerleaders_14.txt, 1851, 14
052c_mary_anne_plus_too_many_babies_4.txt, 1696, 4
129c_kristy_at_bat_15.txt, 1816, 15
m31c_mary_anne_and_the_music_box_secret_14.txt, 1895, 14
085c_claudia_kishi_live_from_wsto_4.txt, 2278, 4
129c_kristy_at_bat_6.txt, 2120, 6
m34c_mary_anne_and_the_haunted_bookstore_1.txt, 2232, 1
054c_mallory_and_the_dream_horse_15.txt, 1377, 15
095c_kristy_plus_bart_equals_questionmark_12.txt, 1036, 12
079c_mary_anne_breaks_the_rules_3.txt, 1716, 3
m13c_mary_anne_and_the_library_mystery_8.txt, 1737, 8
m14c_stacey_and_the_mystery_at_the_mall_5.txt, 1989, 5
046c_mary_anne_misses_logan_5.txt, 1419, 5
031c_dawns_wicked_stepsister_8.txt, 1668, 8
m23c_abby_and_the_secret_society_3.txt, 1922, 3
m12c_dawn_and_the_surfer_ghost_8.txt, 1628, 8
055c_jessis_gold_medal_10.txt, 1488, 10
017c_mary_annes_bad_luck_mystery_4.txt, 1512, 4
101c_claudia_kishi_middle_school_dropout_5.txt, 1934, 5
039c_poor_mallory_14.txt, 1894, 14
080c_mallory_pike_no_1_fan_11.txt, 1130, 11
034c_mary_anne_and_too_many_boys_6.txt, 1907, 6
030c_mary_anne_and_the_great_romance_11.txt, 1754, 11
m16c_claudia_and_the_clue_in_the_photograph_5.txt, 1807, 5
m28c_abby_and_the_mystery_baby_7.txt, 1743, 7
037c_dawn_and_the_older_boy_7.txt, 1255, 7
m05c_mary_anne_and_the_secret_in_the_attic_2.txt, 2055, 2
043c_staceys_emergency_10.txt, 1318, 10
045c_kristy_and_the_baby_parade_2.txt, 2240, 2
078c_claudia_and_crazy_peaches_1.txt, 2660, 1
m08c_jessi_and_the_jewel_thieves_6.txt, 1801, 6
030c_mary_anne_and_the_great_romance_1.txt, 1848, 1
078c_claudia_and_crazy_peaches_15.txt, 1090, 15
032c_kristy_and_the_secret_of_susan_6.txt, 1485, 6
m19c_kristy_and_the_missing_fortune_12.txt, 1871, 12
013c_goodbye_stacey_goodbye_9.txt, 1984, 9
m29c_stacey_and_the_fashion_victim_11.txt, 1948, 11
059c_mallory_hates_boys_and_gym_10.txt, 1555, 10
004c_mary_anne_saves_the_day_13.txt, 1096, 13
m25c_kristy_and_the_middle_school_vandal_3.txt, 3375, 3
105c_stacey_the_math_whiz_14.txt, 1402, 14
006c_kristys_big_day_6.txt, 2193, 6
013c_goodbye_stacey_goodbye_14.txt, 1553, 14
124c_stacey_mcgill_matchmaker_15.txt, 903, 15
065c_staceys_big_crush_15.txt, 1107, 15
105c_stacey_the_math_whiz_4.txt, 1903, 4
128c_claudia_and_the_little_liar_7.txt, 1367, 7
067c_dawns_big_move_11.txt, 1473, 11
m06c_the_mystery_at_claudias_house_5.txt, 1793, 5
055c_jessis_gold_medal_9.txt, 1199, 9
m21c_claudia_and_the_recipe_for_danger_6.txt, 1836, 6
m36c_kristy_and_the_cat_burglar_3.txt, 2216, 3
126c_the_all_new_mallory_pike_14.txt, 2024, 14
077c_dwn_and_whitney_friends_forever_2.txt, 2503, 2
041c_mary_anne_vs_logan_5.txt, 1673, 5
104c_abbys_twin_7.txt, 1317, 7
089c_kristy_and_the_dirty_diapers_5.txt, 1468, 5
026c_claudia_and_the_sad_goodbye_14.txt, 1576, 14
108c_dont_give_up_mallory_3.txt, 2898, 3
006c_kristys_big_day_13.txt, 1574, 13
006c_kristys_big_day_12.txt, 1558, 12
089c_kristy_and_the_dirty_diapers_4.txt, 1630, 4
041c_mary_anne_vs_logan_4.txt, 1493, 4
104c_abbys_twin_6.txt, 1282, 6
108c_dont_give_up_mallory_2.txt, 3080, 2
026c_claudia_and_the_sad_goodbye_15.txt, 1744, 15
m03c_mallory_and_the_ghost_cat_1.txt, 2656, 1
110c_abby_and_the_bad_sport_1.txt, 1982, 1
077c_dwn_and_whitney_friends_forever_3.txt, 2287, 3
126c_the_all_new_mallory_pike_15.txt, 1197, 15
m21c_claudia_and_the_recipe_for_danger_7.txt, 2030, 7
m36c_kristy_and_the_cat_burglar_2.txt, 2288, 2
055c_jessis_gold_medal_8.txt, 1452, 8
m06c_the_mystery_at_claudias_house_4.txt, 2073, 4
067c_dawns_big_move_10.txt, 1200, 10
128c_claudia_and_the_little_liar_6.txt, 1878, 6
m04c_kristy_and_the_missing_child_1.txt, 1748, 1
082c_jessi_and_the_troublemaker_1.txt, 2900, 1
065c_staceys_big_crush_14.txt, 2277, 14
080c_mallory_pike_no_1_fan_1.txt, 2151, 1
105c_stacey_the_math_whiz_5.txt, 1454, 5
013c_goodbye_stacey_goodbye_15.txt, 1693, 15
006c_kristys_big_day_7.txt, 2424, 7
105c_stacey_the_math_whiz_15.txt, 1201, 15
124c_stacey_mcgill_matchmaker_14.txt, 770, 14
m25c_kristy_and_the_middle_school_vandal_2.txt, 1462, 2
004c_mary_anne_saves_the_day_12.txt, 1547, 12
m29c_stacey_and_the_fashion_victim_10.txt, 1944, 10
059c_mallory_hates_boys_and_gym_11.txt, 1954, 11
013c_goodbye_stacey_goodbye_8.txt, 1273, 8
m19c_kristy_and_the_missing_fortune_13.txt, 1941, 13
032c_kristy_and_the_secret_of_susan_7.txt, 1652, 7
078c_claudia_and_crazy_peaches_14.txt, 1574, 14
m08c_jessi_and_the_jewel_thieves_7.txt, 2096, 7
m05c_mary_anne_and_the_secret_in_the_attic_3.txt, 1733, 3
037c_dawn_and_the_older_boy_6.txt, 1980, 6
045c_kristy_and_the_baby_parade_3.txt, 2356, 3
043c_staceys_emergency_11.txt, 1758, 11
034c_mary_anne_and_too_many_boys_7.txt, 1613, 7
080c_mallory_pike_no_1_fan_10.txt, 1501, 10
039c_poor_mallory_15.txt, 1480, 15
m28c_abby_and_the_mystery_baby_6.txt, 1611, 6
030c_mary_anne_and_the_great_romance_10.txt, 1689, 10
m16c_claudia_and_the_clue_in_the_photograph_4.txt, 2144, 4
017c_mary_annes_bad_luck_mystery_5.txt, 1426, 5
101c_claudia_kishi_middle_school_dropout_4.txt, 1777, 4
m12c_dawn_and_the_surfer_ghost_9.txt, 1711, 9
m23c_abby_and_the_secret_society_2.txt, 2509, 2
031c_dawns_wicked_stepsister_9.txt, 1602, 9
055c_jessis_gold_medal_11.txt, 1133, 11
046c_mary_anne_misses_logan_4.txt, 1923, 4
m14c_stacey_and_the_mystery_at_the_mall_4.txt, 2003, 4
m13c_mary_anne_and_the_library_mystery_9.txt, 1889, 9
079c_mary_anne_breaks_the_rules_2.txt, 2082, 2
054c_mallory_and_the_dream_horse_14.txt, 3323, 14
051c_staceys_ex_best_friend_1.txt, 1909, 1
095c_kristy_plus_bart_equals_questionmark_13.txt, 1420, 13
129c_kristy_at_bat_7.txt, 1923, 7
085c_claudia_kishi_live_from_wsto_5.txt, 1475, 5
m31c_mary_anne_and_the_music_box_secret_15.txt, 2089, 15
095c_kristy_plus_bart_equals_questionmark_1.txt, 2188, 1
052c_mary_anne_plus_too_many_babies_5.txt, 1771, 5
129c_kristy_at_bat_14.txt, 2035, 14
070c_stacey_and_the_cheerleaders_15.txt, 808, 15
099c_staceys_broken_heart_9.txt, 1191, 9
084c_dawn_and_the_school_spirit_war_4.txt, 1334, 4
121c_abby_in_wonderland_11.txt, 1736, 11
060c_mary_annes_makeover_10.txt, 1805, 10
035c_jessis_babysitter_7.txt, 1631, 7
098c_dawn_and_too_many_sitters_7.txt, 1249, 7
012c_claudia_and_the_new_girl_9.txt, 1645, 9
111c_staceys_secret_friend_10.txt, 871, 10
123c_claudias_big_party_11.txt, 1654, 11
056c_keep_out_claudia_3.txt, 1251, 3
123c_claudias_big_party_13.txt, 1898, 13
056c_keep_out_claudia_1.txt, 1799, 1
m02c_beware_dawn_14.txt, 1948, 14
111c_staceys_secret_friend_12.txt, 965, 12
098c_dawn_and_too_many_sitters_5.txt, 1628, 5
104c_abbys_twin_15.txt, 1521, 15
005c_dawn_and_the_impossible_three_9.txt, 2046, 9
035c_jessis_babysitter_5.txt, 1437, 5
060c_mary_annes_makeover_12.txt, 2104, 12
121c_abby_in_wonderland_13.txt, 1326, 13
082c_jessi_and_the_troublemaker_14.txt, 1095, 14
serr3c_shannons_story_9.txt, 1036, 9
001c_kristys_great_idea_9.txt, 1196, 9
084c_dawn_and_the_school_spirit_war_6.txt, 2445, 6
095c_kristy_plus_bart_equals_questionmark_3.txt, 2390, 3
085c_claudia_kishi_live_from_wsto_7.txt, 1640, 7
129c_kristy_at_bat_5.txt, 1444, 5
052c_mary_anne_plus_too_many_babies_7.txt, 1640, 7
m14c_stacey_and_the_mystery_at_the_mall_6.txt, 2055, 6
083c_stacey_vs_the_bsc_9.txt, 1586, 9
046c_mary_anne_misses_logan_6.txt, 1529, 6
095c_kristy_plus_bart_equals_questionmark_11.txt, 1475, 11
051c_staceys_ex_best_friend_3.txt, 1538, 3
m34c_mary_anne_and_the_haunted_bookstore_2.txt, 3809, 2
009c_the_ghost_at_dawns_house_8.txt, 2342, 8
101c_claudia_kishi_middle_school_dropout_6.txt, 1917, 6
017c_mary_annes_bad_luck_mystery_7.txt, 1396, 7
055c_jessis_gold_medal_13.txt, 2592, 13
043c_staceys_emergency_13.txt, 1742, 13
045c_kristy_and_the_baby_parade_1.txt, 1934, 1
037c_dawn_and_the_older_boy_4.txt, 1984, 4
m05c_mary_anne_and_the_secret_in_the_attic_1.txt, 2087, 1
008c_boy_crazy_stacey_8.txt, 1609, 8
m16c_claudia_and_the_clue_in_the_photograph_6.txt, 1883, 6
030c_mary_anne_and_the_great_romance_12.txt, 1808, 12
m28c_abby_and_the_mystery_baby_4.txt, 2239, 4
080c_mallory_pike_no_1_fan_12.txt, 2376, 12
034c_mary_anne_and_too_many_boys_5.txt, 1654, 5
123c_claudias_big_party_8.txt, 770, 8
m08c_jessi_and_the_jewel_thieves_5.txt, 1839, 5
030c_mary_anne_and_the_great_romance_2.txt, 2511, 2
m12c_dawn_and_the_surfer_ghost_14.txt, 1936, 14
078c_claudia_and_crazy_peaches_2.txt, 2715, 2
106c_claudia_queen_of_the_seventh_grade_14.txt, 1201, 14
059c_mallory_hates_boys_and_gym_13.txt, 1413, 13
m29c_stacey_and_the_fashion_victim_12.txt, 1552, 12
m19c_kristy_and_the_missing_fortune_11.txt, 1526, 11
032c_kristy_and_the_secret_of_susan_5.txt, 1398, 5
m17c_dawn_and_the_halloween_mystery_14.txt, 1994, 14
004c_mary_anne_saves_the_day_10.txt, 1810, 10
105c_stacey_the_math_whiz_7.txt, 1240, 7
080c_mallory_pike_no_1_fan_3.txt, 2123, 3
m21c_claudia_and_the_recipe_for_danger_15.txt, 1757, 15
006c_kristys_big_day_5.txt, 2268, 5
m03c_mallory_and_the_ghost_cat_14.txt, 2194, 14
serr1c_logans_story_14.txt, 2144, 14
067c_dawns_big_move_12.txt, 1873, 12
049c_claudia_and_the_genius_of_elm_street_9.txt, 1168, 9
082c_jessi_and_the_troublemaker_3.txt, 1511, 3
m04c_kristy_and_the_missing_child_3.txt, 1993, 3
014c_hello_mallory_9.txt, 1631, 9
128c_claudia_and_the_little_liar_4.txt, 985, 4
022c_jessi_ramsey_petsitter_14.txt, 1448, 14
m06c_the_mystery_at_claudias_house_6.txt, 1931, 6
m21c_claudia_and_the_recipe_for_danger_5.txt, 1636, 5
077c_dwn_and_whitney_friends_forever_1.txt, 2311, 1
110c_abby_and_the_bad_sport_3.txt, 1892, 3
m28c_abby_and_the_mystery_baby_14.txt, 1800, 14
002c_claudia_and_the_phantom_phone_calls_8.txt, 1738, 8
m07c_dawn_and_the_disappearing_dogs_9.txt, 1979, 9
006c_kristys_big_day_10.txt, 1727, 10
m03c_mallory_and_the_ghost_cat_3.txt, 2070, 3
m31c_mary_anne_and_the_music_box_secret_9.txt, 1669, 9
104c_abbys_twin_4.txt, 1099, 4
041c_mary_anne_vs_logan_6.txt, 1537, 6
m11c_claudia_and_the_mystery_at_the_museum_9.txt, 1587, 9
089c_kristy_and_the_dirty_diapers_6.txt, 2377, 6
m31c_mary_anne_and_the_music_box_secret_8.txt, 1986, 8
108c_dont_give_up_mallory_1.txt, 1724, 1
m03c_mallory_and_the_ghost_cat_2.txt, 2422, 2
089c_kristy_and_the_dirty_diapers_7.txt, 1752, 7
m11c_claudia_and_the_mystery_at_the_museum_8.txt, 1436, 8
104c_abbys_twin_5.txt, 2686, 5
041c_mary_anne_vs_logan_7.txt, 1039, 7
m07c_dawn_and_the_disappearing_dogs_8.txt, 1373, 8
006c_kristys_big_day_11.txt, 2119, 11
m28c_abby_and_the_mystery_baby_15.txt, 1450, 15
002c_claudia_and_the_phantom_phone_calls_9.txt, 1979, 9
m36c_kristy_and_the_cat_burglar_1.txt, 1797, 1
m21c_claudia_and_the_recipe_for_danger_4.txt, 1581, 4
110c_abby_and_the_bad_sport_2.txt, 2238, 2
m06c_the_mystery_at_claudias_house_7.txt, 1597, 7
022c_jessi_ramsey_petsitter_15.txt, 1678, 15
128c_claudia_and_the_little_liar_5.txt, 1445, 5
014c_hello_mallory_8.txt, 1549, 8
m04c_kristy_and_the_missing_child_2.txt, 2411, 2
082c_jessi_and_the_troublemaker_2.txt, 3499, 2
049c_claudia_and_the_genius_of_elm_street_8.txt, 1438, 8
067c_dawns_big_move_13.txt, 1675, 13
serr1c_logans_story_15.txt, 1699, 15
m03c_mallory_and_the_ghost_cat_15.txt, 1810, 15
m21c_claudia_and_the_recipe_for_danger_14.txt, 1898, 14
006c_kristys_big_day_4.txt, 1896, 4
080c_mallory_pike_no_1_fan_2.txt, 2948, 2
105c_stacey_the_math_whiz_6.txt, 2103, 6
004c_mary_anne_saves_the_day_11.txt, 2921, 11
m17c_dawn_and_the_halloween_mystery_15.txt, 1933, 15
m25c_kristy_and_the_middle_school_vandal_1.txt, 2149, 1
032c_kristy_and_the_secret_of_susan_4.txt, 2288, 4
m19c_kristy_and_the_missing_fortune_10.txt, 1959, 10
059c_mallory_hates_boys_and_gym_12.txt, 1678, 12
106c_claudia_queen_of_the_seventh_grade_15.txt, 1488, 15
m29c_stacey_and_the_fashion_victim_13.txt, 1850, 13
030c_mary_anne_and_the_great_romance_3.txt, 2022, 3
m08c_jessi_and_the_jewel_thieves_4.txt, 2014, 4
078c_claudia_and_crazy_peaches_3.txt, 1557, 3
m12c_dawn_and_the_surfer_ghost_15.txt, 1406, 15
123c_claudias_big_party_9.txt, 1403, 9
m28c_abby_and_the_mystery_baby_5.txt, 1691, 5
m16c_claudia_and_the_clue_in_the_photograph_7.txt, 1962, 7
030c_mary_anne_and_the_great_romance_13.txt, 1285, 13
034c_mary_anne_and_too_many_boys_4.txt, 1479, 4
080c_mallory_pike_no_1_fan_13.txt, 1672, 13
043c_staceys_emergency_12.txt, 1934, 12
008c_boy_crazy_stacey_9.txt, 1543, 9
037c_dawn_and_the_older_boy_5.txt, 1640, 5
055c_jessis_gold_medal_12.txt, 2310, 12
m23c_abby_and_the_secret_society_1.txt, 1899, 1
101c_claudia_kishi_middle_school_dropout_7.txt, 1786, 7
009c_the_ghost_at_dawns_house_9.txt, 2637, 9
017c_mary_annes_bad_luck_mystery_6.txt, 1815, 6
051c_staceys_ex_best_friend_2.txt, 2995, 2
095c_kristy_plus_bart_equals_questionmark_10.txt, 1561, 10
m34c_mary_anne_and_the_haunted_bookstore_3.txt, 3625, 3
046c_mary_anne_misses_logan_7.txt, 1851, 7
083c_stacey_vs_the_bsc_8.txt, 1116, 8
m14c_stacey_and_the_mystery_at_the_mall_7.txt, 1919, 7
079c_mary_anne_breaks_the_rules_1.txt, 2371, 1
052c_mary_anne_plus_too_many_babies_6.txt, 1451, 6
095c_kristy_plus_bart_equals_questionmark_2.txt, 1420, 2
129c_kristy_at_bat_4.txt, 1964, 4
085c_claudia_kishi_live_from_wsto_6.txt, 1132, 6
084c_dawn_and_the_school_spirit_war_7.txt, 1738, 7
001c_kristys_great_idea_8.txt, 1266, 8
serr3c_shannons_story_8.txt, 1595, 8
082c_jessi_and_the_troublemaker_15.txt, 726, 15
005c_dawn_and_the_impossible_three_8.txt, 2322, 8
104c_abbys_twin_14.txt, 990, 14
121c_abby_in_wonderland_12.txt, 1259, 12
060c_mary_annes_makeover_13.txt, 3064, 13
035c_jessis_babysitter_4.txt, 1528, 4
111c_staceys_secret_friend_13.txt, 2171, 13
098c_dawn_and_too_many_sitters_4.txt, 1586, 4
m02c_beware_dawn_15.txt, 1327, 15
123c_claudias_big_party_12.txt, 1575, 12
007c_claudia_and_mean_jeanine_9.txt, 1719, 9
m02c_beware_dawn_11.txt, 1651, 11
056c_keep_out_claudia_4.txt, 1356, 4
070c_stacey_and_the_cheerleaders_12.txt, 1106, 12
084c_dawn_and_the_school_spirit_war_3.txt, 1745, 3
004c_mary_anne_saves_the_day_9.txt, 2063, 9
104c_abbys_twin_10.txt, 1585, 10
082c_jessi_and_the_troublemaker_11.txt, 983, 11
060c_mary_annes_makeover_9.txt, 1504, 9
079c_mary_anne_breaks_the_rules_5.txt, 1298, 5
m14c_stacey_and_the_mystery_at_the_mall_3.txt, 2036, 3
046c_mary_anne_misses_logan_3.txt, 1700, 3
m34c_mary_anne_and_the_haunted_bookstore_7.txt, 1163, 7
054c_mallory_and_the_dream_horse_13.txt, 1935, 13
095c_kristy_plus_bart_equals_questionmark_14.txt, 1748, 14
051c_staceys_ex_best_friend_6.txt, 1487, 6
m31c_mary_anne_and_the_music_box_secret_12.txt, 1944, 12
085c_claudia_kishi_live_from_wsto_2.txt, 2685, 2
095c_kristy_plus_bart_equals_questionmark_6.txt, 1521, 6
052c_mary_anne_plus_too_many_babies_2.txt, 2673, 2
129c_kristy_at_bat_13.txt, 1717, 13
037c_dawn_and_the_older_boy_1.txt, 2052, 1
m05c_mary_anne_and_the_secret_in_the_attic_4.txt, 2046, 4
045c_kristy_and_the_baby_parade_4.txt, 1884, 4
039c_poor_mallory_12.txt, 1521, 12
m16c_claudia_and_the_clue_in_the_photograph_3.txt, 1839, 3
m28c_abby_and_the_mystery_baby_1.txt, 1924, 1
017c_mary_annes_bad_luck_mystery_2.txt, 2288, 2
101c_claudia_kishi_middle_school_dropout_3.txt, 2066, 3
m23c_abby_and_the_secret_society_5.txt, 1889, 5
106c_claudia_queen_of_the_seventh_grade_11.txt, 1238, 11
091c_claudia_and_the_first_thanksgiving_8.txt, 1204, 8
m19c_kristy_and_the_missing_fortune_14.txt, 1992, 14
078c_claudia_and_crazy_peaches_13.txt, 1251, 13
m12c_dawn_and_the_surfer_ghost_11.txt, 1493, 11
078c_claudia_and_crazy_peaches_7.txt, 1997, 7
030c_mary_anne_and_the_great_romance_7.txt, 1722, 7
065c_staceys_big_crush_13.txt, 1629, 13
093c_mary_anne_and_the_memory_garden_9.txt, 1721, 9
105c_stacey_the_math_whiz_2.txt, 2745, 2
080c_mallory_pike_no_1_fan_6.txt, 2564, 6
105c_stacey_the_math_whiz_12.txt, 1768, 12
013c_goodbye_stacey_goodbye_12.txt, 1924, 12
124c_stacey_mcgill_matchmaker_13.txt, 1173, 13
m21c_claudia_and_the_recipe_for_danger_10.txt, 1840, 10
117c_claudia_and_the_terrible_truth_8.txt, 1538, 8
m25c_kristy_and_the_middle_school_vandal_5.txt, 2091, 5
m17c_dawn_and_the_halloween_mystery_11.txt, 1729, 11
004c_mary_anne_saves_the_day_15.txt, 1491, 15
022c_jessi_ramsey_petsitter_11.txt, 1823, 11
m06c_the_mystery_at_claudias_house_3.txt, 1728, 3
serr1c_logans_story_11.txt, 1259, 11
m03c_mallory_and_the_ghost_cat_11.txt, 2114, 11
m04c_kristy_and_the_missing_child_6.txt, 1720, 6
082c_jessi_and_the_troublemaker_6.txt, 1376, 6
128c_claudia_and_the_little_liar_1.txt, 1843, 1
088c_farewell_dawn_9.txt, 1554, 9
041c_mary_anne_vs_logan_3.txt, 2241, 3
104c_abbys_twin_1.txt, 1902, 1
089c_kristy_and_the_dirty_diapers_3.txt, 2307, 3
m03c_mallory_and_the_ghost_cat_6.txt, 2324, 6
026c_claudia_and_the_sad_goodbye_12.txt, 1708, 12
108c_dont_give_up_mallory_5.txt, 1804, 5
110c_abby_and_the_bad_sport_6.txt, 1036, 6
m36c_kristy_and_the_cat_burglar_5.txt, 1816, 5
077c_dwn_and_whitney_friends_forever_4.txt, 2379, 4
126c_the_all_new_mallory_pike_12.txt, 1913, 12
m28c_abby_and_the_mystery_baby_11.txt, 1488, 11
m28c_abby_and_the_mystery_baby_10.txt, 1882, 10
110c_abby_and_the_bad_sport_7.txt, 1830, 7
126c_the_all_new_mallory_pike_13.txt, 1651, 13
077c_dwn_and_whitney_friends_forever_5.txt, 1517, 5
m21c_claudia_and_the_recipe_for_danger_1.txt, 2026, 1
m36c_kristy_and_the_cat_burglar_4.txt, 1627, 4
089c_kristy_and_the_dirty_diapers_2.txt, 2526, 2
041c_mary_anne_vs_logan_2.txt, 2933, 2
108c_dont_give_up_mallory_4.txt, 2050, 4
026c_claudia_and_the_sad_goodbye_13.txt, 1943, 13
m03c_mallory_and_the_ghost_cat_7.txt, 2234, 7
006c_kristys_big_day_14.txt, 1312, 14
088c_farewell_dawn_8.txt, 1112, 8
082c_jessi_and_the_troublemaker_7.txt, 2044, 7
m04c_kristy_and_the_missing_child_7.txt, 1259, 7
m03c_mallory_and_the_ghost_cat_10.txt, 2558, 10
serr1c_logans_story_10.txt, 1029, 10
m06c_the_mystery_at_claudias_house_2.txt, 2029, 2
022c_jessi_ramsey_petsitter_10.txt, 1664, 10
004c_mary_anne_saves_the_day_14.txt, 3412, 14
117c_claudia_and_the_terrible_truth_9.txt, 1475, 9
m17c_dawn_and_the_halloween_mystery_10.txt, 1861, 10
m25c_kristy_and_the_middle_school_vandal_4.txt, 1834, 4
013c_goodbye_stacey_goodbye_13.txt, 1794, 13
006c_kristys_big_day_1.txt, 1851, 1
105c_stacey_the_math_whiz_13.txt, 1225, 13
m21c_claudia_and_the_recipe_for_danger_11.txt, 1770, 11
124c_stacey_mcgill_matchmaker_12.txt, 1228, 12
093c_mary_anne_and_the_memory_garden_8.txt, 2054, 8
065c_staceys_big_crush_12.txt, 1369, 12
080c_mallory_pike_no_1_fan_7.txt, 1890, 7
105c_stacey_the_math_whiz_3.txt, 1997, 3
078c_claudia_and_crazy_peaches_6.txt, 1516, 6
m12c_dawn_and_the_surfer_ghost_10.txt, 1986, 10
030c_mary_anne_and_the_great_romance_6.txt, 1639, 6
m08c_jessi_and_the_jewel_thieves_1.txt, 2026, 1
078c_claudia_and_crazy_peaches_12.txt, 1272, 12
091c_claudia_and_the_first_thanksgiving_9.txt, 1376, 9
032c_kristy_and_the_secret_of_susan_1.txt, 1790, 1
m19c_kristy_and_the_missing_fortune_15.txt, 1827, 15
106c_claudia_queen_of_the_seventh_grade_10.txt, 1409, 10
m23c_abby_and_the_secret_society_4.txt, 2167, 4
017c_mary_annes_bad_luck_mystery_3.txt, 1508, 3
101c_claudia_kishi_middle_school_dropout_2.txt, 2280, 2
039c_poor_mallory_13.txt, 1447, 13
034c_mary_anne_and_too_many_boys_1.txt, 2653, 1
m16c_claudia_and_the_clue_in_the_photograph_2.txt, 2970, 2
m05c_mary_anne_and_the_secret_in_the_attic_5.txt, 1604, 5
045c_kristy_and_the_baby_parade_5.txt, 1892, 5
052c_mary_anne_plus_too_many_babies_3.txt, 1946, 3
129c_kristy_at_bat_12.txt, 1759, 12
085c_claudia_kishi_live_from_wsto_3.txt, 1696, 3
129c_kristy_at_bat_1.txt, 1967, 1
m31c_mary_anne_and_the_music_box_secret_13.txt, 1805, 13
095c_kristy_plus_bart_equals_questionmark_7.txt, 1688, 7
054c_mallory_and_the_dream_horse_12.txt, 1414, 12
m34c_mary_anne_and_the_haunted_bookstore_6.txt, 3059, 6
051c_staceys_ex_best_friend_7.txt, 1418, 7
095c_kristy_plus_bart_equals_questionmark_15.txt, 1315, 15
060c_mary_annes_makeover_8.txt, 1118, 8
046c_mary_anne_misses_logan_2.txt, 2346, 2
m14c_stacey_and_the_mystery_at_the_mall_2.txt, 2293, 2
079c_mary_anne_breaks_the_rules_4.txt, 1535, 4
082c_jessi_and_the_troublemaker_10.txt, 1059, 10
035c_jessis_babysitter_1.txt, 1755, 1
104c_abbys_twin_11.txt, 1510, 11
004c_mary_anne_saves_the_day_8.txt, 1758, 8
084c_dawn_and_the_school_spirit_war_2.txt, 3168, 2
070c_stacey_and_the_cheerleaders_13.txt, 1226, 13
m02c_beware_dawn_10.txt, 1690, 10
056c_keep_out_claudia_5.txt, 1613, 5
007c_claudia_and_mean_jeanine_8.txt, 1679, 8
098c_dawn_and_too_many_sitters_1.txt, 1668, 1
111c_staceys_secret_friend_14.txt, 1118, 14
112c_kristy_and_the_sister_war_8.txt, 1269, 8
098c_dawn_and_too_many_sitters_3.txt, 2930, 3
131c_the_fire_at_mary_annes_house_8.txt, 1903, 8
056c_keep_out_claudia_7.txt, 1355, 7
m02c_beware_dawn_12.txt, 1758, 12
123c_claudias_big_party_15.txt, 932, 15
070c_stacey_and_the_cheerleaders_11.txt, 1502, 11
082c_jessi_and_the_troublemaker_12.txt, 1238, 12
104c_abbys_twin_13.txt, 955, 13
035c_jessis_babysitter_3.txt, 1797, 3
121c_abby_in_wonderland_15.txt, 894, 15
060c_mary_annes_makeover_14.txt, 1223, 14
051c_staceys_ex_best_friend_5.txt, 1108, 5
m34c_mary_anne_and_the_haunted_bookstore_4.txt, 4945, 4
054c_mallory_and_the_dream_horse_10.txt, 1924, 10
079c_mary_anne_breaks_the_rules_6.txt, 1102, 6
129c_kristy_at_bat_10.txt, 1688, 10
052c_mary_anne_plus_too_many_babies_1.txt, 1584, 1
070c_stacey_and_the_cheerleaders_8.txt, 2067, 8
095c_kristy_plus_bart_equals_questionmark_5.txt, 1381, 5
m31c_mary_anne_and_the_music_box_secret_11.txt, 1793, 11
129c_kristy_at_bat_3.txt, 1768, 3
085c_claudia_kishi_live_from_wsto_1.txt, 2088, 1
m24c_mary_anne_and_the_silent_witness_9.txt, 1678, 9
030c_mary_anne_and_the_great_romance_14.txt, 1906, 14
m28c_abby_and_the_mystery_baby_2.txt, 2122, 2
034c_mary_anne_and_too_many_boys_3.txt, 1759, 3
039c_poor_mallory_11.txt, 1430, 11
080c_mallory_pike_no_1_fan_14.txt, 1808, 14
043c_staceys_emergency_15.txt, 1795, 15
045c_kristy_and_the_baby_parade_7.txt, 1358, 7
037c_dawn_and_the_older_boy_2.txt, 1999, 2
m05c_mary_anne_and_the_secret_in_the_attic_7.txt, 2044, 7
055c_jessis_gold_medal_15.txt, 1240, 15
024c_kristy_and_the_mothers_day_surprise_9.txt, 1716, 9
m23c_abby_and_the_secret_society_6.txt, 1903, 6
017c_mary_annes_bad_luck_mystery_1.txt, 2244, 1
032c_kristy_and_the_secret_of_susan_3.txt, 1988, 3
106c_claudia_queen_of_the_seventh_grade_12.txt, 1473, 12
059c_mallory_hates_boys_and_gym_15.txt, 1181, 15
m29c_stacey_and_the_fashion_victim_14.txt, 1980, 14
m08c_jessi_and_the_jewel_thieves_3.txt, 1981, 3
030c_mary_anne_and_the_great_romance_4.txt, 1441, 4
066c_maid_mary_anne_8.txt, 1455, 8
m12c_dawn_and_the_surfer_ghost_12.txt, 1725, 12
078c_claudia_and_crazy_peaches_4.txt, 1699, 4
078c_claudia_and_crazy_peaches_10.txt, 1650, 10
m33c_stacey_and_the_stolen_hearts_8.txt, 1648, 8
124c_stacey_mcgill_matchmaker_10.txt, 1104, 10
m21c_claudia_and_the_recipe_for_danger_13.txt, 1585, 13
105c_stacey_the_math_whiz_11.txt, 1503, 11
006c_kristys_big_day_3.txt, 2071, 3
013c_goodbye_stacey_goodbye_11.txt, 1418, 11
105c_stacey_the_math_whiz_1.txt, 1677, 1
080c_mallory_pike_no_1_fan_5.txt, 1450, 5
065c_staceys_big_crush_10.txt, 1248, 10
m25c_kristy_and_the_middle_school_vandal_6.txt, 2148, 6
m17c_dawn_and_the_halloween_mystery_12.txt, 1980, 12
022c_jessi_ramsey_petsitter_12.txt, 2007, 12
m04c_kristy_and_the_missing_child_5.txt, 1772, 5
082c_jessi_and_the_troublemaker_5.txt, 1504, 5
128c_claudia_and_the_little_liar_2.txt, 2899, 2
serr1c_logans_story_12.txt, 1315, 12
m03c_mallory_and_the_ghost_cat_12.txt, 2277, 12
067c_dawns_big_move_14.txt, 1649, 14
m03c_mallory_and_the_ghost_cat_5.txt, 2236, 5
059c_mallory_hates_boys_and_gym_8.txt, 1466, 8
026c_claudia_and_the_sad_goodbye_11.txt, 1746, 11
108c_dont_give_up_mallory_6.txt, 1914, 6
104c_abbys_twin_2.txt, 3233, 2
m28c_abby_and_the_mystery_baby_12.txt, 1832, 12
063c_claudias_freind_friend_8.txt, 1963, 8
m36c_kristy_and_the_cat_burglar_6.txt, 1787, 6
m21c_claudia_and_the_recipe_for_danger_3.txt, 2013, 3
126c_the_all_new_mallory_pike_11.txt, 1940, 11
077c_dwn_and_whitney_friends_forever_7.txt, 2102, 7
110c_abby_and_the_bad_sport_5.txt, 880, 5
077c_dwn_and_whitney_friends_forever_6.txt, 2376, 6
126c_the_all_new_mallory_pike_10.txt, 1739, 10
m36c_kristy_and_the_cat_burglar_7.txt, 2320, 7
m21c_claudia_and_the_recipe_for_danger_2.txt, 2537, 2
110c_abby_and_the_bad_sport_4.txt, 1291, 4
m28c_abby_and_the_mystery_baby_13.txt, 1788, 13
063c_claudias_freind_friend_9.txt, 1490, 9
108c_dont_give_up_mallory_7.txt, 1636, 7
026c_claudia_and_the_sad_goodbye_10.txt, 1568, 10
m03c_mallory_and_the_ghost_cat_4.txt, 2377, 4
059c_mallory_hates_boys_and_gym_9.txt, 1512, 9
089c_kristy_and_the_dirty_diapers_1.txt, 2839, 1
104c_abbys_twin_3.txt, 1057, 3
041c_mary_anne_vs_logan_1.txt, 1830, 1
067c_dawns_big_move_15.txt, 1366, 15
m03c_mallory_and_the_ghost_cat_13.txt, 2206, 13
serr1c_logans_story_13.txt, 1134, 13
128c_claudia_and_the_little_liar_3.txt, 1662, 3
082c_jessi_and_the_troublemaker_4.txt, 1326, 4
m04c_kristy_and_the_missing_child_4.txt, 1734, 4
022c_jessi_ramsey_petsitter_13.txt, 1981, 13
m06c_the_mystery_at_claudias_house_1.txt, 2039, 1
m17c_dawn_and_the_halloween_mystery_13.txt, 1906, 13
m25c_kristy_and_the_middle_school_vandal_7.txt, 1219, 7
080c_mallory_pike_no_1_fan_4.txt, 1892, 4
065c_staceys_big_crush_11.txt, 1225, 11
m21c_claudia_and_the_recipe_for_danger_12.txt, 1814, 12
124c_stacey_mcgill_matchmaker_11.txt, 1157, 11
013c_goodbye_stacey_goodbye_10.txt, 1820, 10
006c_kristys_big_day_2.txt, 2101, 2
105c_stacey_the_math_whiz_10.txt, 1873, 10
m33c_stacey_and_the_stolen_hearts_9.txt, 1850, 9
078c_claudia_and_crazy_peaches_11.txt, 1452, 11
030c_mary_anne_and_the_great_romance_5.txt, 1546, 5
m08c_jessi_and_the_jewel_thieves_2.txt, 2650, 2
078c_claudia_and_crazy_peaches_5.txt, 2169, 5
m12c_dawn_and_the_surfer_ghost_13.txt, 1921, 13
066c_maid_mary_anne_9.txt, 1714, 9
059c_mallory_hates_boys_and_gym_14.txt, 1924, 14
106c_claudia_queen_of_the_seventh_grade_13.txt, 1869, 13
m29c_stacey_and_the_fashion_victim_15.txt, 1452, 15
032c_kristy_and_the_secret_of_susan_2.txt, 2499, 2
101c_claudia_kishi_middle_school_dropout_1.txt, 1939, 1
055c_jessis_gold_medal_14.txt, 1926, 14
m23c_abby_and_the_secret_society_7.txt, 1826, 7
024c_kristy_and_the_mothers_day_surprise_8.txt, 1728, 8
045c_kristy_and_the_baby_parade_6.txt, 2008, 6
043c_staceys_emergency_14.txt, 1517, 14
m05c_mary_anne_and_the_secret_in_the_attic_6.txt, 2006, 6
037c_dawn_and_the_older_boy_3.txt, 2047, 3
m28c_abby_and_the_mystery_baby_3.txt, 2194, 3
m16c_claudia_and_the_clue_in_the_photograph_1.txt, 2400, 1
030c_mary_anne_and_the_great_romance_15.txt, 1876, 15
080c_mallory_pike_no_1_fan_15.txt, 1050, 15
039c_poor_mallory_10.txt, 1475, 10
034c_mary_anne_and_too_many_boys_2.txt, 2146, 2
095c_kristy_plus_bart_equals_questionmark_4.txt, 1494, 4
m24c_mary_anne_and_the_silent_witness_8.txt, 1784, 8
129c_kristy_at_bat_2.txt, 2453, 2
m31c_mary_anne_and_the_music_box_secret_10.txt, 1559, 10
129c_kristy_at_bat_11.txt, 1772, 11
070c_stacey_and_the_cheerleaders_9.txt, 976, 9
046c_mary_anne_misses_logan_1.txt, 1760, 1
m14c_stacey_and_the_mystery_at_the_mall_1.txt, 2254, 1
079c_mary_anne_breaks_the_rules_7.txt, 2041, 7
051c_staceys_ex_best_friend_4.txt, 1871, 4
054c_mallory_and_the_dream_horse_11.txt, 1638, 11
m34c_mary_anne_and_the_haunted_bookstore_5.txt, 2295, 5
104c_abbys_twin_12.txt, 1927, 12
121c_abby_in_wonderland_14.txt, 792, 14
035c_jessis_babysitter_2.txt, 2226, 2
082c_jessi_and_the_troublemaker_13.txt, 1571, 13
070c_stacey_and_the_cheerleaders_10.txt, 1616, 10
084c_dawn_and_the_school_spirit_war_1.txt, 1725, 1
123c_claudias_big_party_14.txt, 1677, 14
056c_keep_out_claudia_6.txt, 1232, 6
m02c_beware_dawn_13.txt, 2023, 13
111c_staceys_secret_friend_15.txt, 1630, 15
131c_the_fire_at_mary_annes_house_9.txt, 1830, 9
098c_dawn_and_too_many_sitters_2.txt, 2039, 2
112c_kristy_and_the_sister_war_9.txt, 2007, 9
###Markdown
I put the output files into Tableau (a Gantt visualization, configuring length as a dimension under “rows”) after running the code on the full text of all the series, and on the chapters of the main and mystery series (remember, each of those books has 15 chapters).

The books range from around 12,600 words (California Diaries: *Amalia 3*, which is shorter than this DSC book!) to nearly 45,000 words (Super Mystery 1: *Baby-Sitters’ Haunted House*). On the chapter level, there’s not a ton of variation in word length between chapters, though chapter 15 tends to be a bit shorter, and chapter 2 tends to be longer -- there are a lot of tropes to pack in! But if we’re using Euclidean distance to compare even just the chapter 2s, chapter 2 of BSC 75: *Jessi’s Horrible Prank* is 1,266 words and chapter 2 of BSC 99: *Stacey’s Broken Heart* is 4,293 words. That alone is going to lead to a big difference in the word-count values.

When I first started playing with these text-comparison metrics (before taking the care to properly clean the data and ensure there weren’t problems with my chapter-separating code), I tried Euclidean distance, and was fascinated by the apparent similarity of chapter 2 in the first Baby-Sitters Club book and a chapter in a California Diaries book. “What,” I wondered, “does wholesome *Kristy’s Great Idea* have to do with salacious *California Diaries?*” I laughed out loud when I opened the text files containing the text of those chapters, and immediately saw the answer: what they had in common was data cleaning problems that led to their truncation after a sentence or two. As a Choose Your Own Adventure book might put it, *“You realize that your ‘findings’ are nothing more than your own mistakes in preparing your data set. You sigh wearily. The end.”* Hopefully you, like childhood me, left a bookmark at that last decision point you were unsure of, and you can go back and make a different choice. But even if you have to start over from the beginning, you can always try again when doing DH.

Cosine similarity

Cosine similarity offers a workaround for the text-scale problems we encountered with Euclidean distance. Instead of trying to measure the **distance** between two points (which can be thrown off by issues of magnitude, when one point represents a text that’s much longer than the other), it measures the cosine of the angle between them and calls it *similarity*. You may have also filed “cosine” away under “high school math I hoped to never see again”, but don’t panic! As trigonometry starts to flood back at you, you might find yourself wondering, “Why cosine similarity, and not any of its little friends, like sine or tangent?” After all, wouldn’t it be fun to burst into the chorus of Ace of Base’s “I Saw the Sine” whenever you worked out the text similarity?

Mostly it works out to a matter of numerical convenience in setting up the framing for measuring similarity: if the angle between two texts’ word-count vectors is 0, then any difference between them is just one of *magnitude* (which we don’t worry about with cosine similarity), and you can say the texts are extremely similar. If the angle is 90 degrees, which is as far apart as you can get while staying in all-positive numbers (we don’t have any negative word counts), then there’s a huge difference. Cos(0°) = 1, and cos(90°) = 0, so with cosine similarity, you want **larger** numbers for more similarity.
Which is the opposite of Euclidean distance, where you want **smaller** numbers for more similarity (because using that measure, 0 means “there’s no distance between these things and they are the same”). I’ve screwed this up more than once, getting excited about large numbers when using an algorithm where you want smaller numbers, and vice versa. Always double-check the scale you’re using and what counts as “similar” if you’re not sure. Or, as you might find in a Choose Your Own Adventure book: *“The tweet was written, delayed only by the search for the perfect celebratory emoji to decorate its conclusion, when a small voice echoes in the back of your head. ‘Should these be large numbers? What algorithm did you use?’ You pause and think for a moment… then sigh, delete the tweet, and return to your code to start over. The end.”* But before you start writing “**EUCLIDEAN = SMALL, COSINE = BIG**” in sharpie on a sticky note and putting it on your wall with extra tape for reinforcement, the people who write Python packages realized it’s going to be a problem if they write a package where you can easily swap in different metrics, but some of them use large numbers for similarity, while others use small numbers. So what you’ll see in the Jupyter notebook is that it’s calculating cosine *distance* -- which is just (1 - cosine similarity). After that bit of subtraction, “exactly the same” has a value of 0, just like you’d get in Euclidean distance. We’re still not exactly comparing apples to apples here: you’re going to get much bigger numbers when calculating Euclidean distance than when calculating cosine distance, which makes sense. Euclidean distance is a kind of actual distance. Cosine distance is still just an angle between two vectors, which looks like a percentage, with a bit of manipulation to make “identical” work out to 0. The numbers are a lot smaller, and their range is a lot more compressed (from 0 to .99 for cosine distance, vs. 0 to 650 in our data set for Euclidean distance). The Euclidean distance score can be more nuanced, but this is a situation where nuance is a bad thing. I’m not doing this particular analysis to find precisely how different the texts are from each other -- which is a good thing, because I know the variable length is a distorting factor that would prevent me from getting to that perfect number anyway. What I’m looking for is book pairings that stand out as noteworthy, either for their similarity or dissimilarity. And the compressed range of possible values for cosine distance makes those differences more visible. Running the Euclidean distance calculation didn't do anything to the results of our count vectorizer, so if you're working through this book in order, you should be able to just run the cosine distance calculation below. If you have trouble, you can rerun the code cell with the CountVectorizer code in it -- just make sure you've got it pointing to the right directory with the full text files.
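And if the geometry is hard to picture, here's a tiny sketch first, with two made-up word-count vectors where "text B" is just "text A" written out three times as long: Euclidean distance balloons with the extra length, while cosine distance barely notices. (The counts are invented, and scipy's `euclidean` and `cosine` helpers do the math.)
###Code
# A toy illustration with made-up counts: "text B" uses the same words in the
# same proportions as "text A", just three times as often (i.e., a longer text).
import numpy as np
from scipy.spatial.distance import euclidean, cosine

text_a = np.array([10, 4, 1])   # invented counts for three words
text_b = text_a * 3             # same proportions, three times the length

print(euclidean(text_a, text_b))  # large: length/magnitude dominates
print(cosine(text_a, text_b))     # ~0: the angle between the vectors is the same
###Output
_____no_output_____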
###Code
cosine_distances = pd.DataFrame(squareform(pdist(wordcounts, metric='cosine')), index=filekeys, columns=filekeys)
cosine_distances
cosine_distances.to_csv('cosine_distances_count.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(cosine_distances)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
A sort of light salmon in the Euclidean distance visualization represented a value of 500, and the same color represents .8 in the cosine distance visualization. To my mind, the overall impression is less of Mary Anne’s classic plaid, and more like a dirty Kristy’s Krushers baseball jersey with flecks and blobs of spaghetti sauce here and there. (I’ll note that there’s some disagreement here within the DSC; Katia’s reaction was “Plaid in salmon and pink? Sickening, but still something Mary Anne’s dad would make her wear.”) It’s not pretty, but it’s clarifying.First, those super-light bands that are quite similar to one another (where they intersect in a box around the black diagonal line), but quite dissimilar from everything else? That’s the California Diaries series. And California Diaries: *Dawn 1* is still a little lighter than all the rest of that sub-series, but not so much so. This visualization makes it easier to see that the California Diaries are much more similar to regular-series books set in California, like BSC 23: *Dawn on the Coast* and BSC 72: *Dawn and the We ♥️ Kids Club*. It’s not a groundbreaking discovery, but it immediately makes sense! And honestly, “boring” DH results are often a sign that you’ve done something right.*Abby’s Book* is still fairly distinct, but this visualization makes it easier to see some of the points of overlap for the other Portrait Collection books, like the overlap between Claudia’s and Mary Anne’s autobiographies and BSC 7: *Claudia and Mean Janine*, which features Kishi family drama and a focus on Claudia’s grandmother Mimi, who was an important figure in both girls’ lives. There are also speckles of dark spots on the visualization, which mostly seem to correspond to books with the same narrator. It’s particularly prominent with distinctive narrators, like Jessi, whose interests and perspective are not shared by the other characters.The phenomenon involving books 83-101 forming a cluster (including, we can see here, the mystery novels published around the same time period) is still visible here. I don’t have an explanation (though Anouk suspects possible editorial influence since the books are sequential), but this could be something worth exploring later. But while this has been an interesting diversion, let’s get back to chapter 2! After running just the chapter 2s through the same cosine distance calculation, here’s what we get.
###Code
ch2dir = '/Users/qad/Documents/dsc_chapters/ch2'
os.chdir(ch2dir)
# Use the glob library to create a list of file names, sorted alphabetically
# Alphabetical sorting will get us the books in numerical order
filenames = sorted(glob.glob("*.txt"))
# Parse those filenames to create a list of file keys (ID numbers)
# You'll use these later on.
filekeys = [f.split('/')[-1].split('.')[0] for f in filenames]
# Create a CountVectorizer instance with the parameters you need
vectorizer = CountVectorizer(input="filename", max_features=1000, max_df=0.7)
# Run the vectorizer on your list of filenames to create your wordcounts
# Use the toarray() function so that SciPy will accept the results
ch2 = vectorizer.fit_transform(filenames).toarray()
ch2_cosine = pd.DataFrame(squareform(pdist(ch2, metric='cosine')), index=filekeys, columns=filekeys)
ch2_cosine
ch2_cosine.to_csv('ch2_cosine_count.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(ch2_cosine)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
I did a double-take when I saw it, and went back to check the code and make sure I hadn’t accidentally run Euclidean distance again. The chapter 2s are *a lot* closer than the books overall. Which makes sense -- the reason we’re looking at chapter 2 is because we know it’s repetitive. This is a smaller data set than what we used for the full book comparison, including only chapter 2s from the main and mystery series (which follow the 15-chapter structure). Even the chapter 2s show the pattern of similarity for books 83-101 and temporally-similar mysteries, and there’s another cluster from books 30-48. The light-colored lines reflect another known phenomenon about chapter 2, where sometimes the typical “chapter 2” content actually appears in chapter 3.To drive home the point that there’s something different going on here with chapter 2, I re-ran cosine distance on four other chapters: 1, 5 (top row), 9, and 15.(I'm not going to repeat the code for calculating these here; it's the same as the chapter 2 code above, with different source folders.)There are some interesting things that we could dig into here! It looks like there’s more overlap in how the books end (ch. 15, bottom right) than how the middle of the book goes, though there are lots of individual speckles of high similarity for the middle chapters. Chapter 1 starts similarly in the early books, but is pretty dispersed by the end. The cluster in books 83-101 isn’t really visible in these chapters. But the crucial thing we’re seeing is just that chapter 2s are **much more similar** to one another than other chapters. Word counts or word frequencies?I ran this part by Mark, pleased with myself for having worked through a tutorial, modified it to fit what I wanted to work on, and come up with a largely interpretable result that was brimming with possibilities for things to explore next. His response caught me completely off-guard: “You scaled, or otherwise normalized, your word counts, right? RIGHT? RIGHT?!?!? I only ask because you don’t mention it anywhere, and if you don’t normalize your word counts by turning them into word frequencies, you are only really going to ever find out about what texts are longer than others.”Uh-oh. That *Programming Historian* tutorial hadn’t said anything about word **frequencies**. In fact, it’d used the word *count* vectorizer in its code. I knew that would be a problem for Euclidean distance, but I’d hoped that cosine distance would… solve it?“If you use frequencies instead of counts, then you can compare texts that are of somewhat different lengths (within an order of magnitude) pretty effectively,” suggested Mark. “The big problem with Euclidean distances are 0 values. When you use too many dimensions, especially when you use word frequencies, there are a lot of 0s, and these are overweighted by Euclidean distance so that similar texts of very different lengths look much more different than they should – because the longer text has a lot of words that the shorter text doesn’t have (and the reverse is not as true – the shorter text has far fewer words that the longer text doesn’t have). So, when you compare a novel to a short story (or a LONG novel to a normal novel), this becomes a real problem. Cosine is still probably a better metric for the kind of work that you are doing, but here too it is crucial to scale/normalize your counts – otherwise size just keeps becoming a factor. 
Normalizing word counts is such a crucial point in the process and you don’t actually mention it, that it has me worried.”Now I was worried, too. I definitely had **not** normalized the word counts. I guess I could figure out how to create a table with each word and its word count and then generate a frequency by dividing by the sum of all the words, but how would I then feed those frequencies back into the vectorizer pipeline? In the peaceful, dark hours of Insomnia O’Clock, I curled up with the documentation for scikit-learn, the Python library I used for the vectorizer, to see if it offered any better options.And to my delight, it did! The TF-IDF vectorizer was there to save the day. Now, TF-IDF (term frequency - inverse document frequency, which tries to get at *distinctive words* in each text) wasn’t what I wanted -- not yet. (We’ll get to that soon enough; it’s a very different method for evaluating similarity.) But you can’t spell TF-IDF without TF, and since TF is “term frequency”, it’s exactly the thing I was looking for!If using term frequency helps accounting for differences in length, I expected that running Euclidean distance on a matrix of word frequencies should look something like the Cosine distance on a matrix of word counts, right? Let’s compare the first version and the normalized version comparing the full books using Euclidean distance! Euclidean distance with word frequenciesBecause we were in the directory with the chapter 2's, we need to go back to the directory with the full text.
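First, though, a quick illustrative aside with invented numbers: turning counts into frequencies is nothing fancier than dividing each word's count by the total number of words in that text.
###Code
# Illustrative aside: counts -> relative frequencies by hand, with invented numbers.
import pandas as pd

counts = pd.DataFrame({'the': [120, 40], 'kristy': [15, 2], 'horse': [0, 30]},
                      index=['book_a', 'book_b'])
freqs = counts.div(counts.sum(axis=1), axis=0)  # each row now sums to 1
print(freqs)
###Output
_____no_output_____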
###Code
filedir = '/Users/qad/Documents/dsc_corpus_clean'
os.chdir(filedir)
###Output
_____no_output_____
###Markdown
This time we're using the TF-IDF vectorizer, with the "IDF" part turned off:
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
# Use the glob library to create a list of file names, sorted alphabetically
# Alphabetical sorting will get us the books in numerical order
filenames = sorted(glob.glob("*.txt"))
# Parse those filenames to create a list of file keys (ID numbers)
# You'll use these later on.
filekeys = [f.split('/')[-1].split('.')[0] for f in filenames]
# Create a TfidfVectorizer instance with the parameters you need
vectorizer = TfidfVectorizer(input="filename", stop_words=None, use_idf=False, norm=None, max_features=1000)
# Run the vectorizer on your list of filenames to create your wordcounts
# Use the toarray() function so that SciPy will accept the results
wordfreqs = vectorizer.fit_transform(filenames).toarray()
###Output
_____no_output_____
###Markdown
Note: See what happened here? I had to figure out a method to do something, where there wasn't an out-of-the-box solution I could just pull from a tutorial I was following. As a result, I thought about all the parameters and picked better ones-- and did not throw out words shared by 70% of the corpus. (What I also didn't know yet was that, in the process, I'd made another consequential mistake with the vectorizer, but I wouldn't discover that until later still.) So that was good. But the surprise that followed wasn't enough to make me suspicious about the parameters from the **first** time I ran the vectorizer. I guess I've managed to be a walking case study in the point Mark was making about the dangers of just reusing things you find online without being very critical about everything that goes into them. But at least I'm a self-aware walking case study... even if it takes until the 11th hour.
###Code
euclidean_distances_freq = pd.DataFrame(squareform(pdist(wordfreqs, metric='euclidean')), index=filekeys, columns=filekeys)
euclidean_distances_freq
euclidean_distances_freq.to_csv('euclidean_distances_freq.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(euclidean_distances_freq)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
Oh.Once you normalize for length, all the Baby-Sitters Club books look… mostly the same. Even with Euclidean distance. So what am I even going to get for Cosine distance using term frequencies? Cosine distance with word frequenciesWe've already used the TF-IDF vectorizer, so now we just need to do a different distance calculation.
###Code
cosine_distances_freq = pd.DataFrame(squareform(pdist(wordfreqs, metric='cosine')), index=filekeys, columns=filekeys)
cosine_distances_freq
cosine_distances_freq.to_csv('cosine_distances_freq.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(cosine_distances_freq)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
We’ve gone from Mary Anne Plaid to a sort of Claudia Eggplant. Could that be right? Is most of the difference really attributable to length? Even the clear-as-day California Diaries cluster has mostly washed out, except for those shining lights of difference: Ducky, and to a lesser extent, Amalia. (I guess after normalizing for length, what really makes a difference in this corpus is East Coast people vs. West Coast people… and Dawn has assimilated to Connecticut more than she realizes.)This is something that we can check pretty easily! We already wrote up some code to do word counts for all the books. Are the books that stood out before, and have now disappeared into the purple morass, particularly long or short? That does turn out to be the answer with the California Diaries cluster: all of them are shorter than your average BSC book. And it’s also the answer with Abby’s Portrait Collection looking different than the other Portrait Collection books, coming in at only 78% of the length of Stacey's Portrait Collection book. Note: Remember, I didn't realize it at the time, but there were two things that this variant was accounting for: text length, and also not throwing out words that 70% of the books have in common, which includes important things in this corpus like character names! Or, at least, I thought there were two things this variant was accounting for... So what happens when we look at cosine distance for the chapter 2’s?
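(Before that, if you want to run the length check mentioned above yourself, a rough word count per file is all it takes -- a quick sketch, reusing the corpus folder from earlier in this book; adjust the path for your own setup.)
###Code
# Rough sanity check: total words per book file, shortest first.
# (A quick sketch -- swap in your own corpus path as needed.)
import os
import glob

os.chdir('/Users/qad/Documents/dsc_corpus_clean')
lengths = {}
for fn in sorted(glob.glob('*.txt')):
    with open(fn, encoding='utf-8') as f:
        lengths[fn] = len(f.read().split())
for fn, n in sorted(lengths.items(), key=lambda kv: kv[1])[:10]:
    print(n, fn)
###Output
_____no_output_____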
###Code
ch2dir = '/Users/qad/Documents/dsc_chapters/ch2'
os.chdir(ch2dir)
# Use the glob library to create a list of file names, sorted alphabetically
# Alphabetical sorting will get us the books in numerical order
filenames = sorted(glob.glob("*.txt"))
# Parse those filenames to create a list of file keys (ID numbers)
# You'll use these later on.
filekeys = [f.split('/')[-1].split('.')[0] for f in filenames]
# Create a TfidfVectorizer instance with the parameters you need
vectorizer = TfidfVectorizer(input="filename", stop_words=None, use_idf=False, norm=None, max_features=1000)
# Run the vectorizer on your list of filenames to create your wordcounts
# Use the toarray() function so that SciPy will accept the results
ch2freqs = vectorizer.fit_transform(filenames).toarray()
ch2_cosine_freq = pd.DataFrame(squareform(pdist(ch2freqs, metric='cosine')), index=filekeys, columns=filekeys)
ch2_cosine_freq
ch2_cosine_freq.to_csv('ch2_cosine_freq.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(ch2_cosine_freq)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
Now **wait a minute!!** Why on earth do the full books look so much more similar than the chapter 2’s?! We **know** the chapter 2’s are more similar than the full books! *WTF is going wrong?!*I was so irked at the direction this had gone that I entirely forgot about the typical mutual inquiry about well-being and all those social conventions at my next meeting with Mark. The first words out of my mouth, flying forth as soon as his audio connected on Zoom, were, “I tried to normalize the word counts and now the novels are more similar than the chapter 2’s **WHAT IS EVEN GOING ON HERE?!?!**”And then I remembered-- as Kristy’s teacher, Mr. Redmont, would put it-- “*decorum*”, and managed to collect myself. “Also, hello! How are you?”Mark was gracious and generous, as always. “I’m interested! Tell me more!” So I showed him, grumbling and annoyed as I pulled up the code and data. Mark thought about it. “I think you’re really comparing apples to oranges here. Changing word counts to word frequencies helps when your texts are different lengths, but, say, within an order of magnitude.” I stared, quizzically, into my laptop’s video camera. “So what I think is happening with your chapter 2’s is that they’re short enough that the difference between 10 and 13 instances of the word ‘the’ is going to make them look more ‘different’. And the same thing for every other word. With the end result being that the chapter 2’s look more different. But across the entirety of the novel, though, small differences in word frequencies even out. So they end up looking more similar.”“Wait, so, there’s no way to compare chapters vs. whole books?” I asked.“You could do that,” said Mark. “What you’d need to do is sample a chapter-2’s length of text from the set of all the words in a whole book. And then use that sample as the point of comparison.”“Wait, what? If you randomly grab, say, 2,500 words from a novel, you’d be comparing chapter 2 vs. a text that doesn’t make any sense!”Mark shrugged. “I mean, you could generate a text of chapter 2 length using a Markov chain if that would make you feel better,” he said, referencing a text-generation model where the probability of each word occurring depends only on the previous word generated. It’d probably have basically the same effect overall, but would be likely to make more sense to the human reader.But that seemed like a task for a future BSC book. For now, though, a better point of comparison would be comparing how similar the chapter 2’s were, vs. other chapters, just like what we’d done earlier for cosine distance using word counts: And clearly, even though the chapters are less similar than the books overall using this metric, the chapter 2’s are much more similar than other sets of chapters. So we’ve found the same overall result, but we’ve also saved ourselves from chasing false leads -- like the “difference” in Abby’s Portrait Collection book that only really have to do with text length. Not everything is as purple as everything else in this visualization, and there are still things we can follow up on. But we’ve leveled out the differences that are just about difference in length.I think we’ve said all we can say about Euclidean and Cosine distance for this book, and how the results you get vary depending on how you count (or ratio) your words. It’s time to move on to a different method. Slow down, Quinn: Before moving on to the next text comparison method, it's important to wrap up some loose ends. 
We wanted to differentiate the effect of the TF-IDF vectorizer from the effect of no longer using the `max_df` setting to drop terms that appear in 70% of texts. So let's compare three visualizations, all showing Euclidean distance, but with different vectorizer settings: from left to right, the count vectorizer that we used when we first ran Euclidean distance, which drops the terms that appear in more than 70% of the texts. In the middle, the TF-IDF vectorizer that should get us term frequencies instead of counts, and thereby normalize for length. And then finally, the TF-IDF vectorizer without dropping any terms. Now wait just a minute here. Why do the count vectorizer and TF-IDF vectorizer results look identical? Are they actually identical? Shouldn't dropping common words make it even more important to use word frequencies? This was bad news. I was already up past midnight trying to get this Data-Sitters Club book ready for publication, and as an insomniac morning person, that was never a good thing. This was a huge roadblock. I couldn't publish this book without figuring out what was going on. I re-ran the code again and again, ditching the visualization and comparing the numbers in the table. Every single time, the numbers were identical, regardless of which vectorizer I used or what max_df value I used. I spent the early morning insomnia hours desperately Googling, and scouring the scikit-learn documentation. I couldn't find anyone else having this problem, and I was completely stumped. It was time to throw myself on the mercy of DH Python Twitter. DH Python Twitter is a thing. I've been surprised at how often it's worked out that I complain about something involving coding (usually Python, but sometimes other tools) on Twitter and someone will show up and help me solve it. Sometimes it's someone I know, sometimes it's a random person who works on data science, machine learning, or just knows a lot of Python. It feels like a kind of positive, helpful inverse of mansplaining: instead of guys showing up to talk over me and explain things I already know, they show up, listen to the problem I'm having, and help me understand what's going on. (I mean, sometimes they show up and don't read the question and suggest something I and any other reasonable person would've already tried first, but I've gotten lucky with more helpful replies than not.) Part of it is definitely the privilege of my weird job -- there's no professional risk for me in publicly not-knowing things. That's not the case for a lot of people. But since I can do this, I do, with the hope that other people who don't know can follow along and learn, too. A lot of the Data-Sitters Club is active on Twitter, and if you're trying to do something from one of our books and you've got a question, please don't feel weird about tagging us and asking, if you're comfortable! People who write DH tutorials and stuff are generally really happy to see that people are using their work, and often don't mind helping you debug it. And that's what saved the day this time. Closing the narrative loop I was so relieved when Zoe LeBlanc offered to take a look at my code. She's my favorite non-English DH developer-turned-tenure-track faculty. As luck would have it, she was meeting with John R. Ladd that afternoon... the same John R. Ladd who'd written the Programming Historian tutorial from which I copied the code that triggered this whole subplot!
And he also offered to help!And that's how I found myself meeting with Zoe and John, which felt like an apt conclusion to this strange computational subplot.As soon as he took a look at my code, John knew the answer."Everything here looks great-- the only problem is you told it not to normalize," he said.I gaped. "Wait, what? I told it to use the TF-IDF vectorizer. I mean, I read all the scikit-learn documentation on normalization and I was pretty sure I didn't want it to do... whatever it was exactly that the normalization parameter did? I just wanted term frequencies."John shook his head sympathetically. "Yeah, the scikit-learn documentation really doesn't help sometimes. This happened to me a couple years ago when I was teaching a workshop on text comparison using scikit-learn. People were concerned about normalization, and I couldn't figure out how to make it work with scikit-learn, and it made me wonder if it was the right package for the job. But here's how normalization works with the TF-IDF vectorizer: if you set it to 'l1', you get relative frequencies. What it does is make the sum (of absolute values, but we don't have any negative word counts here) of all the features (word counts) add up to 1. Now, l2 is the standard machine learning normalization for text analysis. L2 normalization makes it so that the sum of the *squares* of features is equal to 1. This better accounts for outliers. It basically uses the Pythagorean theorem to normalize the vectors."So there you have it. If your middle-school-age kid ever complains about having to learn the Pythagorean theorem, and refuses to believe it has any real-world utility, you can tell them that it's really important for machine learning.John wasn't kidding about the scikit-learn documentation not helping, though; I don't think I would have ever understood that "‘l1’: Sum of absolute values of vector elements is 1." would mean "turns counts into frequencies". Word frequencies... now with actual word frequencies!Thanks to John and Zoe, I knew how to change my code to actually get what I was aiming for. Let's look at what real word frequencies look like, compared to just not throwing out common shared words, like it turns out we just did, above.
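Before the real run, here's the arithmetic John was describing, with invented numbers: 'l1' turns counts into relative frequencies that sum to 1, while 'l2' scales them so that the sum of their squares is 1.
###Code
# Invented counts, just to show what l1 vs. l2 normalization does to one vector.
import numpy as np

counts = np.array([8.0, 4.0, 2.0, 2.0])
l1 = counts / np.abs(counts).sum()        # relative frequencies; values sum to 1
l2 = counts / np.sqrt((counts**2).sum())  # Pythagorean scaling; squares sum to 1
print(l1, l1.sum())
print(l2, (l2**2).sum())
###Output
_____no_output_____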
###Code
filedir = '/Users/qad/Documents/dsc_corpus_clean'
os.chdir(filedir)
# Use the glob library to create a list of file names, sorted alphabetically
# Alphabetical sorting will get us the books in numerical order
filenames = sorted(glob.glob("*.txt"))
# Parse those filenames to create a list of file keys (ID numbers)
# You'll use these later on.
filekeys = [f.split('/')[-1].split('.')[0] for f in filenames]
# Create a TfidfVectorizer instance with the parameters you need
# Like, actually, the parameters you need, including not disabling normalization
vectorizer = TfidfVectorizer(input="filename", stop_words=None, use_idf=False, norm='l1', max_features=1000)
# Run the vectorizer on your list of filenames to create your wordcounts
# Use the toarray() function so that SciPy will accept the results
wordfreqs4real = vectorizer.fit_transform(filenames).toarray()
###Output
_____no_output_____
###Markdown
Euclidean distance with real word frequencies
###Code
euclidean_distances_freq = pd.DataFrame(squareform(pdist(wordfreqs4real, metric='euclidean')), index=filekeys, columns=filekeys)
euclidean_distances_freq
euclidean_distances_freq.to_csv('euclidean_distances_freq.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(euclidean_distances_freq)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
Interesting! Similar to what I had before, without the word frequency normalization, but a little lighter in color, meaning less similar. Which sounds better to me, knowing the corpus? Let's see how cosine distance plays out.Cosine distance with word frequencies
###Code
cosine_distances_freq = pd.DataFrame(squareform(pdist(wordfreqs4real, metric='cosine')), index=filekeys, columns=filekeys)
cosine_distances_freq
cosine_distances_freq.to_csv('cosine_distances_freq.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(cosine_distances_freq)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
Very similar! Honestly, there's less difference between cosine distance with word counts and cosine distance with word frequencies... which makes sense, because the cosine distance measure already helps account for different text lengths, at least up to a certain point. Let's try cosine distance on the chapter 2's.Cosine distance with chapter 2's
###Code
ch2dir = '/Users/qad/Documents/dsc_chapters/ch2'
os.chdir(ch2dir)
# Use the glob library to create a list of file names, sorted alphabetically
# Alphabetical sorting will get us the books in numerical order
filenames = sorted(glob.glob("*.txt"))
# Parse those filenames to create a list of file keys (ID numbers)
# You'll use these later on.
filekeys = [f.split('/')[-1].split('.')[0] for f in filenames]
# Create a TfidfVectorizer instance with the parameters you need
# Like, actually, the parameters you need, including not disabling normalization
vectorizer = TfidfVectorizer(input="filename", stop_words=None, use_idf=False, norm='l1', max_features=1000)
# Run the vectorizer on your list of filenames to create your wordcounts
# Use the toarray() function so that SciPy will accept the results
ch2freqs4real = vectorizer.fit_transform(filenames).toarray()
ch2_cosine_freq = pd.DataFrame(squareform(pdist(ch2freqs4real, metric='cosine')), index=filekeys, columns=filekeys)
ch2_cosine_freq
ch2_cosine_freq.to_csv('ch2_cosine_freq.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(ch2_cosine_freq)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
It's largely the same as cosine distance using just word counts! With the same questions and disappointments with regard to the similarity of the chapter 2's, compared to the full books, when using cosine distance. We probably don't need to rerun this for chapters 1, 5, 9, and 15; you get the point.But now we've found it using code that legitimately works, without any confusions or misunderstandings about what's happening (at least, I hope?). That's satisfying. A satisfying kind of dissatisfying. Now we can move on to another method. TF-IDFAs I mentioned before, TF-IDF stands for term frequency - inverse document frequency. TF-IDF tries to get at **distinctive** words. For each text, what are the words that set it apart from all the other texts you’re comparing against? To calculate TF-IDF, you don’t have to imagine 1000-dimensional space or anything like that. Term frequency is just how often the word occurs, divided by the total number of words in the text. Inverse document frequency is a way to reduce the importance of words that are high-frequency everywhere (like “the”) in order to surface the words that are high frequency in a particular text because they’re important. You calculate it using another concept from high school math: your old pal logarithm. The inverse document frequency for a word is: log_e(Total number of documents / Number of documents with term t in it). The TF-IDF calculation is inherently comparative: it doesn’t make sense to run it on just one text, if you’re looking for what’s unique about a text in relation to other texts. But the output we get from TF-IDF is a list of words and numerical values, which isn’t something we can use to visualize a comparison of the texts, the way we could with the output of the vectorizer we used to plot points in 1000-dimensional space. We *can* use the TF-IDF calculations for each word in our vectorizer instead of simple word counts, which will generate a different set of points for each text, and from there we can use Euclidean or Cosine distance. But before we go there, let’s take a look at what we get out of the TF-IDF calculation, using our full-text corpus (not just the chapter 2s).The word “baby-sitters” is going to appear in most or all of the books (maybe not California Diaries). On the other hand, the word “Lowell” (the surname of the racist family in BSC 56: *Keep Out, Claudia!*) only occurs in two books: *Keep Out, Claudia!* and BSC 3: *The Truth About Stacey* (where “Lowell” actually refers to a different person, Lowell Johnston). Lowell Johnston is only mentioned twice in *The Truth About Stacey*, so it’s still not going to get a high TF-IDF score in that book (it comes in 103 with a score of 10.64). But in *Keep Out, Claudia!*, Lowell appears a lot, and that number isn’t scaled down much at all because it only occurs in two books. So it ends up getting the highest TF-IDF score for that book, 707.82. This is a large score, more similar to characters in “very special episodes” who appear in just one book, like Whitney (the girl with Down’s Syndrome who Dawn babysits in BSC 77: *Dawn and Whitney, Friends Forever*).TF-IDF is one approach to getting at what a text is “about” -- more straightforward to understand and faster to calculate than topic modeling. But especially working with a corpus of fiction, you’ll probably need to weed out the character names -- either by pre-processing the text to remove them, or looking beyond the first few highest-scoring terms. 
(If anything, we’re getting fewer high-scoring character names than you’d expect in most fiction. The major characters occur frequently enough that they get weighted down, like words like “the” and “is”.) Let's go back to the directory with the full texts:
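First, a toy version of that calculation with invented numbers, using the log_e formulation above. (scikit-learn's implementation smooths the IDF, so its exact scores will differ, but the intuition is the same.)
###Code
# Toy TF-IDF arithmetic with invented numbers (scikit-learn smooths the IDF,
# so its exact values differ, but this is the basic idea).
import math

total_docs = 200                                  # pretend corpus size
count, doc_length, docs_with_word = 50, 30000, 2  # pretend stats for one word
tf = count / doc_length
idf = math.log(total_docs / docs_with_word)
print(tf * idf)  # high when a word is frequent here but rare elsewhere
###Output
_____no_output_____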
###Code
filedir = '/Users/qad/Documents/dsc_corpus_clean'
os.chdir(filedir)
# Use the glob library to create a list of file names, sorted alphabetically
# Alphabetical sorting will get us the books in numerical order
filenames = sorted(glob.glob("*.txt"))
# Parse those filenames to create a list of file keys (ID numbers)
# You'll use these later on.
filekeys = [f.split('/')[-1].split('.')[0] for f in filenames]
# Create a TfidfVectorizer instance with the parameters you need
vectorizer = TfidfVectorizer(input="filename", stop_words=None, use_idf=True, norm=None, max_features=1000)
# Run the vectorizer on your list of filenames to create your wordcounts
# Use the toarray() function so that SciPy will accept the results
transformed_documents = vectorizer.fit_transform(filenames)
transformed_documents_as_array = transformed_documents.toarray()
###Output
_____no_output_____
###Markdown
The code from the Programming Historian tutorial generates a CSV file for each text, showing the TF-IDF value of each word. (You can find all these CSV files [in the GitHub repo for this book](https://github.com/datasittersclub/dsc8).)
###Code
# construct a list of output file paths from the previous list of text files, swapping the .txt extension for .csv
output_filenames = [str(txt_file).replace(".txt", ".csv") for txt_file in filenames]
# loop each item in transformed_documents_as_array, using enumerate to keep track of the current position
for counter, doc in enumerate(transformed_documents_as_array):
# construct a dataframe
tf_idf_tuples = list(zip(vectorizer.get_feature_names(), doc))
one_doc_as_df = pd.DataFrame.from_records(tf_idf_tuples, columns=['term', 'score']).sort_values(by='score', ascending=False).reset_index(drop=True)
# output to a csv using the enumerated value for the filename
one_doc_as_df.to_csv(output_filenames[counter])
###Output
_____no_output_____
###Markdown
For BSC 54: *Mallory and the Dream Horse*, the top three terms are Nina (a little girl involved in the book’s babysitting sub-plot), Pax (the horse Mallory rides), and Lauren (Mallory’s equitation instructor), but by themselves they don’t help much with classifying this text. If you look in the top 10, though, you’ve got riding (5), horse (6), and horses (8). In the top 25, there are lessons (13), saddle (15), riders (17), reins (18), stable (19), canter (21), and bridle (25). It’s looking pretty horsey in here. In BSC 57: *Dawn Saves the Planet*, we’ve got recycling (2), planet (5), ecology (7), pollution (10), garbage (11), recycle (12), styrofoam (13), recycled (20), and carton (25). BSC 110: *Abby and the Bad Sport* has coach (3), soccer (4), goal (7), goalie (8), players (13), field (15), referee (17), defense (18), cleats (20), player (21), kickers (23), and benched (24). You might not get the bad sportsmanship out of this, but there’s clearly some soccer afoot. What about books with a less obvious theme? There are some other terms that might throw you off, but you could probably come to the conclusion that art plays a meaningful role in BSC 12: *Claudia and the New Girl* with sculpture (4), sculpt (5), portfolio (12), gallery (22), ... despite hydrant (15), vacuum (19), and inanimate (20). Indeed, the aforementioned new girl is into art, just like Claudia. If I were thinking of some distinctive words for BSC 87: *Stacey and the Bad Girls*, what would come to mind would be “shoplifting”, “concert”, “alcohol”, and “wine”. But the top 25 terms are almost all names -- including the band whose concert they go see (7 U4Me) and the department store where the shoplifting takes place (10 Bellair). There are also trains (19) and escalator (24). “Concert” does the best of my terms at 40. “Alcohol” is 80, between “camera” and “rosebud”. “Shoplift” is 118, between “bikes” and “creature”. And “wine” is down at 1002, in the company of “sniffle” and “bees”. So don’t get too comfortable with the assumption that TF-IDF will get you to basically the same set of terms that a human would think of. Plot salience and distinctive content aren’t the same as distinctive frequency distribution.BSC 83: *Stacey vs. the BSC* features Stacey being duplicitous, along with the inter-babysitter drama that ultimately leads to the misbehavior described above for BSC 87, but you can’t see it in the top 25 terms, which feature a lot of names, various instances of onomatopoeia (“clack”, “clomp”, and “plink”), piano, fiesta, talent, twinkle, recital, cheese, and jukebox. There’s something to this: Dawn hides behind a jukebox spying on Stacey after she sneaks out on a date. And Charlotte plays the piano at the BSC talent show. Score three for TF-IDF! Even if it’s fixating on objects, at least they’re plot-significant objects. So what’s up with the cheese? I don’t have a good explanation, but it comes up a lot, between Jamie’s macaroni and cheese, extra pepperoni and cheese on a pizza, multiple references to cream cheese, cheese and crackers, a fiesta burger (there’s the “fiesta” from our TF-IDF results) with melted cheese… maybe ghostwriter Peter Lerangis had a cheese craving while writing it? TF-IDF for text comparisonClose-reading a distant reading method as a proxy for looking at the “topic” of individual texts is one way you can use the TF-IDF output. But you can also use it to compare texts at scale. 
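(If you want to do this kind of skimming yourself, the per-book CSVs generated above make it easy -- something like the snippet below, where 'some_book.csv' is just a placeholder for whichever book's output file you're curious about.)
###Code
# Skim the top TF-IDF terms for one book, using the per-book CSVs generated above.
# ('some_book.csv' is a placeholder -- substitute any of the output filenames.)
import pandas as pd

top_terms = pd.read_csv('some_book.csv', index_col=0).head(25)
print(top_terms)
###Output
_____no_output_____
###Markdown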
You can also substitute in the TF-IDF vectorizer (with the IDF turned **on** this time) as your vectorizer of choice when trying out the Euclidean and cosine distance. The TF-IDF vectorizer has some optional parameters for dropping words. You can drop words that appear in too many documents with max_df. So `max_df = 0.9` means “ignore all words that appear in more than 90% of the documents”, or you can give it a specific number of documents with `max_df = 100`, for “ignore all words that appear in more than 100 documents”. You can get rid of words that appear too infrequently with min_df (e.g. `min_df = 0.1` means “ignore all words that appear in less than 10% of the documents”.) In this case, we’ll keep everything by not using those parameters, but you can play with them with your own corpora to see how it impacts your result to remove super-high frequency words (which, in the Baby-Sitters Club corpus, would get rid of both words like “the” and “a”, and the main characters’ names) or super-low frequency words (like the names of characters in the “very special episode” books.) Note: Remember, I wrote this before I had any idea at all about the problems with my code that triggered this book's subplot. If this were a horror-themed choose-your-own-adventure book, at this point you might read something like this: If only you could hear the screaming voices of the readers as you write this description of max_df. "CHECK YOUR CODE, YOU MADE THIS MISTAKE WITH YOUR FIRST EUCLIDEAN AND COSINE DISTANCE EXAMPLES!" But you cannot hear them. And so you remain ignorant of this fact for a few weeks longer. Turn the page..." So let's do Euclidean and cosine distance using the TF-IDF vectorizer with IDF set to true, and see how it compares to the other ways of comparing text that we've tried so far.
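(As an aside, to make the max_df and min_df parameters concrete, here's what the vectorizer call might look like with both cutoffs switched on -- an illustrative variant, not one of the configurations I actually report on in this book.)
###Code
# Illustrative variant (not one of the configurations used above): drop words that
# appear in more than 90% of the books, or in fewer than 10% of them.
from sklearn.feature_extraction.text import TfidfVectorizer

trimmed_vectorizer = TfidfVectorizer(input="filename", use_idf=True, norm=None,
                                     max_df=0.9, min_df=0.1, max_features=1000)
# trimmed_tfidf = trimmed_vectorizer.fit_transform(filenames).toarray()
###Output
_____no_output_____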
###Code
tfidf_comparison_output_euclidean = pd.DataFrame(squareform(pdist(transformed_documents_as_array, metric='euclidean')), index=filekeys, columns=filekeys)
tfidf_comparison_output_euclidean
tfidf_comparison_output_euclidean.to_csv('tfidf_comparison_output_euclidean.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(tfidf_comparison_output_euclidean)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
Okay. Now let's try cosine distance with the TF-IDF vectorizer!
###Code
tfidf_comparison_output_cosine = pd.DataFrame(squareform(pdist(transformed_documents_as_array, metric='cosine')), index=filekeys, columns=filekeys)
tfidf_comparison_output_cosine
tfidf_comparison_output_cosine.to_csv('tfidf_comparison_output_cosine.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(tfidf_comparison_output_cosine)
#Displays the image
plt.show()
###Output
_____no_output_____
###Markdown
There’s less difference between the Euclidean and cosine distance when using a TF-IDF vectorizer (that actually uses the “-IDF” in “TF-IDF”) than the word count vectorizer. So what happens when we try to run cosine distance using TF-IDF on chapter 2's?
###Code
ch2dir = '/Users/qad/Documents/dsc_chapters/ch2'
os.chdir(ch2dir)
# Use the glob library to create a list of file names, sorted alphabetically
# Alphabetical sorting will get us the books in numerical order
filenames = sorted(glob.glob("*.txt"))
# Parse those filenames to create a list of file keys (ID numbers)
# You'll use these later on.
filekeys = [f.split('/')[-1].split('.')[0] for f in filenames]
# Create a TfidfVectorizer instance with the parameters you need
vectorizer = TfidfVectorizer(input="filename", stop_words=None, use_idf=True, norm=None, max_features=1000)
# Run the vectorizer on your list of filenames to create your wordcounts
# Use the toarray() function so that SciPy will accept the results
ch2_tfidf = vectorizer.fit_transform(filenames).toarray()
ch2_cosine_tfidf = pd.DataFrame(squareform(pdist(ch2_tfidf, metric='cosine')), index=filekeys, columns=filekeys)
ch2_cosine_tfidf
ch2_cosine_tfidf.to_csv('ch2_tfidf.csv')
#Defines the size of the image
plt.figure(figsize=(100, 100))
#Increases the label size so it's more legible
sns.set(font_scale=3)
#Generates the visualization using the data in the dataframe
ax = sns.heatmap(ch2_cosine_tfidf)
#Displays the image
plt.show()
###Output
_____no_output_____ |
Car Price Prediction/car-price-prediction-ann.ipynb | ###Markdown
Loading the data
###Code
import pandas as pd

df = pd.read_csv('car data.csv')
df.head()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis
###Code
df.shape
df.info()
df.describe()
df.isna().sum()
###Output
_____no_output_____
###Markdown
Data Preprocessing
###Code
df['Age'] = 2021 - df['Year']
df.drop('Year', axis=1, inplace=True)
df.head()
df.rename(columns = {'Selling_Price':'Selling_Price(lacs)', 'Present_Price':'Present_Price(lacs)', 'Owner':'Past_Owners'}, inplace=True)
df.head()
df.columns
###Output
_____no_output_____
###Markdown
Visualizing the data
###Code
import matplotlib.pyplot as plt
import seaborn as sns

cat_cols = ['Fuel_Type', 'Seller_Type', 'Transmission', 'Past_Owners']
i = 0
while i < 4:
fig = plt.figure(figsize=[10,4])
plt.subplot(1,2,1)
sns.countplot(x=cat_cols[i], data=df)
i +=1
plt.subplot(1,2,2)
sns.countplot(x=cat_cols[i], data=df)
i += 1
plt.show()
df[df['Present_Price(lacs)']>df['Present_Price(lacs)'].quantile(0.99)]
df[df['Selling_Price(lacs)'] > df['Selling_Price(lacs)'].quantile(0.99)]
df[df['Kms_Driven'] > df['Kms_Driven'].quantile(0.99)]
sns.heatmap(df.corr(), annot = True, cmap='RdBu')
plt.show()
df.corr()['Selling_Price(lacs)']
df.pivot_table(values='Selling_Price(lacs)', index = 'Seller_Type', columns='Fuel_Type')
# Creating dummies for categorical values
df.drop(labels='Car_Name', axis=1, inplace = True)
df.head()
df = pd.get_dummies(data=df, drop_first=True)
df.head()
###Output
_____no_output_____
###Markdown
Train - Test - Split
###Code
# Train Test Split
X = df.iloc[:,1:].values
y = df.iloc[:,:1].values
print(X)
# print(y)
print(X.shape, y.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2, random_state=1)
###Output
_____no_output_____
###Markdown
Scaling the data for better training
###Code
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
print(X.shape, X_train.shape, X_test.shape)
print(y.shape, y_train.shape, y_test.shape)
###Output
(301, 1) (240, 1) (61, 1)
###Markdown
Building an Artificial Neural Network
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(30, activation='relu'))  # hidden layer 1
model.add(Dense(10, activation='relu'))  # hidden layer 2
model.add(Dense(1))                      # output layer: predicted selling price
model.compile(optimizer='rmsprop', loss='mse')
model.fit(X_train, y_train, epochs=100, validation_data=(X_test,y_test))
model.summary()
loss_df = pd.DataFrame(model.history.history)
loss_df.plot()
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
model.evaluate(X_test, y_test)
###Output
2/2 [==============================] - 0s 4ms/step - loss: 1.2536
###Markdown
model.predict() on X_train
###Code
train_pred = model.predict(X_train)
# print(pred_train)
###Output
_____no_output_____
###Markdown
model.predict() on X_test
###Code
test_pred = model.predict(X_test)
from sklearn.metrics import r2_score
###Output
_____no_output_____
###Markdown
- R Squared : R-squared measures the strength of the relationship between your model and the dependent variable on a convenient 0 – 100% scale.
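A quick illustration of where that number comes from: R-squared can be computed directly from its definition, 1 - SS_res/SS_tot (the values below are made up).
###Code
# R-squared from its definition (1 - SS_res/SS_tot), with made-up values;
# it matches sklearn.metrics.r2_score on the same inputs.
import numpy as np

y_true = np.array([3.0, 5.0, 7.0, 9.0])
y_hat = np.array([2.8, 5.4, 6.9, 9.2])
ss_res = ((y_true - y_hat)**2).sum()
ss_tot = ((y_true - y_true.mean())**2).sum()
print(1 - ss_res/ss_tot)
###Output
_____no_output_____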
###Code
r2_train = r2_score(y_train, train_pred)
print("R Squared value of train dataL: ",r2_train)
r2_test = r2_score(y_test, test_pred)
print("R Squared value of test data:", r2_test)
diff_r2_scores = r2_train - r2_test
print("Difference between two scores: ", diff_r2_scores.round(2))
###Output
Difference between two scores: 0.04
|
Pytorch/Practices/2A_Linear_Regression.ipynb | ###Markdown
1. Dataset
###Code
import numpy as np
import matplotlib.pyplot as plt
import torch

N = 20
# X = N (20) random values in [-5, +5]
X = np.random.random(N)*10 - 5
print(X.shape)
# y = A line plus some noise
y = 0.5 * X - 1 + np.random.randn(N)
plt.scatter(X, y);
print(y.shape)
X = X.reshape(N, 1)
y = y.reshape(N, 1)
# Convert to pytorch tensors
X_train = torch.from_numpy(X.astype(np.float32))
y_train = torch.from_numpy(y.astype(np.float32))
print(X_train.shape)
print(y_train.shape)
###Output
torch.Size([20, 1])
torch.Size([20, 1])
###Markdown
2. Model
###Code
import torch.nn as nn
# model
lr_model = nn.Linear(1, 1)
# loss function: MSE
mse_loss = nn.MSELoss()
# Optimizer: SGD
sgd_opt = torch.optim.SGD(lr_model.parameters(), lr=0.05)
###Output
_____no_output_____
###Markdown
3. Train
###Code
def fit(X, y, model, loss_fn, optimizer, n_epochs):
losses = []
for i in range(n_epochs):
# zero the parameter gradients
optimizer.zero_grad()
# Forward
y_ = model(X)
loss = loss_fn(y_, y)
# Save loss
losses.append(loss.item())
# Backward
loss.backward()
optimizer.step()
if (i+1)%10==0: print(f"Epoch {i+1}/{n_epochs}, Loss: {loss.item():.4f}")
# Plot losses
plt.plot(losses);
fit(
X=X_train, y=y_train,
model=lr_model, loss_fn=mse_loss, optimizer=sgd_opt,
n_epochs=100)
###Output
Epoch 10/100, Loss: 1.9804
Epoch 20/100, Loss: 1.4644
Epoch 30/100, Loss: 1.4000
Epoch 40/100, Loss: 1.3919
Epoch 50/100, Loss: 1.3909
Epoch 60/100, Loss: 1.3908
Epoch 70/100, Loss: 1.3908
Epoch 80/100, Loss: 1.3908
Epoch 90/100, Loss: 1.3908
Epoch 100/100, Loss: 1.3908
###Markdown
4. Test
###Code
# test = train to verify model
X_test = X_train
y_test = y_train
y_test_ = lr_model(X_test)
loss = mse_loss(y_test_, y_test)
print(f"Final loss: {loss:.4f}")
# Plot final prediction
## convert pytorch tensor -> numpy
X_test = X_test.detach().numpy()
y_test_ = y_test_.detach().numpy()
plt.scatter(X, y, label='Original data')
plt.plot(X_test, y_test_, color='red', label='Fitted line')
plt.legend()
plt.show()
###Output
_____no_output_____ |
docs/examples/descriptions.ipynb | ###Markdown
Checking empty descriptions In this example, we use `fastobo` to create a small validation script which will report empty definitions in an OBO file. We also use `requests` in order to connect to the OBO library.
###Code
import fastobo
import requests
###Output
_____no_output_____
###Markdown
`fastobo.load` takes a file-handle, which can be accessed using the `raw` property of the `Response` object returned by `requests.get`:
###Code
res = requests.get("http://purl.obolibrary.org/obo/ms.obo", stream=True)
doc = fastobo.load(res.raw)
###Output
_____no_output_____
###Markdown
HeaderNow, we can check the header for empty descriptions in definition clauses:
###Code
for clause in doc.header:
if isinstance(clause, fastobo.header.SynonymTypedefClause) and not clause.description:
print("Empty description in definition of", clause.typedef)
elif isinstance(clause, fastobo.header.SubsetdefClause) and not clause.description:
print("Empty description in definition of", clause.subset)
###Output
_____no_output_____
###Markdown
Note that we are using `isinstance` a lot compared to what you may be used to in other Python library: this is because `fastobo` is based on a Rust library which is strongly-typed, so that is reflected in the Python library that wraps it. We could use the strong typing to write the same snippet using type-specific callback wrapped in a `dict`:
###Code
def check_synonym_typedef(clause):
if not clause.description:
print("Empty description in definition of", clause.typedef, "in header")
def check_subsetdef(clause):
if not clause.description:
print("Empty description in definition of", clause.subset, "in header")
CALLBACKS = {
fastobo.header.SynonymTypedefClause: check_synonym_typedef,
fastobo.header.SubsetdefClause: check_subsetdef,
}
for clause in doc.header:
callback = CALLBACKS.get(type(clause))
if callback is not None:
callback(clause)
###Output
_____no_output_____
###Markdown
Such a construct can be used to process all possible clauses while reducing the number of `if`/`elif` branches, in particular when many different clauses are processed at the same time. Entities Checking for definitions in entity frames is straightforward: all definition clauses have a `definition` property that returns the textual definition of the entity. We can use duck-typing here to check for empty definitions:
###Code
for frame in doc:
for clause in frame:
try:
if not clause.definition:
print("Empty definition of", frame.id)
except AttributeError:
pass
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/01_radiation_therapy_patients_data_EXTRACTION_CLEANING-checkpoint.ipynb | ###Markdown
--- ... --- Loading Telomere Length Data from TeloFISH--- Extracting telomere length data output from ImageJ from all radiation therapy patients
###Code
all_patients_dict = trp.generate_dictionary_from_TeloLength_data('../data/raw patient teloFISH data/')
###Output
SW9A non irrad.xlsx data extraction in progress..
BJ1 for SW9_.xlsx data extraction in progress..
SW11A non irrad.xlsx data extraction in progress..
BJ1 for SW15_.xlsx data extraction in progress..
SW6A non irrad.xlsx data extraction in progress..
SW6A irrad @ 4 Gy.xlsx data extraction in progress..
SW8B.xlsx data extraction in progress..
SW14A irrad @ 4 Gy.xlsx data extraction in progress..
SW8A irrad @ 4 Gy.xlsx data extraction in progress..
SW5A irrad @ 4 Gy.xlsx data extraction in progress..
SW8C.xlsx data extraction in progress..
SW1A non irrad.xlsx data extraction in progress..
BJ1 for SW11_.xlsx data extraction in progress..
SW16A non irrad.xlsx data extraction in progress..
BJ1 for SW13_.xlsx data extraction in progress..
BJ-hTERT for SW9_.xlsx data extraction in progress..
BJ1 for SW14_.xlsx data extraction in progress..
SW9B.xlsx data extraction in progress..
BJ1 for SW8_.xlsx data extraction in progress..
SW_1_ok_3_C_.xlsx data extraction in progress..
SW3A irrad @ 4 Gy.xlsx data extraction in progress..
SW11A irrad @ 4 Gy.xlsx data extraction in progress..
BJ1 for SW16_.xlsx data extraction in progress..
BJ1 for SW12_.xlsx data extraction in progress..
SW8A non irrad.xlsx data extraction in progress..
BJ-hTERT for SW8_.xlsx data extraction in progress..
SW10A non irrad.xlsx data extraction in progress..
SW12A irrad @ 4 Gy.xlsx data extraction in progress..
SW9C.xlsx data extraction in progress..
BJ1 for SW10_.xlsx data extraction in progress..
SW7A non irrad.xlsx data extraction in progress..
SW1A irrad @ 4 Gy.xlsx data extraction in progress..
SW13A irrad @ 4 Gy.xlsx data extraction in progress..
SW1B.xlsx data extraction in progress..
BJ-hTERT for SW6_.xlsx data extraction in progress..
SW13B.xlsx data extraction in progress..
BJ1 for SW2_.xlsx data extraction in progress..
SW2A non irrad.xlsx data extraction in progress..
SW5C.xlsx data extraction in progress..
SW15C.xlsx data extraction in progress..
SW7C.xlsx data extraction in progress..
SW11B.xlsx data extraction in progress..
SW3B.xlsx data extraction in progress..
BJ-hTERT for SW15_.xlsx data extraction in progress..
SW15A non irrad.xlsx data extraction in progress..
SW12A non irrad.xlsx data extraction in progress..
BJ-hTERT for SW11_.xlsx data extraction in progress..
SW3C.xlsx data extraction in progress..
SW11C.xlsx data extraction in progress..
SW7B.xlsx data extraction in progress..
SW15B.xlsx data extraction in progress..
BJ1 for SW6_.xlsx data extraction in progress..
BJ-hTERT for SW2_.xlsx data extraction in progress..
SW5B.xlsx data extraction in progress..
SW5A non irrad.xlsx data extraction in progress..
SW1C.xlsx data extraction in progress..
BJ-hTERT for SW13_.xlsx data extraction in progress..
SW10A irrad @ 4 Gy.xlsx data extraction in progress..
SW2A irrad @ 4 Gy.xlsx data extraction in progress..
BJ1 for SW1_.xlsx data extraction in progress..
SW10B.xlsx data extraction in progress..
BJ-hTERT for SW5_.xlsx data extraction in progress..
SW2B.xlsx data extraction in progress..
SW13A non irrad.xlsx data extraction in progress..
SW14C.xlsx data extraction in progress..
SW6C.xlsx data extraction in progress..
SW9A irrad @ 4 Gy.xlsx data extraction in progress..
SW16A irrad @ 4 Gy.xlsx data extraction in progress..
BJ-hTERT for SW14_.xlsx data extraction in progress..
BJ-hTERT for SW16_.xlsx data extraction in progress..
SW16C.xlsx data extraction in progress..
BJ1 for SW3_.xlsx data extraction in progress..
SW12B.xlsx data extraction in progress..
BJ-hTERT for SW7_.xlsx data extraction in progress..
SW12C.xlsx data extraction in progress..
SW16B.xlsx data extraction in progress..
BJ-hTERT for SW3_.xlsx data extraction in progress..
BJ1 for SW7_.xlsx data extraction in progress..
BJ-hTERT for SW12_.xlsx data extraction in progress..
SW3A non irrad.xlsx data extraction in progress..
SW15A irrad @ 4 Gy.xlsx data extraction in progress..
SW7A irrad @ 4 Gy.xlsx data extraction in progress..
BJ-hTERT for SW10_.xlsx data extraction in progress..
SW6B.xlsx data extraction in progress..
SW14B.xlsx data extraction in progress..
BJ-hTERT for SW1_.xlsx data extraction in progress..
SW14A non irrad.xlsx data extraction in progress..
BJ1 for SW5_.xlsx data extraction in progress..
SW2C.xlsx data extraction in progress..
SW10C.xlsx data extraction in progress..
completed file collection
###Markdown
Making dataframe from dict w/ all patients telomere length data, contains telo means & individual telos as list
###Code
all_patients_df = trp.generate_dataframe_from_dict(all_patients_dict)
# don't need telo means per cell @ this time
all_patients_df = all_patients_df.drop(['cell data'], axis=1)
print(all_patients_df.shape)
###Output
(59, 7)
###Markdown
Saving all patients telomere length data for later retrieval
###Code
# changing telo data to list in prep for saving to csv
all_patients_df['telo data'] = all_patients_df['telo data'].apply(lambda row: row.tolist())
all_patients_df.to_csv('../data/compiled patient data csv files/all_patients_df.csv', index=False)
###Output
_____no_output_____
###Markdown
Generating all patients telo df containing telo counts per quartile melted into tidy data format
###Code
melted_all_patients_df = pd.melt(
all_patients_df,
id_vars = [col for col in all_patients_df.columns if col != 'Q1' and col != 'Q2-3' and col != 'Q4'],
var_name='relative Q',
value_name='Q freq counts')
melted_all_patients_df['Q freq counts'] = melted_all_patients_df['Q freq counts'].astype('float64')
melted_all_patients_df.head(4)
###Output
_____no_output_____
###Markdown
Saving melted all patients df to csv
###Code
melted_all_patients_df.to_csv('../data/compiled patient data csv files/melted_all_patients_df.csv', index=False)
###Output
_____no_output_____
###Markdown
Pivoted Dataframe w/ timepoints as columns, and telomere length means for each patient timepoint in rows
###Code
pivot_patients_telo_means_df = all_patients_df.pivot(index='patient id', columns='timepoint', values='telo means')
pivot_patients_telo_means_df = pivot_patients_telo_means_df.drop(13)
###Output
_____no_output_____
###Markdown
Saving pivoted telo means df to file
###Code
pivot_patients_telo_means_df.to_csv('../data/compiled patient data csv files/pivot_patients_telo_means_df.csv', index=False)
###Output
_____no_output_____
###Markdown
Exploding individual telomere length measurements from contained list into dataframe (i.e row per individual telomere) while retaining related column info
###Code
# can imagine the lists containing the individual telos per patient exploding to the right; maintains the index relationship
explode_telos_raw = all_patients_df['telo data'].apply(pd.Series)
print(explode_telos_raw.shape)
explode_telos_raw.head(4)
exploded_telos_all_patients_df = (explode_telos_raw
# we'll merge the exploded telos df w/ our original all patients df on the index!
.merge(all_patients_df, right_index = True, left_index = True)
.drop(['telo data', 'Q1', 'Q2-3', 'Q4'], axis = 1)
.melt(id_vars = ['patient id', 'timepoint', 'telo means'], value_name = "individual telomeres")
.drop("variable", axis = 1)
.dropna())
exploded_telos_all_patients_df.head(4)
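# note: newer pandas versions (0.25+) also provide DataFrame.explode, which gives one
# row per individual telomere more directly -- a rough sketch using the same column
# names as above (kept separate so it does not change the dataframe used below):
exploded_alt = (all_patients_df
                .explode('telo data')
                .rename(columns={'telo data': 'individual telomeres'}))
exploded_alt.head(4)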
###Output
_____no_output_____
###Markdown
Saving exploded telomere df for later retrieval
###Code
exploded_telos_all_patients_df.to_csv('../data/compiled patient data csv files/exploded_telos_all_patients_df.csv', index=False)
###Output
_____no_output_____
###Markdown
Loading Chromosome Aberration Data from Subtelo-dGH ---
###Code
all_chr_aberr_df = trp.make_dataframe_chr_aberr_data('../data/dGH scoresheets/')
all_chr_aberr_df.to_csv('../data/compiled patient data csv files/all_chr_aberr_df.csv', index=False)
###Output
_____no_output_____
###Markdown
Loading Complete Blood Count data
###Code
# loading excel file
cbc_data = pd.read_excel('../data/to colorado.xlsx')
# minor data cleaning
cbc_data.rename({'patient': 'patient id',
'mrn': 'timepoint'}, axis=1, inplace=True)
def extract_patient_ID(row):
if 'SW' in row:
row = row.replace('SW', ' ').strip()
return row
cbc_data['patient id'] = cbc_data['patient id'].apply(lambda row: extract_patient_ID(row))
cbc_data['patient id'] = cbc_data['patient id'].astype('int64')
# saving to file
cbc_data.to_csv('../data/compiled patient data csv files/cleaned cbc data.csv', index=False)
###Output
_____no_output_____ |
Model backlog/Training/Segmentation/Local/[3-Fold]-57-UNet EfficientNetB3_320x480.ipynb | ###Markdown
Dependencies
###Code
import sys
sys.path.append('../Scripts/')
from utillity_script_cloud_segmentation import *
from utillity_script_lr_schedulers import *
seed = 0
seed_everything(seed)
warnings.filterwarnings("ignore")
train_path = '../data/train.csv'
kfold_set_path = '../data/3-fold.csv'
train_images_path = '../data/train_images320x480/'
###Output
_____no_output_____
###Markdown
Load data
###Code
train = pd.read_csv(train_path)
kfold_set = pd.read_csv(kfold_set_path)
X_train = kfold_set[kfold_set['fold_0'] == 'train']
X_val = kfold_set[kfold_set['fold_0'] == 'validation']
print('Complete set samples:', len(train))
print('Train samples: ', len(X_train))
print('Validation samples: ', len(X_val))
# Preprocess data
train['image'] = train['Image_Label'].apply(lambda x: x.split('_')[0])
display(X_train.head())
###Output
Complete set samples: 22184
Train samples: 3682
Validation samples: 1843
###Markdown
Model parameters
###Code
N_GPUS = 3
BACKBONE = 'efficientnetb3'
BATCH_SIZE = 8
EPOCHS = 15
LEARNING_RATE = 10**(-1.7)
HEIGHT = 320
WIDTH = 480
CHANNELS = 3
N_CLASSES = 4
ES_PATIENCE = 8
STEP_SIZE_TRAIN = len(X_train)//BATCH_SIZE
STEP_SIZE_VALID = len(X_val)//BATCH_SIZE
BATCH_SIZE *= N_GPUS
model_0_path = 'files/57-unet[fold-1]_%s_%sx%s.h5' % (BACKBONE, HEIGHT, WIDTH)
model_1_path = 'files/57-unet[fold-2]_%s_%sx%s.h5' % (BACKBONE, HEIGHT, WIDTH)
model_2_path = 'files/57-unet[fold-3]_%s_%sx%s.h5' % (BACKBONE, HEIGHT, WIDTH)
class OneCycleLR(Callback):
def __init__(self,
max_lr,
end_percentage=0.1,
scale_percentage=None,
maximum_momentum=0.95,
minimum_momentum=0.85,
verbose=True):
""" This callback implements a cyclical learning rate policy (CLR).
This is a special case of Cyclic Learning Rates, where we have only 1 cycle.
After the completion of 1 cycle, the learning rate will decrease rapidly to
1/100th of its initial lowest value.
# Arguments:
max_lr: Float. Initial learning rate. This also sets the
starting learning rate (which will be 10x smaller than
this), and will increase to this value during the first cycle.
end_percentage: Float. The percentage of all the epochs of training
that will be dedicated to sharply decreasing the learning
rate after the completion of 1 cycle. Must be between 0 and 1.
scale_percentage: Float or None. If float, must be between 0 and 1.
If None, it will compute the scale_percentage automatically
based on the `end_percentage`.
maximum_momentum: Optional. Sets the maximum momentum (initial)
value, which gradually drops to its lowest value in half-cycle,
then gradually increases again to stay constant at this max value.
Can only be used with SGD Optimizer.
minimum_momentum: Optional. Sets the minimum momentum at the end of
the half-cycle. Can only be used with SGD Optimizer.
verbose: Bool. Whether to print the current learning rate after every
epoch.
# Reference
- [A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay](https://arxiv.org/abs/1803.09820)
- [Super-Convergence: Very Fast Training of Residual Networks Using Large Learning Rates](https://arxiv.org/abs/1708.07120)
"""
super(OneCycleLR, self).__init__()
if end_percentage < 0. or end_percentage > 1.:
raise ValueError("`end_percentage` must be between 0 and 1")
if scale_percentage is not None and (scale_percentage < 0. or scale_percentage > 1.):
raise ValueError("`scale_percentage` must be between 0 and 1")
self.initial_lr = max_lr
self.end_percentage = end_percentage
self.scale = float(scale_percentage) if scale_percentage is not None else float(end_percentage)
self.max_momentum = maximum_momentum
self.min_momentum = minimum_momentum
self.verbose = verbose
if self.max_momentum is not None and self.min_momentum is not None:
self._update_momentum = True
else:
self._update_momentum = False
self.clr_iterations = 0.
self.history = {}
self.epochs = None
self.batch_size = None
self.samples = None
self.steps = None
self.num_iterations = None
self.mid_cycle_id = None
def _reset(self):
"""
Reset the callback.
"""
self.clr_iterations = 0.
self.history = {}
def compute_lr(self):
"""
Compute the learning rate based on which phase of the cycle it is in.
- If in the first half of training, the learning rate gradually increases.
- If in the second half of training, the learning rate gradually decreases.
- If in the final `end_percentage` portion of training, the learning rate
is quickly reduced to near 100th of the original min learning rate.
# Returns:
the new learning rate
"""
if self.clr_iterations > 2 * self.mid_cycle_id:
current_percentage = (self.clr_iterations - 2 * self.mid_cycle_id)
current_percentage /= float((self.num_iterations - 2 * self.mid_cycle_id))
new_lr = self.initial_lr * (1. + (current_percentage *
(1. - 100.) / 100.)) * self.scale
elif self.clr_iterations > self.mid_cycle_id:
current_percentage = 1. - (
self.clr_iterations - self.mid_cycle_id) / self.mid_cycle_id
new_lr = self.initial_lr * (1. + current_percentage *
(self.scale * 100 - 1.)) * self.scale
else:
current_percentage = self.clr_iterations / self.mid_cycle_id
new_lr = self.initial_lr * (1. + current_percentage *
(self.scale * 100 - 1.)) * self.scale
if self.clr_iterations == self.num_iterations:
self.clr_iterations = 0
return new_lr
def compute_momentum(self):
"""
Compute the momentum based on which phase of the cycle it is in.
- If in the first half of training, the momentum gradually decreases.
- If in the second half of training, the momentum gradually increases.
- If in the final `end_percentage` portion of training, the momentum value
is kept constant at the maximum initial value.
# Returns:
the new momentum value
"""
if self.clr_iterations > 2 * self.mid_cycle_id:
new_momentum = self.max_momentum
elif self.clr_iterations > self.mid_cycle_id:
current_percentage = 1. - ((self.clr_iterations - self.mid_cycle_id) / float(
self.mid_cycle_id))
new_momentum = self.max_momentum - current_percentage * (
self.max_momentum - self.min_momentum)
else:
current_percentage = self.clr_iterations / float(self.mid_cycle_id)
new_momentum = self.max_momentum - current_percentage * (
self.max_momentum - self.min_momentum)
return new_momentum
def on_train_begin(self, logs={}):
logs = logs or {}
# self.epochs = self.params['epochs']
# self.batch_size = self.params['batch_size']
# self.samples = self.params['samples']
# self.steps = self.params['steps']
self.epochs = EPOCHS
self.batch_size = BATCH_SIZE
self.samples = len(X_train)
self.steps = len(X_train)//BATCH_SIZE
if self.steps is not None:
self.num_iterations = self.epochs * self.steps
else:
if (self.samples % self.batch_size) == 0:
remainder = 0
else:
remainder = 1
self.num_iterations = (self.epochs + remainder) * self.samples // self.batch_size
self.mid_cycle_id = int(self.num_iterations * ((1. - self.end_percentage)) / float(2))
self._reset()
K.set_value(self.model.optimizer.lr, self.compute_lr())
if self._update_momentum:
if not hasattr(self.model.optimizer, 'momentum'):
raise ValueError("Momentum can be updated only on SGD optimizer !")
new_momentum = self.compute_momentum()
K.set_value(self.model.optimizer.momentum, new_momentum)
def on_batch_end(self, epoch, logs=None):
logs = logs or {}
self.clr_iterations += 1
new_lr = self.compute_lr()
self.history.setdefault('lr', []).append(
K.get_value(self.model.optimizer.lr))
K.set_value(self.model.optimizer.lr, new_lr)
if self._update_momentum:
if not hasattr(self.model.optimizer, 'momentum'):
raise ValueError("Momentum can be updated only on SGD optimizer !")
new_momentum = self.compute_momentum()
self.history.setdefault('momentum', []).append(
K.get_value(self.model.optimizer.momentum))
K.set_value(self.model.optimizer.momentum, new_momentum)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
def on_epoch_end(self, epoch, logs=None):
if self.verbose:
if self._update_momentum:
print(" - lr: %0.5f - momentum: %0.2f " %
(self.history['lr'][-1], self.history['momentum'][-1]))
else:
print(" - lr: %0.5f " % (self.history['lr'][-1]))
preprocessing = sm.get_preprocessing(BACKBONE)
augmentation = albu.Compose([albu.HorizontalFlip(p=0.5),
albu.VerticalFlip(p=0.5),
albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0,
shift_limit=0.1, border_mode=0, p=0.5)
])
###Output
_____no_output_____
###Markdown
Data generator
###Code
train_generator = DataGenerator(
directory=train_images_path,
dataframe=X_train,
target_df=train,
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
n_channels=CHANNELS,
n_classes=N_CLASSES,
preprocessing=preprocessing,
augmentation=augmentation,
seed=seed)
valid_generator = DataGenerator(
directory=train_images_path,
dataframe=X_val,
target_df=train,
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
n_channels=CHANNELS,
n_classes=N_CLASSES,
preprocessing=preprocessing,
seed=seed)
###Output
_____no_output_____
###Markdown
Learning rate finder
###Code
from keras.utils import multi_gpu_model
model_s = sm.Unet(backbone_name=BACKBONE,
encoder_weights='imagenet',
classes=N_CLASSES,
activation='sigmoid',
input_shape=(None, None, CHANNELS))
lr_finder = LRFinder(num_samples=len(X_train), batch_size=BATCH_SIZE, minimum_lr=1e-5, maximum_lr=10, verbose=0)
optimizer = optimizers.SGD(lr=LEARNING_RATE, momentum=0.9, nesterov=True)
model = multi_gpu_model(model_s, gpus=N_GPUS)
model.compile(optimizer=optimizer, loss=sm.losses.bce_dice_loss)
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
epochs=1,
callbacks=[lr_finder])
plt.rcParams.update({'font.size': 16})
plt.figure(figsize=(30, 10))
plt.axvline(x=np.log10(LEARNING_RATE), color='red')
lr_finder.plot_schedule(clip_beginning=15)
###Output
Epoch 1/1
460/460 [==============================] - 1264s 3s/step - loss: 2.5213
###Markdown
Fold 1
###Code
model_s = sm.Unet(backbone_name=BACKBONE,
encoder_weights='imagenet',
classes=N_CLASSES,
activation='sigmoid',
input_shape=(HEIGHT, WIDTH, CHANNELS))
checkpoint = ModelCheckpoint(model_0_path, monitor='val_loss', mode='min', save_best_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
oneCycleLR = OneCycleLR(max_lr=LEARNING_RATE, maximum_momentum=0.9, minimum_momentum=0.9)
metric_list = [dice_coef, sm.metrics.iou_score, sm.metrics.f1_score]
callback_list = [checkpoint, es, oneCycleLR]
optimizer = optimizers.SGD(lr=LEARNING_RATE, momentum=0.9, nesterov=True)
model = multi_gpu_model(model_s, gpus=N_GPUS)
model.compile(optimizer=optimizer, loss=sm.losses.bce_dice_loss, metrics=metric_list)
history1 = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
callbacks=callback_list,
epochs=EPOCHS,
verbose=2).history
###Output
Epoch 1/15
###Markdown
Model loss graph
###Code
plot_metrics(history1, metric_list=['loss', 'dice_coef', 'iou_score', 'f1-score'])
###Output
_____no_output_____
###Markdown
Fold 2
###Code
X_train = kfold_set[kfold_set['fold_1'] == 'train']
X_val = kfold_set[kfold_set['fold_1'] == 'validation']
model_s = sm.Unet(backbone_name=BACKBONE,
encoder_weights='imagenet',
classes=N_CLASSES,
activation='sigmoid',
input_shape=(HEIGHT, WIDTH, CHANNELS))
checkpoint = ModelCheckpoint(model_1_path, monitor='val_loss', mode='min', save_best_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
oneCycleLR = OneCycleLR(max_lr=LEARNING_RATE, maximum_momentum=0.9, minimum_momentum=0.9)
metric_list = [dice_coef, sm.metrics.iou_score, sm.metrics.f1_score]
callback_list = [checkpoint, es, oneCycleLR]
optimizer = optimizers.SGD(lr=LEARNING_RATE, momentum=0.9, nesterov=True)
model = multi_gpu_model(model_s, gpus=N_GPUS)
model.compile(optimizer=optimizer, loss=sm.losses.bce_dice_loss, metrics=metric_list)
history2 = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
callbacks=callback_list,
epochs=EPOCHS,
verbose=2).history
plot_metrics(history2, metric_list=['loss', 'dice_coef', 'iou_score', 'f1-score'])
###Output
WARNING:tensorflow:From C:\Users\virtus\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\framework\function.py:987: calling Graph.create_op (from tensorflow.python.framework.ops) with compute_shapes is deprecated and will be removed in a future version.
Instructions for updating:
Shapes are always computed; don't use the compute_shapes as it has no effect.
Epoch 1/15
- 1710s - loss: 1.1307 - dice_coef: 0.2758 - iou_score: 0.1606 - f1-score: 0.2717 - val_loss: 1.0935 - val_dice_coef: 0.4129 - val_iou_score: 0.2634 - val_f1-score: 0.4041
- lr: 0.00998 - momentum: 0.90
Epoch 2/15
- 1616s - loss: 0.8970 - dice_coef: 0.4320 - iou_score: 0.2803 - f1-score: 0.4305 - val_loss: 0.8378 - val_dice_coef: 0.5188 - val_iou_score: 0.3500 - val_f1-score: 0.5119
- lr: 0.01799 - momentum: 0.90
Epoch 3/15
- 1627s - loss: 0.8180 - dice_coef: 0.4929 - iou_score: 0.3292 - f1-score: 0.4897 - val_loss: 0.8100 - val_dice_coef: 0.5188 - val_iou_score: 0.3502 - val_f1-score: 0.5129
- lr: 0.01391 - momentum: 0.90
Epoch 4/15
- 1627s - loss: 0.7865 - dice_coef: 0.5144 - iou_score: 0.3475 - f1-score: 0.5104 - val_loss: 0.7634 - val_dice_coef: 0.5453 - val_iou_score: 0.3751 - val_f1-score: 0.5399
- lr: 0.00591 - momentum: 0.90
Epoch 5/15
- 1621s - loss: 0.7671 - dice_coef: 0.5253 - iou_score: 0.3573 - f1-score: 0.5212 - val_loss: 0.7601 - val_dice_coef: 0.5486 - val_iou_score: 0.3773 - val_f1-score: 0.5421
- lr: 0.00206 - momentum: 0.90
Epoch 6/15
- 1620s - loss: 0.7633 - dice_coef: 0.5278 - iou_score: 0.3600 - f1-score: 0.5241 - val_loss: 0.7643 - val_dice_coef: 0.5540 - val_iou_score: 0.3804 - val_f1-score: 0.5452
- lr: 0.01007 - momentum: 0.90
Epoch 7/15
- 1618s - loss: 0.7597 - dice_coef: 0.5330 - iou_score: 0.3639 - f1-score: 0.5283 - val_loss: 0.7723 - val_dice_coef: 0.5458 - val_iou_score: 0.3689 - val_f1-score: 0.5326
- lr: 0.01807 - momentum: 0.90
Epoch 8/15
- 1614s - loss: 0.7437 - dice_coef: 0.5424 - iou_score: 0.3730 - f1-score: 0.5377 - val_loss: 0.7812 - val_dice_coef: 0.5380 - val_iou_score: 0.3692 - val_f1-score: 0.5344
- lr: 0.01383 - momentum: 0.90
Epoch 9/15
- 1613s - loss: 0.7191 - dice_coef: 0.5576 - iou_score: 0.3878 - f1-score: 0.5533 - val_loss: 0.7512 - val_dice_coef: 0.5577 - val_iou_score: 0.3863 - val_f1-score: 0.5512
- lr: 0.00582 - momentum: 0.90
Epoch 10/15
- 1603s - loss: 0.7025 - dice_coef: 0.5675 - iou_score: 0.3969 - f1-score: 0.5627 - val_loss: 0.7517 - val_dice_coef: 0.5625 - val_iou_score: 0.3900 - val_f1-score: 0.5555
- lr: 0.00215 - momentum: 0.90
Epoch 11/15
- 1607s - loss: 0.6972 - dice_coef: 0.5721 - iou_score: 0.4015 - f1-score: 0.5673 - val_loss: 0.7559 - val_dice_coef: 0.5603 - val_iou_score: 0.3883 - val_f1-score: 0.5534
- lr: 0.01016 - momentum: 0.90
Epoch 12/15
- 1605s - loss: 0.6991 - dice_coef: 0.5719 - iou_score: 0.4015 - f1-score: 0.5675 - val_loss: 0.7920 - val_dice_coef: 0.5544 - val_iou_score: 0.3808 - val_f1-score: 0.5441
- lr: 0.01816 - momentum: 0.90
Epoch 13/15
- 1606s - loss: 0.6926 - dice_coef: 0.5769 - iou_score: 0.4054 - f1-score: 0.5714 - val_loss: 0.7873 - val_dice_coef: 0.5425 - val_iou_score: 0.3704 - val_f1-score: 0.5354
- lr: 0.01374 - momentum: 0.90
Epoch 14/15
- 1610s - loss: 0.6643 - dice_coef: 0.5952 - iou_score: 0.4240 - f1-score: 0.5898 - val_loss: 0.7671 - val_dice_coef: 0.5558 - val_iou_score: 0.3865 - val_f1-score: 0.5520
- lr: 0.00574 - momentum: 0.90
Epoch 15/15
- 1608s - loss: 0.6418 - dice_coef: 0.6084 - iou_score: 0.4372 - f1-score: 0.6031 - val_loss: 0.7678 - val_dice_coef: 0.5611 - val_iou_score: 0.3878 - val_f1-score: 0.5529
- lr: 0.00224 - momentum: 0.90
###Markdown
Fold 3
###Code
X_train = kfold_set[kfold_set['fold_2'] == 'train']
X_val = kfold_set[kfold_set['fold_2'] == 'validation']
model_s = sm.Unet(backbone_name=BACKBONE,
encoder_weights='imagenet',
classes=N_CLASSES,
activation='sigmoid',
input_shape=(HEIGHT, WIDTH, CHANNELS))
checkpoint = ModelCheckpoint(model_2_path, monitor='val_loss', mode='min', save_best_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
oneCycleLR = OneCycleLR(max_lr=LEARNING_RATE, maximum_momentum=0.9, minimum_momentum=0.9)
metric_list = [dice_coef, sm.metrics.iou_score, sm.metrics.f1_score]
callback_list = [checkpoint, es, oneCycleLR]
optimizer = optimizers.SGD(lr=LEARNING_RATE, momentum=0.9, nesterov=True)
model = multi_gpu_model(model_s, gpus=N_GPUS)
model.compile(optimizer=optimizer, loss=sm.losses.bce_dice_loss, metrics=metric_list)
history3 = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
callbacks=callback_list,
epochs=EPOCHS,
verbose=2).history
plot_metrics(history3, metric_list=['loss', 'dice_coef', 'iou_score', 'f1-score'])
###Output
WARNING:tensorflow:From C:\Users\virtus\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\framework\function.py:987: calling Graph.create_op (from tensorflow.python.framework.ops) with compute_shapes is deprecated and will be removed in a future version.
Instructions for updating:
Shapes are always computed; don't use the compute_shapes as it has no effect.
Epoch 1/15
- 1694s - loss: 1.1307 - dice_coef: 0.2758 - iou_score: 0.1606 - f1-score: 0.2717 - val_loss: 1.0893 - val_dice_coef: 0.4128 - val_iou_score: 0.2634 - val_f1-score: 0.4041
- lr: 0.00998 - momentum: 0.90
Epoch 2/15
- 1605s - loss: 0.8970 - dice_coef: 0.4320 - iou_score: 0.2802 - f1-score: 0.4305 - val_loss: 0.8387 - val_dice_coef: 0.5181 - val_iou_score: 0.3498 - val_f1-score: 0.5119
- lr: 0.01799 - momentum: 0.90
Epoch 3/15
- 1606s - loss: 0.8180 - dice_coef: 0.4929 - iou_score: 0.3292 - f1-score: 0.4897 - val_loss: 0.8090 - val_dice_coef: 0.5192 - val_iou_score: 0.3503 - val_f1-score: 0.5130
- lr: 0.01391 - momentum: 0.90
Epoch 4/15
- 1601s - loss: 0.7864 - dice_coef: 0.5144 - iou_score: 0.3475 - f1-score: 0.5104 - val_loss: 0.7642 - val_dice_coef: 0.5450 - val_iou_score: 0.3748 - val_f1-score: 0.5397
- lr: 0.00591 - momentum: 0.90
Epoch 5/15
- 1605s - loss: 0.7670 - dice_coef: 0.5254 - iou_score: 0.3573 - f1-score: 0.5213 - val_loss: 0.7608 - val_dice_coef: 0.5484 - val_iou_score: 0.3777 - val_f1-score: 0.5427
- lr: 0.00206 - momentum: 0.90
Epoch 6/15
- 1601s - loss: 0.7631 - dice_coef: 0.5279 - iou_score: 0.3601 - f1-score: 0.5242 - val_loss: 0.7637 - val_dice_coef: 0.5539 - val_iou_score: 0.3808 - val_f1-score: 0.5458
- lr: 0.01007 - momentum: 0.90
Epoch 7/15
- 1604s - loss: 0.7598 - dice_coef: 0.5329 - iou_score: 0.3639 - f1-score: 0.5282 - val_loss: 0.7754 - val_dice_coef: 0.5449 - val_iou_score: 0.3677 - val_f1-score: 0.5312
- lr: 0.01807 - momentum: 0.90
Epoch 8/15
- 1606s - loss: 0.7439 - dice_coef: 0.5423 - iou_score: 0.3729 - f1-score: 0.5377 - val_loss: 0.7884 - val_dice_coef: 0.5357 - val_iou_score: 0.3670 - val_f1-score: 0.5322
- lr: 0.01383 - momentum: 0.90
Epoch 9/15
- 1605s - loss: 0.7190 - dice_coef: 0.5577 - iou_score: 0.3879 - f1-score: 0.5534 - val_loss: 0.7518 - val_dice_coef: 0.5570 - val_iou_score: 0.3857 - val_f1-score: 0.5506
- lr: 0.00582 - momentum: 0.90
Epoch 10/15
- 1606s - loss: 0.7025 - dice_coef: 0.5676 - iou_score: 0.3970 - f1-score: 0.5628 - val_loss: 0.7512 - val_dice_coef: 0.5627 - val_iou_score: 0.3899 - val_f1-score: 0.5553
- lr: 0.00215 - momentum: 0.90
Epoch 11/15
- 1610s - loss: 0.6972 - dice_coef: 0.5721 - iou_score: 0.4015 - f1-score: 0.5673 - val_loss: 0.7555 - val_dice_coef: 0.5599 - val_iou_score: 0.3880 - val_f1-score: 0.5532
- lr: 0.01016 - momentum: 0.90
Epoch 12/15
|
notebooks/4. Sanger - Subsampling.ipynb | ###Markdown
Sanger - Sub-samplingThis Jupyter notebook reproduces the results from the B-ALL sub-sampling analysis (Supplementary Figure S5).
###Code
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import sys
sys.path.append('../src')
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style('white')
###Output
_____no_output_____
###Markdown
First, we read the insertions and CTGs that were identified at the different sequencing depths by IM-Fusion. These results have been pre-computed using an external Snakemake pipeline (see the documentation and the Makefile for more details).
###Code
depths = [15, 30, 50, 70]
# Read insertions.
insertions = pd.concat((pd.read_csv('../data/processed/sanger/star-subsample/'
'{}/insertions.txt'.format(depth), sep='\t')
.query('support >= 10')
.assign(depth=depth)
for depth in depths), axis=0)
# Read CTGs.
ctgs = pd.concat((pd.read_csv('../data/processed/sanger/star-subsample/'
'{}/ctgs.txt'.format(depth), sep='\t')
.assign(depth=depth)
for depth in depths), axis=0)
###Output
_____no_output_____
###Markdown
Next, we plot the number of insertions and DE CTGs across the different sequencing depths to see how depth affects insertion/CTG detection:
###Code
def plot_depth_ctg_count(ctgs, ax=None):
if ax is None:
_, ax = plt.subplots()
counts = (ctgs.groupby('depth')['gene_name'].nunique()
.reset_index(name='count'))
sns.barplot(data=counts, x='depth', y='count', ax=ax)
ax.set_xlabel('Sequencing depth')
ax.set_ylabel('Number of CTGs')
ax.set_ylim(0, counts['count'].max())
return ax
def plot_depth_insertion_count(insertions, ax=None, kind='bar'):
if ax is None:
_, ax = plt.subplots()
counts = (insertions.groupby('depth')['id'].nunique()
.reset_index(name='count'))
if kind == 'bar':
sns.barplot(data=counts, x='depth', y='count', ax=ax)
elif kind == 'reg':
sns.regplot(data=counts, x='depth', y='count', ax=ax)
else:
raise ValueError('Unknown value for kind')
ax.set_xlabel('Sequencing depth')
ax.set_ylabel('Number of insertions')
return ax
def plot_depth_overview(insertions, ctgs, axes=None):
if axes is None:
_, axes = plt.subplots(figsize=(9, 4), ncols=2, nrows=1)
plot_depth_ctg_count(ctgs.query('de_pvalue < 0.05'), ax=axes[1])
plot_depth_insertion_count(insertions, ax=axes[0], kind='reg')
axes[0].set_title('Insertions')
axes[1].set_title('DE CTGs')
axes[1].set_ylabel('Number of DE CTGs')
sns.despine()
plt.tight_layout()
return axes
fig, axes = plt.subplots(figsize=(9, 4), ncols=2)
plot_depth_overview(insertions, ctgs, axes=axes);
with sns.plotting_context('paper', font_scale=0.7):
fig, axes = plt.subplots(figsize=(6, 3), ncols=2)
plot_depth_overview(insertions, ctgs, axes=axes);
fig.savefig('../reports/supplemental/figures/fig_s5ab_subsampling.pdf', bbox_inches='tight')
plt.close(fig)
###Output
_____no_output_____
###Markdown
This shows that with increasing depth we identify linearly increasing numbers of insertions, but that the number of DE CTGs does not change strongly. To check if these CTGs are consistent across depths, we also plot a venn diagram of the identified CTGs:
###Code
from nbsupport.venn import venn
ctg_genes = {depth: set(grp.query('de_pvalue <= 0.05')['gene_name'])
for depth, grp in ctgs.groupby('depth')}
fig = venn([ctg_genes[d] for d in depths],
names=['{} million reads'.format(i) for i in depths],
colors=sns.color_palette())
fig.suptitle('DE CTG overlap', fontsize=13, y=0.85)
with sns.plotting_context('paper', font_scale=0.7):
fig = venn([ctg_genes[d] for d in depths],
names=['{} million reads'.format(i) for i in depths],
colors=sns.color_palette(), figsize=(6, 5))
fig.suptitle('DE CTG overlap', fontsize=7, y=0.85)
fig.savefig('../reports/supplemental/figures/fig_s5c_subsampling_venn.pdf', bbox_inches='tight')
plt.close(fig)
ctg_genes
###Output
_____no_output_____
###Markdown
This confirms that one additional CTG is identified with 50/70 million reads and that 5 CTGs are identified across all depths. To determine if we find similar support (in terms of number of samples) across the different depths, we also count the number of samples with insertions in each CTG across the sequencing depths:
###Code
depth_overview = pd.pivot_table(ctgs.query('de_pvalue < 0.05'), index='gene_name',
columns='depth', values=['n_samples'])
depth_overview
###Output
_____no_output_____ |
week12/evolve_prof.ipynb | ###Markdown
(heating-rate-profile)= Assignment 7b: Heating rate profiles This notebook shows how to calculate the net heating rate given a hydrostatic atmosphere with an absorbing gas with constant mixing ratio $r_{gas}$ with height. At the bottom, I ask you to add a function that will use the heating rate to calculate a new temperature profile, and loop that function to capture the evolution of the atmosphere and surface to radiative equilibrium. In {ref}`heating-rate` we showed that the heating rate $Q_r$ (K/s) for a particular height in the atmosphere was defined by: $$\begin{aligned}\rho c_p \Delta z \frac{dT}{dt} &= \Delta E_n\\Q_r = \frac{dT}{dt} &= \frac{1}{\rho c_p} \frac{\Delta E_n}{\Delta z} = \frac{1}{\rho c_p} \frac{dE_n}{dz}\end{aligned}$$ where $E_n$ was the net flux integrated over all wavelengths (positive downwards), and $\Delta E_n$ is the net downward flux $(E_{ntop} - E_{nbot})$ across a layer of thickness $\Delta z$. In this notebook we use the hydrostatic equation from {ref}`hydro` and the flux equation from {ref}`flux_schwartzchild` to find dT/dt as a function of height for an atmosphere containing an absorbing gas with a mixing ratio of $r_{gas}=0.01$ kg/kg and a mass absorption coefficient averaged across all longwave wavelengths of $k=0.01$ $m^2/kg$. Integrate the atmospheric pressure, temperature and density Recall equation {math:numref}`hydro`: $$dp = -\rho g dz$$ For a hydrostatic atmosphere, if we assume that dT/dz is constant with height, we can build up the atmosphere one level at a time, starting with known $p$, $\rho$ and $T$ at the surface and using the values of $dT/dz$ and $dp/dz$ to find $T$ and $p$ at the next level. Once we have those, we can use the ideal gas law to find the density $\rho$ and move up. This is done in the cell below.
###Code
def hydrostat(Temp,height,p_surf):
"""
build a hydrostatic atmosphere by integrating the hydrostatic equation from the surface,
given a temperature vs. height profile
Parameters
----------
Temp: ndarray
vector of air temps (K)
p_surf: float
surface pressure in Pa
height: ndarray
vector of heights (m)
Returns
-------
press, rho: tuple of ndarrays the same shape as height and Temp
where the surface is level 0, and each index i larger than 0
is located at the height corresponding to the top of a particular layer,
so that values at the top of the atmosphere are given by index
numlevels - 1
press (Pa), rho (kg/m^3) for each layer
"""
Rd=287. #J/kg/K -- gas constant for dry air
g=9.8 #m/s^2
press=np.empty_like(Temp)
rho=np.empty_like(Temp)
#
# level 0 sits directly above the surface, so start
# with pressure, temp of air equal to ground temp, press
# and get density from the equation of state
#
press[0]=p_surf
rho[0]=p_surf/(Rd*Temp[0])
num_levels = len(height)
num_layers=num_levels - 1
delta_z = np.diff(height)
#now march up the atmosphere a level at a time
# finding the values at the top of each layer
for i in range(num_layers):
delP= -rho[i]*g*delta_z[i]
press[i+1]= press[i] + delP
rho[i+1]=press[i+1]/(Rd*Temp[i+1])
return (press,rho)
###Output
_____no_output_____
###Markdown
Next we can find the optical depth If we have the air density $\rho$, the mixing ratio $r_{mix}$ and the absorption coefficient $k$ from Stull Chapter 2, section 2.3.6, we can find the optical depth of the layer: $$\tau = \rho r_{mix} k \Delta z$$ where $\Delta z$ is the layer thickness. That's done in the next cell.
###Code
def find_tau(r_gas,k,rho,height):
"""
Parameters
----------
r_gas: float
gas mixing ratio in kg/kg
k: float
mass absorption coefficient in kg/m^2
rho: ndarray
vector of air densities in kg/m^3 for each layer
height: ndarray
corresponding level heights in m
Returns
-------
tau: ndarray
vertical optical depths of each level, starting from 0 at the surface
"""
tau=np.empty_like(rho)
tau[0]=0
num_levels=len(rho)
num_layers=num_levels - 1
#
# left side minus right side
#
delta_z=height[1:] - height[:-1]
for index in range(num_layers):
delta_tau=r_gas*rho[index]*k*delta_z[index]
tau[index+1]=tau[index] + delta_tau
return tau
###Output
_____no_output_____
###Markdown
Flux with height Note the factor of 1.666 below that multiplies the optical depth in the transmission -- this is the flux diffusivity approximation. The function below solves for the upward and downward fluxes one layer at a time by calculating the transmitted flux arriving from the bottom or the top of each layer, and the emitted flux that the layer is sending to the next layer above or below, using the equation given in {math:numref}`layer_flux`. This is the "two stream approximation" mentioned in {ref}`two-stream-approx`. Assumption: layers are thin enough so that it is safe to assume constant values within each layer.
###Code
def fluxes(tau,Temp,height,E_solar):
"""
given properties at each level return the upward and downward
total flux at each level assuming no downward longwave flux at the top
of the atmosphere, and a surface flux of sigma*T_surf**4.
Parameters
-----------
tau, Temp, height: ndarray of length tot_levels
total optical depth (from surface), temperature (K) and height (m)
at each level
Returns
-------
up_flux, down_flux: ndarrays
upward and downward flux of each level (W/m^2), all positive
"""
sigma=5.67e-8 #W/m^2/K^4
up_flux=np.empty_like(height)
down_flux=np.empty_like(height)
tot_levs = len(height)
#
# start at the top of the atmosphere
# with zero downwelling flux
#
down_flux[-1]=0
#
# go down a level at a time, adding up the fluxes
#
for index in np.arange(1,tot_levs):
upper_lev=tot_levs - index
lower_lev=tot_levs - index -1
del_tau=tau[upper_lev] - tau[lower_lev]
trans=np.exp(-1.666*del_tau)
emiss=1 - trans
layer_flux=sigma*Temp[upper_lev]**4.*emiss
down_flux[lower_lev]=down_flux[upper_lev]*trans + layer_flux
#
# Assume the surface is black, and that its temperature increases
# quickly to emit just enough flux to balance the sun plus atmosphere
#
sfc_flux = down_flux[0] + E_solar
T_surf = (sfc_flux/sigma)**0.25
#
# now start at the surface and go up one level at a time
#
up_flux[0]=sfc_flux
for index in np.arange(1,tot_levs):
upper_lev=index
lower_lev=index - 1
del_tau=tau[upper_lev] - tau[lower_lev]
trans=np.exp(-del_tau)
emiss=1 - trans
layer_flux=sigma*Temp[lower_lev]**4.*emiss
#
# find the flux at the next level
#
up_flux[upper_lev]=trans*up_flux[lower_lev] + layer_flux
return (up_flux,down_flux, T_surf)
def heating_rate(net_down,height,rho):
"""
given the net flux at each level (downward positive) and the
height, and density of the atmosphere at each level, return
the rate of change of temperature in each layer between two levels
Parameters
----------
net_down: ndarray
positive downward net flux (W/m^2) at each level
height: ndarray
vertical location of each level (m)
rho: ndarray
density (kg/m^3) at each level
Returns
-------
dT_dt: ndarray -- length nlevels -1
time rate of change of temperature (K/s)
"""
cpd=1004.
#
# find the flux divergence across the layer
# by differencing the levels. Assume the layer density is constant
# and equal to the average of the densities at the top and bottom levels
#
rho_mid=(rho[1:] + rho[:-1])/2.
dEn_dz= np.diff(net_down)/np.diff(height)
dT_dt=dEn_dz/(rho_mid*cpd)
return dT_dt
###Output
_____no_output_____
###Markdown
Calculating the heating rate In this cell I specify the inputs as a dictionary, then pass the inputs to the main function using [keyword expansion](https://stackoverflow.com/questions/36901/what-does-double-star-asterisk-and-star-asterisk-do-for-parameters) or "dictionary unpacking". This allows me to modify just a few of the inputs if I want (i.e. change the `num_levels` and `delta_z`), and also save the dictionary as a json file to document a particular run.
###Code
inputs=dict(
r_gas=0.01, #kg/kg
k=0.006, #m^2/kg
E_solar = 240, #W/m^2
p_surf=100.e3, #Pa
delta_z=100, #m
delta_t = 1800., #s
num_timesteps=7000,
num_levels=200,
T_surf=300. #K
)
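# the markdown above mentions saving this dictionary as a json file to document
# a particular run -- a minimal sketch of that (the output filename here is just
# an illustrative choice, not something defined elsewhere in this notebook):
import json
with open('heating_rate_inputs.json', 'w') as f:
    json.dump(inputs, f, indent=4)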
def init_profs(inputs,lapse_rate):
"""
make a first guess temperature profile with a constant lapse rate
Parameters
----------
inputs: dict
input values for profile: num_levels, T_surf (K) and delta_z
lapse_rate: float
constant lapse rate in (K/m)
Returns
-------
Temp, height: ndarrays
two ndarrays of length inputs['num_levels'], with spacing inputs['delta_z']
Temp (K) is the vertical temperature profile
"""
Tstart=inputs['T_surf']
Tstop= Tstart + inputs['num_levels']*inputs['delta_z']*lapse_rate
Temp=np.linspace(Tstart,Tstop,inputs['num_levels'])
hbot = 0
htop = inputs['num_levels']*inputs['delta_z']
height = np.linspace(hbot,htop,inputs['num_levels'])
return Temp, height
def main(Temp,height,r_gas=None, k=None,p_surf=None,delta_t=None,delta_z=None,
num_timesteps=None,num_levels=None,E_solar=None,
T_surf=None):
"""
find the heating rate (K/day) for a hydrostatic
atmosphere with a constant decrease of temperature with height
Parameters
----------
Temp, height: ndarrays with vertical profiles of temperature (K) and height (m)
Keyword arguments: lots of arguments suppled from the inputs dictionary above
"""
#
#
#
press,rho=hydrostat(Temp,height,p_surf)
tau=find_tau(r_gas,k,rho,height)
#breakpoint()
up,down,T_surf=fluxes(tau,Temp,height,E_solar)
net_down = down - up
dT_dt=heating_rate(down - up,height,rho)
df=pd.DataFrame(height,columns=['height'])
df['height_km'] = height*1.e-3
df['up'] = up
df['down'] = down
df['net_down'] = net_down
fig,(axis1,axis2,axis3)=plt.subplots(1,3,figsize=(15,10))
axis1.plot('up','height_km','b-',lw=5,label='upward flux',data=df)
axis1.plot(down,'height_km','g-',lw=5,label='downward flux',data=df)
axis1.set_title('upward and downward fluxes')
axis1.set_xlabel('flux $(W\,m^{-2}$')
axis1.set_ylabel('height (km)')
axis1.legend(numpoints=1,loc='best')
axis1.grid(True)
axis2.plot('net_down','height_km','b-',lw=5,data=df)
axis2.set_title('net downward flux')
axis2.set_xlabel('net downward flux $(W\,m^{-2})$')
axis2.set_ylabel('height (km)')
axis2.grid(True)
dT_dt=dT_dt*3600.*24.
mid_height=(height[1:] + height[:-1])/2.
axis3.plot(dT_dt,mid_height*0.001,'b-',lw=5)
axis3.set_title('heating rate')
axis3.set_xlabel('heating rate in K/day')
axis3.set_ylabel('height (km)')
axis3.grid(True)
lapse_rate = -7.e-3
Tinit,height = init_profs(inputs,lapse_rate)
main(Tinit,height,**inputs)
###Output
_____no_output_____
###Markdown
Answer
###Code
def evolve(Temp,height,r_gas=None, k=None,p_surf=None,delta_t=None,delta_z=None,
num_timesteps=None,num_levels=None,E_solar=None,
T_surf=None):
"""
time-step the temperature profile using the radiative heating rate, starting
from an atmosphere with a constant decrease of temperature with height
"""
sigma=5.67e-8
dT_dz = np.ones([num_levels])*(-7.e-3)
#
# 2-D array to store the T_surf and time in seconds for
# each timestep
#
nvars=2
keep_sfc = np.empty([nvars,num_timesteps])
#
# 3-d array to store
# Temp,tau,up,down,and dT_dt every timestep
# Note that you'll need to append an extra point to the top
# of dT_dt to make it the same length as the other variables
# just use dT_dt[-1]
#
nvars=5
keep_vals=np.empty([nvars,num_levels,num_timesteps])
press,rho=hydrostat(Temp,height,p_surf)
for time_index in range(num_timesteps):
#
tau=find_tau(r_gas,k,rho,height)
up,down,T_surf=fluxes(tau,Temp,height,E_solar)
#
# save the surface flux and time
#
keep_sfc[0,time_index] =T_surf
keep_sfc[1,time_index]=time_index*delta_t
net_down = down - up
#
# find the heating rate and advance one timestep
#
dT_dt=heating_rate(net_down,height,rho)
dT_dt_p1 = np.append(dT_dt,dT_dt[-1])
Temp = Temp + dT_dt_p1*delta_t
#
# save all the profiles for this timestep
#
for i,the_vec in enumerate([Temp,tau,up,down,dT_dt_p1]):
keep_vals[i,:,time_index]=the_vec
#
# build a new atmosphere
#
press,rho=hydrostat(Temp,height,p_surf)
#breakpoint()
return keep_vals,keep_sfc
inputs=dict(
r_gas=0.01, #kg/kg
k=0.02, #m^2/kg
E_solar = 240,
p_surf=100.e3, #Pa
delta_z=100, #m
delta_t = 1800.,
num_timesteps=7000,
num_levels=200,
T_surf=310.
)
lapse_rate = -7.e-3
Tinit,height = init_profs(inputs,lapse_rate)
keep_vals,keep_sfc=evolve(Tinit,height,**inputs)
#
# Take a look at the last timestep
#
frame0 = keep_vals[:,:,-1]
df=pd.DataFrame(frame0.T,columns=['Temp','tau','up','down','dT_dt'])
fig_dir = Path() / 'figures'
fig_dir.mkdir(exist_ok=True, parents=True)
def make_plot(df):
"""
Plot the columns of the dataframe in nice units
"""
fig,(axis1,axis2,axis3,axis4)=plt.subplots(1,4,figsize=(16,10))
axis1.plot('up',height*0.001,'b',data=df,lw=5,label='upward flux')
axis1.plot('down',height*0.001,'g',data=df,lw=5,label='downward flux')
axis1.set_title('upward and downward fluxes')
axis1.set_xlabel('flux $(W\,m^{-2}$')
axis1.set_ylabel('height (km)')
axis1.legend(numpoints=1,loc='best')
axis1.grid(True)
net_down = df['down'] - df['up']
axis2.plot(net_down,height*0.001,'b-',lw=5)
axis2.set_title('net downward flux')
axis2.set_xlabel('net downward flux $(W\,m^{-2})$')
axis2.grid(True)
df['dT_dt_day']=df['dT_dt']*3600.*24.
axis3.plot('dT_dt_day',height*0.001,'b-',data=df,lw=5)
axis3.set_title('heating rate')
axis3.set_xlabel('heating rate in K/day')
axis3.grid(True)
varname='Temp'
axis4.plot(varname,height*0.001,'b-',data=df,lw=5)
axis4.set_title(varname)
axis4.set_xlabel(varname)
axis4.grid(True)
fig.savefig(fig_dir / 'assign7b_fig1.png')
make_plot(df)
air_temp = keep_vals[0,0,:]
df = pd.DataFrame(keep_sfc.T,columns=['sfc_temp','time_seconds'])
df['time_days']=df['time_seconds']/3600./24.
df['air_temp']=air_temp
fig,(ax1,ax2) = plt.subplots(2,1,figsize=(10,10))
varnames=['sfc_temp','air_temp']
for a_name in varnames:
ax1.plot('time_days',a_name,data=df,label=a_name,lw=5);
ax1.grid(True)
ax1.legend()
ax1.set_xlabel("Time (days)")
ax1.set_ylabel("Temperature (K)")
ax1.set_title('surface temp and lowest level temp')
dT = keep_vals[0,1,:] - keep_vals[0,0,:]
dz = np.diff(height)[0]
lapse_rate = dT/dz*1.e3
ax2.plot(df['time_days'],lapse_rate,'b',lw=5)
ax2.set_xlabel('time (days)')
ax2.set_ylabel('lapse rate near surface (K/km)')
ax2.set_title('surface lapse rate')
ax2.grid(True)
fig.savefig(fig_dir / 'Assign_7b_fig2.png')
###Output
_____no_output_____ |
pokedex_scrape.ipynb | ###Markdown
 Building a Pokedex in Python: Scraping the Pokemon Sprites (Step 2 of 6) by [PyImageSearch.com](http://www.pyimagesearch.com) Welcome to **[PyImageSearch Plus](http://pyimg.co/plus)** Jupyter Notebooks!This notebook is associated with the [Building a Pokedex in Python: Scraping the Pokemon Sprites (Step 2 of 6)](https://www.pyimagesearch.com/2014/03/24/building-pokedex-python-scraping-pokemon-sprites-step-2-6/) blog post published on 2014-03-24.Only the code for the blog post is here. Most codeblocks have a 1:1 relationship with what you find in the blog post with two exceptions: (1) Python classes are not separate files as they are typically organized with PyImageSearch projects, and (2) Command Line Argument parsing is replaced with an `args` dictionary that you can manipulate as needed.We recommend that you execute (press ▶️) the code block-by-block, as-is, before adjusting parameters and `args` inputs. Once you've verified that the code is working, you are welcome to hack with it and learn from manipulating inputs, settings, and parameters. For more information on using Jupyter and Colab, please refer to these resources:* [Jupyter Notebook User Interface](https://jupyter-notebook.readthedocs.io/en/stable/notebook.htmlnotebook-user-interface)* [Overview of Google Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)As a reminder, these PyImageSearch Plus Jupyter Notebooks are not for sharing; please refer to the **Copyright** directly below and **Code License Agreement** in the last cell of this notebook. Happy hacking!*Adrian****Copyright:*** *The contents of this Jupyter Notebook, unless otherwise indicated, are Copyright 2020 Adrian Rosebrock, PyimageSearch.com. All rights reserved. Content like this is made possible by the time invested by the authors. If you received this Jupyter Notebook and did not purchase it, please consider making future content possible by joining PyImageSearch Plus at http://pyimg.co/plus/ today.* Download the code zip file
###Code
!wget https://www.pyimagesearch.com/wp-content/uploads/2014/03/pokedex-scrape.zip
!unzip -qq pokedex-scrape.zip
%cd pokedex-scrape
###Output
_____no_output_____
###Markdown
Blog Post Code Import Packages
###Code
# import the necessary packages
from imutils.paths import list_images
from matplotlib import pyplot as plt
from bs4 import BeautifulSoup
import argparse
import requests
import random
import cv2
###Output
_____no_output_____
###Markdown
Function to display images in Jupyter Notebooks and Google Colab
###Code
def plt_imshow(title, image):
# convert the image frame BGR to RGB color space and display it
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.title(title)
plt.grid(False)
plt.show()
###Output
_____no_output_____
###Markdown
Scraping and Downloading
###Code
# construct the argument parser and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-p", "--pokemon-list", required = True,
# help = "Path to where the raw Pokemon HTML file resides")
#ap.add_argument("-s", "--sprites", required = True,
# help = "Path where the sprites will be stored")
#args = vars(ap.parse_args())
# since we are using Jupyter Notebooks we can replace our argument
# parsing code with *hard coded* arguments and values
args = {
"pokemon_list": "pokemon_list.html",
"sprites": "sprites"
}
# construct the soup and initialize the list of pokemon
# names
soup = BeautifulSoup(open(args["pokemon_list"]).read())
names = []
# loop over all link elements
for link in soup.findAll("a"):
# update the list of pokemon names
names.append(link.text)
# loop over the pokemon names
for name in names:
# initialize the parsed name as just the lowercase
# version of the pokemon name
parsedName = name.lower()
# if the name contains an apostrophe (such as in
# Farfetch'd, just simply remove it)
parsedName = parsedName.replace("'", "")
# if the name contains a period followed by a space
# (as is the case with Mr. Mime), then replace it
# with a dash
parsedName = parsedName.replace(". ", "-")
# handle the case for Nidoran (female)
if name.find(u'\u2640') != -1:
parsedName = "nidoran-f"
# and handle the case for Nidoran (male)
elif name.find(u'\u2642') != -1:
parsedName = "nidoran-m"
# construct the URL to download the sprite
print("[x] downloading {}".format(name))
url = "http://img.pokemondb.net/sprites/red-blue/normal/%s.png" % (parsedName)
r = requests.get(url)
# if the status code is not 200, ignore the sprite
if r.status_code != 200:
print("[x] error downloading {}".format(name))
continue
# write the sprite to file
f = open("{}/{}.png".format(args["sprites"], name.lower()), "wb")
f.write(r.content)
f.close()
###Output
_____no_output_____
###Markdown
Display Pokemon Sprites
###Code
# list path to all the sprite images and randomly select
# ten image paths
spritePaths = list(list_images(args["sprites"]))
spritePaths = random.choices(spritePaths, k=10)
# loop over all sprite image paths and display the sprite
# images
for spritePath in spritePaths:
image = cv2.imread(spritePath)
plt_imshow("output", image)
###Output
_____no_output_____ |
python_utils/UD6 vs UD7 performance comparison.ipynb | ###Markdown
UD6 vs UD7 phase2 performance comparison The test protocol consists of solving the 100 sample cubes and then as many cubes as possible in 10 seconds. Two warmup runs were run before the final sampling run. About 350 to 450 samples were collected in total.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from scipy import stats
sns.set(rc={'figure.figsize': (14, 8)})
sns.set_theme(style="ticks", palette="pastel")
df_ud6 = pd.read_csv("data/UD6_vs_UD7/ud6_benchmarks.csv")
df_ud6.describe()
df_ud7 = pd.read_csv("data/UD6_vs_UD7/ud7_benchmarks.csv")
df_ud7.describe()
data = [df_ud6["phase2_solve_time"], df_ud7["phase2_solve_time"]]
headers = ["ud6_phase2_solve_time", "ud7_phase2_solve_time"]
df = pd.concat(data, axis=1, keys=headers)
df.describe()
ax = sns.boxplot(data=df, showfliers=False)
ax.set(
title="Solution Length, per phase",
xlabel='Solution Length',
ylabel='Length'
)
stats.mannwhitneyu(df_ud6["phase2_solve_time"], df_ud7["phase2_solve_time"])
###Output
_____no_output_____ |
docs/source/Tutorials/archive/Tutorial 1.ipynb | ###Markdown
This notebook demonstrates a basic workflow of using the packerlabimaging package. The fundamental types of imaging trials accepted for this workflow are: - 2photon imaging - All Optical Experiment (2photon imaging + optogenetic stimulation) - Suite2p processing results output. This tutorial is based on an existing 2photon experiment that includes various trials of 2photon imaging and All optical experiments:
###Code
display.Image("/home/pshah/Documents/code/packerlabimaging/files/packerlabimaging-tutorial-exp-outline.png")
###Output
_____no_output_____
###Markdown
![example experiment setup for package pipeline](packerlabimaging/files/packerlabimaging-tutorial-exp-outline.png "title") INITIALIZING ALLOPTICAL + TWOPHOTON IMAGING EXPERIMENT OBJECT FROM SCRATCH
###Code
# experiment dictionary
initialization_dict = {
'dataPath': '/home/pshah/mnt/qnap/Data/2020-12-19',
'analysisSavePath': '/home/pshah/Documents/code/packerlabimaging/tests/',
'microscope': "Bruker",
"expID": 'RL109',
'date': '2020-12-19',
'comments': 'testing out analysis workflow',
'trialsInformation': {},
'useSuite2p': True,
's2pResultsPath': "/home/pshah/mnt/qnap/Analysis/2020-12-19/suite2p/alloptical-2p-1x-alltrials/plane0"
}
# add information about each trial in experiment to trialsInformation field of the initialization_dict
trials_list_spont = ['t-005', 't-006']
for idx, trial in enumerate(trials_list_spont):
data_path_base = '/home/pshah/mnt/qnap/Data/2020-12-19'
animal_prep = initialization_dict['expID']
date = data_path_base[-10:]
## everything below should autopopulate and run automatically
paqs_loc = '%s/%s_%s_%s.paq' % (data_path_base, date, animal_prep, trial[2:]) # path to the .paq files for the selected trials
tiffs_loc = f'{data_path_base}/{date}_{trial}/{date}_{trial}_Cycle00001_Ch3.tif'
initialization_dict["trialsInformation"][trial] = {'trialType': 'TwoPhotonImagingTrial',
'tiff_path': f"{tiffs_loc}",
's2p_use': True,
'expGroup': "pre 4ap 2p spont imaging",
'paq_path': paqs_loc
}
trials_list_alloptical = ['t-013']
naparms_list = {'t-013': '/home/pshah/mnt/qnap/Data/2020-12-19/photostim/2020-12-19_RL109_ps_014/'}
for idx, trial in enumerate(trials_list_alloptical):
data_path_base = '/home/pshah/mnt/qnap/Data/2020-12-19'
animal_prep = initialization_dict['expID']
date = data_path_base[-10:]
## everything below should autopopulate and run automatically
paqs_loc = '%s/%s_%s_%s.paq' % (data_path_base, date, animal_prep, trial[2:]) # path to the .paq files for the selected trials
tiffs_loc = f'{data_path_base}/{date}_{trial}/{date}_{trial}_Cycle00001_Ch3.tif'
initialization_dict["trialsInformation"][trial] = {'trialType': 'AllOpticalTrial',
'tiff_path': f"{tiffs_loc}",
's2p_use': True,
'expGroup': "pre 4ap 2p all optical",
'paq_path': paqs_loc,
'naparm_path': naparms_list[trial]
}
###Output
_____no_output_____ |
workbook/work6_numpy.ipynb | ###Markdown
Numpy numpy stands for "Numerical Python". It is used to handle matrix and array operations involving numbers. It is similar to a Python list but more advanced, and it is used only for numerical operations. Installation of numpy, scipy and pandas
###Code
!pip install numpy
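# the heading above also lists scipy and pandas; they can be installed the same way if needed
!pip install scipy pandas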
def array_propeties(a):
print("array = \n", a)
print("dimension = ", a.ndim)
print("shape = ",a.shape)
print("data type =",a.dtype)
print("size = ", a.size)
print()
# import the package and give it the alias np
# doing this we can type np when using the package's functions
import numpy as np
# Creating a rowsxcolumn numpy array
a = np.array(
[
[1,2,3],
[4,5,6],
[7,8,9]
]
)
print(a)
# number of dimension
print(a.ndim)
# shape of array ie 3x3
print(a.shape)
# number of elements
print(a.size)
print(type(a))
# creating a 3x3 python list
alist = [
[1,2,3],
[4,5,6],
[7,8,9]
]
print(alist
)
print(type(alist))
import numpy as np
## converting python list to numpy array
blist = [1,2,3,4]
print(blist)
b = np.array(blist)
print(b)
print("array = \n", b)
print("dimension = ", b.ndim)
print("shape = ",b.shape)
print("data type =",b.dtype)
print("size = ", b.size)
print()
# if the list contains strings, everything is converted to a common datatype that can hold all the values (here a unicode string type), since we mix different datatypes
clist = [1, "ali", 2, "python"]
c = np.array(clist)
print("array = \n", c)
print("dimension = ", c.ndim)
print("shape = ",c.shape)
#
print("data type =",c.dtype)
print("size = ", c.size)
print()
import numpy as np
# with a nested python list, len() only gives the length of the leading dimension
alist = [
[1,2,3],
[4,5,6],
[7,8,9]
]
print(alist)
print(len(alist))
a = np.array(alist)
print("array = \n", a)
print("dimension = ", a.ndim)
print("shape = ",a.shape)
print("data type =",a.dtype)
print("size = ", a.size)
print()
## creating np array of different shapes
# one by 3
import numpy as np
onebythree = np.array([[11,12,13]])
array_propeties(onebythree)
fourbyone = np.array([[21],[22],[23],[24]])
array_propeties(fourbyone)
## Specifying datatype
## initialize elements as float
a= np.array(
[
[11,12,13]
], dtype='float'
)
array_propeties(a)
## initialize elements as float32
a= np.array(
[
[11,12,13]
], dtype='float32'
)
array_propeties(a)
## initialize elements as int64
a= np.array(
[
[11,12,13]
], dtype='int64'
)
array_propeties(a)
## if no dtype is specified, numpy infers a datatype that can hold all the elements
a= np.array(
[ # in this case the result is float64, since one of the elements is a float
[11.,12,13]
]
)
array_propeties(a)
a = np.array([
[21],
[22],
[31],
[0]
], dtype='float32')
array_propeties(a)
## unsigned integer in 8 bits, uint8 datatype
# Min value is 0 and max value is 255, because 8 bits can hold at most 11111111 in binary, which is 255.
a = np.array([
[21],
[22],
[31],
[0]
], dtype='uint8')
array_propeties(a)
# int8 is a signed 8 bit
# min value is -128
# max is 127
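# a quick way to check these limits (np.iinfo reports the range of an integer dtype):
print(np.iinfo(np.uint8))
print(np.iinfo(np.int8))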
## np.arange is similar to range()
import numpy as np
# seq_a = array of 1 to 10-1
seq_a = np.arange(1,10)
array_propeties(seq_a)
# like range(start,stop,step), np.arange(start,stop,step)
seq_a = np.arange(1,10,2)
array_propeties(seq_a)
a10 = np.arange(10)
print(a10)
for n in range(10):
print(n,end=' | ')
for n in a10:
print(n,end=' | ')
start =1
stop = 10+1
step =2
ar10b = np.arange(start,stop,step)
# np array datatype can be changed
ar10b = np.uint8(ar10b)
array_propeties(ar10b)
# np.arange() can be used to replace range()
for n in ar10b:
print(n,end=' | ')
###Output
array =
[1 3 5 7 9]
dimension = 1
shape = (5,)
data type = uint8
size = 5
1 | 3 | 5 | 7 | 9 |
###Markdown
Class Activity create an array of integers [0, 5, 10, ..., 100]
###Code
a100 = np.arange(0,100+1,5)
array_propeties(a100)
###Output
array =
[ 0 5 10 15 20 25 30 35 40 45 50 55 60 65 70 75 80 85
90 95 100]
dimension = 1
shape = (21,)
data type = int32
size = 21
###Markdown
np.linspace()
###Code
# create an array of floating point values of a specific size
import numpy as np
seq_a2 = np.linspace(1,10,15)
array_propeties(seq_a2)
import numpy as np
start = 0
stop = 10
size = 10
seq_a2 = np.linspace(start,stop,size)
array_propeties(seq_a2)
# when we stop at 10 but ask for only 10 points (so the last index is 9),
# linspace spreads the points evenly between start and stop,
# so the spacing becomes about 1.11 instead of 1 and the in-between
# values are interpolated -- the last value is still the stop value (10)
start = 0
stop = 10
size = 11
# if the size given is 11 then the values count up by exactly 1,
# because 11 evenly spaced points from 0 to 10 have a spacing of 1,
# so no interpolation of in-between values happens here
seq_a2 = np.linspace(start,stop,size)
array_propeties(seq_a2)
## in summary depending on the start size and stop the value would look like this
# index-->value
# 0-->start
# 1-->1.x
# 2-->2.x
# .
# .
# stop-->stop
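# linspace can also return the spacing it used via retstep=True, which makes the
# interpolation point above easy to check -- a small illustrative sketch:
vals, step = np.linspace(0, 10, 10, retstep=True)
print(step)   # about 1.111, not 1, because 10 points must span 0 to 10
vals, step = np.linspace(0, 10, 11, retstep=True)
print(step)   # exactly 1.0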
# creating array of zeros
import numpy as np
rows=1
column = 10
arr0 = np.zeros((rows,column))
array_propeties(arr0)
# creating array of ones
import numpy as np
rows=1
column = 10
arr1 = np.ones((rows,column))
array_propeties(arr1)
## their datatype is float64 by default; you can change it using dtype
rows=1
column = 10
arr0 = np.zeros((rows,column),dtype='int32')
array_propeties(arr0)
# creating array of ones with a uint8 dtype
import numpy as np
rows=1
column = 10
arr1 = np.ones((rows,column),dtype='uint8')
array_propeties(arr1)
## creating multiple dimension np array
# 3d array of 4 by 5 by 3
zer_4_5_3 = np.zeros((4,5,3))
print(zer_4_5_3)
array_propeties(zer_4_5_3)
# 4d array of 4 by 5 by 3 by 2
zer_4_5_3_2 = np.zeros((4,5,3,2))
print(zer_4_5_3_2)
array_propeties(zer_4_5_3_2)
import numpy as np
a14 = np.arange(14+1)
# array_propeties(a14)
# reshape_a= a14.reshape((4,-1))
# array_propeties(reshape_a)
if (a14.size % 4)==0:
reshape_a = a14.reshape((4,-1))
print(reshape_a.shape)
else:
    reshape_a = a14.reshape((3,-1))
print(reshape_a.shape)
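# (added note) the -1 in reshape lets numpy infer the remaining dimension from
# the total size, e.g. 15 elements reshaped to (3, -1) gives shape (3, 5)
assert a14.reshape((3, -1)).shape == (3, 5)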
###Output
(3, 5)
###Markdown
2d array
###Code
## resize
import numpy as np
a1 = np.arange(1,17)
a2=np.resize(a1,(4,4))
array_propeties(a2)
print(a2)
# get all elements in row 2
print(f"Elements in row 2 : {a2[1,:]}")
## the : after the comma in a2[1,:] selects all columns, so we print the elements of row index 1 (the second row) across all columns
# alternative way to get elements in row 2: a single index also selects that whole row
print("Elements in row 2",a2[1])
# get all values in column 2 (index 1 along axis 1)
print(f"Elements in col 2 = ", a2[:,1])
###Output
Elements in col 2 = [ 2 6 10 14]
###Markdown
3d array
###Code
import numpy as np
a3 = np.arange(1,9).reshape((2,2,2))
array_propeties(a3)
## accessing 3d array elements
print(a3[0,1,1])
#
print(a3[0,1,:]) # : means all, in this case it prints all values along the last axis of a3[0,1]
###Output
4
[3 4]
###Markdown
Changing Array Element
###Code
import numpy as np
a3x3 = np.arange(1,10).reshape((3,-1))
print(a3x3)
# print value 6
print(a3x3[1,2])
# change value 8 to 18
a3x3=np.array([[1,2,3],
[4, 5, 6],
[7, 8, 9]])
print(a3x3)
print(a3x3[2,1])
a3x3[2,1] = 18
print(a3x3)
print(a3x3[2,1])
import numpy as np
a1 = np.arange(9)
array_propeties(a1)
print("Third elements: ",a1[2])
a1[2]= a1[2] ** 2
print('Third element squared: ', a1[2])
# 3^2 = 9
3**2
###Output
_____no_output_____
###Markdown
Class activity: 1. create an array a of shape (2,5) and another array b of shape (3,5). 2. create another array c = a. 3. change row 2 of array c to the sum of row 2 of a and row 2 of b.
###Code
a = np.arange(1,11).reshape((2,5))
array_propeties(a)
b = np.arange(1,15+1).reshape((3,5))
array_propeties(b)
c = np.copy(a)
array_propeties(c)
c[1,:] = a[1,:]+b[1,:]
print(c)
###Output
array =
[[ 1 2 3 4 5]
[ 6 7 8 9 10]]
dimension = 2
shape = (2, 5)
data type = int32
size = 10
array =
[[ 1 2 3 4 5]
[ 6 7 8 9 10]
[11 12 13 14 15]]
dimension = 2
shape = (3, 5)
data type = int32
size = 15
array =
[[ 1 2 3 4 5]
[ 6 7 8 9 10]]
dimension = 2
shape = (2, 5)
data type = int32
size = 10
[[ 1 2 3 4 5]
[12 14 16 18 20]]
###Markdown
ADVANCED NUMPY INDEXING AND OPERATIONS Array slicing
###Code
import numpy as np
# create a random array with values from 0 to 999 (randint's upper bound is exclusive) with shape (5, 3)
a5x3 = np.random.randint(0,1000,(5,3))
array_propeties(a5x3)
# print the element at row index 3, column index 1 (the value 142 in the original run)
print(a5x3[3,1])
# Change 2D ARRAY TO 1D ARRAY
# change a5x3 to 1d
##using flatten
a53 = a5x3.flatten()
print(a53)
array_propeties(a53)
# 2d to 1d using reshape
a15x_= a5x3.reshape((15,-1))
array_propeties(a15x_)
a15x_= a5x3.reshape((1,15))
array_propeties(a15x_)
# 2d array to 1d using ravel
# difference between flatten and ravel:
# flatten makes a copy of the array, so modifying the copy leaves the original untouched,
# while ravel is a view of the original array, so changes made through ravel change the original array too.
a53b = a5x3.ravel()
array_propeties(a53b)
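# (added note) a quick check of the copy-vs-view difference described above
original_value = a5x3[0, 0]
a53b[0] = -1                      # modify through the ravel view
assert a5x3[0, 0] == -1           # the original array changed as well
assert a53[0] == original_value   # the flatten copy is unaffected
a5x3[0, 0] = original_value       # restore the original value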
###Output
_____no_output_____
###Markdown
Class activity: 1. create an array a of shape (5,5,3) of datatype float32. 2. create an array c of 1 dimension from array a.
###Code
a = np.arange(75,dtype='float32').reshape(5,5,3)
# array_propeties(a)
a1d = a.flatten()
array_propeties(a1d)
import numpy as np
a5 = np.arange(1,(5+1),dtype=np.float32)
print(a5)
# this one won't work: 5 elements cannot be reshaped into (5,5,3)
# a5x5x3 = a5.reshape((5,5,3))
# the ndarray.resize method resizes in place and returns None
# a5x5x3 = a5.resize((5,5,3))
# print(a5x5x3)
# this will work: np.resize repeats the values to fill the new array size
a5x5x3 = np.resize(a5,(5,5,3))
print(a5x5x3)
import numpy as np
start = 1
stop = 3
a3 = np.arange(start, stop)
## NEGATIVE INDEX
stop = 9
a1= np.arange(stop)
print("first element ",a1[0])
# print by index counted from last index
print("first element ",a1[-9])
## SLICING
print("even number", a1[0:stop:2])
# a1[start:end:step]
print("odd number", a1[1:stop:2])
# You can use this to get elements 2 to 7 but it's tedious
a1d = np.arange(10)
# get 2 to 7
a2i = a1d[2]
a3i = a1d[3]
a4i = a1d[4]
a5i = a1d[5]
a6i = a1d[6]
a7i = a1d[7]
result = np.array([a2i,a3i,a4i,a5i,a6i,a7i])
print(result)
# instead use slicing
a2to7 = a1d[2:(7+1)]
array_propeties(a2to7)
import numpy as np
a2d = np.arange(10)
a2d = np.resize(a2d,(5,5))
array_propeties(a2d)
# what is the value of row 2
#[5,6,7,8,9]
print(a2d[1,:]) # from row 2, print all values in it
# this is the best way to get values from rows 2 to 4
print(a2d[1:4,:])
# or this: a slice over the first axis also selects whole rows
print(a2d[1:4])
a2d = np.arange(1,26)
a2d = np.resize(a2d,(5,5))
print(a2d)
# # get [7 8 9]
# print(a2d[1,2:])
# # get [12,13,14]
# print(a2d[2,2:])
# # [17,18,19]
# print(a2d[3,2:])
# or get them all at once
# start end step
#
print(a2d[1:4,1:4])
# start:stop:step
# step 2 on rows and 3 of columns
result = a2d[::2,::3]
print(result)
bool_odd = np.array([False,True,False,True,False,True,False,True])
# ndarray has no .index() method; np.nonzero returns the positions of the True values
print(np.nonzero(bool_odd)[0])
import numpy as np
a9 = np.arange(1,10)
i_even=np.array([1,3,5,7])
i_odd =np.array([0,2,4,6,8])
bool_odd = np.array([True,False,True,False,True,False,True,False,True])
bool_even = np.array([False,True,False,True,False,True,False,True,False])
print(a9[i_even])
print(a9[i_odd])
print(a9[bool_even])
print(a9[bool_odd])
###Output
[2 4 6 8]
[1 3 5 7 9]
[2 4 6 8]
[1 3 5 7 9]
###Markdown
Class activity: 1. Write the index of each even number contained in `my_arr` below: 28=[0,1], 22=[0,5], 50=[1,0], 92=[1,1], 66=[1,2], 98=[1,3], 74=[1,6], 44=[2,0], 60=[2,5], 98=[2,6], 96=[3,0], 38=[3,1], 8=[4,3], 66=[4,4], 92=[4,5], 62=[4,6]. 2. Write the index of each number divisible by 5: 75=[0,0], 50=[1,0], 85=[2,2], 35=[2,3], 60=[2,4], 55=[3,2].
###Code
import numpy as np
my_arr = np.random.randint(1,100,(5,7))
array_propeties(my_arr)
# each value is the remainder of the original value divided by 5
ibooldiv5 = my_arr % 5
array_propeties(ibooldiv5)
# boolean array: True where the remainder is 0, False otherwise
ibooldiv5 = ibooldiv5 == 0
array_propeties(ibooldiv5)
# use the new boolean array for indexing
result = my_arr[ibooldiv5]
print(result)
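# (added note) the mask can also be built and applied in a single expression
assert np.array_equal(my_arr[my_arr % 5 == 0], result)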
###Output
[55 40 45 5 15]
|
Lesson13.ipynb | ###Markdown
if statement (if, else)
###Code
x = 5
y = 4
if (x<y):
print ('A')
else:
print ('B')
###Output
_____no_output_____ |
src/FEM_Order.ipynb | ###Markdown
Number of elements and frequency
###Code
N_set = np.concatenate((
np.linspace(1e01, 1e02, 6, endpoint=False, dtype=int),
np.linspace(1e02, 1e03, 9, endpoint=False, dtype=int),
np.linspace(1e03, 1e04, 10, endpoint=True, dtype=int),
))
k_set = np.linspace(0, 200, 9)[1:] * (np.pi / 2)
# N_set = np.linspace(2, 100, 25, endpoint=False, dtype=int)
# k_set = np.linspace(0, 8, 9)[1:] * np.pi / 2
###Output
_____no_output_____
###Markdown
Defining the parameters of the equation
###Code
f = lambda x: 1 # Source function
a, b = -1, +1 # Domain
ga, gb = 0, 1 # Values at the boundaries
###Output
_____no_output_____
###Markdown
Solving the equation
###Code
errors = []
for k in k_set:
errors_k = []
# Exact solution
exact = Exact_HelmholtzImpedance([f(0), 0], k, a, b, ga, gb, source='const')
exact.verify()
u, u_x, u_xx = exact()
# Numerical solutions
for N in N_set:
print('Solving for'
+ f' k = {round(k / (np.pi / 2))}π/2,'.ljust(12)
+ f' N = {N}'.ljust(12)
+ ' in progress...')
solver = FEM_HelmholtzImpedance(f(0), k, a, b, ga, gb, N=N, N_quad=100, source='const')
solver.solve()
r = solver.sol
r_x = solver.der
errors_k.append(solver.H1_error(u, u_x))
errors.append(errors_k)
###Output
Solving for k = 25π/2, N = 10 in progress...
Solving for k = 25π/2, N = 25 in progress...
Solving for k = 25π/2, N = 40 in progress...
Solving for k = 25π/2, N = 55 in progress...
Solving for k = 25π/2, N = 70 in progress...
Solving for k = 25π/2, N = 85 in progress...
Solving for k = 25π/2, N = 100 in progress...
Solving for k = 25π/2, N = 200 in progress...
Solving for k = 25π/2, N = 300 in progress...
Solving for k = 25π/2, N = 400 in progress...
Solving for k = 25π/2, N = 500 in progress...
Solving for k = 25π/2, N = 600 in progress...
Solving for k = 25π/2, N = 700 in progress...
Solving for k = 25π/2, N = 800 in progress...
Solving for k = 25π/2, N = 900 in progress...
Solving for k = 25π/2, N = 1000 in progress...
Solving for k = 25π/2, N = 2000 in progress...
Solving for k = 25π/2, N = 3000 in progress...
Solving for k = 25π/2, N = 4000 in progress...
Solving for k = 25π/2, N = 5000 in progress...
Solving for k = 25π/2, N = 6000 in progress...
Solving for k = 25π/2, N = 7000 in progress...
Solving for k = 25π/2, N = 8000 in progress...
Solving for k = 25π/2, N = 9000 in progress...
Solving for k = 25π/2, N = 10000 in progress...
Solving for k = 50π/2, N = 10 in progress...
Solving for k = 50π/2, N = 25 in progress...
Solving for k = 50π/2, N = 40 in progress...
Solving for k = 50π/2, N = 55 in progress...
Solving for k = 50π/2, N = 70 in progress...
Solving for k = 50π/2, N = 85 in progress...
Solving for k = 50π/2, N = 100 in progress...
Solving for k = 50π/2, N = 200 in progress...
Solving for k = 50π/2, N = 300 in progress...
Solving for k = 50π/2, N = 400 in progress...
Solving for k = 50π/2, N = 500 in progress...
Solving for k = 50π/2, N = 600 in progress...
Solving for k = 50π/2, N = 700 in progress...
Solving for k = 50π/2, N = 800 in progress...
Solving for k = 50π/2, N = 900 in progress...
Solving for k = 50π/2, N = 1000 in progress...
Solving for k = 50π/2, N = 2000 in progress...
Solving for k = 50π/2, N = 3000 in progress...
Solving for k = 50π/2, N = 4000 in progress...
Solving for k = 50π/2, N = 5000 in progress...
Solving for k = 50π/2, N = 6000 in progress...
Solving for k = 50π/2, N = 7000 in progress...
Solving for k = 50π/2, N = 8000 in progress...
Solving for k = 50π/2, N = 9000 in progress...
Solving for k = 50π/2, N = 10000 in progress...
Solving for k = 75π/2, N = 10 in progress...
Solving for k = 75π/2, N = 25 in progress...
Solving for k = 75π/2, N = 40 in progress...
Solving for k = 75π/2, N = 55 in progress...
Solving for k = 75π/2, N = 70 in progress...
Solving for k = 75π/2, N = 85 in progress...
Solving for k = 75π/2, N = 100 in progress...
Solving for k = 75π/2, N = 200 in progress...
Solving for k = 75π/2, N = 300 in progress...
Solving for k = 75π/2, N = 400 in progress...
Solving for k = 75π/2, N = 500 in progress...
Solving for k = 75π/2, N = 600 in progress...
Solving for k = 75π/2, N = 700 in progress...
Solving for k = 75π/2, N = 800 in progress...
Solving for k = 75π/2, N = 900 in progress...
Solving for k = 75π/2, N = 1000 in progress...
Solving for k = 75π/2, N = 2000 in progress...
Solving for k = 75π/2, N = 3000 in progress...
Solving for k = 75π/2, N = 4000 in progress...
Solving for k = 75π/2, N = 5000 in progress...
Solving for k = 75π/2, N = 6000 in progress...
Solving for k = 75π/2, N = 7000 in progress...
Solving for k = 75π/2, N = 8000 in progress...
Solving for k = 75π/2, N = 9000 in progress...
Solving for k = 75π/2, N = 10000 in progress...
Solving for k = 100π/2, N = 10 in progress...
Solving for k = 100π/2, N = 25 in progress...
Solving for k = 100π/2, N = 40 in progress...
Solving for k = 100π/2, N = 55 in progress...
Solving for k = 100π/2, N = 70 in progress...
Solving for k = 100π/2, N = 85 in progress...
Solving for k = 100π/2, N = 100 in progress...
Solving for k = 100π/2, N = 200 in progress...
Solving for k = 100π/2, N = 300 in progress...
Solving for k = 100π/2, N = 400 in progress...
Solving for k = 100π/2, N = 500 in progress...
Solving for k = 100π/2, N = 600 in progress...
Solving for k = 100π/2, N = 700 in progress...
Solving for k = 100π/2, N = 800 in progress...
Solving for k = 100π/2, N = 900 in progress...
Solving for k = 100π/2, N = 1000 in progress...
Solving for k = 100π/2, N = 2000 in progress...
Solving for k = 100π/2, N = 3000 in progress...
Solving for k = 100π/2, N = 4000 in progress...
Solving for k = 100π/2, N = 5000 in progress...
Solving for k = 100π/2, N = 6000 in progress...
Solving for k = 100π/2, N = 7000 in progress...
Solving for k = 100π/2, N = 8000 in progress...
Solving for k = 100π/2, N = 9000 in progress...
Solving for k = 100π/2, N = 10000 in progress...
Solving for k = 125π/2, N = 10 in progress...
Solving for k = 125π/2, N = 25 in progress...
Solving for k = 125π/2, N = 40 in progress...
Solving for k = 125π/2, N = 55 in progress...
Solving for k = 125π/2, N = 70 in progress...
Solving for k = 125π/2, N = 85 in progress...
Solving for k = 125π/2, N = 100 in progress...
Solving for k = 125π/2, N = 200 in progress...
Solving for k = 125π/2, N = 300 in progress...
Solving for k = 125π/2, N = 400 in progress...
Solving for k = 125π/2, N = 500 in progress...
Solving for k = 125π/2, N = 600 in progress...
Solving for k = 125π/2, N = 700 in progress...
Solving for k = 125π/2, N = 800 in progress...
Solving for k = 125π/2, N = 900 in progress...
Solving for k = 125π/2, N = 1000 in progress...
Solving for k = 125π/2, N = 2000 in progress...
Solving for k = 125π/2, N = 3000 in progress...
Solving for k = 125π/2, N = 4000 in progress...
Solving for k = 125π/2, N = 5000 in progress...
Solving for k = 125π/2, N = 6000 in progress...
Solving for k = 125π/2, N = 7000 in progress...
Solving for k = 125π/2, N = 8000 in progress...
Solving for k = 125π/2, N = 9000 in progress...
Solving for k = 125π/2, N = 10000 in progress...
Solving for k = 150π/2, N = 10 in progress...
Solving for k = 150π/2, N = 25 in progress...
Solving for k = 150π/2, N = 40 in progress...
Solving for k = 150π/2, N = 55 in progress...
Solving for k = 150π/2, N = 70 in progress...
Solving for k = 150π/2, N = 85 in progress...
Solving for k = 150π/2, N = 100 in progress...
Solving for k = 150π/2, N = 200 in progress...
Solving for k = 150π/2, N = 300 in progress...
Solving for k = 150π/2, N = 400 in progress...
Solving for k = 150π/2, N = 500 in progress...
Solving for k = 150π/2, N = 600 in progress...
Solving for k = 150π/2, N = 700 in progress...
Solving for k = 150π/2, N = 800 in progress...
Solving for k = 150π/2, N = 900 in progress...
Solving for k = 150π/2, N = 1000 in progress...
Solving for k = 150π/2, N = 2000 in progress...
Solving for k = 150π/2, N = 3000 in progress...
Solving for k = 150π/2, N = 4000 in progress...
Solving for k = 150π/2, N = 5000 in progress...
Solving for k = 150π/2, N = 6000 in progress...
Solving for k = 150π/2, N = 7000 in progress...
Solving for k = 150π/2, N = 8000 in progress...
Solving for k = 150π/2, N = 9000 in progress...
Solving for k = 150π/2, N = 10000 in progress...
Solving for k = 175π/2, N = 10 in progress...
Solving for k = 175π/2, N = 25 in progress...
Solving for k = 175π/2, N = 40 in progress...
Solving for k = 175π/2, N = 55 in progress...
Solving for k = 175π/2, N = 70 in progress...
Solving for k = 175π/2, N = 85 in progress...
Solving for k = 175π/2, N = 100 in progress...
Solving for k = 175π/2, N = 200 in progress...
Solving for k = 175π/2, N = 300 in progress...
Solving for k = 175π/2, N = 400 in progress...
Solving for k = 175π/2, N = 500 in progress...
Solving for k = 175π/2, N = 600 in progress...
Solving for k = 175π/2, N = 700 in progress...
Solving for k = 175π/2, N = 800 in progress...
Solving for k = 175π/2, N = 900 in progress...
Solving for k = 175π/2, N = 1000 in progress...
Solving for k = 175π/2, N = 2000 in progress...
Solving for k = 175π/2, N = 3000 in progress...
Solving for k = 175π/2, N = 4000 in progress...
Solving for k = 175π/2, N = 5000 in progress...
Solving for k = 175π/2, N = 6000 in progress...
Solving for k = 175π/2, N = 7000 in progress...
Solving for k = 175π/2, N = 8000 in progress...
Solving for k = 175π/2, N = 9000 in progress...
Solving for k = 175π/2, N = 10000 in progress...
Solving for k = 200π/2, N = 10 in progress...
Solving for k = 200π/2, N = 25 in progress...
Solving for k = 200π/2, N = 40 in progress...
Solving for k = 200π/2, N = 55 in progress...
Solving for k = 200π/2, N = 70 in progress...
Solving for k = 200π/2, N = 85 in progress...
Solving for k = 200π/2, N = 100 in progress...
Solving for k = 200π/2, N = 200 in progress...
Solving for k = 200π/2, N = 300 in progress...
Solving for k = 200π/2, N = 400 in progress...
Solving for k = 200π/2, N = 500 in progress...
Solving for k = 200π/2, N = 600 in progress...
Solving for k = 200π/2, N = 700 in progress...
Solving for k = 200π/2, N = 800 in progress...
Solving for k = 200π/2, N = 900 in progress...
Solving for k = 200π/2, N = 1000 in progress...
Solving for k = 200π/2, N = 2000 in progress...
Solving for k = 200π/2, N = 3000 in progress...
Solving for k = 200π/2, N = 4000 in progress...
Solving for k = 200π/2, N = 5000 in progress...
Solving for k = 200π/2, N = 6000 in progress...
Solving for k = 200π/2, N = 7000 in progress...
Solving for k = 200π/2, N = 8000 in progress...
Solving for k = 200π/2, N = 9000 in progress...
Solving for k = 200π/2, N = 10000 in progress...
###Markdown
Plotting the order of accuracy
###Code
plt.rcParams['figure.figsize'] = [10, 5]
# H1 error
fig, axs = plt.subplots()
fig.tight_layout(pad=3.0)
for idx, k in enumerate(k_set):
axs.plot(N_set, [error[0] for error in errors[idx]], label=f'k = {round(k / (np.pi / 2))}π/2')
axs.set(xscale='log', yscale='log', xlabel='N', ylabel='$||u - u^N||_{L^2} + ||u_x - u^N_x||_{L^2}$')
axs.grid(which='both')
axs.legend()
# L2 norm of u error
fig, axs = plt.subplots()
fig.tight_layout(pad=3.0)
for idx, k in enumerate(k_set):
axs.plot(N_set, [error[1] for error in errors[idx]], label=f'k = {round(k / (np.pi / 2))}π/2')
axs.set(xscale='log', yscale='log', xlabel='N', ylabel='$||u - u^N||_{L^2}$')
axs.grid(which='both')
axs.legend()
# L2 norm of u_x
fig, axs = plt.subplots()
fig.tight_layout(pad=3.0)
for idx, k in enumerate(k_set):
axs.plot(N_set, [error[2] for error in errors[idx]], label=f'k = {round(k / (np.pi / 2))}π/2')
axs.set(xscale='log', yscale='log', xlabel='N', ylabel='$||u_x - u^N_x||_{L^2}$')
axs.grid(which='both')
axs.legend()
###Output
_____no_output_____ |
libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Compilacion_a_C.ipynb | ###Markdown
(COMPC)= 5.3 Compilación a C ```{admonition} Notas para contenedor de docker:Comando de docker para ejecución de la nota de forma local:nota: cambiar `` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker.`docker run --rm -v :/datos --name jupyterlab_optimizacion_2 -p 8888:8888 -p 8787:8787 -d palmoreck/jupyterlab_optimizacion_2:3.0.0`password para jupyterlab: `qwerty`Detener el contenedor de docker:`docker stop jupyterlab_optimizacion_2`Documentación de la imagen de docker `palmoreck/jupyterlab_optimizacion_2:3.0.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/optimizacion_2).``` --- ```{admonition} Al final de esta nota el y la lectora::class: tip* Comprenderá diferencias entre lenguajes de programación que son intérpretes y los que requieren/realizan pasos de compilación.* Comprenderá por qué definir tipo de valores en lenguajes que son intérpretes conducen a tiempos de ejecución menores.* Aprenderá lo que es una compilación *ahead of time* (AOT) y *just in time* (JIT). Se mostrarán ejemplos de lenguajes y paquetes que realizan ambos tipos de compilaciones.``` Se presentan códigos y sus ejecuciones en una máquina `m4.16xlarge` de la nube de [AWS](https://aws.amazon.com/). Se utilizó la AMI:```opt2-aws-educate-openblas-02-05-2021``` de la región `us-east-1` (Virginia) para reproducibilidad de resultados. Tal AMI se construyó a partir de una AMI `ubuntu 20.04 - ami-042e8287309f5df03` con el [script_profiling_and_BLAS.sh](https://github.com/palmoreck/scripts_for_useful_tools_installations/blob/main/AWS/ubuntu_20.04/optimizacion_2/script_profiling_and_BLAS.sh) ````{admonition} ComentarioSi se utiliza la *AMI* `opt2-aws-educate-openblas-04-04-2021` colocar en `User data` el siguiente *script*:```bash!/bin/bashvariables:region=us-east-1 make sure instance is in Virginianame_instance=OpenBLASUSER=ubuntuSystem updateapt-get update -yqTag instanceINSTANCE_ID=$(curl -s http://instance-data/latest/meta-data/instance-id)PUBLIC_IP=$(curl -s http://instance-data/latest/meta-data/public-ipv4)sudo -H -u $USER bash -c "/home/$USER/.local/bin/aws ec2 create-tags --resources $INSTANCE_ID --tag Key=Name,Value=$name_instance-$PUBLIC_IP --region=$region"sudo -H -u $USER bash -c "cd / && /home/$USER/.local/bin/jupyter lab --ip=0.0.0.0 --no-browser --config=/home/$USER/.jupyter/jupyter_notebook_config.py &"``````` La máquina `m4.16xlarge` tiene las siguientes características:
###Code
%%bash
lscpu
%%bash
sudo lshw -C memory
%%bash
uname -ar #r for kernel, a for all
###Output
Linux ip-10-0-0-140 5.4.0-1038-aws #40-Ubuntu SMP Fri Feb 5 23:50:40 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
###Markdown
```{admonition} Observación:class: tipEn la celda anterior se utilizó el comando de *magic* `%%bash`. Algunos comandos de *magic* los podemos utilizar también con `import`. Ver [ipython-magics](https://ipython.readthedocs.io/en/stable/interactive/magics.html)``` Características de los lenguajes de programación Los lenguajes de programación y sus implementaciones tienen características como las siguientes:* Realizar un *parsing* de las instrucciones y ejecutarlas de forma casi inmediata (intérprete). Como ejemplo está el lenguaje: [Beginners' All-purpose Symbolic Instruction Code: BASIC](https://en.wikipedia.org/wiki/BASIC)* Realizar un *parsing* de las instrucciones, traducirlas a una [representación intermedia](https://en.wikipedia.org/wiki/Intermediate_representation) (IR) y ejecutarlas. La traducción a una representación intermedia es un [bytecode](https://en.wikipedia.org/wiki/Bytecode). Como ejemplo se encuentra el lenguaje *Python* en su implementación [CPython](https://github.com/python/cpython).* Compilar [ahead of time](https://en.wikipedia.org/wiki/Ahead-of-time_compilation) (AOT) las instrucciones antes de su ejecución. Como ejemplo se encuentran los lenguajes *C, C++* y *Fortran*.* Realizar un *parsing* de las instrucciones y compilarlas en una forma [just in time compilation](https://en.wikipedia.org/wiki/Just-in-time_compilation) (JIT) *at* [runtime](https://en.wikipedia.org/wiki/Runtime_(program_lifecycle_phase)). Como ejemplos se encuentran los lenguajes *Julia* y *Python* en su implementación con [PyPy](https://doc.pypy.org/en/latest/index.html).La ejecución de instrucciones será más rápida dependiendo del lenguaje, la implementación que se haga del mismo y de sus *features*. ```{admonition} Comentarios* Varios proyectos están en desarrollo para mejorar eficiencia y otros temas como el uso del [global interpreter lock](https://docs.python.org/3.9/glossary.htmlterm-global-interpreter-lock) (GIL) en *Python*. Algunos de ellos son: * *PyPy* * *A better API for extending Python in C*: [hpyproject](https://github.com/hpyproject/hpy) * Ver [global interpreter lock](https://en.wikipedia.org/wiki/Global_interpreter_lock) para una explicación más general.* La implementación *CPython* de *Python* es la estándar, pero hay otras más como *PyPy*. Ver [python-vs-cpython](https://stackoverflow.com/questions/17130975/python-vs-cpython) para una breve explicación de implementaciones de Python. Ver [Alternative R implementations](http://adv-r.had.co.nz/Performance.htmlfaster-r) y [R implementations](https://en.wikipedia.org/wiki/R_(programming_language)Implementations) para implementaciones de *R* diferentes a la estándar.``` Cpython Compilación AOT y JIT ```{margin}Es común utilizar la palabra librería en lugar de paquete en el contexto de compilación.``` Una compilación AOT crea una librería, especializada para nuestras máquinas y se puede utilizar de forma instantánea. *Cython* es un paquete que realiza la compilación de módulos de *Python*. Por ejemplo, las librerías de *NumPy*, *SciPy* o *Scikit-learn* instalados vía *pip* o *conda* utilizan *Cython* para compilar secciones de tales librerías adaptadas a nuestras máquinas. Una compilación JIT no requiere que se realice "trabajo previo" de nuestro lado, la compilación se realiza al tiempo que se utiliza el código, *at runtime*. 
In colloquial terms, in a JIT compilation the execution of the code starts by identifying different sections that can be compiled, which therefore run more slowly than normal at first since the compilation is being performed at execution time. However, in subsequent runs of the **same** code such sections will be faster. In summary, a *warm-up* is required; see for example [how-fast-is-pypy](https://doc.pypy.org/en/latest/faq.htmlhow-fast-is-pypy). AOT compilation gives the best *speedups* but requires more work on our side. JIT compilation gives good *speedups* with little intervention on our part, but it uses more memory and more time to start executing the code; see for example [python_performance-slide-15](https://raw.githubusercontent.com/vstinner/talks/main/2019-EuroPython/python_performance.pdf) about *PyPy issues*. For the frequent execution of small *scripts*, AOT compilation is a better option than JIT compilation; see for example [couldn't the jit dump and reload already compiled machine code](https://doc.pypy.org/en/latest/faq.htmlcouldn-t-the-jit-dump-and-reload-already-compiled-machine-code). Next we present executions in different languages with their standard implementations to approximate the area under the curve of $f(x) = e^{-x^2}$ on the interval $[0, 1]$ with the composite rectangle rule. The execution time is measured using $n = 10^7$ nodes. Python
###Code
%%file Rcf_python.py
import math
import time
def Rcf(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
sum_res = 0
for i in range(n):
x = a+(i+1/2)*h_hat
sum_res += f(x)
return h_hat*sum_res
if __name__ == "__main__":
n = 10**7
f = lambda x: math.exp(-x**2)
a = 0
b = 1
start_time = time.time()
res = Rcf(f,a,b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf tomó", secs, "segundos" )
%%bash
python3 Rcf_python.py
###Output
Rcf tomó 3.477599620819092 segundos
###Markdown
R
###Code
%%file Rcf_R.R
Rcf<-function(f,a,b,n){
'
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
'
h_hat <- (b-a)/n
sum_res <- 0
for(i in 0:(n-1)){
x <- a+(i+1/2)*h_hat
sum_res <- sum_res + f(x)
}
approx <- h_hat*sum_res
}
n <- 10**7
f <- function(x)exp(-x^2)
a <- 0
b <- 1
system.time(Rcf(f,a,b,n))
%%bash
Rscript Rcf_R.R
###Output
user system elapsed
5.607 0.063 5.671
###Markdown
Julia See: [Julia: performance-tips](https://docs.julialang.org/en/v1/manual/performance-tips/)
###Code
%%file Rcf_julia.jl
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
# Arguments
- `f::Float`: function expression of integrand.
- `a::Float`: left point of interval.
- `b::Float`: right point of interval.
- `n::Integer`: number of subintervals.
"""
function Rcf(f, a, b, n)
h_hat = (b-a)/n
sum_res = 0
for i in 0:n-1
x = a+(i+1/2)*h_hat
sum_res += f(x)
end
return h_hat*sum_res
end
function main()
a = 0
b = 1
n =10^7
f(x) = exp(-x^2)
@time Rcf(f, a, b, n)
@time Rcf(f, a, b, n)
end
main()
%%bash
/usr/local/julia-1.6.0/bin/julia Rcf_julia.jl
###Output
0.231283 seconds
0.231448 seconds
###Markdown
(RCFJULIATYPEDVALUES)= `Rcf_julia_typed_values.jl`
###Code
%%file Rcf_julia_typed_values.jl
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
# Arguments
- `f::Float`: function expression of integrand.
- `a::Float`: left point of interval.
- `b::Float`: right point of interval.
- `n::Integer`: number of subintervals.
"""
function Rcf(f, a, b, n)
h_hat = (b-a)/n
sum_res = 0.0
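    # (added note) starting the accumulator from the Float64 literal 0.0 (and
    # passing a and b as floats in main) keeps sum_res type-stable inside the
    # loop, which is likely why this version runs faster than the previous one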
for i in 0:n-1
x = a+(i + 1/2)*h_hat
sum_res += f(x)
end
return h_hat*sum_res
end
function main()
a = 0.0
b = 1.0
n =10^7
f(x) = exp(-x^2)
@time Rcf(f, a, b, n)
@time Rcf(f, a, b, n)
end
main()
%%bash
/usr/local/julia-1.6.0/bin/julia Rcf_julia_typed_values.jl
###Output
0.124739 seconds
0.124905 seconds
###Markdown
C For the time measurements the following links were used: [measuring-time-in-millisecond-precision](https://stackoverflow.com/questions/16764276/measuring-time-in-millisecond-precision) and [find-execution-time-c-program](https://www.techiedelight.com/find-execution-time-c-program/). (RCFC)= `Rcf_c.c`
###Code
%%file Rcf_c.c
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include <sys/time.h>
void Rcf(double ext_izq, double ext_der, int n,\
double *sum_res_p);
double f(double nodo);
int main(int argc, char *argv[]){
double sum_res = 0.0;
double a = 0.0, b = 1.0;
int n = 1e7;
struct timeval start;
struct timeval end;
long seconds;
long long mili;
gettimeofday(&start, NULL);
Rcf(a,b,n,&sum_res);
gettimeofday(&end, NULL);
seconds = (end.tv_sec - start.tv_sec);
mili = 1000*(seconds) + (end.tv_usec - start.tv_usec)/1000;
printf("Tiempo de ejecución: %lld milisegundos", mili);
return 0;
}
void Rcf(double a, double b, int n, double *sum){
double h_hat = (b-a)/n;
double x = 0.0;
int i = 0;
*sum = 0.0;
for(i = 0; i <= n-1; i++){
x = a+(i+1/2.0)*h_hat;
*sum += f(x);
}
*sum = h_hat*(*sum);
}
double f(double nodo){
double valor_f;
valor_f = exp(-pow(nodo,2));
return valor_f;
}
%%bash
gcc -Wall Rcf_c.c -o Rcf_c.out -lm
%%bash
./Rcf_c.out
###Output
Tiempo de ejecución: 478 milisegundos
###Markdown
Why does providing information about the type of the values (or objects) used in a piece of code help it run faster? *Python* is *dynamically typed*, which means that an object of any type, and any *statement* that refers to an object, **can change its type**. This makes it hard for the virtual machine to optimize the execution of the code, since it does not know which type will be used in future operations. For example:
###Code
v = -1.0
print(type(v), abs(v))
v = 1 - 1j
print(type(v), abs(v))
###Output
<class 'complex'> 1.4142135623730951
###Markdown
The `abs` function works differently depending on the type of the object. For an integer or floating point number it returns the absolute value (for $-1.0$ above, $1.0$), and for a complex number it computes a Euclidean norm from the real and imaginary parts of $v$: $\text{abs}(v) = \sqrt{v.real^2 + v.imag^2}$. In practice this implies executing more instructions and therefore more execution time. Before calling `abs` on the variable, *Python* checks its type and decides which method to call (*overhead*). ```{admonition} Comments* In addition, every number in *Python* is *wrapped up* in a high-level *Python* object. For example, for an integer there is the `int` object. Such an object has other functions, for example `__str__` to print it.* It is very common that types do not change within a piece of code, so AOT compilation is a good option for faster execution.* Following the two previous comments, if we only want to compute mathematical operations (as in the square root case above) we do not need the functionality of the high-level object.``` [Cython](https://github.com/cython/cython/) * It is a compiler that translates **annotated** instructions, written in a hybrid language between Python and C, into a compiled module. This module can be imported like a regular Python module using `import`. Typically the compiled module is similar in syntax to the *C* language. ```{margin}The phrase *CPU-bound* code refers to code whose execution involves a larger share of CPU usage than memory or I/O usage.``` * It has been around in the community for a while (roughly since 2007), is widely used, and is one of the preferred tools for *CPU-bound* code. It is a *fork* of [Pyrex](https://www.csse.canterbury.ac.nz/greg.ewing/python/Pyrex/) (2002) that extends its capabilities.
Moreover, if the variables inside the loop do not change their type (for example from `int` to `float`), then the code will gain speed when compiled to machine code. ```{admonition} Observation:class: tipIf your *Python* code calls vectorized operations via *NumPy*, it might not run faster after compiling it, mainly because few intermediate objects will likely be created, which is a *feature* of *NumPy*.``` * We do not expect a *speedup* after compiling calls to external libraries (for example packages that manage databases). It is also unlikely to obtain significant gains in programs with a heavy I/O load.* In general it is unlikely that your compiled code will run faster than "well written" *C* code, and it is also unlikely to run slower. It is quite possible that the *C* code generated from *Python* with *Cython* reaches the speed of code written in *C*, unless the person who programmed in *C* has deep knowledge of ways to make the *C* code fit the architecture of the machine on which the code runs. Example using a `setup.py` file
###Code
import math
import time
from pytest import approx
from scipy.integrate import quad
from IPython.display import HTML, display
###Output
_____no_output_____
###Markdown
For this case we need three files: 1. The code to be compiled, in a file with extension `.pyx` (written in *Python*). ```{admonition} Observation:class: tipThe `.pyx` extension is used in the *Pyrex* language. ``` 2. A `setup.py` file that contains the instructions to call *Cython* and takes care of creating the compiled module. 3. The *Python* code that will import the compiled module. `.pyx` file:
###Code
%%file Rcf_cython.pyx
def Rcf(f,a,b,n): # Rcf: composite rectangle rule for f
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
nodes = [a+(i+1/2)*h_hat for i in range(n)]
sum_res = 0
for node in nodes:
sum_res = sum_res+f(node)
return h_hat*sum_res
###Output
Writing Rcf_cython.pyx
###Markdown
`setup.py` file containing the instructions for the *build*:
###Code
%%file setup.py
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize("Rcf_cython.pyx",
compiler_directives={'language_level' : 3})
)
###Output
Writing setup.py
###Markdown
Compile from the command line:
###Code
%%bash
python3 setup.py build_ext --inplace
###Output
Compiling Rcf_cython.pyx because it changed.
[1/1] Cythonizing Rcf_cython.pyx
running build_ext
building 'Rcf_cython' extension
creating build
creating build/temp.linux-x86_64-3.8
x86_64-linux-gnu-gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/include/python3.8 -c Rcf_cython.c -o build/temp.linux-x86_64-3.8/Rcf_cython.o
x86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fwrapv -O2 -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.8/Rcf_cython.o -o /home/ubuntu/analisis-numerico-computo-cientifico/libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Rcf_cython.cpython-38-x86_64-linux-gnu.so
###Markdown
Import the compiled module and run it:
###Code
f=lambda x: math.exp(-x**2) #using math library
n = 10**7
a = 0
b = 1
import Rcf_cython
start_time = time.time()
res = Rcf_cython.Rcf(f, a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf tomó",secs,"segundos" )
obj, err = quad(f, a, b)
print(res == approx(obj))
###Output
True
###Markdown
The `%cython` *magic* command ```{margin}See [extensions-bundled-with-ipython](https://ipython.readthedocs.io/en/stable/config/extensions/index.html?highlight=cythonextensions-bundled-with-ipython) for extensions that used to be bundled with *IPython*.``` Installing *Cython* includes this command. When executed it creates the `.pyx` file, compiles it with a `setup.py` and imports it into the *notebook*.
###Code
%load_ext Cython
%%cython
def Rcf(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
nodes = [a+(i+1/2)*h_hat for i in range(n)]
sum_res = 0
for node in nodes:
sum_res = sum_res+f(node)
return h_hat*sum_res
start_time = time.time()
res = Rcf(f, a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf tomó",secs,"segundos" )
obj, err = quad(f, a, b)
print(res == approx(obj))
###Output
True
###Markdown
Annotations to analyze a block of code *Cython* has the *annotation* option to generate a file with extension `.html` in which each line can be expanded with a double click to show the generated *C* code. "More yellow" lines mean more calls into the *Python* virtual machine, while whiter lines mean "more *C* code and less *Python*". The goal is to remove as many yellow lines as possible since they are costly in time. If such lines are inside loops they are even more costly. In the end we aim for code whose annotations are as white as possible. ```{admonition} Observation:class: tipFocus your attention on the lines that are yellow and inside *loops*; do not spend time on yellow lines that are outside *loops* and do not cause slow execution. Profiling helps identify the above.``` Example via the command line
###Code
%%bash
$HOME/.local/bin/cython --force -3 --annotate Rcf_cython.pyx
###Output
_____no_output_____
###Markdown
See the generated file: `Rcf_cython.html` ```{margin}The correct link for the `Rcf_cython.c` file is [Rcf_cython.c](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Rcf_cython.c)```
###Code
display(HTML("Rcf_cython.html"))
###Output
_____no_output_____
###Markdown
```{admonition} CommentsFor the previous code, the *statement* where the nodes are created involves a *loop* and is "very yellow". If the code is profiled we will see that it is a line where a large part of the total execution time is spent.``` A first option is to create the nodes for the integration method inside the *loop*, dropping the separate call to the *list comprehension* `nodes=[a+(i+1/2)*h_hat for i in range(n)]`:
###Code
%%file Rcf_2_cython.pyx
def Rcf(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
sum_res = 0
for i in range(n):
x = a+(i+1/2)*h_hat
sum_res += f(x)
return h_hat*sum_res
%%bash
$HOME/.local/bin/cython --force -3 --annotate Rcf_2_cython.pyx
###Output
_____no_output_____
###Markdown
```{margin}The correct link for the `Rcf_2_cython.c` file is [Rcf_2_cython.c](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Rcf_2_cython.c)```
###Code
display(HTML("Rcf_2_cython.html"))
###Output
_____no_output_____
###Markdown
```{admonition} CommentFor the previous code, the *statements* inside the loop are "very yellow". Such *statements* involve types of values that will not change during each *loop* iteration. One option is to **declare the types of the objects** involved in the loop using the `cdef` syntax. See [function_declarations](https://notes-on-cython.readthedocs.io/en/latest/function_declarations.html), [definition-of-def-cdef-and-cpdef-in-cython](https://stackoverflow.com/questions/28362009/definition-of-def-cdef-and-cpdef-in-cython/41976772)```
###Code
%%file Rcf_3_cython.pyx
def Rcf(f, double a, double b, unsigned int n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
cdef unsigned int i
cdef double x, sum_res, h_hat
h_hat = (b-a)/n
sum_res = 0
for i in range(n):
x = a+(i+1/2)*h_hat
sum_res += f(x)
return h_hat*sum_res
%%bash
$HOME/.local/bin/cython -3 --force --annotate Rcf_3_cython.pyx
###Output
_____no_output_____
###Markdown
```{margin}The correct link for the `Rcf_3_cython.c` file is [Rcf_3_cython.c](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Rcf_3_cython.c)```
###Code
display(HTML("Rcf_3_cython.html"))
###Output
_____no_output_____
###Markdown
```{admonition} CommentWhen defining types, they are only understood by *Cython* and not by *Python*. Cython uses these types to convert the *Python* code to *C* code.``` An option with which we lose flexibility but gain a reduction in execution time is to call the `math.exp` function directly:
###Code
%%file Rcf_4_cython.pyx
import math
def Rcf(double a, double b, unsigned int n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
cdef unsigned int i
cdef double x, sum_res, h_hat
h_hat = (b-a)/n
sum_res = 0
for i in range(n):
x = a+(i+1/2)*h_hat
sum_res += math.exp(-x**2)
return h_hat*sum_res
%%bash
$HOME/.local/bin/cython -3 --force --annotate Rcf_4_cython.pyx
###Output
_____no_output_____
###Markdown
```{margin}The correct link for the `Rcf_4_cython.c` file is [Rcf_4_cython.c](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Rcf_4_cython.c)```
###Code
display(HTML("Rcf_4_cython.html"))
###Output
_____no_output_____
###Markdown
We improve the time if we directly use the `exp` function from the C `math` library exposed by *Cython*, see [calling C functions](https://cython.readthedocs.io/en/latest/src/tutorial/external.html). (RCF5CYTHON)= `Rcf_5_cython.pyx`
###Code
%%file Rcf_5_cython.pyx
from libc.math cimport exp as c_exp
cdef double f(double x) nogil:
return c_exp(-x**2)
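# (added note) the nogil qualifier declares that f can be called without holding
# the GIL: it only operates on C doubles, so no Python objects are involved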
def Rcf(double a, double b, unsigned int n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
cdef unsigned int i
cdef double x, sum_res, h_hat
h_hat = (b-a)/n
sum_res = 0
for i in range(n):
x = a+(i+1/2)*h_hat
sum_res += f(x)
return h_hat*sum_res
%%bash
$HOME/.local/bin/cython -3 --force --annotate Rcf_5_cython.pyx
###Output
_____no_output_____
###Markdown
```{margin}The correct link for the `Rcf_5_cython.c` file is [Rcf_5_cython.c](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Rcf_5_cython.c)```
###Code
display(HTML("Rcf_5_cython.html"))
###Output
_____no_output_____
###Markdown
```{admonition} CommentIn code optimization there is a *tradeoff* between flexibility, readability and fast execution of the code.```
###Code
%%file setup_2.py
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize("Rcf_2_cython.pyx",
compiler_directives={'language_level' : 3})
)
###Output
Writing setup_2.py
###Markdown
Compile from the command line:
###Code
%%bash
python3 setup_2.py build_ext --inplace
%%file setup_3.py
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize("Rcf_3_cython.pyx",
compiler_directives={'language_level' : 3})
)
###Output
Writing setup_3.py
###Markdown
Compile from the command line:
###Code
%%bash
python3 setup_3.py build_ext --inplace
%%file setup_4.py
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize("Rcf_4_cython.pyx",
compiler_directives={'language_level' : 3})
)
###Output
Writing setup_4.py
###Markdown
Compile from the command line:
###Code
%%bash
python3 setup_4.py build_ext --inplace
%%file setup_5.py
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize("Rcf_5_cython.pyx",
compiler_directives={'language_level' : 3})
)
###Output
Writing setup_5.py
###Markdown
Compile from the command line:
###Code
%%bash
python3 setup_5.py build_ext --inplace
###Output
running build_ext
building 'Rcf_5_cython' extension
x86_64-linux-gnu-gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/include/python3.8 -c Rcf_5_cython.c -o build/temp.linux-x86_64-3.8/Rcf_5_cython.o
x86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fwrapv -O2 -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.8/Rcf_5_cython.o -o /home/ubuntu/analisis-numerico-computo-cientifico/libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Rcf_5_cython.cpython-38-x86_64-linux-gnu.so
###Markdown
Import the compiled modules:
###Code
import Rcf_2_cython, Rcf_3_cython, Rcf_4_cython, Rcf_5_cython
start_time = time.time()
res_2 = Rcf_2_cython.Rcf(f, a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_2 tomó",secs,"segundos" )
###Output
Rcf_2 tomó 3.3402740955352783 segundos
###Markdown
We verify that after the code optimization we are still solving the problem correctly:
###Code
print(res_2 == approx(obj))
start_time = time.time()
res_3 = Rcf_3_cython.Rcf(f, a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_3 tomó",secs,"segundos" )
print(res_3 == approx(obj))
start_time = time.time()
res_4 = Rcf_4_cython.Rcf(a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_4 tomó",secs,"segundos" )
print(res_4 == approx(obj))
start_time = time.time()
res_5 = Rcf_5_cython.Rcf(a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_5 tomó",secs,"segundos" )
###Output
Rcf_5 tomó 0.10629606246948242 segundos
###Markdown
We verify that after the code optimization we are still solving the problem correctly:
###Code
print(res_5 == approx(obj))
###Output
True
###Markdown
Example of an implementation with *NumPy* We compare with an implementation that uses *NumPy* and vectorization:
###Code
import numpy as np
f_np = lambda x: np.exp(-x**2)
###Output
_____no_output_____
###Markdown
(RCFNUMPY)= `Rcf_numpy`
###Code
def Rcf_numpy(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
aux_vec = np.linspace(a, b, n+1)
nodes = (aux_vec[:-1]+aux_vec[1:])/2
return h_hat*np.sum(f(nodes))
start_time = time.time()
res_numpy = Rcf_numpy(f_np, a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_numpy tomó",secs,"segundos" )
print(res_numpy == approx(obj))
###Output
True
###Markdown
```{admonition} Comments* The implementation with *NumPy* turns out to be the second fastest, mainly because of the use of contiguous memory blocks to store the values and because of vectorization. The previous implementation, however, requires knowledge of the functions of that package. For this example we used `linspace` and the ability to perform operations in vectorized form for the creation of the nodes and the evaluation of the function. A situation that may arise is that for some problem we cannot use a *NumPy* function, or we lack the ingenuity to figure out how to perform an operation in vectorized form. In that case *Cython* can be an option to use.* In *Cython* there are [memoryviews](https://cython.readthedocs.io/en/latest/src/userguide/memoryviews.html) for low-level access to memory, similar to what *NumPy* *arrays* provide, in case *arrays* are required in a more general form than just *NumPy* ones (for example from *C* or *Cython*, see [Cython arrays](https://cython.readthedocs.io/en/latest/src/userguide/memoryviews.htmlview-cython-arrays)).``` ```{admonition} Observation:class: tipCompare the implementation via *NumPy* with the use of lists for the nodes. Recall that *Python* lists store locations where the values can be found, not the values themselves. *NumPy* *arrays* store primitive value types. Lists suffer *data fragmentation* that causes *memory fragmentation* and therefore a greater impact of the *Von Neumann bottleneck*. In addition, storing high-level object types in lists causes *overhead*, as opposed to storing primitive value types in a *NumPy* *array*.``` *Cython* and [OpenMP](http://www.openmp.org/) *OpenMP* is an extension to the *C* language and an API for parallel computing on a shared memory system, *aka, shared memory parallel programming* with CPUs. It will be reviewed in more depth in the note on parallel computing. In *Cython*, *OpenMP* is used via [prange](https://cython.readthedocs.io/en/latest/src/userguide/parallelism.htmlcython.parallel.prange) (*parallel range*). In addition, the *GIL* must be disabled. ```{admonition} Observation:class: tipWhen disabling the GIL in a section of code you must operate with primitive types. In such a section you must not operate with *Python* objects (for example lists).``` (RCF5CYTHONOPENMP)= `Rcf_5_cython_openmp`
###Code
%%file Rcf_5_cython_openmp.pyx
from cython.parallel import prange
from libc.math cimport exp as c_exp
cdef double f(double x) nogil:
return c_exp(-x**2)
def Rcf(double a, double b, unsigned int n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
cdef int i
cdef double x, sum_res, h_hat
h_hat = (b-a)/n
sum_res = 0
for i in prange(n, schedule="guided", nogil=True):
x = a+(i+1/2)*h_hat
sum_res += f(x)
return h_hat*sum_res
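# (added note) prange also accepts a num_threads argument, e.g.
# prange(n, schedule="guided", num_threads=4, nogil=True), to cap the number of
# OpenMP threads used; otherwise the OpenMP runtime decides at execution time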
###Output
Writing Rcf_5_cython_openmp.pyx
###Markdown
```{admonition} CommentWith `prange` different *scheduling* can be chosen. If `schedule` receives the value `static`, the work is split evenly among the *cores*, and if some *threads* finish earlier they remain without work, *aka idle*. With `dynamic` and `guided` the work is distributed dynamically *at runtime*, which is useful if the amount of work is variable; if *threads* finish earlier they can receive more work to do.```
###Code
%%bash
$HOME/.local/bin/cython -3 --force Rcf_5_cython_openmp.pyx
###Output
_____no_output_____
###Markdown
In the `setup.py` file the `-fopenmp` **flag** is set. ```{margin}See [Rcf_5_cython_openmp.c](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/libro_optimizacion/temas/V.optimizacion_de_codigo/5.3/Rcf_5_cython_openmp.c) for the *C* implementation of the `Rcf_5_cython_openmp.Rcf` function.```
###Code
%%file setup_5_openmp.py
from setuptools import Extension, setup
from Cython.Build import cythonize
ext_modules = [Extension("Rcf_5_cython_openmp",
["Rcf_5_cython_openmp.pyx"],
extra_compile_args=["-fopenmp"],
extra_link_args=["-fopenmp"],
)
]
setup(ext_modules = cythonize(ext_modules))
###Output
Writing setup_5_openmp.py
###Markdown
Compile from the command line:
###Code
%%bash
python3 setup_5_openmp.py build_ext --inplace
import Rcf_5_cython_openmp
start_time = time.time()
res_5_openmp = Rcf_5_cython_openmp.Rcf(a, b, n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_5_openmp tomó",secs,"segundos" )
###Output
Rcf_5_openmp tomó 0.017746686935424805 segundos
###Markdown
We verify that after the code optimization we are still solving the problem correctly:
###Code
print(res_5_openmp == approx(obj))
###Output
True
###Markdown
```{admonition} Exercise
:class: tip
Implement the composite Simpson rule with *NumPy*, *Cython*, and *Cython* + *OpenMP* on an AWS machine with the same characteristics as the one presented in this note, and measure the execution time.
```

[Numba](https://github.com/numba/numba)

* Uses JIT compilation *at runtime* through the [llvmlite](https://github.com/numba/llvmlite) compiler.
* Can be used with *built-in* *Python* or *NumPy* functions.
* Has support for parallel computing on CPU/GPU.
* Uses [CFFI](https://cffi.readthedocs.io/en/latest/) and [ctypes](https://docs.python.org/3/library/ctypes.html) to call *C* functions.
* See [numba architecture](https://numba.readthedocs.io/en/stable/developer/architecture.html#architecture) for a detailed explanation of how it works.

A *decorator* is used to annotate which function should be compiled.

Example of use with *Numba*
###Code
from numba import jit
###Output
_____no_output_____
###Markdown
```{margin}
See [glossary: nopython](https://numba.pydata.org/numba-doc/latest/glossary.html#term-nopython-mode) for the definition of *nopython mode* in *Numba*. The `njit` *decorator*, which is an *alias* for `@jit(nopython=True)`, can also be used.
```

(RCFNUMBA)=

`Rcf_numba`
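As a brief, illustrative sketch of the alias mentioned in the margin (not part of the original timing experiments), both decorators below compile the function in *nopython mode*:

```python
import numpy as np
from numba import jit, njit

@njit                              # alias for @jit(nopython=True)
def f_njit(x):
    return np.exp(-x ** 2)

@jit(nopython=True)
def f_jit(x):
    return np.exp(-x ** 2)

print(f_njit(0.5), f_jit(0.5))     # both calls run type-specialised, compiled code
```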
###Code
@jit(nopython=True)
def Rcf_numba(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
sum_res = 0
for i in range(n):
x = a+(i+1/2)*h_hat
sum_res += np.exp(-x**2)
return h_hat*sum_res
start_time = time.time()
res_numba = Rcf_numba(a,b,n)
end_time = time.time()
###Output
_____no_output_____
###Markdown
```{margin}
The execution time is measured twice so that the compilation time is not included. See the [5 minute guide](https://numba.pydata.org/numba-doc/latest/user/5minguide.html).
```
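An alternative, sketched here under the assumption that the argument types used later match the declared signature, is eager compilation: passing an explicit signature to the decorator makes *Numba* compile at definition time, so the first timed call no longer includes compilation.

```python
import numpy as np
from numba import jit

# Compiles when the function is defined, for the stated signature, instead of lazily on the first call.
@jit("float64(float64, float64, int64)", nopython=True)
def Rcf_numba_eager(a, b, n):
    h_hat = (b - a) / n
    sum_res = 0.0
    for i in range(n):
        x = a + (i + 1 / 2) * h_hat
        sum_res += np.exp(-x ** 2)
    return h_hat * sum_res
```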
###Code
secs = end_time-start_time
print("Rcf_numba con compilación tomó", secs, "segundos" )
start_time = time.time()
res_numba = Rcf_numba(a,b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_numba tomó", secs, "segundos" )
###Output
Rcf_numba tomó 0.22369146347045898 segundos
###Markdown
We verify that after the code optimization we are still solving the problem correctly:
###Code
print(res_numba == approx(obj))
###Output
True
###Markdown
The [inspect_types](https://numba.readthedocs.io/en/stable/reference/jit-compilation.html?highlight=inspect_types#Dispatcher.inspect_types) function helps us check whether type information could be inferred from the code as written.
###Code
print(Rcf_numba.inspect_types())
###Output
Rcf_numba (int64, int64, int64)
--------------------------------------------------------------------------------
# File: <ipython-input-71-541f7545b9ed>
# --- LINE 1 ---
@jit(nopython=True)
# --- LINE 2 ---
def Rcf_numba(a,b,n):
# --- LINE 3 ---
"""
# --- LINE 4 ---
Compute numerical approximation using rectangle or mid-point
# --- LINE 5 ---
method in an interval.
# --- LINE 6 ---
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
# --- LINE 7 ---
i=0,1,...,n-1 and h_hat=(b-a)/n
# --- LINE 8 ---
Args:
# --- LINE 9 ---
# --- LINE 10 ---
a (float): left point of interval.
# --- LINE 11 ---
# --- LINE 12 ---
b (float): right point of interval.
# --- LINE 13 ---
# --- LINE 14 ---
n (int): number of subintervals.
# --- LINE 15 ---
# --- LINE 16 ---
Returns:
# --- LINE 17 ---
# --- LINE 18 ---
sum_res (float): numerical approximation to integral
# --- LINE 19 ---
of f in the interval a,b
# --- LINE 20 ---
"""
# --- LINE 21 ---
# label 0
# a = arg(0, name=a) :: int64
# b = arg(1, name=b) :: int64
# n = arg(2, name=n) :: int64
# $6binary_subtract.2 = b - a :: int64
# del b
# h_hat = $6binary_subtract.2 / n :: float64
# del $6binary_subtract.2
h_hat = (b-a)/n
# --- LINE 22 ---
# sum_res = const(int, 0) :: Literal[int](0)
sum_res = 0
# --- LINE 23 ---
# $18load_global.6 = global(range: <class 'range'>) :: Function(<class 'range'>)
# $22call_function.8 = call $18load_global.6(n, func=$18load_global.6, args=[Var(n, <ipython-input-71-541f7545b9ed>:21)], kws=(), vararg=None) :: (int64,) -> range_state_int64
# del n
# del $18load_global.6
# $24get_iter.9 = getiter(value=$22call_function.8) :: range_iter_int64
# del $22call_function.8
# $phi26.0 = $24get_iter.9 :: range_iter_int64
# del $24get_iter.9
# jump 26
# label 26
# sum_res.2 = phi(incoming_values=[Var(sum_res, <ipython-input-71-541f7545b9ed>:22), Var(sum_res.1, <ipython-input-71-541f7545b9ed>:25)], incoming_blocks=[0, 28]) :: float64
# del sum_res.1
# $26for_iter.1 = iternext(value=$phi26.0) :: pair<int64, bool>
# $26for_iter.2 = pair_first(value=$26for_iter.1) :: int64
# $26for_iter.3 = pair_second(value=$26for_iter.1) :: bool
# del $26for_iter.1
# $phi28.1 = $26for_iter.2 :: int64
# del $26for_iter.2
# branch $26for_iter.3, 28, 68
# label 28
# del $26for_iter.3
# i = $phi28.1 :: int64
# del $phi28.1
for i in range(n):
# --- LINE 24 ---
# $const34.4 = const(float, 0.5) :: float64
# $36binary_add.5 = i + $const34.4 :: float64
# del i
# del $const34.4
# $40binary_multiply.7 = $36binary_add.5 * h_hat :: float64
# del $36binary_add.5
# x = a + $40binary_multiply.7 :: float64
# del $40binary_multiply.7
x = a+(i+1/2)*h_hat
# --- LINE 25 ---
# $48load_global.10 = global(np: <module 'numpy' from '/home/ubuntu/.local/lib/python3.8/site-packages/numpy/__init__.py'>) :: Module(<module 'numpy' from '/home/ubuntu/.local/lib/python3.8/site-packages/numpy/__init__.py'>)
# $50load_method.11 = getattr(value=$48load_global.10, attr=exp) :: Function(<ufunc 'exp'>)
# del $48load_global.10
# $const54.13 = const(int, 2) :: Literal[int](2)
# $56binary_power.14 = x ** $const54.13 :: float64
# del x
# del $const54.13
# $58unary_negative.15 = unary(fn=<built-in function neg>, value=$56binary_power.14) :: float64
# del $56binary_power.14
# $60call_method.16 = call $50load_method.11($58unary_negative.15, func=$50load_method.11, args=[Var($58unary_negative.15, <ipython-input-71-541f7545b9ed>:25)], kws=(), vararg=None) :: (float64,) -> float64
# del $58unary_negative.15
# del $50load_method.11
# $62inplace_add.17 = inplace_binop(fn=<built-in function iadd>, immutable_fn=<built-in function add>, lhs=sum_res.2, rhs=$60call_method.16, static_lhs=Undefined, static_rhs=Undefined) :: float64
# del sum_res.2
# del $60call_method.16
# sum_res.1 = $62inplace_add.17 :: float64
# del $62inplace_add.17
# jump 26
sum_res += np.exp(-x**2)
# --- LINE 26 ---
# label 68
# del sum_res
# del a
# del $phi28.1
# del $phi26.0
# del $26for_iter.3
# $72binary_multiply.2 = h_hat * sum_res.2 :: float64
# del sum_res.2
# del h_hat
# $74return_value.3 = cast(value=$72binary_multiply.2) :: float64
# del $72binary_multiply.2
# return $74return_value.3
return h_hat*sum_res
================================================================================
None
###Markdown
Example of use of *Numba* with parallel computing

See [numba: parallel](https://numba.pydata.org/numba-doc/latest/user/parallel.html) and [numba: threading layer](http://numba.pydata.org/numba-doc/latest/user/threading-layer.html).
###Code
from numba import prange
###Output
_____no_output_____
###Markdown
(RCFNUMBAPARALLEL)=

`Rcf_numba_parallel`
###Code
@jit(nopython=True, parallel=True)
def Rcf_numba_parallel(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
sum_res = 0
for i in prange(n):
x = a+(i+1/2)*h_hat
sum_res += np.exp(-x**2)
return h_hat*sum_res
start_time = time.time()
res_numba_parallel = Rcf_numba_parallel(a,b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_numba_parallel con compilación tomó", secs, "segundos" )
start_time = time.time()
res_numba_parallel = Rcf_numba_parallel(a,b,n)
end_time = time.time()
###Output
_____no_output_____
###Markdown
```{margin}
See [parallel-diagnostics](https://numba.pydata.org/numba-doc/latest/user/parallel.html#diagnostics) for information related to the parallel execution, for example by running `Rcf_numba_parallel.parallel_diagnostics(level=4)`.
```
###Code
secs = end_time-start_time
print("Rcf_numba_parallel tomó", secs, "segundos" )
###Output
Rcf_numba_parallel tomó 0.011192798614501953 segundos
###Markdown
We verify that after the code optimization we are still solving the problem correctly:
###Code
print(res_numba_parallel == approx(obj))
###Output
True
###Markdown
Example with *NumPy* and *Numba*

In the following example the `linspace` function is used to help create the nodes. Note that *Numba* handles the *for* loop without any problem (for the case in which, for instance, we had not been able to vectorise the node-creation operation).
###Code
@jit(nopython=True)
def Rcf_numpy_numba(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
aux_vec = np.linspace(a, b, n+1)
sum_res = 0
for i in range(n-1):
x = (aux_vec[i]+aux_vec[i+1])/2
sum_res += np.exp(-x**2)
return h_hat*sum_res
start_time = time.time()
res_numpy_numba = Rcf_numpy_numba(a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_numpy_numba con compilación tomó",secs,"segundos" )
start_time = time.time()
res_numpy_numba = Rcf_numpy_numba(a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_numpy_numba tomó",secs,"segundos" )
print(res_numpy_numba == approx(obj))
###Output
True
###Markdown
```{admonition} Observation
:class: tip
Note that the execution time does not improve in the following implementation, which, besides using the `linspace` function to help create the nodes, uses vectorisation to create them.
```
###Code
@jit(nopython=True)
def Rcf_numpy_numba_2(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
"""
h_hat = (b-a)/n
aux_vec = np.linspace(a, b, n+1)
nodes = (aux_vec[:-1]+aux_vec[1:])/2
return h_hat*np.sum(np.exp(-nodes**2))
start_time = time.time()
res_numpy_numba_2 = Rcf_numpy_numba_2(a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_numpy_numba_2 con compilación tomó",secs,"segundos" )
start_time = time.time()
res_numpy_numba_2 = Rcf_numpy_numba_2(a, b,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf_numpy_numba_2 tomó",secs,"segundos" )
print(res_numpy_numba_2 == approx(obj))
###Output
True
###Markdown
```{admonition} Exercise
:class: tip
Implement the composite Simpson rule with *Numba*, with *NumPy* and *Numba*, and with *Numba* using parallel computing, on an AWS machine with the same characteristics as the one presented in this note, and measure the execution time.
```

[Rcpp](https://github.com/RcppCore/Rcpp)

*Rcpp* allows *C++* and *R* to be integrated in a simple way through its API.

Why use Rcpp?

*Rcpp* gives us the possibility of obtaining execution efficiency from *C++* code while keeping the flexibility of working with *R*. Although *C* or *C++* require more lines of code, they are orders of magnitude faster than *R*. We trade the advantages of *R*, such as how easy it is to write code, for execution speed.

When could we use Rcpp?

* In *loops* that cannot be vectorised easily, for example *loops* in which one iteration depends on the previous one.
* When a function has to be called millions of times.

Why don't we use *C*?

It is indeed possible to call *C* functions from *R*, but it results in more work on our side. For example, according to H. Wickham:

*"...R's C API. Unfortunately this API is not well documented. I'd recommend starting with my notes at [R's C interface](http://adv-r.had.co.nz/C-interface.html). After that, read "[The R API](http://cran.rstudio.com/doc/manuals/r-devel/R-exts.html#The-R-API)" in "Writing R Extensions". A number of exported functions are not documented, so you'll also need to read the [R source code](https://github.com/wch/r-source) to figure out the details."*

And, as a first approach to compiling code from *R*, it is preferable to follow H. Wickham's recommendation of using the *Rcpp* API.

Example with *Rcpp*

In the following implementation [vapply](https://www.rdocumentation.org/packages/functools/versions/0.2.0/topics/Vapply) is used, which is faster than [sapply](https://www.rdocumentation.org/packages/memisc/versions/0.99.27.3/topics/Sapply) because the type of value it returns is specified beforehand.
###Code
Rcf <- function(f,a,b,n){
'
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
'
h_hat <- (b-a)/n
sum_res <- 0
x <- vapply(0:(n-1),function(j)a+(j+1/2)*h_hat,numeric(1))
for(j in 1:n){
sum_res <- sum_res+f(x[j])
}
h_hat*sum_res
}
a <- 0
b <- 1
f <- function(x)exp(-x^2)
n <- 10**7
system.time(res <- Rcf(f,a,b,n))
err_relativo <- function(aprox,obj)abs(aprox-obj)/abs(obj)
###Output
_____no_output_____
###Markdown
```{margin}
The documentation of `integrate` mentions that [Vectorize](https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/Vectorize) should be used.
```
###Code
obj <- integrate(Vectorize(f),0,1)
print(err_relativo(res,obj$value))
Rcf_2 <- function(f,a,b,n){
'
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
'
h_hat <- (b-a)/n
x <- vapply(0:(n-1),function(j)a+(j+1/2)*h_hat,numeric(1))
h_hat*sum(f(x))
}
system.time(res_2 <- Rcf_2(f,a,b,n))
print(err_relativo(res_2,obj$value))
library(Rcpp)
###Output
_____no_output_____
###Markdown
*Rcpp* provides the [cppFunction](https://www.rdocumentation.org/packages/Rcpp/versions/1.0.3/topics/cppFunction) function, which receives code written in *C++* to define a function that can be used from *R*. First, let us rewrite the implementation without using `vapply`.
###Code
Rcf_3 <- function(f,a,b,n){
'
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
'
h_hat <- (b-a)/n
sum_res <- 0
for(i in 0:(n-1)){
x <- a+(i+1/2)*h_hat
sum_res <- sum_res+f(x)
}
h_hat*sum_res
}
system.time(res_3 <- Rcf_3(f,a,b,n))
print(err_relativo(res_3,obj$value))
###Output
[1] 4.99495e-14
###Markdown
(RCFRCPP)=

`Rcf_Rcpp`

We write the *C++* *source code* that will be the first parameter received by `cppFunction`.
###Code
f_str <- 'double Rcf_Rcpp(double a, double b, int n){
double h_hat;
double sum_res=0;
int i;
double x;
h_hat=(b-a)/n;
for(i=0;i<=n-1;i++){
x = a+(i+1/2.0)*h_hat;
sum_res += exp(-pow(x,2));
}
return h_hat*sum_res;
}'
cppFunction(f_str)
###Output
_____no_output_____
###Markdown
If we want to obtain more information about the execution of the previous line, we can use the following.

```{margin}
`rebuild=TRUE` is used so that the code is compiled again, linked against the *C++* library, and the remaining operations of `cppFunction` are redone.
```
###Code
cppFunction(f_str, verbose=TRUE, rebuild=TRUE)
###Output
Generated code for function definition:
--------------------------------------------------------
#include <Rcpp.h>
using namespace Rcpp;
// [[Rcpp::export]]
double Rcf_Rcpp(double a, double b, int n){
double h_hat;
double sum_res=0;
int i;
double x;
h_hat=(b-a)/n;
for(i=0;i<=n-1;i++){
x = a+(i+1/2.0)*h_hat;
sum_res += exp(-pow(x,2));
}
return h_hat*sum_res;
}
Generated extern "C" functions
--------------------------------------------------------
#include <Rcpp.h>
// Rcf_Rcpp
double Rcf_Rcpp(double a, double b, int n);
RcppExport SEXP sourceCpp_4_Rcf_Rcpp(SEXP aSEXP, SEXP bSEXP, SEXP nSEXP) {
BEGIN_RCPP
Rcpp::RObject rcpp_result_gen;
Rcpp::RNGScope rcpp_rngScope_gen;
Rcpp::traits::input_parameter< double >::type a(aSEXP);
Rcpp::traits::input_parameter< double >::type b(bSEXP);
Rcpp::traits::input_parameter< int >::type n(nSEXP);
rcpp_result_gen = Rcpp::wrap(Rcf_Rcpp(a, b, n));
return rcpp_result_gen;
END_RCPP
}
Generated R functions
-------------------------------------------------------
`.sourceCpp_4_DLLInfo` <- dyn.load('/tmp/RtmpTz18Yr/sourceCpp-x86_64-pc-linux-gnu-1.0.6/sourcecpp_3684ee9b3bc/sourceCpp_6.so')
Rcf_Rcpp <- Rcpp:::sourceCppFunction(function(a, b, n) {}, FALSE, `.sourceCpp_4_DLLInfo`, 'sourceCpp_4_Rcf_Rcpp')
rm(`.sourceCpp_4_DLLInfo`)
Building shared library
--------------------------------------------------------
DIR: /tmp/RtmpTz18Yr/sourceCpp-x86_64-pc-linux-gnu-1.0.6/sourcecpp_3684ee9b3bc
/usr/lib/R/bin/R CMD SHLIB --preclean -o 'sourceCpp_6.so' 'file3684a4b1d19.cpp'
###Markdown
```{admonition} Comments
* When the `cppFunction` line is executed, *Rcpp* compiles the *C++* code and builds an *R* function that connects to the compiled *C++* function.
* Reading the output of the execution with `verbose=TRUE`, a value type `SEXP` is used. According to H. Wickham:

*...functions that talk to R must use the SEXP type for both inputs and outputs. SEXP, short for S expression, is the C struct used to represent every type of object in R. A C function typically starts by converting SEXPs to atomic C objects, and ends by converting C objects back to a SEXP. (The R API is designed so that these conversions often don't require copying.)*

* The `Rcpp::wrap` function converts *C++* objects into *R* objects and `Rcpp::as` does the opposite.
```
###Code
system.time(res_4 <- Rcf_Rcpp(a,b,n))
print(err_relativo(res_4,obj$value))
###Output
[1] 4.99495e-14
###Markdown
Other *Rcpp* functionality

`NumericVector`

*Rcpp* defines classes that relate *R* value types to *C++* value types for handling vectors. Among them are `NumericVector`, `IntegerVector`, `CharacterVector` and `LogicalVector`, which correspond to vectors of type `numeric`, `integer`, `character` and `logical` respectively. For the `NumericVector` case we have the following example.
###Code
f_str <- 'NumericVector my_f(NumericVector x){
return exp(log(x));
}'
cppFunction(f_str)
print(my_f(seq(0,1,by=.1)))
###Output
[1] 0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0
###Markdown
Example with `NumericVector`

To show another example for the rectangle integration rule, consider the following implementation.
###Code
Rcf_implementation_example <- function(f,a,b,n){
'
Compute numerical approximation using rectangle or mid-point
method in an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for
i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (float): function expression of integrand.
a (float): left point of interval.
b (float): right point of interval.
n (int): number of subintervals.
Returns:
sum_res (float): numerical approximation to integral
of f in the interval a,b
'
h_hat <- (b-a)/n
fx <- f(vapply(0:(n-1),function(j)a+(j+1/2)*h_hat,numeric(1)))
h_hat*sum(fx)
}
res_numeric_vector <- Rcf_implementation_example(f,a,b,n)
print(err_relativo(res_numeric_vector,obj$value))
###Output
[1] 2.973185e-16
###Markdown
Let us use *Rcpp* to define a function that receives a `NumericVector` to carry out the sum.

```{margin}
The `.size()` method returns an *integer*.
```
###Code
f_str<-'double Rcf_numeric_vector(NumericVector f_x,double h_hat){
double sum_res=0;
int i;
int n = f_x.size();
for(i=0;i<=n-1;i++){
sum_res+=f_x[i];
}
return h_hat*sum_res;
}'
h_hat <- (b-a)/n
fx <- f(vapply(0:(n-1),function(j)a+(j+1/2)*h_hat,numeric(1)))
print(tail(fx))
cppFunction(f_str,rebuild=TRUE)
res_numeric_vector <- Rcf_numeric_vector(fx,h_hat)
print(err_relativo(res_numeric_vector,obj$value))
###Output
[1] 4.99495e-14
###Markdown
Another example, in which a vector of type `NumericVector` is returned to create the nodes.
###Code
f_str <- 'NumericVector Rcf_nodes(double a, double b, int n){
double h_hat=(b-a)/n;
int i;
NumericVector x(n);
for(i=0;i<n;i++)
x[i]=a+(i+1/2.0)*h_hat;
return x;
}'
cppFunction(f_str,rebuild=TRUE)
print(Rcf_nodes(0,1,2))
###Output
[1] 0.25 0.75
###Markdown
Example of calling a function defined in the global environment with *Rcpp*

With *Rcpp* it is also possible to call functions defined in the global environment, for example:

```{margin}
`RObject` is a *C++* class used to define an *R* object.
```
###Code
f_str <- 'RObject fun(double x){
Environment env = Environment::global_env();
Function f=env["f"];
return f(x);
}'
cppFunction(f_str,rebuild=TRUE)
fun(1)
f(1)
print(fun)
###Output
function (x)
.Call(<pointer: 0x7fba201b25f0>, x)
###Markdown
```{admonition} Comment
`.Call` is a base function for calling `C` functions from `R`:

*There are two ways to call C functions from R: .C() and .Call(). .C() is a quick and dirty way to call an C function that doesn't know anything about R because .C() automatically converts between R vectors and the corresponding C types. .Call() is more flexible, but more work: your C function needs to use the R API to convert its inputs to standard C data types.*

**H. Wickham**
```
###Code
print(f)
###Output
function(x)exp(-x^2)
<bytecode: 0x55669726fec8>
|
data wrangling using SQL/data_wrangling-sql.ipynb | ###Markdown
Spark SQL Examples

Run the code cells below. This is the same code from the previous screencast.
###Code
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
from pyspark.sql.types import IntegerType
from pyspark.sql.functions import desc
from pyspark.sql.functions import asc
from pyspark.sql.functions import sum as Fsum
import datetime
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Define a new Spark Session
###Code
spark = SparkSession \
.builder \
.appName("Data wrangling with Spark SQL") \
.getOrCreate()
###Output
_____no_output_____
###Markdown
Load the same old Titanic data (in json instead of csv)
###Code
path = "titanic.json"
df = spark.read.json(path)
###Output
_____no_output_____
###Markdown
Check a couple of rows
###Code
df.take(2)
###Output
_____no_output_____
###Markdown
If you are not familiar with the schema, you can check it with `printSchema`.
###Code
df.printSchema()
###Output
root
|-- Age: double (nullable = true)
|-- Cabin: string (nullable = true)
|-- Embarked: string (nullable = true)
|-- Fare: double (nullable = true)
|-- Name: string (nullable = true)
|-- Parch: long (nullable = true)
|-- PassengerId: long (nullable = true)
|-- Pclass: long (nullable = true)
|-- Sex: string (nullable = true)
|-- SibSp: long (nullable = true)
|-- Survived: long (nullable = true)
|-- Ticket: string (nullable = true)
###Markdown
Create a View and Run Queries

The code below creates a temporary view against which we can run SQL queries.
###Code
df.createOrReplaceTempView("titanic_table")
###Output
_____no_output_____
###Markdown
Check data through SQL query
###Code
spark.sql("SELECT * FROM titanic_table LIMIT 2").show()
###Output
+----+-----+--------+-------+--------------------+-----+-----------+------+------+-----+--------+---------+
| Age|Cabin|Embarked| Fare| Name|Parch|PassengerId|Pclass| Sex|SibSp|Survived| Ticket|
+----+-----+--------+-------+--------------------+-----+-----------+------+------+-----+--------+---------+
|22.0| null| S| 7.25|Braund, Mr. Owen ...| 0| 1| 3| male| 1| 0|A/5 21171|
|38.0| C85| C|71.2833|Cumings, Mrs. Joh...| 0| 2| 1|female| 1| 1| PC 17599|
+----+-----+--------+-------+--------------------+-----+-----------+------+------+-----+--------+---------+
###Markdown
We can use either the format above or the format below to run SQL queries.
###Code
spark.sql('''
SELECT *
FROM titanic_table
LIMIT 2
'''
).show()
###Output
+----+-----+--------+-------+--------------------+-----+-----------+------+------+-----+--------+---------+
| Age|Cabin|Embarked| Fare| Name|Parch|PassengerId|Pclass| Sex|SibSp|Survived| Ticket|
+----+-----+--------+-------+--------------------+-----+-----------+------+------+-----+--------+---------+
|22.0| null| S| 7.25|Braund, Mr. Owen ...| 0| 1| 3| male| 1| 0|A/5 21171|
|38.0| C85| C|71.2833|Cumings, Mrs. Joh...| 0| 2| 1|female| 1| 1| PC 17599|
+----+-----+--------+-------+--------------------+-----+-----------+------+------+-----+--------+---------+
###Markdown
Check the total number of rows with the SQL `COUNT` function
###Code
spark.sql('''
SELECT COUNT(*)
FROM titanic_table
'''
).show()
###Output
+--------+
|count(1)|
+--------+
| 891|
+--------+
###Markdown
Check other SQL functions
###Code
spark.sql('''
SELECT Name, Age, Survived
FROM titanic_table
WHERE Sex == 'male'
'''
).collect()
spark.sql('''
SELECT DISTINCT Cabin
FROM titanic_table
ORDER BY Cabin ASC
'''
).show()
###Output
+-----+
|Cabin|
+-----+
| null|
| A10|
| A14|
| A16|
| A19|
| A20|
| A23|
| A24|
| A26|
| A31|
| A32|
| A34|
| A36|
| A5|
| A6|
| A7|
| B101|
| B102|
| B18|
| B19|
+-----+
only showing top 20 rows
###Markdown
User Defined Functions

We first need to register the UDF before we can use it in SQL.
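Before registering it, the string manipulation inside the lambda can be checked in plain Python on a value with the same `"Surname, Title. Given names"` layout as the `Name` column shown above (the sample string is only illustrative):

```python
name = "Braund, Mr. Owen Harris"   # same layout as the Name column above
prefix = name.split(',')[1].strip(" ").split(" ")[0].strip(" ")
print(prefix)                      # Mr.
```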
###Code
spark.udf.register("name_prefix", lambda x: x.split(',')[1].strip(" ").split(" ")[0].strip(" "))
spark.sql('''
SELECT *, name_prefix(Name) AS Name_Prfx
FROM titanic_table
LIMIT 5
'''
).collect()
###Output
_____no_output_____
###Markdown
We can see the `Name_Prfx` column in the result above.
###Code
prefix_count = spark.sql('''
SELECT name_prefix(Name) AS Name_Prfx, COUNT(*) as count_by_prefix
FROM titanic_table
WHERE Survived = 1
GROUP BY Name_Prfx
ORDER BY count_by_prefix DESC
'''
)
prefix_count.show()
###Output
+---------+---------------+
|Name_Prfx|count_by_prefix|
+---------+---------------+
| Miss.| 127|
| Mrs.| 99|
| Mr.| 81|
| Master.| 23|
| Dr.| 3|
| Mlle.| 2|
| Ms.| 1|
| Major.| 1|
| Sir.| 1|
| Col.| 1|
| Mme.| 1|
| the| 1|
| Lady.| 1|
+---------+---------------+
###Markdown
Converting Results to Pandas

We can easily convert the results to pandas.
###Code
prefix_count_pd = prefix_count.toPandas()
print(prefix_count_pd)
###Output
Name_Prfx count_by_prefix
0 Miss. 127
1 Mrs. 99
2 Mr. 81
3 Master. 23
4 Dr. 3
5 Mlle. 2
6 Ms. 1
7 Sir. 1
8 Major. 1
9 Col. 1
10 Mme. 1
11 the 1
12 Lady. 1
|
Tareas/Franco_Lorenzo-Tarea1.ipynb | ###Markdown
Tarea 1 - Franco Lorenzo

1. Initialize 3 variables with different values.
   a. Store the result of (var1 * var2) + (var1 / var2) in var3 and print the result.
   b. Change the value of var1 to var1 * 67 / 34 and the value of var2 to 87, and print each of the variables.
   c. Repeat step a and print the result again.
###Code
var1 = 1
var2 = 3
var3 = (var1 * var2 ) + (var1 / var2)
print("El resultado del apartado a es: " + str(var3))
var1 = (var1 * 67) / 34
var2 = var2 * 87
print("El resultado del apartado b es: var1 = " + str(var1) + " y var2 = " + str(var2))
var3 = (var1 * var2 ) + (var1 / var2)
print("El resultado del apartado c es: " + str(var3))
###Output
El resultado del apartado c es: 514.3310795582602
###Markdown
2. Evaluate the polynomial x^4 + x^3 + 2x^2 - x at x = 1.1. The result is 4.1151.
###Code
x = 1.1
(x ** 4) + (x ** 3) + (2 * x ** 2) - x
###Output
_____no_output_____
###Markdown
3. Evaluate the polynomial x^4 + x^3 + (1/2)x^2 - x at x = 10. The result is 11040.0.
###Code
x = 10
x ** 4 + x ** 3 + (1/2) * x ** 2 - x
###Output
_____no_output_____
###Markdown
4. Write the code needed to display the character string: "Bienvenido al curso Introductorio de Python."
###Code
varMensaje = "Bienvenido al curso Introductorio de Python."
print(varMensaje)
###Output
Bienvenido al curso Introductorio de Python.
###Markdown
5. Write code that reads an integer entered by the user and then displays on screen the result of the following operation: suma = (n(n+1)) / 2
###Code
# Capturar número entero del usuario y convertirlo de tipo string a integer.
# Examinar el valor ingresado. Continuar con la operación si el usuario ingresa un número entero, de lo contraio solicite de nuevo el número entero.
while True:
try:
num = int(input("Ingrese un número entero: "))
break
except ValueError:
print("Valor incorrecto. Por favor ingresar un número entero.")
continue
# Realizar la operacion suma = (n(n+1)) / 2
suma = ( num * ( num + 1 ) ) / 2
print(f"El resultado de la operación ( {num} * ( {num} + 1 ) / 2 ) es: " + str(suma))
###Output
Valor incorrecto. Por favor ingresar un número entero.
El resultado de la operación ( 5 * ( 5 + 1 ) / 2 ) es: 15.0
###Markdown
6. A store sells artisanal cookies at 99.99 colones each. However, cookies that are not from the same day get a 50% discount.
   a. Write code that starts by reading the number of day-old cookies sold.
   b. Then the code must show the regular price of a cookie.
   c. Show the discount applied for not being fresh.
   d. Finally, compute and show the final cost to pay for the day-old cookies.
###Code
costoGalleta = 99.99
# Capturar número entero del usuario y convertirlo de tipo string a integer.
# Examinar el valor ingresado. Continuar con la operación si el usuario ingresa un número entero, de lo contraio solicite de nuevo el número entero.
while True:
try:
galletasNoFrescas = int(input("Ingrese un número de galletas vendidas que No son del día: "))
break
except ValueError:
print("Valor incorrecto. Por favor ingresar un número entero.")
continue
aPagar = costoGalleta * galletasNoFrescas
descuento = aPagar * 0.5
costoTotal = aPagar - descuento
print(f"El precio habital de una galleta fresca es de: {costoGalleta}. Se le aplica un 50% de descuento a las galletas no frescas.")
print(f"La cantidad de galletas vendindas NO frescas es de: {galletasNoFrescas}. Estas tienen un descuento de {descuento}")
print(f"El costo total de {galletasNoFrescas} galletas no frescas es de {costoTotal}.")
###Output
El precio habital de una galleta fresca es de: 99.99. Se le aplica un 50% de descuento a las galletas no frescas.
La cantidad de galletas vendindas NO frescas es de: 5. Estas tienen un descuento de 249.975
El costo total de 5 galletas no frescas es de 249.975.
|