path | concatenated_notebook
---|---|
tf_hub_ex.ipynb | ###Markdown
CNN based on ResNet50
###Code
import sys, json, os
import pandas as pd
import numpy as np
import skimage.io
from tensorflow.keras.applications.resnet50 import ResNet50  # needed below for the ResNet50 base model
import tensorflow
from tensorflow import keras
tensorflow.__version__
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
from tensorflow.keras.models import model_from_json, save_model, load_model
#from tensorflow.python.keras.applications import EfficientNetB3
import tensorflow_hub as hub
newmodel = hub.Module('b4')
embed = hub.KerasLayer('b4')
model = tensorflow.keras.Sequential()
model.add(embed)
model.summary()
from panda_bvv_config import *
model_name = os.path.join(note_path, 'model_panda.h5')
checkpoint_name = os.path.join(note_path, 'model_panda_check')
bsize = 32
num_epochs = 20
temp_arr = []
for path, subdirs, files in os.walk(train_cnn):
for name in files:
temp_arr.append(name)
num_train_files = len(temp_arr)
temp_arr = []
for path, subdirs, files in os.walk(valid_cnn):
for name in files:
temp_arr.append(name)
num_valid_files = len(temp_arr)
image_sizey, image_sizex, _ = skimage.io.imread(os.path.join(train_cnn,'isup0',\
os.listdir(train_cnn+'/isup0')[0])).shape
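# steps_per_epoch / validation_steps below: number of image files divided by the batch
# size, so each epoch makes one full pass over the corresponding image set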
s_per_epoch= num_train_files//bsize
val_steps = num_valid_files//bsize
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = data_generator.flow_from_directory(
train_cnn,
target_size=(image_sizey, image_sizex),
batch_size= bsize,
class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
valid_cnn,
target_size=(image_sizey, image_sizex),
batch_size = bsize,
class_mode='categorical')
callbacks_list = [
keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor='val_loss',
mode = 'min',
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=5,
verbose=1,
restore_best_weights = True
),
keras.callbacks.ModelCheckpoint(
filepath= checkpoint_name +".{epoch:02d}.h5",
monitor='val_loss',
mode = 'auto',
save_weights_only = False,
save_freq = 'epoch',
save_best_only=False
),
#keras.callbacks.ReduceLROnPlateau(
#monitor='val_loss',
#factor=0.1,
#patience=6,
#)
]
resnet = ResNet50(include_top=False, pooling='avg', weights='imagenet')
new_model = Sequential()
new_model.add(resnet)
new_model.add(Flatten())
new_model.add(Dense(512, activation='relu'))
new_model.add(Dense(num_classes, activation='softmax'))
#new_model.layers[0].summary()
#new_model.summary()
# Say not to train first layer (ResNet) model. It is already trained
new_model.layers[0].trainable = False
new_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
new_model.fit_generator(
train_generator,
steps_per_epoch=s_per_epoch,
epochs = num_epochs,
callbacks=callbacks_list,
validation_data=validation_generator,
validation_steps=val_steps)
new_model.save(os.path.join(base_path, model_name))
import os
module_name = 'respca_simple_fit'
os.system('jupyter nbconvert --to python ' + module_name + '.ipynb')
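# Post-process the exported .py: keep everything up to (but not including) the line that
# contains the nbconvert call itself, so the exported script ends with the training code.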
with open(module_name + '.py', 'r') as f:
lines = f.readlines()
with open(module_name + '.py', 'w') as f:
for line in lines:
if 'nbconvert --to python' in line:
break
else:
f.write(line)
###Output
_____no_output_____ |
notebooks/5_cross_val_framework.ipynb | ###Markdown
Example Cross-Validation and Linear Regression Framework Testing cross-validation frameworks, feature engineering, feature selection, and the scikit-learn machine learning library with linear regression models. Also doing an initial check on the differences between CIMMYT environmental data and Earth Engine remote sensing data for a linear model. Setting up the Environment NOTE: This notebook may use more RAM than your session has available. Switch the runtime to a TPU on Colab to access 35 GB of RAM and avoid crashing.
###Code
# Install Packages
# Import Packages
import os
import pandas as pd
import re
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
import sklearn
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import linear_model, preprocessing
DISPLAY_PRECISION = 4
pd.set_option("display.precision", DISPLAY_PRECISION)
# Mount User's Drive
# Copy and paste the passkey from your Google account
# You should use the same account that is operating the Colab file
# Ignore if you aren't accessing this file in Google Colab
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
parent_dir_name = 'GxE with GEE'
for dirpath, subdirs, files in os.walk('/content'):
if parent_dir_name in subdirs:
parent_path = dirpath + "/" + parent_dir_name
parent_path
# Set Data Path
## Change the destination to your Drive directory containing the folder 'raw_data.zip'
data_path_end = '/Colab Workspace/Data'
os.chdir(parent_path + data_path_end)
# Print the current working directory
os.getcwd()
# os.chdir('C:/Users/theaa/Downloads')
###Output
_____no_output_____
###Markdown
Import and Process CIMMYT Data
###Code
data = pd.read_pickle('proc_data.pkl')
data.columns=data.columns.str.upper()
data.head()
init_len = data.shape[0]
init_len
# Ensure target variable is numeric and drop observations missing for key variables
data['GRAIN_YIELD'] = pd.to_numeric(data['GRAIN_YIELD'],errors='coerce')
data.drop_duplicates(subset=['UNIQUE_ID'],inplace=True)
data.dropna(subset=['GRAIN_YIELD'],inplace=True)
data.dropna(subset=['HARVEST_FINISHING_DATE'],inplace=True)
data.dropna(subset=['GENO_ID'],inplace=True)
# Yield Vector (nx1)
Y = pd.DataFrame(data['GRAIN_YIELD'],index=data.index)
# Varieties (nx1)
VAR = pd.DataFrame(pd.to_numeric(data['GID'], errors='coerce'),index=data.index)
# Environment locations
ENV = pd.DataFrame(data['LAT_LONG_DAY_MONTH_YEAR'],index=data.index)
Y_VAR_ENV = pd.concat([Y,VAR,ENV],axis=1)
Y_VAR_ENV.to_csv('Y_VAR_ENV.csv')
Y_VAR_ENV.to_pickle('Y_VAR_ENV.pkl')
print(Y.shape)
print(VAR.shape)
print(ENV.shape)
print(Y_VAR_ENV.shape)
# Data from the Imputed environmental covariates done in the LMM replication
W = pd.read_pickle('W_imp.pkl')
W.head()
W.shape
# Drop repeated categorical variables
neg_columns = W.filter(like='_NO').columns
neg_columns
W.drop(neg_columns,axis=1,inplace=True)
W.head()
W.to_pickle('W_proc.pkl')
print(Y.shape) # Should only have one column of target values
print(W.shape) # Should have as many rows as Y
###Output
(169529, 1)
(169529, 58)
###Markdown
Visualization of CIMMYT Environmental Variable Correlations
###Code
def correlation_matrix(y, X, is_plot=False):
# Calculate and plot the correlation symmetrical matrix
# Return:
# yX - concatenated data
# yX_corr - correlation matrix, pearson correlation of values from -1 to +1
# yX_abs_corr - correlation matrix, absolute values
yX = pd.concat([y, X], axis=1)
print("Function correlation_matrix: X.shape, y.shape, yX.shape:", X.shape, y.shape, yX.shape)
print()
# Get feature correlations and transform to dataframe
yX_corr = yX.corr(method='pearson')
# Convert to abolute values
yX_abs_corr = np.abs(yX_corr)
if is_plot:
plt.figure(figsize=(10, 10))
plt.imshow(yX_abs_corr, cmap='RdYlGn', interpolation='none', aspect='auto')
plt.colorbar()
plt.xticks(range(len(yX_abs_corr)), yX_abs_corr.columns, rotation='vertical')
plt.yticks(range(len(yX_abs_corr)), yX_abs_corr.columns);
plt.suptitle('Pearson Correlation Heat Map (absolute values)', fontsize=15, fontweight='bold')
plt.show()
return yX, yX_corr, yX_abs_corr
# Build the correlation matrix for the train data
yX, yX_corr, yX_abs_corr = correlation_matrix(Y['GRAIN_YIELD'], W, is_plot=True)
env_cov_list = ['ALTITUDE',
'PPN_10TH_MO_BEFORE_HARVESTED',
'PPN_11TH_MO_BEFORE_HARVESTED',
'PPN_1ST_MO_BEFORE_HARVESTED',
'PPN_2ND_MO_BEFORE_HARVESTED',
'PPN_3RD_MO_BEFORE_HARVESTED',
'PPN_4TH_MO_BEFORE_HARVESTED',
'PPN_5TH_MO_BEFORE_HARVESTED',
'PPN_6TH_MO_BEFORE_HARVESTED',
'PPN_7TH_MO_BEFORE_HARVESTED',
'PPN_8TH_MO_BEFORE_HARVESTED',
'PPN_9TH_MO_BEFORE_HARVESTED',
'PPN_MONTH_OF_HARVESTED',
'PRECIPITATION_FROM_SOWING_TO_MATURITY',
'TOTAL_PRECIPIT_IN_12_MONTHS',
'IRRIGATED',
'SOIL_ALUMINIUM_TOXICITY',
'OTHER_MICRONUTRIENT_TOXICITY/DEFICIENCY_Y/N',
'LENGTH_OF_ROWS_SOWN',
'SPACE_BTN_ROWS_SOWN',
'NO_OF_ROWS_SOWN',
'LENGTH_OF_ROWS_HARVESTED',
'NO_OF_ROWS_HARVESTED',
'CROP_STAND_OR_DENSITY',
'WEED_PROBLEM',
'BIRD_DAMAGE',
'INSECT_DAMAGE',
'FERTILIZER_APPLIED',
'FOLIAR_DISEASE_DEVELOPMENT',
'LODGING',
'ROOT_DISEASE_DEVELOPMENT',
'HAIL_DAMAGE',
'FROST_DAMAGE_SPIKE'
]
W_factors = ['IRRIGATED',
'SOIL_ALUMINIUM_TOXICITY',
'OTHER_MICRONUTRIENT_TOXICITY/DEFICIENCY_Y/N',
'BIRD_DAMAGE',
'CROP_STAND_OR_DENSITY',
'WEED_PROBLEM',
'INSECT_DAMAGE',
'FERTILIZER_APPLIED',
'FOLIAR_DISEASE_DEVELOPMENT',
'LODGING',
'ROOT_DISEASE_DEVELOPMENT',
'HAIL_DAMAGE',
'FROST_DAMAGE_SPIKE'
]
W_numeric = [x for x in env_cov_list if x not in W_factors]
W_numeric
W_factor_dummies = [x for x in W.columns if x not in W_numeric]
W_factor_dummies
W_column_names = W.columns
W_numeric_interesting = ['GRAIN_YIELD',
'ALTITUDE',
'PPN_MONTH_OF_HARVESTED',
'TOTAL_PRECIPIT_IN_12_MONTHS',
'LENGTH_OF_ROWS_SOWN',
'SPACE_BTN_ROWS_SOWN',
'NO_OF_ROWS_SOWN']
train_data_sample = pd.concat([Y['GRAIN_YIELD'],W], axis=1).sample(5000)
g = sns.PairGrid(train_data_sample[W_numeric_interesting])
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter);
# Visualize multiplot of scatterplots against grain yield
# https://seaborn.pydata.org/tutorial/axis_grids.html
g = sns.PairGrid(train_data_sample[W_numeric_interesting])
g.map_upper(plt.scatter)
g.map_lower(sns.kdeplot)
g.map_diag(sns.kdeplot, lw=3, legend=False);
# Visualization of factor variables
# https://seaborn.pydata.org/tutorial/categorical.html
sns.catplot(x="IRRIGATED.YES", y="GRAIN_YIELD", kind="box", data=train_data_sample);
# https://cmdlinetips.com/2019/03/how-to-make-grouped-boxplots-in-python-with-seaborn/
sns.stripplot(y='GRAIN_YIELD', x='IRRIGATED.YES',
data=train_data_sample,
jitter=True,
dodge=True,
marker='o',
alpha=0.5)
# hue='year')
###Output
_____no_output_____
###Markdown
Import GEE Data and Check for Missing Values
###Code
gee_data = pd.read_pickle('GEE_Data/earth_engine_data.pkl')
gee_data.head()
index_vars = ['HARVEST_FINISHING_DATE','LAT_LONG_DAY_MONTH_YEAR']
data_index = data[index_vars]
data_index.head()
gee_merge = data_index.merge(gee_data,how='left',left_on='LAT_LONG_DAY_MONTH_YEAR',right_index=True)
gee_merge.head()
drop_list = ['HARVEST_FINISHING_DATE','LAT_LONG_DAY_MONTH_YEAR','END_DATE','START_DATE','LAT_COORD','LONG_COORD']
for item in drop_list :
gee_drop = gee_merge.filter(like=item,axis=1).columns
gee_merge.drop(gee_drop,axis=1,inplace=True)
gee_merge.head()
for variable in gee_merge.columns :
gee_merge[variable] = pd.to_numeric(gee_merge[variable], errors='coerce')
gee_merge.dtypes
gee_merge.to_pickle('gee_data_pre_imp.pkl')
gee_merge.isna().sum().sort_values(ascending=False)[0:20]
gee_precip = gee_merge.filter(like='precip',axis=1).columns
gee_precip
import missingno
missingno.matrix(gee_merge, sparkline=False)
missingno.matrix(gee_merge[gee_precip])
missingno.bar(gee_merge,sort='ascending')
# Random forest imputation takes over 10 hours
# imputer = MissForest()
# gee_merge_imp = imputer.fit_transform(gee_merge)
# https://scikit-learn.org/stable/modules/impute.html
import numpy as np
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
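# Round-robin (MICE-style) imputation: each feature with missing values is modelled from
# the others; n_nearest_features caps the number of predictor columns per feature to keep
# the runtime manageable on this wide dataset.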
imp = IterativeImputer(max_iter=3, random_state=0, n_nearest_features = 50)
gee_merge_imp = pd.DataFrame(imp.fit_transform(gee_merge))
gee_merge_imp.head()
gee_merge_imp.index = gee_merge.index
gee_merge_imp.columns = gee_merge.columns
gee_merge_imp.head()
print(gee_merge_imp.shape)
gee_merge_imp.to_pickle('gee_data_imp.pkl')
###Output
_____no_output_____
###Markdown
Importing GEE Environmental Variables
###Code
gee_data = pd.read_pickle('gee_data_imp.pkl')
gee_data.head()
gee_data.shape
gee_columns = gee_data.columns
###Output
_____no_output_____
###Markdown
Visualizing GEE Data
###Code
gee_precip = gee_data.filter(like='precip',axis=1).columns
gee_precip
gee_data[gee_precip].head()
gee_precip_mm = gee_data[gee_precip] * 1000
gee_precip_mm.head()
###Output
_____no_output_____
###Markdown
Comparing GEE and CIMMYT Environmental Data
###Code
W = pd.read_pickle('W_proc.pkl')
W_precip = W.filter(like='PPN',axis=1).columns
W_precip
W_precip_sorted = ['PPN_MONTH_OF_HARVESTED',
'PPN_1ST_MO_BEFORE_HARVESTED',
'PPN_2ND_MO_BEFORE_HARVESTED',
'PPN_3RD_MO_BEFORE_HARVESTED',
'PPN_4TH_MO_BEFORE_HARVESTED',
'PPN_5TH_MO_BEFORE_HARVESTED',
'PPN_6TH_MO_BEFORE_HARVESTED',
'PPN_7TH_MO_BEFORE_HARVESTED',
'PPN_8TH_MO_BEFORE_HARVESTED',
'PPN_9TH_MO_BEFORE_HARVESTED',
'PPN_10TH_MO_BEFORE_HARVESTED',
'PPN_11TH_MO_BEFORE_HARVESTED']
W[W_precip_sorted].head()
# Build the correlation matrix for the train data
yX, yX_corr, yX_abs_corr = correlation_matrix(W[W_precip_sorted], gee_precip_mm, is_plot=True)
###Output
Function correlation_matrix: X.shape, y.shape, yX.shape: (169529, 13) (169529, 12) (169529, 25)
###Markdown
Introduction of Pedigree Coefficient of Parentage Columns
###Code
A = pd.read_pickle('A_matrix_unique.pkl')
A.head()
A.shape
A.to_csv('A_matrix_unique.csv')
from sklearn.preprocessing import StandardScaler
A_scaled = pd.DataFrame(StandardScaler().fit_transform(A), index = A.index)
from sklearn.decomposition import PCA
n_components = 100
pca = PCA(n_components=n_components)
principalComponents = pca.fit_transform(A_scaled)
principalDf = pd.DataFrame(data = principalComponents, index=A.index, columns=['PC_' + str(comp) for comp in range(n_components)])
principalDf.head()
sum(pca.explained_variance_ratio_)
pc_plot_data = pd.DataFrame({'Explained Variance (%)':pca.explained_variance_ratio_[0:10],
'Principal Components':['PC1','PC2','PC3','PC4','PC5','PC6','PC7','PC8','PC9','PC10']})
sns.barplot(x='Principal Components',y="Explained Variance (%)",
data=pc_plot_data, color="c")
principalDf.reset_index(inplace=True)
principalDf.head()
Y_VAR_ENV.head()
Y_VAR_ENV['GID'] = Y_VAR_ENV['GID'].astype(str)
Y_VAR_ENV.dtypes
# Remove GID with less than 3 unique observations associated
GID_counts = pd.DataFrame(np.unique(Y_VAR_ENV['GID'], return_counts=True)).transpose()
GID_counts.columns = ['GID','Count']
GID_counts.set_index('GID').sort_values('Count')
Y_A = pd.merge(Y_VAR_ENV, principalDf, left_on='GID', right_on='index', how='left')
Y_A = Y_A.set_index(Y_VAR_ENV.index)
Y_A.head()
A_PC = Y_A.iloc[:,4:]
A_PC.head()
A_PC.to_pickle('A_matrix_PC.pkl')
GID_dummies = pd.get_dummies(Y_VAR_ENV['GID'].astype('category'))
GID_dummies_names = GID_dummies.columns
GID_dummies.head()
GID_dummies.sum().sort_values()
GID_dummies.to_pickle('GID_dummies.pkl')
Y_COP = pd.merge(Y_VAR_ENV, A, left_on='GID', right_on='index', how='left')
Y_COP = Y_COP.set_index(Y_VAR_ENV.index)
COP_VAR = Y_COP.iloc[:,3:]
COP_VAR.head()
COP_VAR.to_pickle('COP_VAR.pkl')
###Output
_____no_output_____
###Markdown
Split into Train-Test Sets and Cross-Validation URL Source: https://colab.research.google.com/github/gal-a/blog/blob/master/docs/notebooks/sklearn/sklearn_logistic_regression_vs_gbm.ipynb#scrollTo=TYPRYxhH6qJ3
###Code
Y_VAR_ENV = pd.read_pickle('Y_VAR_ENV.pkl')
W = pd.read_pickle('W_proc.pkl')
W_column_names = W.columns
print(Y_VAR_ENV.shape)
print(W.shape)
COP_VAR = pd.read_pickle('COP_VAR.pkl')
GID_dummies = pd.read_pickle('GID_dummies.pkl')
GID_dummies_names = GID_dummies.columns
W_column_names = W.columns
print(COP_VAR.shape)
print(GID_dummies.shape)
feature_set_CIMMYT_COP = pd.concat([Y_VAR_ENV,W,COP_VAR], axis=1)
feature_set_CIMMYT_COP.shape
feature_set_CIMMYT_COP['YEAR'] = feature_set_CIMMYT_COP['LAT_LONG_DAY_MONTH_YEAR'].str.split("_").str[4]
feature_set_CIMMYT_COP['MONTH'] = feature_set_CIMMYT_COP['LAT_LONG_DAY_MONTH_YEAR'].str.split("_").str[3]
feature_set_CIMMYT_COP['DAY'] = feature_set_CIMMYT_COP['LAT_LONG_DAY_MONTH_YEAR'].str.split("_").str[2]
feature_set_CIMMYT_COP['DATE'] = pd.to_datetime(feature_set_CIMMYT_COP['YEAR'] + "-" + feature_set_CIMMYT_COP['MONTH'] + "-" + feature_set_CIMMYT_COP['DAY'])
feature_set_CIMMYT_COP['DATE']
# feature_set_CIMMYT_COP.sort_values(by='DATE',inplace=True)
# Setup X and y
y = pd.DataFrame(feature_set_CIMMYT_COP['GRAIN_YIELD'])
X = feature_set_CIMMYT_COP.drop('GRAIN_YIELD',axis=1)
# Split holdout set for final model comparison
holdout_ratio = 0.15
X_build, X_holdout, y_build, y_holdout = train_test_split(X, y, test_size=holdout_ratio, random_state=0, stratify=X['GID'])
print("X_build.shape, y_build.shape", X_build.shape, y_build.shape)
print("X_holdout.shape, y_holdout.shape", X_holdout.shape, y_holdout.shape)
# Split test set for hyperparameter tuning
TEST_SIZE_RATIO = 0.15 # split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X_build, y_build, test_size=TEST_SIZE_RATIO, random_state=0, stratify=X_build['GID'])
print("X_train.shape, y_train.shape", X_train.shape, y_train.shape)
print("X_test.shape, y_test.shape", X_test.shape, y_test.shape)
X_train_index = X_train.index
X_test_index = X_test.index
y_train_index = y_train.index
y_test_index = y_test.index
X_train_GID = X_train['GID']
X_test_GID = X_test['GID']
X_holdout_GID = X_holdout['GID']
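# Sanity check: GIDs that appear in the holdout set but in neither train nor test
# (stratifying the splits on GID should leave this set empty)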
set(X_holdout_GID) - (set(X_train_GID) | set(X_test_GID))
X_train_GID.to_csv('X_train_GID.csv')
X_test_GID.to_csv('X_test_GID.csv')
X_holdout_GID.to_csv('X_holdout_GID.csv')
###Output
_____no_output_____
###Markdown
Scaling CIMMYT Columns with Standard Scaler
###Code
#@title Create W_numeric and W_factor_dummies object if not already present
W = pd.read_pickle('W_proc.pkl')
W_column_names = W.columns
env_cov_list = ['ALTITUDE',
'PPN_10TH_MO_BEFORE_HARVESTED',
'PPN_11TH_MO_BEFORE_HARVESTED',
'PPN_1ST_MO_BEFORE_HARVESTED',
'PPN_2ND_MO_BEFORE_HARVESTED',
'PPN_3RD_MO_BEFORE_HARVESTED',
'PPN_4TH_MO_BEFORE_HARVESTED',
'PPN_5TH_MO_BEFORE_HARVESTED',
'PPN_6TH_MO_BEFORE_HARVESTED',
'PPN_7TH_MO_BEFORE_HARVESTED',
'PPN_8TH_MO_BEFORE_HARVESTED',
'PPN_9TH_MO_BEFORE_HARVESTED',
'PPN_MONTH_OF_HARVESTED',
'PRECIPITATION_FROM_SOWING_TO_MATURITY',
'TOTAL_PRECIPIT_IN_12_MONTHS',
'IRRIGATED',
'SOIL_ALUMINIUM_TOXICITY',
'OTHER_MICRONUTRIENT_TOXICITY/DEFICIENCY_Y/N',
'LENGTH_OF_ROWS_SOWN',
'SPACE_BTN_ROWS_SOWN',
'NO_OF_ROWS_SOWN',
'LENGTH_OF_ROWS_HARVESTED',
'NO_OF_ROWS_HARVESTED',
'CROP_STAND_OR_DENSITY',
'WEED_PROBLEM',
'BIRD_DAMAGE',
'INSECT_DAMAGE',
'FERTILIZER_APPLIED',
'FOLIAR_DISEASE_DEVELOPMENT',
'LODGING',
'ROOT_DISEASE_DEVELOPMENT',
'HAIL_DAMAGE',
'FROST_DAMAGE_SPIKE'
]
W_factors = ['IRRIGATED',
'SOIL_ALUMINIUM_TOXICITY',
'OTHER_MICRONUTRIENT_TOXICITY/DEFICIENCY_Y/N',
'BIRD_DAMAGE',
'CROP_STAND_OR_DENSITY',
'WEED_PROBLEM',
'INSECT_DAMAGE',
'FERTILIZER_APPLIED',
'FOLIAR_DISEASE_DEVELOPMENT',
'LODGING',
'ROOT_DISEASE_DEVELOPMENT',
'HAIL_DAMAGE',
'FROST_DAMAGE_SPIKE'
]
W_numeric = [x for x in env_cov_list if x not in W_factors]
W_factor_dummies = [x for x in W.columns if x not in W_numeric]
X_train.name = 'X_train'
X_test.name = 'X_test'
X_holdout.name = 'X_holdout'
for data_set in [X_train, X_test, X_holdout] :
names = data_set[W_numeric].columns
scaler = preprocessing.StandardScaler()
data_set_scaled = scaler.fit_transform(data_set[W_numeric]) # Scales numeric columns
data_set_scaled = pd.DataFrame(data_set_scaled, columns=names, index=data_set.index) # Adds back column names and index
data_set_scaled = pd.concat([data_set_scaled,data_set[W_factor_dummies],data_set[GID_dummies_names]], axis = 1) # Concatenate scaled numeric columns with factor columns
pkl_name = data_set.name + '_scaled_GID.pkl'
data_set_scaled.to_pickle(pkl_name)
del X_train, X_test, X_holdout
import gc
gc.collect()
y_train.name = 'y_train'
y_test.name = 'y_test'
y_holdout.name = 'y_holdout'
for data_set in [y_train, y_test, y_holdout] :
names = data_set.columns
scaler = preprocessing.StandardScaler()
data_set_scaled = scaler.fit_transform(data_set)
data_set_scaled = pd.DataFrame(data_set_scaled, columns=names, index=data_set.index)
pkl_name = data_set.name + '_scaled.pkl'
data_set_scaled.to_pickle(pkl_name)
X_train_scaled_GID = pd.read_pickle('X_train_scaled_GID.pkl')
X_test_scaled_GID = pd.read_pickle('X_test_scaled_GID.pkl')
X_holdout_scaled_GID = pd.read_pickle('X_holdout_scaled_GID.pkl')
X_train_scaled_GID.index[0:5]
y_train_scaled = pd.read_pickle('y_train_scaled.pkl')
y_test_scaled = pd.read_pickle('y_test_scaled.pkl')
y_holdout_scaled = pd.read_pickle('y_holdout_scaled.pkl')
y_train_scaled.index[0:5]
print(sum(X_train_scaled_GID.index == y_train_scaled.index) == X_train_scaled_GID.shape[0])
print(sum(X_test_scaled_GID.index == y_test_scaled.index) == X_test_scaled_GID.shape[0])
y_train_scaled.to_csv('y_train_scaled.csv')
y_test_scaled.to_csv('y_test_scaled.csv')
y_holdout_scaled.to_csv('y_holdout_scaled.csv')
X_train_scaled = X_train_scaled_GID[W_column_names]
X_test_scaled = X_test_scaled_GID[W_column_names]
X_holdout_scaled = X_holdout_scaled_GID[W_column_names]
X_train_scaled.to_pickle('X_train_scaled.pkl')
X_test_scaled.to_pickle('X_test_scaled.pkl')
X_holdout_scaled.to_pickle('X_holdout_scaled.pkl')
X_train_scaled.to_csv('X_train_scaled.csv')
X_test_scaled.to_csv('X_test_scaled.csv')
X_holdout_scaled.to_csv('X_holdout_scaled.csv')
X_train_scaled.head()
y_train_scaled.head()
# Should interactions with factors be scaled?
# If so, should the values for 0 from the original factor still be 0 in the interaction column?
###Output
_____no_output_____
###Markdown
Adding COP PCA Columns to CIMMYT Data
###Code
A_PC = pd.read_pickle('A_matrix_PC.pkl')
# Rejoin Principal Components from A Matrix to Features
X_train_scaled_PCA = pd.merge(X_train_scaled, A_PC, left_index = True, right_index = True, how = 'left')
X_test_scaled_PCA = pd.merge(X_test_scaled, A_PC, left_index = True, right_index = True, how = 'left')
X_holdout_scaled_PCA = pd.merge(X_holdout_scaled, A_PC, left_index = True, right_index = True, how = 'left')
X_train_scaled_PCA.head()
X_train_scaled_PCA.to_pickle('X_train_scaled_PCA.pkl')
X_test_scaled_PCA.to_pickle('X_test_scaled_PCA.pkl')
X_holdout_scaled_PCA.to_pickle('X_holdout_scaled_PCA.pkl')
###Output
_____no_output_____
###Markdown
Splitting Train-Test Set GEE Data
###Code
Y_VAR_ENV = pd.read_pickle('Y_VAR_ENV.pkl')
gee_data = pd.read_pickle('gee_data_imp.pkl')
COP_VAR = pd.read_pickle('COP_VAR.pkl')
GID_dummies = pd.read_pickle('GID_dummies.pkl')
GID_dummies_names = GID_dummies.columns
gee_columns = gee_data.columns
print(Y_VAR_ENV.shape)
print(gee_data.shape)
print(COP_VAR.shape)
print(GID_dummies.shape)
Y_VAR_ENV.head()
gee_data.head()
COP_VAR.head()
feature_set_gee_COP = pd.concat([Y_VAR_ENV,gee_data,COP_VAR], axis=1)
feature_set_gee_COP.shape
# Setup X and y
y_gee = pd.DataFrame(feature_set_gee_COP['GRAIN_YIELD'])
X_gee = feature_set_gee_COP.drop('GRAIN_YIELD',axis=1)
# Split holdout set for final model comparison
holdout_ratio = 0.15
X_build_gee, X_holdout_gee, y_build_gee, y_holdout_gee = train_test_split(X_gee, y_gee, test_size=holdout_ratio, random_state=0, stratify=X_gee['GID'])
print("X_build.shape, y_build.shape", X_build_gee.shape, y_build_gee.shape)
print("X_holdout.shape, y_holdout.shape", X_holdout_gee.shape, y_holdout_gee.shape)
# Split test set for hyperparameter tuning
TEST_SIZE_RATIO = 0.15 # split into train and test sets
X_train_gee, X_test_gee, y_train_gee, y_test_gee = train_test_split(X_build_gee, y_build_gee, test_size=TEST_SIZE_RATIO, random_state=0, stratify=X_build_gee['GID'])
print("X_train.shape, y_train.shape", X_train_gee.shape, y_train_gee.shape)
print("X_test.shape, y_test.shape", X_test_gee.shape, y_test_gee.shape)
X_train_gee_index = X_train_gee.index
X_test_gee_index = X_test_gee.index
y_train_gee_index = y_train_gee.index
y_test_gee_index = y_test_gee.index
X_train_gee_GID = X_train_gee['GID']
X_test_gee_GID = X_test_gee['GID']
X_holdout_gee_GID = X_holdout_gee['GID']
X_train_gee_GID.to_csv('X_train_gee_GID.csv')
X_test_gee_GID.to_csv('X_test_gee_GID.csv')
X_holdout_gee_GID.to_csv('X_holdout_gee_GID.csv')
del X_gee, y_gee, feature_set_gee_COP, gee_data, COP_VAR
import gc
gc.collect()
###Output
_____no_output_____
###Markdown
Scaling GEE Columns with Standard Scaler
###Code
X_train_gee.name = 'X_train_gee'
X_test_gee.name = 'X_test_gee'
X_holdout_gee.name = 'X_holdout_gee'
for data_set in [X_train_gee] :
names = data_set[gee_columns].columns
scaler = preprocessing.StandardScaler()
data_set_scaled = scaler.fit_transform(data_set[gee_columns])
data_set_scaled = pd.DataFrame(data_set_scaled, columns=names, index=data_set.index)
data_set_scaled = pd.concat([data_set_scaled,data_set[GID_dummies_names]], axis = 1)
pkl_name = data_set.name + '_scaled_GID.pkl'
data_set_scaled.to_pickle(pkl_name)
del X_train_gee
import gc
gc.collect()
for data_set in [X_test_gee, X_holdout_gee] :
names = data_set[gee_columns].columns
scaler = preprocessing.StandardScaler()
data_set_scaled = scaler.fit_transform(data_set[gee_columns])
data_set_scaled = pd.DataFrame(data_set_scaled, columns=names, index=data_set.index)
data_set_scaled = pd.concat([data_set_scaled,data_set[GID_dummies_names]], axis = 1)
pkl_name = data_set.name + '_scaled_GID.pkl'
data_set_scaled.to_pickle(pkl_name)
X_train_gee_scaled_GID = pd.read_pickle('X_train_gee_scaled_GID.pkl')
X_test_gee_scaled_GID = pd.read_pickle('X_test_gee_scaled_GID.pkl')
X_holdout_gee_scaled_GID = pd.read_pickle('X_holdout_gee_scaled_GID.pkl')
X_train_gee_scaled = X_train_gee_scaled_GID[gee_columns]
X_test_gee_scaled = X_test_gee_scaled_GID[gee_columns]
X_holdout_gee_scaled = X_holdout_gee_scaled_GID[gee_columns]
X_train_gee_scaled.to_pickle('X_train_gee_scaled.pkl')
X_test_gee_scaled.to_pickle('X_test_gee_scaled.pkl')
X_holdout_gee_scaled.to_pickle('X_holdout_gee_scaled.pkl')
X_train_gee_scaled.head()
y_train_scaled=pd.read_pickle('y_train_scaled.pkl')
y_train_scaled.head()
X_train_gee_scaled.to_csv('X_train_gee_scaled.csv')
X_test_gee_scaled.to_csv('X_test_gee_scaled.csv')
X_holdout_gee_scaled.to_csv('X_holdout_gee_scaled.csv')
# Should interactions with factors be scaled?
# If so, should the values for 0 from the original factor still be 0 in the interaction column?
###Output
_____no_output_____
###Markdown
Add COP PCA to GEE Data
###Code
A_PC = pd.read_pickle('A_matrix_PC.pkl')
# Rejoin Principal Components from A Matrix to Features
X_train_gee_scaled_PCA = pd.merge(X_train_gee_scaled, A_PC, left_index = True, right_index = True, how = 'left')
X_test_gee_scaled_PCA = pd.merge(X_test_gee_scaled, A_PC, left_index = True, right_index = True, how = 'left')
X_holdout_gee_scaled_PCA = pd.merge(X_holdout_gee_scaled, A_PC, left_index = True, right_index = True, how = 'left')
X_train_gee_scaled_PCA.head()
X_train_gee_scaled_PCA.to_pickle('X_train_gee_scaled_PCA.pkl')
X_test_gee_scaled_PCA.to_pickle('X_test_gee_scaled_PCA.pkl')
X_holdout_gee_scaled_PCA.to_pickle('X_holdout_gee_scaled_PCA.pkl')
###Output
_____no_output_____
###Markdown
Combining CIMMYT and GEE Data
###Code
X_train_scaled = pd.read_pickle('X_train_scaled.pkl')
X_test_scaled = pd.read_pickle('X_test_scaled.pkl')
X_holdout_scaled = pd.read_pickle('X_holdout_scaled.pkl')
X_train_gee_scaled = pd.read_pickle('X_train_gee_scaled.pkl')
X_test_gee_scaled = pd.read_pickle('X_test_gee_scaled.pkl')
X_holdout_gee_scaled = pd.read_pickle('X_holdout_gee_scaled.pkl')
X_holdout_scaled.head()
X_holdout_gee_scaled.head()
y_holdout_scaled.head()
X_train_combined = pd.concat([X_train_scaled, X_train_gee_scaled], axis=1)
X_test_combined = pd.concat([X_test_scaled, X_test_gee_scaled], axis=1)
X_holdout_combined = pd.concat([X_holdout_scaled, X_holdout_gee_scaled], axis=1)
X_train_combined.head()
X_train_combined.to_pickle('X_train_combined.pkl')
X_test_combined.to_pickle('X_test_combined.pkl')
X_holdout_combined.to_pickle('X_holdout_combined.pkl')
A_PC = pd.read_pickle('A_matrix_PC.pkl')
X_train_combined_PCA = pd.merge(X_train_combined, A_PC, left_index = True, right_index = True, how = 'left')
X_test_combined_PCA = pd.merge(X_test_combined, A_PC, left_index = True, right_index = True, how = 'left')
X_holdout_combined_PCA = pd.merge(X_holdout_combined, A_PC, left_index = True, right_index = True, how = 'left')
X_train_combined_PCA.head()
X_train_combined_PCA.to_pickle('X_train_combined_PCA.pkl')
X_test_combined_PCA.to_pickle('X_test_combined_PCA.pkl')
X_holdout_combined_PCA.to_pickle('X_holdout_combined_PCA.pkl')
COP_VAR = pd.read_pickle('COP_VAR.pkl')
COP_VAR.shape
X_train_combined_COP = pd.merge(X_train_combined, COP_VAR, left_index = True, right_index = True, how = 'left')
X_test_combined_COP = pd.merge(X_test_combined, COP_VAR, left_index = True, right_index = True, how = 'left')
X_holdout_combined_COP = pd.merge(X_holdout_combined, COP_VAR, left_index = True, right_index = True, how = 'left')
X_train_combined_COP.head()
X_train_combined_COP.to_pickle('X_train_combined_GID.pkl')
X_test_combined_COP.to_pickle('X_test_combined_GID.pkl')
X_holdout_combined_COP.to_pickle('X_holdout_combined_GID.pkl')
###Output
_____no_output_____
###Markdown
Feature Engineering and Linear Regression Tests The following are not part of the standardized ML workflow that is being compared for the other models; it is just to test the data in a linear model. Other Feature Engineering Attempts Create Interaction Terms with Polynomial Features
###Code
# Creating interaction terms with scikitlearn polynomial
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
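# degree=2 with interaction_only=True generates only pairwise products x_i * x_j
# (plus the original columns), not squared terms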
interaction = sklearn.preprocessing.PolynomialFeatures(degree=2, interaction_only=True, include_bias=False, order='C')
interact_fit = interaction.fit(W)
interact_names = interact_fit.get_feature_names(W.columns)
W_interact = pd.DataFrame(interact_fit.transform(W), columns=interact_names, index=W.index)
W_interact_factors = [W_interact.iloc[:,col].name for col in range(W_interact.shape[1]) if W_interact.iloc[:,col].max() == 1 and W_interact.iloc[:,col].min() == 0]
W_interact_cont = [W_interact.iloc[:,col].name for col in range(W_interact.shape[1]) if W_interact.iloc[:,col].name not in W_interact_factors]
orig_name = W.columns
# Still need a better way to check for factors
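# One possible alternative (a sketch, not used below): flag a column as a factor when it
# only ever takes the values 0 and 1, e.g.
# W_interact_factors = [c for c in W_interact.columns if set(W_interact[c].unique()) <= {0, 1}]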
W_interact_factors
W_interact[W_interact_factors] = W_interact[W_interact_factors].astype('category')
W_interact.dtypes
W_interact.head()
###Output
_____no_output_____
###Markdown
Feature Engineering with AutoFeat
###Code
# Split test set for feature selection
feature_test_size = 0.10 # split into train and test sets
X_feat_train, X_feat_test, y_feat_train, y_feat_test = train_test_split(X_train_scaled, y_train_scaled, test_size=feature_test_size, random_state=0)
print("X_feat_train.shape, y_feat_train.shape", X_feat_train.shape, y_feat_train.shape)
print("X_feat_test.shape, y_feat_test.shape", X_feat_test.shape, y_feat_test.shape)
!pip install autofeat
from autofeat import FeatureSelector, AutoFeatRegressor
%matplotlib inline
%load_ext autoreload
%autoreload 2
# Automatically generate potential features using the feature training set evaluated against the target
# Only uses feature training data for generation
# Final selection will be done on feature test data
for steps in range(2):
np.random.seed(55)
print("### AutoFeat with %i feateng_steps" % steps)
afreg = AutoFeatRegressor(verbose=1, feateng_steps=steps)
X_feat_train_gen = afreg.fit_transform(X_feat_train, y_feat_train.values.ravel())
r2 = afreg.score(X_feat_train, y_feat_train.values.ravel())
print("## Final R^2: %.4f" % r2)
plt.figure()
plt.scatter(afreg.predict(X_feat_train), y_feat_train.values.ravel(), s=2);
plt.title("%i FE steps (R^2: %.4f; %i new features)" % (steps, r2, len(afreg.new_feat_cols_)))
X_feat_test_gen = afreg.transform(X_feat_test)
X_feat_test_gen.head()
###Output
[AutoFeat] Computing 153 new features.
[AutoFeat] 153/ 153 new features ...done.
###Markdown
Feature Selector from MLXtend http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.feature_selection/#sequentialfeatureselector http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/ http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/ From Example 3
###Code
#https://hub.packtpub.com/4-ways-implement-feature-selection-python-machine-learning/
# Evaluate potential features and select best predictors using feature test set
from sklearn.linear_model import LinearRegression
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
import matplotlib.pyplot as plt
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
import time
start = time.time()
lr = LinearRegression()
sfs = SFS(lr,
k_features=50,
forward=True,
floating=False,
scoring='neg_mean_squared_error',
cv=15)
sfs = sfs.fit(X_feat_test_gen, y_feat_test)
fig = plot_sfs(sfs.get_metric_dict(), kind='std_err')
plt.title('Sequential Forward Selection (w. StdErr)')
plt.grid()
plt.show()
print('Total runtime was (s): {}'.format(time.time() - start))
# Last runtime with 30 features: 1153 seconds
features = list(sfs.k_feature_names_)
features
# Transform training set for hyperparameter tuning using afreg
# Select features using optimal feature set of feature selection
# This training set is paired with y_train_scaled
X_train_model = afreg.transform(X_train_scaled)
X_train_model = X_train_model[features]
# The parameter tuning is evaluated against the X_test_scaled and y_test_scaled data
X_test_model = afreg.transform(X_test_scaled)
X_test_model = X_test_model[features]
y_train_model = y_train_scaled
y_test_model = y_test_scaled
X_train_model.to_pickle('X_train_model.pkl')
X_test_model.to_pickle('X_test_model.pkl')
y_train_model.to_pickle('y_train_model.pkl')
y_test_model.to_pickle('y_test_model.pkl')
# Alternative data set without feature engineering or selection
X_train_model = X_train_scaled
X_test_model = X_test_scaled
y_train_model = y_train_scaled
y_test_model = y_test_scaled
# Exhaustive Feature Selection (Takes too long)
# from sklearn.linear_model import LinearRegression
# from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
# import time
# start = time.time()
# lr = LinearRegression()
# efs = EFS(lr,
# min_features=5,
# max_features=17,
# scoring='neg_mean_squared_error',
# cv=3)
# efs.fit(X_train, y_train)
# print('Best MSE score: %.2f' % efs.best_score_ * (-1))
# print('Best subset:', efs.best_idx_)
# print('Total runtime was (s): {}'.format(time.time() - start))
# https://pypi.org/project/autofeat/#data
# https://github.com/cod3licious/autofeat/blob/master/autofeat_examples.ipynb
###Output
_____no_output_____
###Markdown
GridSearchCV for Regularized Regression https://alfurka.github.io/2018-11-18-grid-search/ https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.03-Hyperparameters-and-Model-Validation.ipynb#scrollTo=OcK-JlX-ZCLo
###Code
# Import model data objects, in case you want to skip the processing steps above
X_train_model = pd.read_pickle('X_train_scaled_GID.pkl')
X_test_model = pd.read_pickle('X_test_scaled_GID.pkl')
y_train_model = pd.read_pickle('y_train_scaled.pkl')
y_test_model = pd.read_pickle('y_test_scaled.pkl')
X_train_model.head()
df = pd.DataFrame([['Train', 'OLS'], ['Train', 'Ridge'], ['Train', 'LASSO'],
['Test', 'OLS'], ['Test', 'Ridge'], ['Test', 'LASSO']],
columns=['Train_Test', 'Model'])
results_grid = pd.DataFrame(index = pd.MultiIndex.from_frame(df), columns=['R_2','Adj_R_2','RMSE','MAE'])
results_grid
from sklearn.model_selection import GridSearchCV
import sklearn.linear_model
from sklearn import metrics
ols_regressor = sklearn.linear_model.LinearRegression()
ols_train_pred = ols_regressor.fit(X_train_model, y_train_model).predict(X_train_model)
results_grid.loc['Train','OLS']['R_2'] = metrics.r2_score(y_train_model, ols_train_pred)
results_grid.loc['Train','OLS']['Adj_R_2'] = 1-(1-results_grid.loc['Train','OLS']['R_2'])*(y_train_model.shape[0]-1)/(y_train_model.shape[0]-X_train_model.shape[1]-1)
results_grid.loc['Train','OLS']['RMSE'] = np.sqrt(metrics.mean_squared_error(y_train_model, ols_train_pred))
results_grid.loc['Train','OLS']['MAE'] = metrics.mean_absolute_error(y_train_model, ols_train_pred)
ols_test_pred = ols_regressor.fit(X_train_model, y_train_model).predict(X_test_model)
results_grid.loc['Test','OLS']['R_2'] = metrics.r2_score(y_test_model, ols_test_pred)
results_grid.loc['Test','OLS']['Adj_R_2'] = 1-(1-results_grid.loc['Test','OLS']['R_2'])*(y_train_model.shape[0]-1)/(y_train_model.shape[0]-X_train_model.shape[1]-1)
results_grid.loc['Test','OLS']['RMSE'] = np.sqrt(metrics.mean_squared_error(y_test_model, ols_test_pred))
results_grid.loc['Test','OLS']['MAE'] = metrics.mean_absolute_error(y_test_model, ols_test_pred)
results_grid
lasso = sklearn.linear_model.Lasso(selection = 'random')
parameters = {'alpha' : [0.001, 0.01, 0.1, 1, 5], 'tol' : [0.01, 0.1, 1], 'max_iter' : [50, 100, 500, 1000]}
lasso_regressor = GridSearchCV(lasso, parameters, scoring = 'neg_mean_squared_error', cv = 5)
lasso_fitted = lasso_regressor.fit(X_train_model, y_train_model)
lasso_train_pred = lasso_fitted.predict(X_train_model)
results_grid.loc['Train','LASSO']['R_2'] = metrics.r2_score(y_train_model, lasso_train_pred)
results_grid.loc['Train','LASSO']['Adj_R_2'] = 1-(1-results_grid.loc['Train','LASSO']['R_2'])*(y_train_model.shape[0]-1)/(y_train_model.shape[0]-X_train_model.shape[1]-1)
results_grid.loc['Train','LASSO']['RMSE'] = np.sqrt(metrics.mean_squared_error(y_train_model, lasso_train_pred))
results_grid.loc['Train','LASSO']['MAE'] = metrics.mean_absolute_error(y_train_model, lasso_train_pred)
lasso_test_pred = lasso_fitted.predict(X_test_model)
results_grid.loc['Test','LASSO']['R_2'] = metrics.r2_score(y_test_model, lasso_test_pred)
results_grid.loc['Test','LASSO']['Adj_R_2'] = 1-(1-results_grid.loc['Test','LASSO']['R_2'])*(y_train_model.shape[0]-1)/(y_train_model.shape[0]-X_train_model.shape[1]-1)
results_grid.loc['Test','LASSO']['RMSE'] = np.sqrt(metrics.mean_squared_error(y_test_model, lasso_test_pred))
results_grid.loc['Test','LASSO']['MAE'] = metrics.mean_absolute_error(y_test_model, lasso_test_pred)
results_grid
ridge = sklearn.linear_model.Ridge()
parameters = {'alpha' : [.001, 0.01, 0.1, 0.5, 1, 1.5, 5]}
ridge_regressor = GridSearchCV(ridge, parameters, scoring = 'neg_mean_squared_error', cv = 5)
ridge_fitted = ridge_regressor.fit(X_train_model, y_train_model)
ridge_train_pred = ridge_fitted.predict(X_train_model)
results_grid.loc['Train','Ridge']['R_2'] = metrics.r2_score(y_train_model, ridge_train_pred)
results_grid.loc['Train','Ridge']['Adj_R_2'] = 1-(1-results_grid.loc['Train','Ridge']['R_2'])*(y_train_model.shape[0]-1)/(y_train_model.shape[0]-X_train_model.shape[1]-1)
results_grid.loc['Train','Ridge']['RMSE'] = np.sqrt(metrics.mean_squared_error(y_train_model, ridge_train_pred))
results_grid.loc['Train','Ridge']['MAE'] = metrics.mean_absolute_error(y_train_model, ridge_train_pred)
ridge_test_pred = ridge_fitted.predict(X_test_model)
results_grid.loc['Test','Ridge']['R_2'] = metrics.r2_score(y_test_model, ridge_test_pred)
results_grid.loc['Test','Ridge']['Adj_R_2'] = 1-(1-results_grid.loc['Test','Ridge']['R_2'])*(y_train_model.shape[0]-1)/(y_train_model.shape[0]-X_train_model.shape[1]-1)
results_grid.loc['Test','Ridge']['RMSE'] = np.sqrt(metrics.mean_squared_error(y_test_model, ridge_test_pred))
results_grid.loc['Test','Ridge']['MAE'] = metrics.mean_absolute_error(y_test_model, ridge_test_pred)
results_grid
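# The three blocks above repeat the same bookkeeping. A small helper (a sketch, not used
# in the recorded run) could fill one row of results_grid per call, mirroring the formulas above:
def fill_results(split, model, y_true, y_pred, n_train=None, k=None):
    n_train = y_train_model.shape[0] if n_train is None else n_train
    k = X_train_model.shape[1] if k is None else k
    r2 = metrics.r2_score(y_true, y_pred)
    results_grid.loc[split, model]['R_2'] = r2
    results_grid.loc[split, model]['Adj_R_2'] = 1 - (1 - r2) * (n_train - 1) / (n_train - k - 1)
    results_grid.loc[split, model]['RMSE'] = np.sqrt(metrics.mean_squared_error(y_true, y_pred))
    results_grid.loc[split, model]['MAE'] = metrics.mean_absolute_error(y_true, y_pred)

# e.g. fill_results('Test', 'Ridge', y_test_model, ridge_test_pred)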
# Residuals Plot
###Output
_____no_output_____
###Markdown
More advanced options for regression comparison: Pipelines and Iterative Gridsearch
###Code
# Create a pipeline
# pipe = Pipeline([("classifier", RandomForestClassifier())])
# # Create dictionary with candidate learning algorithms and their hyperparameters
# search_space = [
# {"classifier": [LogisticRegression()],
# "classifier__penalty": ['l2','l1'],
# "classifier__C": np.logspace(0, 4, 10)
# },
# {"classifier": [LogisticRegression()],
# "classifier__penalty": ['l2'],
# "classifier__C": np.logspace(0, 4, 10),
# "classifier__solver":['newton-cg','saga','sag','liblinear'] ##This solvers don't allow L1 penalty
# },
# {"classifier": [RandomForestClassifier()],
# "classifier__n_estimators": [10, 100, 1000],
# "classifier__max_depth":[5,8,15,25,30,None],
# "classifier__min_samples_leaf":[1,2,5,10,15,100],
# "classifier__max_leaf_nodes": [2, 5,10]}]
# # create a gridsearch of the pipeline, the fit the best model
# gridsearch = GridSearchCV(pipe, search_space, cv=5, verbose=0,n_jobs=-1) # Fit grid search
# best_model = gridsearch.fit(features, target)
# from sklearn import metrics
# from sklearn import linear_model
# from sklearn.pipeline import Pipeline
# from sklearn.preprocessing import PolynomialFeatures
# from sklearn.model_selection import GridSearchCV, train_test_split
# def test(models, X, Y, iterations = 100):
# results = pd.DataFrame(columns=[list(models.keys())])
# for i in models:
# result_iter = pd.DataFrame(columns=['Parameters','Train_R_2','Train_RMSE','Train_MAE', 'Test_R_2','Test_RMSE','Test_MAE'])
# for j in range(iterations):
# X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size= 0.2)
# train_pred = models[i].fit(X_train, y_train).predict(X_train)
# result_iter.loc[j,'Train_R_2'] = (metrics.r2_score(y_train, train_pred))
# # result_iter.loc[j,'Train_RMSE'] = (np.sqrt(metrics.mean_squared_error(y_train, train_pred)))
# # result_iter.loc[j,'Train_MAE'] = (metrics.mean_absolute_error(y_train, train_pred))
# test_pred = models[i].fit(X_train, y_train).predict(X_test)
# result_iter.loc[j,'Test_R_2'] = (metrics.r2_score(y_test, test_pred))
# # result_iter.loc[j,'Test_RMSE'] = (np.sqrt(metrics.mean_squared_error(y_test, test_pred)))
# # result_iter.loc[j,'Test_MAE'] = (metrics.mean_absolute_error(y_test, test_pred))
# # results.loc[i,:] = [np.mean(result_iter['Train_R_2']), np.mean(result_iter['Train_RMSE']), np.mean(result_iter['Train_MAE']),
# # np.mean(result_iter['Test_R_2']), np.mean(result_iter['Test_RMSE']), np.mean(result_iter['Test_MAE'])]
# results.
# return pd.DataFrame(results)
# models = {'OLS': linear_model.LinearRegression(),
# 'Lasso': linear_model.Lasso(),
# 'Ridge': linear_model.Ridge()}
# start = time.time()
# results = test(models, X=X_train_model[features], Y=y_train_model)
# print('Total runtime was (s): {}'.format(time.time() - start))
# results
# lasso_params = {'alpha':[0.02, 0.024, 0.025, 0.026, 0.03]}
# ridge_params = {'alpha':[200, 230, 250,265, 270, 275, 290, 300, 500]}
# models2 = {'OLS': linear_model.LinearRegression(),
# 'Lasso': GridSearchCV(linear_model.Lasso(),
# param_grid=lasso_params).fit(X_train_scaled, y_train_scaled),
# 'Ridge': GridSearchCV(linear_model.Ridge(),
# param_grid=ridge_params).fit(X_train_scaled, y_train_scaled)}
# Lasso = GridSearchCV(linear_model.Lasso(), param_grid=lasso_params).fit(X_train_scaled, y_train_scaled)
# start = time.time()
# results_grid = test(models2, X=X_train_gen[features], Y=y_train_scaled)
# print('Total runtime was (s): {}'.format(time.time() - start))
# results_grid
# How to pull optimal alpha values from gridsearch?
# https://towardsdatascience.com/how-to-perform-lasso-and-ridge-regression-in-python-3b3b75541ad8
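# A regression-flavoured sketch of the pipeline + grid-search idea above (hypothetical
# parameter grid; the commented template targets classifiers instead). The best alpha can
# be read from GridSearchCV.best_params_ after fitting.
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, Lasso

pipe = Pipeline([("regressor", Ridge())])
search_space = [
    {"regressor": [Ridge()], "regressor__alpha": [0.01, 0.1, 1, 10, 100]},
    {"regressor": [Lasso()], "regressor__alpha": [0.001, 0.01, 0.1, 1]},
]
grid = GridSearchCV(pipe, search_space, scoring='neg_mean_squared_error', cv=5, n_jobs=-1)
# grid.fit(X_train_model, y_train_model)
# grid.best_params_    -> winning estimator and its alpha
# grid.best_estimator_ -> the refitted pipeline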
###Output
_____no_output_____
###Markdown
Other Resources
###Code
# https://machinelearningmastery.com/compare-machine-learning-algorithms-python-scikit-learn/
# Pipelining Tutorial
# https://towardsdatascience.com/hyper-parameter-tuning-and-model-selection-like-a-movie-star-a884b8ee8d68
# Hint: If Medium gives you paywalls,
# you can usually get around them by opening the window in incognito mode
# Gradient Boosting Regression Example
# https://towardsdatascience.com/a-complete-machine-learning-project-walk-through-in-python-part-two-300f1f8147e2
# Regularized Boosting with XGBoost
# https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/
# Speeding things up with PySpark
# https://www.analyticsvidhya.com/blog/2019/11/build-machine-learning-pipelines-pyspark/
###Output
_____no_output_____ |
sdkv1/ch12/cloudformation/Create and update stack with one model.ipynb | ###Markdown
Create one-model endpoint
###Code
# Update this with your own model name
training_job = 'tensorflow-training-2020-06-08-07-46-04-367'
job = sm.describe_training_job(TrainingJobName=training_job)
model_data_url = job['ModelArtifacts']['S3ModelArtifacts']
role_arn = job['RoleArn']
# https://github.com/aws/deep-learning-containers/blob/master/available_images.md
container_image = '763104351884.dkr.ecr.us-east-1.amazonaws.com/tensorflow-inference:2.1.0-cpu-py36-ubuntu18.04'
import time
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
stack_name='endpoint-one-model-'+timestamp
print(stack_name)
with open('endpoint-one-model.yml', 'r') as f:
response = cf.create_stack(StackName=stack_name,
TemplateBody=f.read(),
Parameters=[
{"ParameterKey":"ModelName", "ParameterValue":training_job+'-'+timestamp},
{"ParameterKey":"ContainerImage","ParameterValue":container_image},
{"ParameterKey":"ModelDataUrl", "ParameterValue":model_data_url},
{"ParameterKey":"RoleArn", "ParameterValue":role_arn} ])
print(response)
waiter = cf.get_waiter('stack_create_complete')
waiter.wait(StackName=stack_name)
response = cf.describe_stack_events(StackName=stack_name)
for e in response['StackEvents']:
print('%s %s' % (e['ResourceType'], e['ResourceStatus']))
response = cf.describe_stacks(StackName=stack_name)
print(response['Stacks'][0]['StackStatus'])
for o in response['Stacks'][0]['Outputs']:
if o['OutputKey']=='EndpointName':
endpoint_name = o['OutputValue']
print(endpoint_name)
###Output
_____no_output_____
###Markdown
Apply change set to update instance count
###Code
response = cf.create_change_set(
StackName=stack_name,
ChangeSetName='add-instance',
UsePreviousTemplate=True,
Parameters=[
{"ParameterKey":"InstanceCount", "ParameterValue": "2"},
{"ParameterKey":"ModelName", "UsePreviousValue": True},
{"ParameterKey":"ContainerImage","UsePreviousValue": True},
{"ParameterKey":"ModelDataUrl", "UsePreviousValue": True},
{"ParameterKey":"RoleArn", "UsePreviousValue": True}
]
)
response
waiter = cf.get_waiter('change_set_create_complete')
waiter.wait(
StackName=stack_name,
ChangeSetName='add-instance'
)
response = cf.describe_change_set(
StackName=stack_name,
ChangeSetName='add-instance'
)
response['Changes']
response = cf.execute_change_set(
StackName=stack_name,
ChangeSetName='add-instance'
)
response
response = cf.describe_stacks(StackName=stack_name)
print(response['Stacks'][0]['StackStatus'])
response = cf.describe_stack_events(StackName=stack_name)
for e in response['StackEvents']:
print('%s %s' % (e['ResourceType'], e['ResourceStatus']))
waiter = cf.get_waiter('stack_update_complete')
waiter.wait(StackName=stack_name)
response = sm.describe_endpoint(EndpointName=endpoint_name)
response['ProductionVariants'][0]['CurrentInstanceCount']
###Output
_____no_output_____
###Markdown
Apply change set to add second production variant to endpoint
###Code
!pygmentize endpoint-two-models.yml
# Update this with your own model name
training_job_2 = 'tensorflow-training-2020-06-08-07-32-18-734'
job_2 = sm.describe_training_job(TrainingJobName=training_job_2)
model_data_url_2 = job_2['ModelArtifacts']['S3ModelArtifacts']
with open('endpoint-two-models.yml', 'r') as f:
response = cf.create_change_set(
StackName=stack_name,
ChangeSetName='add-model',
TemplateBody=f.read(),
Parameters=[
{"ParameterKey":"ModelName", "UsePreviousValue": True},
{"ParameterKey":"ModelDataUrl", "UsePreviousValue": True},
{"ParameterKey":"ContainerImage", "UsePreviousValue": True},
{"ParameterKey":"RoleArn", "UsePreviousValue": True},
{"ParameterKey":"ModelName2", "ParameterValue": training_job_2+'-'+timestamp},
{"ParameterKey":"ModelDataUrl2", "ParameterValue": model_data_url_2}
]
)
response
waiter = cf.get_waiter('change_set_create_complete')
waiter.wait(
StackName=stack_name,
ChangeSetName='add-model'
)
response = cf.describe_change_set(
StackName=stack_name,
ChangeSetName='add-model'
)
response['Changes']
response = cf.execute_change_set(
StackName=stack_name,
ChangeSetName='add-model'
)
response
waiter = cf.get_waiter('stack_update_complete')
waiter.wait(StackName=stack_name)
response = sm.describe_endpoint(EndpointName=endpoint_name)
response['ProductionVariants']
###Output
_____no_output_____
###Markdown
Create a CloudWatch alarm for model latency
###Code
cw = boto3.client('cloudwatch')
alarm_name = 'My_endpoint_latency'
response = cw.put_metric_alarm(
AlarmName=alarm_name,
ComparisonOperator='GreaterThanThreshold',
EvaluationPeriods=1,
MetricName='ModelLatency',
Namespace='AWS/SageMaker',
Period=60,
Statistic='Average',
Threshold=500000.0,
AlarmDescription='Alarm when 1-minute average latency exceeds 500ms',
Dimensions=[
{
'Name': 'EndpointName',
'Value': endpoint_name
},
{
'Name': 'VariantName',
'Value': 'variant-2'
}
],
Unit='Microseconds'
)
response
response = cw.describe_alarms(AlarmNames=[alarm_name])
for a in response['MetricAlarms']:
if a['AlarmName'] == alarm_name:
alarm_arn = a['AlarmArn']
print(alarm_arn)
###Output
_____no_output_____
###Markdown
Canary deployment of second model
###Code
weights = list(range(10,110,10))
print(weights)
for w in weights:
response = cf.update_stack(
StackName=stack_name,
UsePreviousTemplate=True,
Parameters=[
{"ParameterKey":"ModelName", "UsePreviousValue": True},
{"ParameterKey":"ModelDataUrl", "UsePreviousValue": True},
{"ParameterKey":"ContainerImage", "UsePreviousValue": True},
{"ParameterKey":"RoleArn", "UsePreviousValue": True},
{"ParameterKey":"ModelName2", "UsePreviousValue": True},
{"ParameterKey":"ModelDataUrl2", "UsePreviousValue": True},
{"ParameterKey":"VariantWeight", "ParameterValue": str(100-w)},
{"ParameterKey":"VariantWeight2", "ParameterValue": str(w)}
],
RollbackConfiguration={
'RollbackTriggers': [
{
'Arn': alarm_arn,
'Type': 'AWS::CloudWatch::Alarm'
}
],
'MonitoringTimeInMinutes': 5
}
)
waiter = cf.get_waiter('stack_update_complete')
waiter.wait(StackName=stack_name)
print("Sending %d percent of traffic to new model" % w)
cf.delete_stack(StackName=stack_name)
###Output
_____no_output_____ |
Numerical Analysis/Numerical Analysis - 8.ipynb | ###Markdown
Numerical Analysis - 8 Rafael Barsotti 1) Implement Euler's method to solve the initial value problem (IVP) $x′ = x^{1/3}$, $x(0) = 0$. What happens? (Note that this problem has more than one analytical solution.)
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math as m
# Question 1 - Euler's method
# Function f(x)
def f1(x):
y = x**(1/3)
return y
# Euler's method
def euler_method(a,b,f1,x0,t0,n):
D = np.array([[0,x0]])
h = (b-a)/n
t = t0
x = x0
for i in range(n):
x = x + h*f1(x)
t = t + h
D = np.append(D,[[t,x]], axis = 0)
return D
# ODE plotting helpers
def edo_plot(D):
x = D[:,0]
y = D[:,1]
plt.plot(x,y, 'ro', color = 'b')
plt.show()
def euler_plot(D):
x = D[:,0]
y = D[:,1]
plt.plot(x,y, color = 'b')
plt.show()
a = euler_method(0,100,f1,0,0,30)
edo_plot(a)
euler_plot(a)
###Output
_____no_output_____
###Markdown
2) Consider Heun's method, also known as the trapezoidal method for ODEs or the improved Euler method, given by: $\overline{x}(t + h) = x(t) + hf(t, x(t))$ $x(t + h) = x(t) + \frac{h}{2}[f(t, x(t)) + f(t + h, \overline{x}(t + h))]$ (a) Use Heun's method (by hand!) to obtain a solution to the IVP $x′ = −x + t + \frac{1}{2}$, $x(0) = 1$ on the interval $[0, 1]$ with $h = 0.1$. By interpolating the points with a spline of order 1, obtain the so-called Euler polygonal approximation.
###Code
# Question 2a - Heun's method
# Function f(x,t)
def f2(x,t):
y = -x + t + 1/2
return y
# Heun's method
def heun_method(f2,n):
t = 0
x = 1
h = 0.1
D = np.array([[t,x]])
for i in range(n):
xbarra = x + h*f2(x,t)
        x = x + h/2*(f2(x,t)+f2(xbarra,t))  # note: the textbook Heun corrector evaluates the second slope at t + h
t = t + h
        print(i,xbarra,x)
heun_method(f2,10)
###Output
0 0.95 0.9525
1 0.91725 0.9190125
2 0.89711125 0.8982063125
3 0.8883856812500001 0.8888767128125
4 0.88998904153125 0.8899334250953125
5 0.9009400825857812 0.9003897497112578
6 0.920350774740132 0.9193527234886884
7 0.9474174511398195 0.946014214757263
8 0.9814127932815366 0.979642864355323
9 1.0216785779197908 1.0195767922415673
###Markdown
(b) Implement Heun's method to obtain a solution to the IVP $x′ = −100x^2$, $x(0) = 1$ with $h = 0.1$. Now substitute $\overline{x}(t + h)$ with $x(t + h)$. Explain what happens.
###Code
# Question 2b - Heun's method
# Function f(x)
def f2(x):
y = -100*(x**2)
return y
# Heun's method
def heun_method(f2,n):
t = 0
x = 1
h = 0.1
D = np.array([[t,x]])
for i in range(n):
xbarra = x + h*f2(x)
x = x + h/2*(f2(x)+f2(xbarra))
t = t + h
D = np.append(D,[[t,x]], axis = 0)
return D
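# Usage sketch (not part of the recorded run): with h = 0.1 the predictor already
# overshoots on the first step (xbarra = 1 + 0.1*(-100) = -9), after which the iterates
# grow in magnitude without bound, so the explicit scheme diverges for this stiff IVP.
# D = heun_method(f2, 20)
# euler_plot(D)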
###Output
_____no_output_____
###Markdown
3) Show that Heun's method is a Runge-Kutta method. What is its order? 4) Consider the IVP $x′ = (tx)^3 −(\frac{x}{t})^2$, $x(1) = 1$. Use (by hand) the Taylor and Runge-Kutta methods of order 2 to obtain approximations for $x(1 + h)$ with $h = 0.1$. Compare the answers. 5a) Solve the IVP $x′ = 10x − 5t^2 + 11t − 1$, $x(0) = 0$. With $h = 2^{−8}$, obtain a computational solution of the IVP on the interval $[0, 3]$ using the RK4 method described in class. Plot the analytical solution together with the polygonal approximation obtained from the points produced by RK4.
###Code
# Question 5a - RK4 method
# Function f(x,t)
def f3(x,t):
y = 10*x - 5*(t**2) + 11*t - 1
return y
def f3_analytic(x,n,h):
t = 0
c1 = x
D = np.array([[t,x]])
for i in range(n):
t = t + h
x = c1*m.e**(10*t) + (t**2)/2 - t
D = np.append(D,[[t,x]], axis = 0)
return D
# RK4 method
def rk4_method(f3,x,t,h,n):
D = np.array([[t,x]])
for i in range(n):
K1 = h*f3(x,t)
K2 = h*f3(x+(1/2*K1),t+(h*1/2))
K3 = h*f3(x+(1/2*K2),t+(h*1/2))
K4 = h*f3(x+K3,t+h)
x = x + 1/6*(K1 + 2*K2 + 2*K3 + K4)
t = t + h
D = np.append(D,[[t,x]], axis = 0)
return D
def erro_global(d1,d2):
e = d2[:,1] - d1[:,1]
error = np.amax(e)
print("O erro global é {}".format(error))
# Solucao Analitica com c = 0
D = f3_analytic(0,768,2**-8)
euler_plot(D)
# RK4 plot
h = 2**-8
d = rk4_method(f3,0,0,h,768)
edo_plot(d)
euler_plot(d)
###Output
_____no_output_____
###Markdown
5b) Redo the previous item replacing the initial condition with $x(0) = \epsilon$, where $\epsilon = 0.0001$. Obtain the global error, i.e., the maximum distance between the analytical solution and the numerical approximation.
###Code
# Plot the analytical solution with c1 = 0.0001
D = f3_analytic(0.0001,768,2**-8)
euler_plot(D)
# RK4 plot
h = 2**-8
e = 0.0001
d = rk4_method(f3,e,0,h,768)
edo_plot(d)
euler_plot(d)
erro_global(d,D)
###Output
The global error is -0.0001
|
exercise-02/solution-02.ipynb | ###Markdown
Exercise 02 Metropolis simulation of the 1d quantum anharmonic oscillator. A C++ code to simulate the model is available in the folder 'code', and the data from which these plots are made are in 'code/results'.
###Code
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams["figure.figsize"] = (20,10)
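# A minimal Python sketch of the Metropolis update that the C++ code in 'code/' presumably
# performs (assumption: standard discretised Euclidean action
# S = sum_i [ (x_{i+1}-x_i)^2/(2a) + a*(0.5*mu2*x_i**2 + lamda*x_i**4) ]
# with unit mass and periodic boundary conditions):
def metropolis_sweep(x, a, mu2, lamda, delta=0.5):
    N = len(x)
    for i in range(N):
        xl, xr = x[(i - 1) % N], x[(i + 1) % N]
        x_old = x[i]
        x_new = x_old + delta * (2 * np.random.rand() - 1)
        # change in the local action from updating site i only
        dS = (((xr - x_new)**2 + (x_new - xl)**2
               - (xr - x_old)**2 - (x_old - xl)**2) / (2 * a)
              + a * (0.5 * mu2 * (x_new**2 - x_old**2)
                     + lamda * (x_new**4 - x_old**4)))
        if dS < 0 or np.random.rand() < np.exp(-dS):
            x[i] = x_new
    return x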
plt.figure()
dat = np.loadtxt("code/results/h_mu1_lamda0")
plt.plot(dat[:,0],dat[:,1], '.', label="N=128")
x = np.arange(-3,3,0.01)
plt.plot(x, np.exp(-x*x)/np.sqrt(np.pi), '--', label="analytic")
plt.xlim(-3,3)
plt.title("lambda=0, mu^2=1")
plt.legend()
plt.show()
plt.figure()
dat = np.loadtxt("code/results/h_mu6")
plt.plot(dat[:,0],dat[:,1], '.-', label="mu^2=6")
dat = np.loadtxt("code/results/h_mu3")
plt.plot(dat[:,0],dat[:,1], '.-', label="mu^2=3")
dat = np.loadtxt("code/results/h_mu0")
plt.plot(dat[:,0],dat[:,1], '.-', label="mu^2=0")
dat = np.loadtxt("code/results/h_mu-3")
plt.plot(dat[:,0],dat[:,1], '.-', label="mu^2=-3")
dat = np.loadtxt("code/results/h_mu-5")
plt.plot(dat[:,0],dat[:,1], '.-', label="mu^2=-5")
dat = np.loadtxt("code/results/h_mu-8")
plt.plot(dat[:,0],dat[:,1], '.-', label="mu^2=-8")
plt.xlim(-3,3)
plt.title("lambda=1, various mu^2")
plt.legend()
plt.rcParams["figure.figsize"] = (20,10)
plt.show()
plt.figure()
dat = np.loadtxt("code/results/h_mu-1_corr")
plt.errorbar(dat[:,0], dat[:,1], yerr = dat[:,2], marker = '.', label="mu^2=-1")
dat = np.loadtxt("code/results/h_mu-2_corr")
plt.errorbar(dat[:,0], dat[:,1], yerr = dat[:,2], marker = '.', label="mu^2=-2")
dat = np.loadtxt("code/results/h_mu-3_corr")
plt.errorbar(dat[:,0], dat[:,1], yerr = dat[:,2], marker = '.', label="mu^2=-3")
plt.yscale("log", nonposy='clip')
plt.xlim(1,50)
plt.xlabel("t/a")
plt.ylim(0.001,1)
plt.title("Correlator: lambda=1, N=128")
plt.legend()
plt.rcParams["figure.figsize"] = (20,10)
plt.show()
plt.figure()
color = ['r', 'g', 'b']
# these values for E_0 are copied from the output files in code/results:
E0 = {}
E0[-1]=0.50980651272806732
E0[-2]=0.33261720673993472
E0[-3]=0.12223116355668487
# value of lattice spacing to convert dimensionless lattice t/a to t.
# (note this was previously missing, spotted by Carl-Joar.)
a = 0.1
plt.xlim(1,24)
plt.ylim(0,2.5)
plt.title("E_1-E_0: lambda=1, N=128")
plt.rcParams["figure.figsize"] = (20,10)
for mu2 in [-1,-2,-3]:
dat = np.loadtxt('code/results/h_mu'+str(mu2)+'_corr')
plt.plot(2, E0[mu2], 'x', color=color[mu2], label='E_0: '+'mu^2='+str(mu2))
for dt_over_a in range(5,9):
arr = []
for t_over_a in range(2,30):
arr.append([t_over_a, -np.log((dat[t_over_a+dt_over_a]/dat[t_over_a])[1])/(dt_over_a*a)])
arr = np.array(arr)
plt.plot(arr[:,0], arr[:,1]+E0[mu2], '.-', color=color[mu2], label = 'E_1: mu^2='+str(mu2)+' [dt/a='+str(dt_over_a)+']')
plt.xlabel("t/a")
plt.legend()
plt.show()
###Output
_____no_output_____ |
notebooks/Lucas/Lesson_24 - The Weighted Mean and the Median.ipynb | ###Markdown
1 - Introduction In the previous mission, we learned about the **mean** and worked with a data set on **house sale prices:**| |Order | PID | MS SubClass | MS Zoning | Lot Frontage | Lot Area | Street | Alley | Lot Shape | Mo Sold | Yr Sold | Sale Type | Sale Condition | SalePrice | |-------|-----|-------------|-----------|--------------|----------|--------|-------|-----------|---------|---------|-----------|----------------|-----------|--------|| 0 | 1 | 526301100 | 20 | RL | 141.0 | 131770 | Pave | NaN | 0 | 5 | 2010 | WD | Normal | 215000 || 1 | 2 | 526350040 | 20 | RH | 80.0 | 11622 | Pave | NaN | 0 | 6 | 2010 | WD | Normal | 105000 || 2 | 3 | 526351010 | 20 | RL | 81.0 | 14267 | Pave | NaN | 12500 | 6 | 2010 | WD | Normal | 172000 || 3 | 4 | 526353030 | 20 | RL | 93.0 | 11160 | Pave | NaN | 0 | 4 | 2010 | WD | Normal | 244000 || 4 | 5 | 527105010 | 60 | RL | 74.0 | 13830 | Pave | NaN | 0 | 3 | 2010 | WD | Normal | 189900 |In one of the steps, we computed the mean sale price and found it to be approximately 180796: Let's say that instead of the above data set, we only have the following summary table based on it:| |Year | Mean Price | Houses Sold | |------|------------|---------------|-----|| 0 | 2006 | 181761.648000 | 625 || 1 | 2007 | 185138.207493 | 694 || 2 | 2008 | 178841.750804 | 622 || 3 | 2009 | 181404.567901 | 648 || 4 | 2010 | 172597.598240 | 341 | Once again, our task is to find the **mean** house sale price across all years. Intuitively, we just need to find the mean of the Mean Price column. Let's do that and see it the result matches what we got first from computing the mean of the SalePrice column in the original data set.
###Code
import pandas as pd
import numpy as np
houses = pd.read_csv("AmesHousing_1.txt",sep='\t')
houses_per_year = houses.pivot_table(index="Yr Sold",
values=["SalePrice"],
aggfunc=[np.mean,np.count_nonzero])
houses_per_year.columns = houses_per_year.columns.droplevel(0)
houses_per_year.columns = ["Mean_Price", "Houses_Sold"]
houses_per_year.index.name = "Year"
print(houses_per_year.Mean_Price.loc[2006])
houses_per_year.head()
###Output
181761.648
###Markdown
**Exercise**- Compute the **mean** of the **Mean_Price** column in the **houses_per_year** data set. Assign the value to a variable named **mean_new.** - Note that **houses_per_year** is a DataFrame object, so you can use directly the **Series.mean()** method.- Compute the **mean** of the **SalePrice** column in the **houses** data set. Assign the value to a variable named **mean_original**.- Measure the difference between the two means, and assign the result to a variable named **difference**. If they are equal, the difference should be 0. - For answer checking purposes use **mean_original - mean_new**, not **mean_new - mean_original**.
###Code
# put your code here
mean_new = houses_per_year["Mean_Price"].mean()
mean_original = houses["SalePrice"].mean()
difference = mean_original - mean_new
difference
###Output
_____no_output_____
###Markdown
2 - Different Weights Rather counterintuitively, we noticed in the previous exercise that the **mean** of the **Mean_Price** column is not equal to that of the **SalePrice** column. The root of the problem is related to the fact that we have different number of houses sold each year (notice the **Houses_Sold** column):|| Year | Mean Price | Houses Sold | |------|------------|---------------|-----|| 0 | 2006 | 181761.648000 | 625 || 1 | 2007 | 185138.207493 | 694 || 2 | 2008 | 178841.750804 | 622 || 3 | 2009 | 181404.567901 | 648 || 4 | 2010 | 172597.598240 | 341 |Because each year has a different number of sold houses, the mean of sale prices for each individual year weighs differently in the overall mean. But when we computed the mean of the **Mean_Price** column, we gave each year the same weight because we added all the five mean prices and then divided by 5.To understand why it's wrong to give each year an equal weight, let's begin thinking of the problem on a smaller scale. Consider these two samples of $n=5$ and $n=2$ (remember that $n$ gives the number of sample points) which contain sale prices for year 2009 and 2010:To find the mean across years 2009 and 2010, we can add the five prices for 2009 with the two prices for 2010, and then divide by 7 (because we have 7 prices in total):Notice in the numerator that year 2009 has a greater contribution (1413860) than year 2010 (274000). If we took instead the mean for each year individually, add the two means and divide their sum by 2, then we'd give each year the same weight. This is wrong, and it leads to a wrong result for the overall mean:This is the same mistake we made in the previous exercise: we gave each year the same weight. To compute the correct overall mean, we need to: - Find the sum of prices for each year individually. For instance, 341 houses were sold in 2010 and each house had an average price of approximately 172598. The sum of prices for year 2010 is $341 \times 172,598 = 58,855,918$ . - Add all the sums together. - Divide the final sum by the number of houses sold (not by the number of years) to find the mean sale price per house. **Exercise**- Using only the data we have in the **houses_per_year** data set, compute the sum of prices for each year.- Add all the sums together.- Divide the final sum by the total number of houses sold. Assign the result to a variable named **weighted_mean.**- Compute again the mean of the **SalePrice** column in the **houses** data set. Assign the value to a variable named **mean_original**.- Round each mean value to 10 decimal places to get rid of minor rounding errors and then measure the difference between the two means. Assign the result to a variable named **difference**. If the two means are equal, you should get a difference of 0.
###Code
# put your code here
sum_prices = [ houses_per_year.iloc[i]["Mean_Price"] * houses_per_year.iloc[i]["Houses_Sold"] for i in range(5)]
weighted_mean = sum(sum_prices) / sum(houses_per_year["Houses_Sold"])
mean_original = houses["SalePrice"].mean()
difference = mean_original - weighted_mean
difference
###Output
_____no_output_____
###Markdown
3 - The Weighted Mean When we take into account the **different weights** and compute the **mean** like we did in the previous exercise, we call that mean the **weighted mean**. Just as the arithmetic mean we learned about in the previous mission, the weighted mean can be easily defined algebraically.In the previous exercise, we compute the weighted mean for this distribution of sample means:We multiplied each value by the number of houses sold in that year to take into account the different weights, then we summed up the products and divided by the total number of houses. This is the distribution of weights we used:Now imagine that instead of the actual values, we have six unknown prices, which we'll abbreviate with $x$, and six unknown corresponding weights which we'll abbreviate with $w$:To find the weighted mean, we need to:- Multiply each $x$ value (mean house price) by its corresponding weight value $w$ (total number of houses sold): $x_1 \times w_1$, $x_2 \times w_2$, $x_3 \times w_3$, $x_4 \times w_4$, $x_5 \times w_5$ and $x_6 \times w_6$.- Add the products together (for convenience we drop the multiplication sign $\times$): $x_1w_1 + x_2w_2 + x_3w_3 + x_4w_4 + x_5w_5 + x_6w_6$. Divide the sum of the products by the sum of the weights (that is, the total number of houses sold) to get the weighted mean:The equation above only works if we have six mean values and six weights. We can easily extend the equation, however, to account for any number of mean values and weights:This is how the formula above would work if we had three mean values and three weights:We learned previously to condense sums using $\displaystyle \sum_{i=1}^n$ , so the formula above becomes:The weighted mean can be abbreviated just like the arithemtic mean: $\overline{x}$ for samples, and $\mu$ for populations. Strictly speaking, the formula above gives the weighted mean for a sample because we used $n$, not $N$. To get the formula for a population, we simply have to change $n$ to $N$: **Exercise**- Write a function that computes the weighted mean for any array of numbers. - The function should take in two arrays: one array containing the mean values, and another array with the corresponding weights. - The function returns the weighted mean.- Use the function you wrote to compute the weighted mean for the **Mean_Price** column in the **houses_per_year** data set. Assign the result to a variable named **weighted_mean_function**.- Use the **numpy.average()** function to compute the weighted mean for the same **Mean_Price** column. Read the [documentation](https://docs.scipy.org/doc/numpy/reference/generated/numpy.average.html) to figure out how you can pass in the weights. Assign the result to a variable named **weighted_mean_numpy.**- Compare the two weighted means (the one from your function and the one from **np.average()**) using the == operator. **Round** (round()) each mean to 10 decimal places to get rid of minor rounding errors. Assign the result to a variable named **equal**.
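Before writing the function, a quick sanity check of the formula using the rounded values from the summary table above (rounded to whole dollars here, so the result will only approximately match the exact weighted mean).

```python
import numpy as np

x = np.array([181762, 185138, 178842, 181405, 172598])  # mean sale price per year (rounded)
w = np.array([625, 694, 622, 648, 341])                 # houses sold per year (the weights)
weighted = np.sum(x * w) / np.sum(w)                    # sum(x_i * w_i) / sum(w_i)
```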
###Code
# put your code here
def weighted_mean(values, weights):
val = 0
for i in range(len(values)):
val += values.iloc[i] * weights.iloc[i]
return val / sum(weights)
weighted_mean_function = weighted_mean(houses_per_year["Mean_Price"], houses_per_year["Houses_Sold"])
weighted_mean_numpy = np.average(a=houses_per_year["Mean_Price"], weights=houses_per_year["Houses_Sold"])
equal = weighted_mean_function == weighted_mean_numpy
equal
###Output
_____no_output_____
###Markdown
4 - The Median for Open-ended Distributions While learning about the weighted mean we saw that there are distributions where it's possible to compute the mean, although that wouldn't be correct. There are distributions, however, where it's impossible to compute the mean. Consider for instance the frequency distribution of the **TotRms AbvGrd** variable, which describes the number of rooms above ground:
###Code
houses['TotRms AbvGrd'].value_counts()
###Output
_____no_output_____
###Markdown
The lowest boundary of the distribution is well-defined — the lowest value is 2 and no value in the distribution can be lower than that. But the upper boundary is not defined as precisely — the highest value is "10 or more" which means that houses can actually have 10, 11, 15, or even more rooms. The upper boundary ("10 or more") is thus open, and for this reason we say that the TotRms AbvGrd variable has an **open-ended distribution.**It's still reasonable to want to find an average value (a single representative value) for this distribution, but "10 or more" is not numerical, which makes it impossible to compute the mean. Remember that the definition of the mean is $\displaystyle \frac{\sum_{i=1}^n x_i}{n}$, so we can't compute the $\displaystyle \sum_{i=1}^n$ part because of the "10 or more" value.A common workaround is to sort all the values in the distribution in an ascending order and then select the middle value as the most representative value of the distribution. Consider this sample of 5 values from the **TomRms AbvGrd** column:First, we need to order the values in an ascending order:This distribution has five values and the middle one is the third one because it divides the distribution in two halves of equal length. The third value is $7$, and the two resulting halves are $[5,6]$ and $[7, \text{10 or more}]$ . We call this middle value the **median**, so for this case the median is 7.Let's practice computing medians for a few distributions before finding the median of the **TotRms AbvGrd** above.**Exercise**- Compute the median for each of the three distributions we already defined in the code editor. - Assign the median of **distribution1** to a variable named **median1.** - Assign the median of **distribution2** to a variable named **median2.** - Assign the median of **distribution3** to a variable named **median3.**What is the median of the distribution [3, 7, 2, 12]? You don't have to write an answer for this question, but it'd be we useful to consider it before continuing to the next screen.
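A minimal sketch of the procedure for a purely numeric, odd-length distribution (sort, then take the middle value); the sample values here are illustrative.

```python
values = sorted([7, 5, 6, 10, 7])       # sorted: [5, 6, 7, 7, 10]
median = values[len(values) // 2]       # middle value -> 7
```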
###Code
distribution1 = [23, 24, 22, '20 years or lower,', 23, 42, 35]
distribution2 = [55, 38, 123, 40, 71]
distribution3 = [45, 22, 7, '5 books or lower', 32, 65, '100 books or more']
# put your code here
median1 = 23
median2 = 55
median3 = 32
###Output
_____no_output_____
###Markdown
5 - Distributions with Even Number of Values When a distribution has an even number of values, it's not clear which is the middle one. Consider this sorted distribution with 6 values:It's impossible to choose a value from this distribution that divides the distribution in two halves of equal length. The workaround is to take the two middle values and compute their mean. The two middle values are $[7,7]$, and the two resulting halves are: $[5,6]$ and $[8,\text{10 or more}]$.The median is the mean of the two middle values, that is $\frac{7+7}{2} = 7$This value of 7 is the average value of the distribution above. In statistics, the term **"average"** refers to the most representative value of a distribution. Although it's common to use "average" and "mean" interchangeably, **"average" is not restricted to refer only to the mean**. Depending on the context, it can also refer to the **median** or the **mode**.For the mean, we learned that there are special symbols like $\overline{x}$ or $\mu$. For the median, there's no widely accepted standard notation — most commonly, both the sample and the population median are simply denoted with the word **median**.Unlike the mean, the median doesn't have a neat way to be defined algebraically. This is because sorting a distribution in an ascending order and then choosing a middle value or two doesn't involve any arithmetic. The different treatment for odd and even-numbered distributions also poses some theoretical challenges for constructing a single definition.**Exercise**- Find the median value of the **TotRms AbvGrd** column. - Sort the values in the column in ascending order. - Replace the **'10 or more'** value with the integer __10__ using the [Series.replace()](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.replace.html) method. We do this only for sorting purposes. To avoid modifying the data in the original data set, make a copy of the column using the [Series.copy()](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html) method and save it to a distinct variable. - Convert the column to the int type using the **Series.astype()** method. - Sort the values in ascending order using the **Series.sort_values()** method.- Depending on whether the distribution has an odd or even number of values, find the median and assign it to a variable named **median.**
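Before the pandas-based solution below, a generic sketch of the rule just described, covering both odd and even lengths for a numeric list.

```python
def simple_median(values):
    ordered = sorted(values)
    n = len(ordered)
    mid = n // 2
    if n % 2 == 1:                                   # odd: single middle value
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2     # even: mean of the two middle values
```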
###Code
houses_copied = houses.copy()
houses_copied = houses_copied.replace("10 or more", 10)
houses_copied = houses_copied["TotRms AbvGrd"].astype(int, copy=False).sort_values()
# Lets find if it has even or odd numbers
median_is_even = len(houses_copied) % 2 == 0 # True
half = int((len(houses_copied) - 1) / 2)
# Since number of values is even, lets find the median
median = (houses_copied.iloc[half] + houses_copied.iloc[half + 1]) / 2
# Print median
median
###Output
_____no_output_____
###Markdown
6 - The Median as a Resistant Statistic When we compute the mean, we account equally for each value in the distribution — we sum up all the values in the distribution and then divide the total by the number of values we added. When we compute the median, however, we don't consider equally each value in the distribution. In fact, we only consider the middle value (or the middle two values).This property makes the median more resistant to changes in the data compared to the mean. Let's consider this simple distribution:$$[2,3,5,5,10]$$Both the median and the mean of this distribution are 5. Let's change the last value in the distribution from 10 to 1000:$$[2,3,5,5,1000]$$The median is still 5, but the mean is now 203. This is because the mean takes into account every value in the distribution, while the median considers only the middle value. Because the **median** is so resistant to changes in the data, it's classified as a **resistant** or **robust statistic**.This property makes the median ideal for finding reasonable averages for distributions containing outliers. Consider this distribution of annual salaries for five people in a company:$$[20000,34000,40000,45000,800000]$$The mean is heavily influenced by the person winning 800,000, and it amounts to a value of 187,000, which is not representative for anyone — the first four people win much less that 187,000, and the last person wins much more. It makes more sense to compute a median value for this distribution, and report that the average salary in the company is 40,000, accompanied by an outlier of 800,000.**Exercise**- The **Lot Area** and **SalePrice** variables have outliers. Confirm this information by visualizing the distributions using a box plot. Remember from the previous course that outliers will appear as dots on the graph. - You can use the **Series.plot.box()** method.- Compute the **median** and the **mean** for each of the two variables. - Because the variables are numerical, you can use the **Series.median()** method and the **Series.mean()** method.- For each variable, compute the difference between the **mean** and the **median**. For answer checking purposes, use **mean - median**, not **median - mean**. - Assign the difference for the **Lot Area** column to a variable named **lotarea_difference.** - Assign the difference for the **SalePrice** column to a variable named **saleprice_difference.** - Inspect the differences. Do you find the differences large?
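The contrast described above can be checked directly with the two small distributions from the text.

```python
import numpy as np

base = [2, 3, 5, 5, 10]
with_outlier = [2, 3, 5, 5, 1000]
print(np.mean(base), np.median(base))                  # 5.0 5.0
print(np.mean(with_outlier), np.median(with_outlier))  # 203.0 5.0
```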
###Code
import seaborn as sns
sns.boxplot(houses["Lot Area"], orient = 'vertical', width = .15)
sns.boxplot(houses["SalePrice"], orient = 'vertical', width = .15)
lotarea_difference = houses["Lot Area"].mean() - houses["Lot Area"].median()
saleprice_difference = houses["SalePrice"].mean() - houses["SalePrice"].median()
print("Lot Area Difference: {} \nSale Price Difference: {}".format(
lotarea_difference, saleprice_difference
))
###Output
Lot Area Difference: 3680.7051194539254
Sale Price Difference: 20796.060068259394
###Markdown
7 - The Median for Ordinal Scales Data points belonging to ordinal variables are often coded using numbers. Consider the frequency distribution of the **Overall Cond** variable, which rates the overall condition of a house:
###Code
houses['Overall Cond'].value_counts().sort_index()
###Output
_____no_output_____
###Markdown
In the [documentation](https://s3.amazonaws.com/dq-content/307/data_description.txt), we can find that each numerical value corresponds to a specific quality level:| Code | Quality ||------|----------------|| 1 | Very poor || 2 | Poor || 3 | Fair || 4 | Below average || 5 | Average || 6 | Above average || 7 | Good || 8 | Very good || 9 | Excellent || 10 | Very excellent |Because words like "fair" or "average" are coded with numbers, it becomes mathematically possible to compute the mean. But whether or not it's theoretically sound to compute the mean for ordinal variables is contentious. Below we explore one argument against computing the mean, and in the next section we'll explore an argument that supports the idea of using the mean.Remember from the previous course that if two data points are measured on an ordinal scale and there's a difference between them, we can tell the direction of the difference, but we don't know the size of the difference.If the overall condition of a house is rated with an 8 (Very good), and another house gets a 4 (Below average), we can't say that the conditions of the former are twice as better than the latter. The most we can say is that the house which got an 8 has better conditions (we can't quantify how much better — it could be twice as better, three times as better, 1.5 times as better, we simply don't know).This should be more clear if we consider the fact that the numbers used to encode the quality levels ("Poor", "Fair", "Good", etc.) are chosen arbitrarily. Instead of numbers from 1 to 10, we could have numbers from 30 to 40, or from 50 to 70 in steps of 2, or from 0 to 9:| Code | Quality ||------|----------------|| 0 | Very poor || 1 | Poor || 2 | Fair || 3 | Below average || 4 | Average || 5 | Above average || 6 | Good || 7 | Very good || 8 | Excellent || 9 | Very excellent |Inside the framework of a 0-9 system, an "Excellent" label would be encoded as a 8, and a "Below average" as a 3. If we took ratios, we'd reach different conclusions for different encoding systems: - For a 1-10 encoding system, the conditions of an "Excellent" (9) house would be 2.25 times as better than those of a "Below average" (4) house ($\frac{9}{4}=2.25$).For a 0-9 encoding system, the same "Excellent" (8) house would have conditions that are 2.67 times as better than the conditions of a "Below average" (3) house ($\frac{8}{3}=2.67$). It can be argued thus that the numerical values of an ordinal variable are not subject to meaningful arithmetical operations. But computing the mean involves meaningful arithmetical operations, so it's not theoretically sound to use the mean for ordinal variables.Because the median doesn't involve arithmetical operations, it's considered a better alternative to the mean. This doesn't fully apply, however, to even-numbered distributions, where we need to take the mean of the middle two values to find the median. This poses some theoretical problems, and we'll see in the next mission that the **mode** might be a better choice in this case as a measure of average.**Exercise**- Find the **mean** and the **median** of the **Overall Cond** variable. - Assign the **mean** to a variable named **mean**. - Assign the **median** to a variable named **median**.- Plot a histogram to visualize the distribution of the **Overall Cond** variable. Between the **mean** and the **median**, which one do you think describes better the shape of the histogram? 
- If you think it's the **mean**, assign the string **'mean'** to a variable named **more_representative**, otherwise assign **'median'**.
###Code
# put your code here
mean = houses["Overall Cond"].mean()
median = houses["Overall Cond"].median()
more_representative = "mean"
houses["Overall Cond"].plot.hist()
###Output
_____no_output_____ |
classification/Decision-Tree-UnderSampling.ipynb | ###Markdown
Dataset Preprocessing
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, classification_report
from sklearn import tree
from sklearn.datasets import make_classification
from sklearn.model_selection import StratifiedKFold
import pydotplus
from IPython.display import Image
import graphviz
import itertools
import seaborn as sns
def make_confusion_matrix(cf,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=None,
cmap='Blues',
title=None):
'''
This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization.
Arguments
---------
cf: confusion matrix to be passed in
group_names: List of strings that represent the labels row by row to be shown in each square.
categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'
count: If True, show the raw number in the confusion matrix. Default is True.
    percent:       If True, show the proportions for each category. Default is True.
cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
Default is True.
xyticks: If True, show x and y ticks. Default is True.
xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
sum_stats: If True, display summary statistics below the figure. Default is True.
figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
See http://matplotlib.org/examples/color/colormaps_reference.html
title: Title for the heatmap. Default is None.
'''
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
blanks = ['' for i in range(cf.size)]
if group_names and len(group_names)==cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten()/np.sum(cf)]
else:
group_percentages = blanks
box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels,group_counts,group_percentages)]
box_labels = np.asarray(box_labels).reshape(cf.shape[0],cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
if sum_stats:
#Accuracy is sum of diagonal divided by total observations
accuracy = np.trace(cf) / float(np.sum(cf))
#if it is a binary confusion matrix, show some more stats
if len(cf)==2:
#Metrics for Binary Confusion Matrices
precision = cf[1,1] / sum(cf[:,1])
recall = cf[1,1] / sum(cf[1,:])
f1_score = 2*precision*recall / (precision + recall)
stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}".format(
accuracy,precision,recall,f1_score)
else:
stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
else:
stats_text = ""
# SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
if figsize==None:
#Get default figure size if not set
figsize = plt.rcParams.get('figure.figsize')
if xyticks==False:
#Do not show categories if xyticks is False
categories=False
# MAKE THE HEATMAP VISUALIZATION
plt.figure(figsize=figsize)
sns.heatmap(cf,annot=box_labels,fmt="",cmap=cmap,cbar=cbar,xticklabels=categories,yticklabels=categories)
if xyplotlabels:
plt.ylabel('True label')
plt.xlabel('Predicted label' + stats_text)
else:
plt.xlabel(stats_text)
if title:
plt.title(title)
path = '~/Documents/dmproject/datasets/'
df = pd.read_csv(path + 'train.csv')
df_test = pd.read_csv(path + 'test.csv')
# Define function to find missing values
def missing_values_table(df):
# Total missing values
mis_val = df.isnull().sum()
# Percentage of missing values
mis_val_percent = 100 * df.isnull().sum() / len(df)
# Make a table with the results
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
# Rename the columns
mis_val_table_ren_columns = mis_val_table.rename(columns = {0 : 'Missing Values', 1 : '% of Total Values'})
# Sort the table by percentage of missing descending
mis_val_table_ren_columns = mis_val_table_ren_columns[mis_val_table_ren_columns.iloc[:,1] != 0].sort_values('% of Total Values', ascending=False).round(1)
# Print some summary information
print ("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"+"There are " + str(mis_val_table_ren_columns.shape[0]) +" columns that have missing values.")
# Return the dataframe with missing information
return mis_val_table_ren_columns
df_discretized = df.copy()
df_test_discretized = df_test.copy()
#function to discretize the variables
#input: the dataset and the list of variables' names to discretize
def discretize_data(dataset, variables):
for variable in variables:
#get the unique variable's values
var = sorted(dataset[variable].unique())
#generate a mapping from the variable's values to the number representation
mapping = dict(zip(var, range(0, len(var) + 1)))
#add a new colum with the number representation of the variable
dataset[variable] = dataset[variable].map(mapping).astype(int)
return dataset
#discretize the variable
variables = ['CustomerCountry', 'CustomerLabel' ]
df_discretized = discretize_data(df_discretized, variables)
df_test_discretized = discretize_data(df_test_discretized, variables)
df_discretized = df_discretized.drop(columns=['TotalSale','MonthSale','WeekSale'])
df_test_discretized = df_test_discretized.drop(columns=['TotalSale','MonthSale','WeekSale'])
# unskew the data
# rfm_log = df_discretized[['TotalSale', 'TotalItems', 'DistinctItems', 'MaxItems', 'Entropy', 'Frequency']].apply(np.log, axis = 1).round(3)
rfm_log = df_discretized[['DistinctItems', 'MaxItems', 'Entropy', 'Frequency']].apply(np.log, axis = 1).round(3)
rfm_test = df_test_discretized[['DistinctItems', 'MaxItems', 'Entropy', 'Frequency']].apply(np.log, axis = 1).round(3)
# scale the data
scaler = StandardScaler()
df_scaled = scaler.fit_transform(rfm_log)# transform into a dataframe
df_test_scaled = scaler.fit_transform(rfm_test)
df_scaled = pd.DataFrame(df_scaled, index = rfm_log.index, columns = rfm_log.columns)
df_test_scaled = pd.DataFrame(df_test_scaled, index = rfm_test.index, columns = rfm_test.columns)
#to classify, we first need to split the dataset into train and test dataset.
#we can do so using train_test_split, in this case we select a stratified split
df_decision_tree = df_discretized.copy()
label = df_decision_tree.pop('CustomerLabel')
#label = df_discretized.pop('customer_type')
X_train, X_val, y_train, y_val = train_test_split(df_decision_tree, label, stratify = label, test_size=0.30)
X_test = df_test_discretized.drop(columns=['CustomerLabel'])
y_test = df_test_discretized['CustomerLabel']
###Output
_____no_output_____
###Markdown
Decision Tree
###Code
from imblearn.pipeline import Pipeline, make_pipeline
from imblearn.under_sampling import RandomUnderSampler
###Output
_____no_output_____
###Markdown
UnderSampling
###Code
sm = RandomUnderSampler(random_state=42)
X_train, y_train = sm.fit_sample(X_train, y_train)
X_train.shape, y_train.shape
print(X_train.shape)
print(y_train.shape)
param_grid = {'criterion': ['gini', 'entropy'],
'max_depth': [2, 5, 10, 15, None],
'min_samples_split': [2, 5, 10, 20],
'min_samples_leaf': [1, 5, 10, 20],}
clf = GridSearchCV(DecisionTreeClassifier(splitter="best"), param_grid, cv=StratifiedKFold(15), scoring='accuracy')
clf_fit = clf.fit(X_train, y_train,)
# tree_performance = roc_auc_score(y_val, clf_fit.predict(X_val))
# print("DecisionTree: Area under the ROC curve = {}".format(tree_performance))
opt_df = clf_fit.best_estimator_
print("{}".format(clf_fit.best_params_))
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_val, clf.predict(X_val)
print(classification_report(y_true, y_pred))
print()
#visualize the actual decision tree obtained
import pydotplus
from IPython.display import Image
dot_data = tree.export_graphviz(opt_df, out_file=None,
feature_names=list(X_train.columns),
class_names=['0', '1', '2'],
filled=True, rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png())
result = opt_df.score(X_test, y_test)
print("Accuracy: %.2f%%" % (result*100.0))
y_pred = opt_df.predict(X_test)
cf_matrix = confusion_matrix(y_pred, y_test)
labels = ['high_spend','medium_spend','low_spend']
cf_matrix
make_confusion_matrix(cf_matrix,categories=labels, figsize=(8,6), cbar=False)
# roc curve for classes
fpr = {}
tpr = {}
thresh ={}
n_class = 3
probs = opt_df.predict_proba(X_test)
for i in range(n_class):
fpr[i], tpr[i], thresh[i] = roc_curve(y_test, probs[:,i], pos_label=i)
random_probs = [0 for i in range(len(y_test))]
p_fpr, p_tpr, _ = roc_curve(y_test, random_probs, pos_label=1)
plt.plot(fpr[0], tpr[0], linestyle='--',color='orange', label='Class 0')
plt.plot(fpr[1], tpr[1], linestyle='--',color='green', label='Class 1')
plt.plot(fpr[2], tpr[2], linestyle='--',color='blue', label='Class 2')
plt.plot(p_fpr, p_tpr, linestyle='--', color='blue')
plt.title('Multiclass ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive rate')
plt.legend(loc='best')
# plt.savefig('Multiclass ROC',dpi=300)
###Output
_____no_output_____ |
Assignment-1/Given-Materials-Python/exercise1.ipynb | ###Markdown
Programming Exercise 1: Linear Regression IntroductionIn this exercise, you will implement linear regression and get to see it work on data. Before starting on this programming exercise, we strongly recommend watching the video lectures and completing the review questions for the associated topics.All the information you need for solving this assignment is in this notebook, and all the code you will be implementing will take place within this notebook. The assignment can be promptly submitted to the coursera grader directly from this notebook (code and instructions are included below).Before we begin with the exercises, we need to import all libraries required for this programming exercise. Throughout the course, we will be using [`numpy`](http://www.numpy.org/) for all arrays and matrix operations, and [`matplotlib`](https://matplotlib.org/) for plotting.You can find instructions on how to install required libraries in the README file in the [github repository](https://github.com/dibgerge/ml-coursera-python-assignments).
###Code
# used for manipulating directory paths
import os
# Scientific and vector computation for python
import numpy as np
# Plotting library
from matplotlib import pyplot
plt = pyplot  # alias: this notebook uses both `pyplot` and `plt`
from mpl_toolkits.mplot3d import Axes3D # needed to plot 3-D surfaces
# library written for this exercise providing additional functions for assignment submission, and others
import utils
# define the submission/grader object for this exercise
grader = utils.Grader()
# tells matplotlib to embed plots within the notebook
%matplotlib inline
###Output
_____no_output_____
###Markdown
Submission and GradingAfter completing each part of the assignment, be sure to submit your solutions to the grader.For this programming exercise, you are only required to complete the first part of the exercise to implement linear regression with one variable. The second part of the exercise, which is optional, covers linear regression with multiple variables. The following is a breakdown of how each part of this exercise is scored.**Required Exercises**| Section | Part |Submitted Function | Points |---------|:- |:- | :-: | 1 | [Warm up exercise](section1) | [`warmUpExercise`](warmUpExercise) | 10 | 2 | [Compute cost for one variable](section2) | [`computeCost`](computeCost) | 40 | 3 | [Gradient descent for one variable](section3) | [`gradientDescent`](gradientDescent) | 50 | | Total Points | | 100 **Optional Exercises**| Section | Part | Submitted Function | Points ||:-------:|:- |:-: | :-: || 4 | [Feature normalization](section4) | [`featureNormalize`](featureNormalize) | 0 || 5 | [Compute cost for multiple variables](section5) | [`computeCostMulti`](computeCostMulti) | 0 || 6 | [Gradient descent for multiple variables](section5) | [`gradientDescentMulti`](gradientDescentMulti) |0 || 7 | [Normal Equations](section7) | [`normalEqn`](normalEqn) | 0 |You are allowed to submit your solutions multiple times, and we will take only the highest score into consideration.At the end of each section in this notebook, we have a cell which contains code for submitting the solutions thus far to the grader. Execute the cell to see your score up to the current section. For all your work to be submitted properly, you must execute those cells at least once. They must also be re-executed everytime the submitted function is updated. DebuggingHere are some things to keep in mind throughout this exercise:- Python array indices start from zero, not one (contrary to OCTAVE/MATLAB). - There is an important distinction between python arrays (called `list` or `tuple`) and `numpy` arrays. You should use `numpy` arrays in all your computations. Vector/matrix operations work only with `numpy` arrays. Python lists do not support vector operations (you need to use for loops).- If you are seeing many errors at runtime, inspect your matrix operations to make sure that you are adding and multiplying matrices of compatible dimensions. Printing the dimensions of `numpy` arrays using the `shape` property will help you debug.- By default, `numpy` interprets math operators to be element-wise operators. If you want to do matrix multiplication, you need to use the `dot` function in `numpy`. For, example if `A` and `B` are two `numpy` matrices, then the matrix operation AB is `np.dot(A, B)`. Note that for 2-dimensional matrices or vectors (1-dimensional), this is also equivalent to `A@B` (requires python >= 3.5). 1 Simple python and `numpy` functionThe first part of this assignment gives you practice with python and `numpy` syntax and the homework submission process. In the next cell, you will find the outline of a `python` function. Modify it to return a 5 x 5 identity matrix by filling in the following code:```pythonA = np.eye(5)```
###Code
def warmUpExercise():
"""
Example function in Python which computes the identity matrix.
Returns
-------
A : array_like
The 5x5 identity matrix.
Instructions
------------
Return the 5x5 identity matrix.
"""
# ======== YOUR CODE HERE ======
A = np.identity(5) # modify this line
# ==============================
return A
###Output
_____no_output_____
###Markdown
The previous cell only defines the function `warmUpExercise`. We can now run it by executing the following cell to see its output. You should see output similar to the following:
```python
array([[ 1.,  0.,  0.,  0.,  0.],
       [ 0.,  1.,  0.,  0.,  0.],
       [ 0.,  0.,  1.,  0.,  0.],
       [ 0.,  0.,  0.,  1.,  0.],
       [ 0.,  0.,  0.,  0.,  1.]])
```
###Code
warmUpExercise()
###Output
_____no_output_____
###Markdown
1.1 Submitting solutionsAfter completing a part of the exercise, you can submit your solutions for grading by first adding the function you modified to the grader object, and then sending your function to Coursera for grading. The grader will prompt you for your login e-mail and submission token. You can obtain a submission token from the web page for the assignment. You are allowed to submit your solutions multiple times, and we will take only the highest score into consideration.Execute the next cell to grade your solution to the first part of this exercise.*You should now submit your solutions.*
###Code
# appends the implemented function in part 1 to the grader object
grader[1] = warmUpExercise
# send the added functions to coursera grader for getting a grade on this part
grader.grade()
###Output
Submitting Solutions | Programming Exercise linear-regression
Use token from last successful submission ([email protected])? (Y/n): y
Part Name | Score | Feedback
--------- | ----- | --------
Warm up exercise | 10 / 10 | Nice work!
Computing Cost (for one variable) | 40 / 40 | Nice work!
Gradient Descent (for one variable) | 0 / 50 |
Feature Normalization | 0 / 0 |
Computing Cost (for multiple variables) | 0 / 0 |
Gradient Descent (for multiple variables) | 0 / 0 |
Normal Equations | 0 / 0 |
--------------------------------
| 50 / 100 |
###Markdown
2 Linear regression with one variableNow you will implement linear regression with one variable to predict profits for a food truck. Suppose you are the CEO of a restaurant franchise and are considering different cities for opening a new outlet. The chain already has trucks in various cities and you have data for profits and populations from the cities. You would like to use this data to help you select which city to expand to next. The file `Data/ex1data1.txt` contains the dataset for our linear regression problem. The first column is the population of a city (in 10,000s) and the second column is the profit of a food truck in that city (in $10,000s). A negative value for profit indicates a loss. We provide you with the code needed to load this data. The dataset is loaded from the data file into the variables `x` and `y`:
###Code
# Read comma separated data
data = np.loadtxt(os.path.join('Data', 'ex1data1.txt'), delimiter=',')
X, y = data[:, 0], data[:, 1]
m = y.size # number of training examples
###Output
_____no_output_____
###Markdown
2.1 Plotting the Data Before starting on any task, it is often useful to understand the data by visualizing it. For this dataset, you can use a scatter plot to visualize the data, since it has only two properties to plot (profit and population). Many other problems that you will encounter in real life are multi-dimensional and cannot be plotted on a 2-d plot. There are many plotting libraries in python (see this [blog post](https://blog.modeanalytics.com/python-data-visualization-libraries/) for a good summary of the most popular ones). In this course, we will be exclusively using `matplotlib` to do all our plotting. `matplotlib` is one of the most popular scientific plotting libraries in python and has extensive tools and functions to make beautiful plots. `pyplot` is a module within `matplotlib` which provides a simplified interface to `matplotlib`'s most common plotting tasks, mimicking MATLAB's plotting interface. You might have noticed that we have imported the `pyplot` module at the beginning of this exercise using the command `from matplotlib import pyplot`. This is rather uncommon, and if you look at python code elsewhere or in the `matplotlib` tutorials, you will see that the module is usually named `plt`. This is done by renaming the module with the import command `import matplotlib.pyplot as plt`. We will not be using the short name of the `pyplot` module in these class exercises, but you should be aware of this deviation from the norm. In the following part, your first job is to complete the `plotData` function below. Modify the function and fill in the following code:
```python
    pyplot.plot(x, y, 'ro', ms=10, mec='k')
    pyplot.ylabel('Profit in $10,000')
    pyplot.xlabel('Population of City in 10,000s')
```
###Code
def plotData(x, y):
"""
Plots the data points x and y into a new figure. Plots the data
points and gives the figure axes labels of population and profit.
Parameters
----------
x : array_like
Data point values for x-axis.
y : array_like
Data point values for y-axis. Note x and y should have the same size.
Instructions
------------
Plot the training data into a figure using the "figure" and "plot"
functions. Set the axes labels using the "xlabel" and "ylabel" functions.
Assume the population and revenue data have been passed in as the x
and y arguments of this function.
Hint
----
You can use the 'ro' option with plot to have the markers
appear as red circles. Furthermore, you can make the markers larger by
using plot(..., 'ro', ms=10), where `ms` refers to marker size. You
can also set the marker edge color using the `mec` property.
"""
fig = pyplot.figure() # open a new figure
# ====================== YOUR CODE HERE =======================
plt.scatter(x, y, c='r', s=100, edgecolors='#000000')
plt.ylabel('Profit in $10,000')
plt.xlabel('Population of City in 10,000s')
# plt.show()
# =============================================================
###Output
_____no_output_____
###Markdown
Now run the defined function with the loaded data to visualize the data. The end result should look like the figure below. Execute the next cell to visualize the data.
###Code
plotData(X, y)
###Output
_____no_output_____
###Markdown
To quickly learn more about the `matplotlib` plot function and what arguments you can provide to it, you can type `?pyplot.plot` in a cell within the jupyter notebook. This opens a separate page showing the documentation for the requested function. You can also search online for plotting documentation. To set the markers to red circles, we used the option `'or'` within the `plot` function.
###Code
?pyplot.plot
###Output
_____no_output_____
###Markdown
2.2 Gradient Descent In this part, you will fit the linear regression parameters $\theta$ to our dataset using gradient descent. 2.2.1 Update Equations The objective of linear regression is to minimize the cost function$$ J(\theta) = \frac{1}{2m} \sum_{i=1}^m \left( h_{\theta}(x^{(i)}) - y^{(i)}\right)^2$$where the hypothesis $h_\theta(x)$ is given by the linear model$$ h_\theta(x) = \theta^Tx = \theta_0 + \theta_1 x_1$$Recall that the parameters of your model are the $\theta_j$ values. These are the values you will adjust to minimize the cost $J(\theta)$. One way to do this is to use the batch gradient descent algorithm. In batch gradient descent, each iteration performs the update$$ \theta_j = \theta_j - \alpha \frac{1}{m} \sum_{i=1}^m \left( h_\theta(x^{(i)}) - y^{(i)}\right)x_j^{(i)} \qquad \text{simultaneously update } \theta_j \text{ for all } j$$With each step of gradient descent, your parameters $\theta_j$ come closer to the optimal values that will achieve the lowest cost $J(\theta)$.**Implementation Note:** We store each example as a row in the $X$ matrix in Python `numpy`. To take into account the intercept term ($\theta_0$), we add an additional first column to $X$ and set it to all ones. This allows us to treat $\theta_0$ as simply another 'feature'. 2.2.2 Implementation We have already set up the data for linear regression. In the following cell, we add another dimension to our data to accommodate the $\theta_0$ intercept term. Do NOT execute this cell more than once.
###Code
# Add a column of ones to X. The numpy function stack joins arrays along a given axis.
# The first axis (axis=0) refers to rows (training examples)
# and second axis (axis=1) refers to columns (features).
X = np.stack([np.ones(m), X], axis=1)
###Output
_____no_output_____
###Markdown
2.2.3 Computing the cost $J(\theta)$ As you perform gradient descent to minimize the cost function $J(\theta)$, it is helpful to monitor the convergence by computing the cost. In this section, you will implement a function to calculate $J(\theta)$ so you can check the convergence of your gradient descent implementation. Your next task is to complete the code for the function `computeCost` which computes $J(\theta)$. As you are doing this, remember that the variables $X$ and $y$ are not scalar values. $X$ is a matrix whose rows represent the examples from the training set, and $y$ is a vector each of whose elements is the target value for the corresponding row of $X$.
###Code
def computeCost(X, y, theta):
"""
Compute cost for linear regression. Computes the cost of using theta as the
parameter for linear regression to fit the data points in X and y.
Parameters
----------
X : array_like
The input dataset of shape (m x n+1), where m is the number of examples,
and n is the number of features. We assume a vector of one's already
appended to the features so we have n+1 columns.
y : array_like
The values of the function at each data point. This is a vector of
shape (m, ).
theta : array_like
The parameters for the regression function. This is a vector of
shape (n+1, ).
Returns
-------
J : float
The value of the regression cost function.
Instructions
------------
Compute the cost of a particular choice of theta.
You should set J to the cost.
"""
# initialize some useful values
m = y.size # number of training examples
# You need to return the following variables correctly
J = 0
# ====================== YOUR CODE HERE =====================
ht = np.dot(X, theta)
ht_minus_y = ht - y # ht(x) - y
ht_minus_y = ht_minus_y ** 2
summation = np.sum(ht_minus_y)
J = (1 / (2*m)) * summation
# ===========================================================
return J
###Output
_____no_output_____
###Markdown
Once you have completed the function, the next step will run `computeCost` two times using two different initializations of $\theta$. You will see the cost printed to the screen.
###Code
J = computeCost(X, y, theta=np.array([0.0, 0.0]))
print('With theta = [0, 0] \nCost computed = %.2f' % J)
print('Expected cost value (approximately) 32.07\n')
# further testing of the cost function
J = computeCost(X, y, theta=np.array([-1, 2]))
print('With theta = [-1, 2]\nCost computed = %.2f' % J)
print('Expected cost value (approximately) 54.24')
###Output
With theta = [0, 0]
Cost computed = 32.07
Expected cost value (approximately) 32.07
With theta = [-1, 2]
Cost computed = 54.24
Expected cost value (approximately) 54.24
###Markdown
*You should now submit your solutions by executing the following cell.*
###Code
grader[2] = computeCost
grader.grade()
###Output
Submitting Solutions | Programming Exercise linear-regression
Use token from last successful submission ([email protected])? (Y/n): y
Part Name | Score | Feedback
--------- | ----- | --------
Warm up exercise | 10 / 10 | Nice work!
Computing Cost (for one variable) | 40 / 40 | Nice work!
Gradient Descent (for one variable) | 50 / 50 | Nice work!
Feature Normalization | 0 / 0 |
Computing Cost (for multiple variables) | 0 / 0 |
Gradient Descent (for multiple variables) | 0 / 0 |
Normal Equations | 0 / 0 |
--------------------------------
| 100 / 100 |
###Markdown
2.2.4 Gradient descentNext, you will complete a function which implements gradient descent.The loop structure has been written for you, and you only need to supply the updates to $\theta$ within each iteration. The equation is - $$ \theta_j = \theta_j - \alpha \frac{1}{m} \sum_{i=1}^m \left( h_\theta(x^{(i)}) - y^{(i)}\right)x_j^{(i)} \qquad \text{simultaneously update } \theta_j \text{ for all } j$$As you program, make sure you understand what you are trying to optimize and what is being updated. Keep in mind that the cost $J(\theta)$ is parameterized by the vector $\theta$, not $X$ and $y$. That is, we minimize the value of $J(\theta)$ by changing the values of the vector $\theta$, not by changing $X$ or $y$. [Refer to the equations in this notebook](section2) and to the video lectures if you are uncertain. A good way to verify that gradient descent is working correctly is to look at the value of $J(\theta)$ and check that it is decreasing with each step. The starter code for the function `gradientDescent` calls `computeCost` on every iteration and saves the cost to a `python` list. Assuming you have implemented gradient descent and `computeCost` correctly, your value of $J(\theta)$ should never increase, and should converge to a steady value by the end of the algorithm.**Vectors and matrices in `numpy`** - Important implementation notesA vector in `numpy` is a one dimensional array, for example `np.array([1, 2, 3])` is a vector. A matrix in `numpy` is a two dimensional array, for example `np.array([[1, 2, 3], [4, 5, 6]])`. However, the following is still considered a matrix `np.array([[1, 2, 3]])` since it has two dimensions, even if it has a shape of 1x3 (which looks like a vector).Given the above, the function `np.dot` which we will use for all matrix/vector multiplication has the following properties:- It always performs inner products on vectors. If `x=np.array([1, 2, 3])`, then `np.dot(x, x)` is a scalar.- For matrix-vector multiplication, so if $X$ is a $m\times n$ matrix and $y$ is a vector of length $m$, then the operation `np.dot(y, X)` considers $y$ as a $1 \times m$ vector. On the other hand, if $y$ is a vector of length $n$, then the operation `np.dot(X, y)` considers $y$ as a $n \times 1$ vector.- A vector can be promoted to a matrix using `y[None]` or `[y[np.newaxis]`. That is, if `y = np.array([1, 2, 3])` is a vector of size 3, then `y[None, :]` is a matrix of shape $1 \times 3$. We can use `y[:, None]` to obtain a shape of $3 \times 1$.
###Code
def gradientDescent(X, y, theta, alpha, num_iters):
"""
Performs gradient descent to learn `theta`. Updates theta by taking `num_iters`
gradient steps with learning rate `alpha`.
Parameters
----------
X : array_like
The input dataset of shape (m x n+1).
y : arra_like
Value at given features. A vector of shape (m, ).
theta : array_like
Initial values for the linear regression parameters.
A vector of shape (n+1, ).
alpha : float
The learning rate.
num_iters : int
The number of iterations for gradient descent.
Returns
-------
theta : array_like
The learned linear regression parameters. A vector of shape (n+1, ).
J_history : list
A python list for the values of the cost function after each iteration.
Instructions
------------
Peform a single gradient step on the parameter vector theta.
While debugging, it can be useful to print out the values of
the cost function (computeCost) and gradient here.
"""
# Initialize some useful values
m = y.shape[0] # number of training examples
# make a copy of theta, to avoid changing the original array, since numpy arrays
# are passed by reference to functions
theta = theta.copy()
J_history = [] # Use a python list to save cost in every iteration
for i in range(num_iters):
# ==================== YOUR CODE HERE =================================
ht = np.dot(X,theta)
ht_minus_y = ht - y # ht(x) - y
s = np.dot(ht_minus_y , X)
delta = (1/m) * s
theta = theta - alpha * delta
# =====================================================================
# save the cost J in every iteration
J_history.append(computeCost(X, y, theta))
return theta, J_history
###Output
_____no_output_____
###Markdown
After you have finished, call the implemented `gradientDescent` function and print the computed $\theta$. We initialize the $\theta$ parameters to 0 and the learning rate $\alpha$ to 0.01. Execute the following cell to check your code.
###Code
# initialize fitting parameters
theta = np.zeros(2)
# some gradient descent settings
iterations = 1500
alpha = 0.01
theta, J_history = gradientDescent(X ,y, theta, alpha, iterations)
print('Theta found by gradient descent: {:.4f}, {:.4f}'.format(*theta))
print('Expected theta values (approximately): [-3.6303, 1.1664]')
###Output
Theta found by gradient descent: -3.6303, 1.1664
Expected theta values (approximately): [-3.6303, 1.1664]
###Markdown
We will use your final parameters to plot the linear fit. The results should look like the following figure.
###Code
# plot the linear fit
plotData(X[:, 1], y)
plt.plot(X[:, 1], np.dot(X, theta), '-')
pyplot.legend(['Training data', 'Linear regression']);  # legend labels follow the order in which the artists were plotted
###Output
_____no_output_____
###Markdown
Your final values for $\theta$ will also be used to make predictions on profits in areas of 35,000 and 70,000 people. Note the way that the following lines use matrix multiplication, rather than explicit summation or looping, to calculate the predictions. This is an example of code vectorization in `numpy`. Note that the first argument to the `numpy` function `dot` is a python list. `numpy` internally converts **valid** python lists to numpy arrays when they are explicitly provided as arguments to `numpy` functions.
###Code
# Predict values for population sizes of 35,000 and 70,000
predict1 = np.dot([1, 3.5], theta)
print('For population = 35,000, we predict a profit of {:.2f}\n'.format(predict1*10000))
predict2 = np.dot([1, 7], theta)
print('For population = 70,000, we predict a profit of {:.2f}\n'.format(predict2*10000))
###Output
For population = 35,000, we predict a profit of 4519.77
For population = 70,000, we predict a profit of 45342.45
###Markdown
*You should now submit your solutions by executing the next cell.*
###Code
grader[3] = gradientDescent
grader.grade()
###Output
Submitting Solutions | Programming Exercise linear-regression
Use token from last successful submission ([email protected])? (Y/n): y
Part Name | Score | Feedback
--------- | ----- | --------
Warm up exercise | 10 / 10 | Nice work!
Computing Cost (for one variable) | 40 / 40 | Nice work!
Gradient Descent (for one variable) | 50 / 50 | Nice work!
Feature Normalization | 0 / 0 |
Computing Cost (for multiple variables) | 0 / 0 |
Gradient Descent (for multiple variables) | 0 / 0 |
Normal Equations | 0 / 0 |
--------------------------------
| 100 / 100 |
###Markdown
2.4 Visualizing $J(\theta)$To understand the cost function $J(\theta)$ better, you will now plot the cost over a 2-dimensional grid of $\theta_0$ and $\theta_1$ values. You will not need to code anything new for this part, but you should understand how the code you have written already is creating these images.In the next cell, the code is set up to calculate $J(\theta)$ over a grid of values using the `computeCost` function that you wrote. After executing the following cell, you will have a 2-D array of $J(\theta)$ values. Then, those values are used to produce surface and contour plots of $J(\theta)$ using the matplotlib `plot_surface` and `contourf` functions. The plots should look something like the following:The purpose of these graphs is to show you how $J(\theta)$ varies with changes in $\theta_0$ and $\theta_1$. The cost function $J(\theta)$ is bowl-shaped and has a global minimum. (This is easier to see in the contour plot than in the 3D surface plot). This minimum is the optimal point for $\theta_0$ and $\theta_1$, and each step of gradient descent moves closer to this point.
###Code
# grid over which we will calculate J
theta0_vals = np.linspace(-10, 10, 100)
theta1_vals = np.linspace(-1, 4, 100)
# initialize J_vals to a matrix of 0's
J_vals = np.zeros((theta0_vals.shape[0], theta1_vals.shape[0]))
# Fill out J_vals
for i, theta0 in enumerate(theta0_vals):
for j, theta1 in enumerate(theta1_vals):
J_vals[i, j] = computeCost(X, y, [theta0, theta1])
# Because of the way meshgrids work in the surf command, we need to
# transpose J_vals before calling surf, or else the axes will be flipped
J_vals = J_vals.T
# surface plot
fig = pyplot.figure(figsize=(12, 5))
ax = fig.add_subplot(121, projection='3d')
ax.plot_surface(theta0_vals, theta1_vals, J_vals, cmap='viridis')
pyplot.xlabel('theta0')
pyplot.ylabel('theta1')
pyplot.title('Surface')
# contour plot
# Plot J_vals as 15 contours spaced logarithmically between 0.01 and 100
ax = pyplot.subplot(122)
pyplot.contour(theta0_vals, theta1_vals, J_vals, linewidths=2, cmap='viridis', levels=np.logspace(-2, 3, 20))
pyplot.xlabel('theta0')
pyplot.ylabel('theta1')
pyplot.plot(theta[0], theta[1], 'ro', ms=10, lw=2)
pyplot.title('Contour, showing minimum')
pass
###Output
_____no_output_____
###Markdown
Optional Exercises. If you have successfully completed the material above, congratulations! You now understand linear regression and should be able to start using it on your own datasets. For the rest of this programming exercise, we have included the following optional exercises. These exercises will help you gain a deeper understanding of the material, and if you are able to do so, we encourage you to complete them as well. You can still submit your solutions to these exercises to check if your answers are correct. 3 Linear regression with multiple variables. In this part, you will implement linear regression with multiple variables to predict the prices of houses. Suppose you are selling your house and you want to know what a good market price would be. One way to do this is to first collect information on recently sold houses and build a model of housing prices. The file `Data/ex1data2.txt` contains a training set of housing prices in Portland, Oregon. The first column is the size of the house (in square feet), the second column is the number of bedrooms, and the third column is the price of the house. 3.1 Feature Normalization. We start by loading and displaying some values from this dataset. By looking at the values, note that house sizes are about 1000 times the number of bedrooms. When features differ by orders of magnitude, first performing feature scaling can make gradient descent converge much more quickly.
###Code
# Load data
data = np.loadtxt(os.path.join('Data', 'ex1data2.txt'), delimiter=',')
X = data[:, :2]
y = data[:, 2]
m = y.size
# print out some data points
print('{:>8s}{:>8s}{:>10s}'.format('X[:,0]', 'X[:, 1]', 'y'))
print('-'*26)
for i in range(10):
print('{:8.0f}{:8.0f}{:10.0f}'.format(X[i, 0], X[i, 1], y[i]))
###Output
X[:,0] X[:, 1] y
--------------------------
2104 3 399900
1600 3 329900
2400 3 369000
1416 2 232000
3000 4 539900
1985 4 299900
1534 3 314900
1427 3 198999
1380 3 212000
1494 3 242500
###Markdown
Your task here is to complete the code in the `featureNormalize` function: - Subtract the mean value of each feature from the dataset. - After subtracting the mean, additionally scale (divide) the feature values by their respective “standard deviations.” The standard deviation is a way of measuring how much variation there is in the range of values of a particular feature (most data points will lie within ±2 standard deviations of the mean); this is an alternative to taking the range of values (max-min). In `numpy`, you can use the `std` function to compute the standard deviation. For example, the quantity `X[:, 0]` contains all the values of $x_1$ (house sizes) in the training set, so `np.std(X[:, 0])` computes the standard deviation of the house sizes. At the time that the function `featureNormalize` is called, the extra column of 1’s corresponding to $x_0 = 1$ has not yet been added to $X$. You will do this for all the features, and your code should work with datasets of all sizes (any number of features / examples). Note that each column of the matrix $X$ corresponds to one feature. **Implementation Note:** When normalizing the features, it is important to store the values used for normalization - the mean value and the standard deviation used for the computations. After learning the parameters from the model, we often want to predict the prices of houses we have not seen before. Given a new x value (living room area and number of bedrooms), we must first normalize x using the mean and standard deviation that we had previously computed from the training set.
###Code
def featureNormalize(X):
"""
Normalizes the features in X. returns a normalized version of X where
the mean value of each feature is 0 and the standard deviation
is 1. This is often a good preprocessing step to do when working with
learning algorithms.
Parameters
----------
X : array_like
The dataset of shape (m x n).
Returns
-------
X_norm : array_like
The normalized dataset of shape (m x n).
Instructions
------------
First, for each feature dimension, compute the mean of the feature
and subtract it from the dataset, storing the mean value in mu.
Next, compute the standard deviation of each feature and divide
each feature by its standard deviation, storing the standard deviation
in sigma.
Note that X is a matrix where each column is a feature and each row is
an example. You need to perform the normalization separately for each feature.
Hint
----
You might find the 'np.mean' and 'np.std' functions useful.
"""
# You need to set these values correctly
X_norm = X.copy()
mu = np.zeros(X.shape[1])
sigma = np.zeros(X.shape[1])
# =========================== YOUR CODE HERE =====================
mu = np.mean(X_norm, axis = 0)
# mu = np.sum(X_norm, axis = 0) / X.shape[0]
sigma = np.std(X_norm , axis = 0)
X_norm = (X_norm - mu) / sigma
# ================================================================
return X_norm, mu, sigma
###Output
_____no_output_____
###Markdown
Execute the next cell to run the implemented `featureNormalize` function.
###Code
# call featureNormalize on the loaded data
X_norm, mu, sigma = featureNormalize(X)
print('Computed mean:', mu)
print('Computed standard deviation:', sigma)
###Output
Computed mean: [2000.68085106 3.17021277]
Computed standard deviation: [7.86202619e+02 7.52842809e-01]
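###Markdown
As the implementation note above points out, the stored `mu` and `sigma` must be reused for any new example before a prediction is made. A minimal sketch (the 1650 sq-ft, 3-bedroom house is just an illustrative input, the same one used later in this exercise):
###Code
# normalize a new example with the training-set statistics computed above
x_new = np.array([1650, 3])        # house size in square feet, number of bedrooms
x_new_norm = (x_new - mu) / sigma  # reuse mu and sigma from featureNormalize
print('Normalized example:', x_new_norm)
###Output
_____no_output_____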
###Markdown
*You should now submit your solutions.*
###Code
grader[4] = featureNormalize
grader.grade()
###Output
Submitting Solutions | Programming Exercise linear-regression
Use token from last successful submission ([email protected])? (Y/n): y
Part Name | Score | Feedback
--------- | ----- | --------
Warm up exercise | 10 / 10 | Nice work!
Computing Cost (for one variable) | 40 / 40 | Nice work!
Gradient Descent (for one variable) | 50 / 50 | Nice work!
Feature Normalization | 0 / 0 | Nice work!
Computing Cost (for multiple variables) | 0 / 0 | Nice work!
Gradient Descent (for multiple variables) | 0 / 0 | Nice work!
Normal Equations | 0 / 0 | Nice work!
--------------------------------
| 100 / 100 |
###Markdown
After the `featureNormalize` function is tested, we now add the intercept term to `X_norm`:
###Code
# Add intercept term to X
X = np.concatenate([np.ones((m, 1)), X_norm], axis=1)
###Output
_____no_output_____
###Markdown
3.2 Gradient Descent. Previously, you implemented gradient descent on a univariate regression problem. The only difference now is that there is one more feature in the matrix $X$. The hypothesis function and the batch gradient descent update rule remain unchanged. You should complete the code for the functions `computeCostMulti` and `gradientDescentMulti` to implement the cost function and gradient descent for linear regression with multiple variables. If your code in the previous part (single variable) already supports multiple variables, you can use it here too. Make sure your code supports any number of features and is well-vectorized. You can use the `shape` property of `numpy` arrays to find out how many features are present in the dataset. **Implementation Note:** In the multivariate case, the cost function can also be written in the following vectorized form: $$ J(\theta) = \frac{1}{2m}(X\theta - \vec{y})^T(X\theta - \vec{y}) $$ where $$ X = \begin{pmatrix} - (x^{(1)})^T - \\ - (x^{(2)})^T - \\ \vdots \\ - (x^{(m)})^T - \\ \\ \end{pmatrix} \qquad \mathbf{y} = \begin{bmatrix} y^{(1)} \\ y^{(2)} \\ \vdots \\ y^{(m)} \\\end{bmatrix}$$ The vectorized version is efficient when you are working with numerical computing tools like `numpy`. If you are an expert with matrix operations, you can prove to yourself that the two forms are equivalent.
###Code
def computeCostMulti(X, y, theta):
"""
Compute cost for linear regression with multiple variables.
Computes the cost of using theta as the parameter for linear regression to fit the data points in X and y.
Parameters
----------
X : array_like
The dataset of shape (m x n+1).
y : array_like
A vector of shape (m, ) for the values at a given data point.
theta : array_like
The linear regression parameters. A vector of shape (n+1, )
Returns
-------
J : float
The value of the cost function.
Instructions
------------
Compute the cost of a particular choice of theta. You should set J to the cost.
"""
# Initialize some useful values
m = y.shape[0] # number of training examples
# You need to return the following variable correctly
J = 0
# ======================= YOUR CODE HERE ===========================
ht = np.dot(X, theta)
tem = (ht - y)**2
summ = np.sum(tem, axis = 0)
J = (1/ (2*m)) * summ
# ==================================================================
return J
###Output
_____no_output_____
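###Markdown
The quadratic form given in the implementation note can also be coded directly. A minimal sketch (equivalent to the element-wise version implemented in `computeCostMulti` above, shown only to illustrate the vectorized formula):
###Code
def computeCostMultiVectorized(X, y, theta):
    # J = 1/(2m) * (X @ theta - y)^T (X @ theta - y)
    m = y.shape[0]
    residual = np.dot(X, theta) - y
    return np.dot(residual, residual) / (2 * m)
###Output
_____no_output_____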
###Markdown
*You should now submit your solutions.*
###Code
grader[5] = computeCostMulti
grader.grade()
###Output
Submitting Solutions | Programming Exercise linear-regression
Use token from last successful submission ([email protected])? (Y/n): y
Part Name | Score | Feedback
--------- | ----- | --------
Warm up exercise | 10 / 10 | Nice work!
Computing Cost (for one variable) | 40 / 40 | Nice work!
Gradient Descent (for one variable) | 50 / 50 | Nice work!
Feature Normalization | 0 / 0 | Nice work!
Computing Cost (for multiple variables) | 0 / 0 | Nice work!
Gradient Descent (for multiple variables) | 0 / 0 | Nice work!
Normal Equations | 0 / 0 | Nice work!
--------------------------------
| 100 / 100 |
###Markdown
###Code
def gradientDescentMulti(X, y, theta, alpha, num_iters):
"""
Performs gradient descent to learn theta.
Updates theta by taking num_iters gradient steps with learning rate alpha.
Parameters
----------
X : array_like
The dataset of shape (m x n+1).
y : array_like
A vector of shape (m, ) for the values at a given data point.
theta : array_like
The linear regression parameters. A vector of shape (n+1, )
alpha : float
The learning rate for gradient descent.
num_iters : int
The number of iterations to run gradient descent.
Returns
-------
theta : array_like
The learned linear regression parameters. A vector of shape (n+1, ).
J_history : list
A python list for the values of the cost function after each iteration.
Instructions
------------
Peform a single gradient step on the parameter vector theta.
While debugging, it can be useful to print out the values of
the cost function (computeCost) and gradient here.
"""
# Initialize some useful values
m = y.shape[0] # number of training examples
# make a copy of theta, which will be updated by gradient descent
theta = theta.copy()
J_history = []
for i in range(num_iters):
# ======================= YOUR CODE HERE ==========================
ht = np.dot(X, theta)
ht_minus_y = ht - y
s = np.dot(ht_minus_y , X)
theta = theta - alpha * (1/m) * s
# =================================================================
# save the cost J in every iteration
J_history.append(computeCostMulti(X, y, theta))
return theta, J_history
# initialize fitting parameters
# some gradient descent settings
iterations = 1500
alpha = 0.01
theta, J_history = gradientDescentMulti(X ,y, theta = np.array([0, 0, 0]) , alpha = alpha,num_iters=iterations)
print(theta)
###Output
[340412.56301439 109370.05670466 -6500.61509507]
###Markdown
*You should now submit your solutions.*
###Code
grader[6] = gradientDescentMulti
grader.grade()
###Output
Submitting Solutions | Programming Exercise linear-regression
Use token from last successful submission ([email protected])? (Y/n): y
Part Name | Score | Feedback
--------- | ----- | --------
Warm up exercise | 10 / 10 | Nice work!
Computing Cost (for one variable) | 40 / 40 | Nice work!
Gradient Descent (for one variable) | 50 / 50 | Nice work!
Feature Normalization | 0 / 0 | Nice work!
Computing Cost (for multiple variables) | 0 / 0 | Nice work!
Gradient Descent (for multiple variables) | 0 / 0 | Nice work!
Normal Equations | 0 / 0 | Nice work!
--------------------------------
| 100 / 100 |
###Markdown
3.2.1 Optional (ungraded) exercise: Selecting learning rates. In this part of the exercise, you will get to try out different learning rates for the dataset and find a learning rate that converges quickly. You can change the learning rate by modifying the following code and changing the part of the code that sets the learning rate. Use your implementation of the `gradientDescentMulti` function and run gradient descent for about 50 iterations at the chosen learning rate. The function should also return the history of $J(\theta)$ values in a vector $J$. After the last iteration, plot the J values against the number of iterations. If you picked a learning rate within a good range, your plot should look similar to the following figure. If your graph looks very different, especially if your value of $J(\theta)$ increases or even blows up, adjust your learning rate and try again. We recommend trying values of the learning rate $\alpha$ on a log-scale, at multiplicative steps of about 3 times the previous value (i.e., 0.3, 0.1, 0.03, 0.01 and so on). You may also want to adjust the number of iterations you are running if that will help you see the overall trend in the curve. **Implementation Note:** If your learning rate is too large, $J(\theta)$ can diverge and ‘blow up’, resulting in values which are too large for computer calculations. In these situations, `numpy` will tend to return NaNs. NaN stands for ‘not a number’ and is often caused by undefined operations that involve −∞ and +∞. **MATPLOTLIB tip:** To compare how different learning rates affect convergence, it is helpful to plot $J$ for several learning rates on the same figure. This can be done by making `alpha` a python list, looping across the values within this list, and calling the plot function in every iteration of the loop. It is also useful to have a legend to distinguish the different lines within the plot. Search online for `pyplot.legend` for help on showing legends in `matplotlib`. Notice the changes in the convergence curves as the learning rate changes. With a small learning rate, you should find that gradient descent takes a very long time to converge to the optimal value. Conversely, with a large learning rate, gradient descent might not converge or might even diverge! Using the best learning rate that you found, run the script to run gradient descent until convergence and find the final values of $\theta$. Next, use this value of $\theta$ to predict the price of a house with 1650 square feet and 3 bedrooms. You will use this value later to check your implementation of the normal equations. Don’t forget to normalize your features when you make this prediction!
###Code
"""
Instructions
------------
We have provided you with the following starter code that runs
gradient descent with a particular learning rate (alpha).
Your task is to first make sure that your functions - `computeCost`
and `gradientDescent` already work with this starter code and
support multiple variables.
After that, try running gradient descent with different values of
alpha and see which one gives you the best result.
Finally, you should complete the code at the end to predict the price
of a 1650 sq-ft, 3 br house.
Hint
----
At prediction, make sure you do the same feature normalization.
"""
# Choose some alpha value - change this
num_iters = 400
pyplot.xlabel('Number of iterations')
pyplot.ylabel('Cost J')
alpha = np.array([0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 1.2])
for x in np.nditer(alpha):
theta = np.zeros(3)
# init theta and run gradient descent
theta, J_history = gradientDescentMulti(X, y, theta, x, num_iters)
# Plot the convergence graph
pyplot.plot(np.arange(len(J_history)), J_history, lw=2, label=x)
pyplot.legend(loc='upper right')
# Display the gradient descent's result
print('theta computed from gradient descent: {:s}'.format(str(theta)))
# Estimate the price of a 1650 sq-ft, 3 br house
# ======================= YOUR CODE HERE ===========================
# Recall that the first column of X is all-ones.
# Thus, it does not need to be normalized.
price = 0 # You should change this
alp = 0.1
theta, J_history = gradientDescentMulti(X, y, theta, alp, num_iters)
# print(X)
features = np.array([1650, 3])
features = (features - mu)/ sigma
features = np.concatenate([np.array([1]), features]) ### feature X0 should be always 1. If you normalize X0, it's supposed
### to be zero, which would defeat its purpose. So normalize every feature except X0
### and add the extra '1' in the first column manually.
price = np.dot(features, theta)
# print(theta)
# ===================================================================
print('Predicted price of a 1650 sq-ft, 3 br house (using gradient descent): ${:.0f}'.format(price))
###Output
theta computed from gradient descent: [112272.89290139 33254.00585532 14524.42608546]
theta computed from gradient descent: [238067.09470609 67368.83752161 19746.42023768]
theta computed from gradient descent: [334302.06399328 99411.44947359 3267.01285407]
theta computed from gradient descent: [340410.91897274 109162.68848142 -6293.24735132]
theta computed from gradient descent: [340412.65957447 109447.79558639 -6578.3539709 ]
theta computed from gradient descent: [340412.65957447 109447.79646964 -6578.35485416]
theta computed from gradient descent: [340412.65957447 109447.79646964 -6578.35485416]
theta computed from gradient descent: [340412.65957447 109447.79646964 -6578.35485416]
Predicted price of a 1650 sq-ft, 3 br house (using gradient descent): $293081
###Markdown
*You do not need to submit any solutions for this optional (ungraded) part.* 3.3 Normal Equations. In the lecture videos, you learned that the closed-form solution to linear regression is $$ \theta = \left( X^T X\right)^{-1} X^T\vec{y}$$ Using this formula does not require any feature scaling, and you will get an exact solution in one calculation: there is no “loop until convergence” like in gradient descent. First, we will reload the data to ensure that the variables have not been modified. Remember that while you do not need to scale your features, we still need to add a column of 1’s to the $X$ matrix to have an intercept term ($\theta_0$). The code in the next cell will add the column of 1’s to X for you.
###Code
# Load data
data = np.loadtxt(os.path.join('Data', 'ex1data2.txt'), delimiter=',')
X = data[:, :2]
y = data[:, 2]
m = y.size
X = np.concatenate([np.ones((m, 1)), X], axis=1)
###Output
_____no_output_____
###Markdown
Complete the code for the function `normalEqn` below to use the formula above to calculate $\theta$.
###Code
def normalEqn(X, y):
"""
Computes the closed-form solution to linear regression using the normal equations.
Parameters
----------
X : array_like
The dataset of shape (m x n+1).
y : array_like
The value at each data point. A vector of shape (m, ).
Returns
-------
theta : array_like
Estimated linear regression parameters. A vector of shape (n+1, ).
Instructions
------------
Complete the code to compute the closed form solution to linear
regression and put the result in theta.
Hint
----
Look up the function `np.linalg.pinv` for computing matrix inverse.
"""
theta = np.zeros(X.shape[1])
# ===================== YOUR CODE HERE ============================
xTx = np.dot(X.transpose(), X)
xTx_inverse = np.linalg.pinv(xTx)
xTy = np.dot(X.transpose(), y)
theta = np.dot(xTx_inverse , xTy)
# =================================================================
return theta
theta = normalEqn(X, y)
print(theta)
###Output
[89597.90954361 139.21067402 -8738.01911255]
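###Markdown
As a quick sanity check (not part of the graded exercise), the same least-squares solution can be obtained with numpy's solver; assuming `X` and `y` are still the variables loaded above, the result should match `normalEqn` up to numerical precision:
###Code
# cross-check the normal-equation solution with numpy's least-squares solver
theta_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
print('Theta from np.linalg.lstsq:', theta_lstsq)
print('Theta from normalEqn      :', normalEqn(X, y))
###Output
_____no_output_____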
###Markdown
*You should now submit your solutions.*
###Code
grader[7] = normalEqn
grader.grade()
###Output
Submitting Solutions | Programming Exercise linear-regression
Use token from last successful submission ([email protected])? (Y/n): y
Part Name | Score | Feedback
--------- | ----- | --------
Warm up exercise | 10 / 10 | Nice work!
Computing Cost (for one variable) | 40 / 40 | Nice work!
Gradient Descent (for one variable) | 50 / 50 | Nice work!
Feature Normalization | 0 / 0 | Nice work!
Computing Cost (for multiple variables) | 0 / 0 | Nice work!
Gradient Descent (for multiple variables) | 0 / 0 | Nice work!
Normal Equations | 0 / 0 | Nice work!
--------------------------------
| 100 / 100 |
###Markdown
Optional (ungraded) exercise: Now, once you have found $\theta$ using this method, use it to make a price prediction for a 1650-square-foot house with 3 bedrooms. You should find that it gives the same predicted price as the value you obtained using the model fit with gradient descent (in Section 3.2.1).
###Code
# Calculate the parameters from the normal equation
theta = normalEqn(X, y);
# Display normal equation's result
print('Theta computed from the normal equations: {:s}'.format(str(theta)));
# Estimate the price of a 1650 sq-ft, 3 br house
# ====================== YOUR CODE HERE ======================
price = 0 # You should change this
theta = normalEqn(X, y)
price = np.dot(np.array([1, 1650, 3]), theta)
# ============================================================
print('Predicted price of a 1650 sq-ft, 3 br house (using normal equations): ${:.0f}'.format(price))
###Output
Theta computed from the normal equations: [89597.90954361 139.21067402 -8738.01911255]
Predicted price of a 1650 sq-ft, 3 br house (using normal equations): $293081
|
tensor_operations.ipynb | ###Markdown
Here we use the PyTorch library and its `torch.tensor()` functionality, which accepts vectors, matrices, 3-D arrays and NumPy arrays as input, converts them into tensors and lets us apply mathematical operations to them. A short introduction to PyTorch and to the chosen functions: - Function 1: math functions such as torch.rand(), torch.abs_() and torch.allclose - Function 2: torch.as_strided (layout functions) - Function 3: functions that work on individual elements instead of whole chunks - Function 4: functions that deal with sub-tensors, such as storage_offset() - Function 5: symeig (eigenvalue functions)
###Code
# Import torch and other required modules
import torch
import os
import numpy as np
###Output
_____no_output_____
###Markdown
Function 1 - Some of the math functions used here to play with the tensor inputs: 1. tensor = torch.rand((n_rows, n_cols)) 2. in-place operations such as tensor.abs_() and tensor.add_(value, alpha=1) 3. torch.allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False)
###Code
# Example 1 - working (change this)
#x = np.array([[1, 2], [3, 4.]])
#y = torch.from_numpy(x)
tensor=torch.rand((2,3))
z = tensor.new_tensor([[9,0,-7.],[5,7,.0]], requires_grad=False)
z.shape
z.permute([-2,1])
###Output
_____no_output_____
###Markdown
First we use rand() to create a tensor with random values of the given shape. Then new_tensor() builds a new tensor z from explicit data, with requires_grad set to False; the new tensor keeps the dtype and device of the tensor it was created from. We can check the dimensions of z with .shape and reorder its dimensions with .permute().
###Code
# Example 2 - working
z.abs_()
z.add_(2,alpha=1)
###Output
_____no_output_____
###Markdown
Here abs_() replaces every element of the tensor with its absolute value in place, and add_(2, alpha=1) then adds 2 * alpha to every element, also in place.
###Code
# Example 3 - breaking (to illustrate when it breaks)
tensor = torch.tensor([[1, 2,-1.], [3, 4, 5]])
tensor2 = torch.exp(tensor).sum()
w = torch.rand(2,3)
w.allclose(tensor, rtol=1e-05, atol=1e-08, equal_nan=False)
w.argsort()
w.asin_()
###Output
_____no_output_____
###Markdown
In the allclose function the parameters are: input (Tensor) – first tensor to compare; other (Tensor) – second tensor to compare; atol (float, optional) – absolute tolerance, default 1e-08; rtol (float, optional) – relative tolerance, default 1e-05; equal_nan (bool, optional) – if True, two NaNs are considered equal, default False. The argsort function returns the indices that would sort the elements of the tensor. asin_ provides support for the inverse sine function in PyTorch: it expects the input to be in the range [-1, 1] and returns the output in radians, and it returns nan for inputs outside [-1, 1]. The input is a tensor, and if it contains more than one element the inverse sine is computed element-wise. Closing comments about when to use this function Function 2 - 1. torch.as_strided, 2. torch.bincount, 3. torch.diag_embed. 1. torch.as_strided creates a view of an existing torch.Tensor input with the specified size, stride and storage_offset. 2. bincount counts how often each value occurs in an integer input tensor; an optional weights tensor of the same length makes each occurrence contribute its weight instead of 1. 3. diag_embed builds a tensor whose diagonals of certain 2-D planes are filled with the input values.
###Code
# Example 1 - working
w.as_strided((3,1),(2,2), storage_offset=0)
t = torch.tensor([[9,0,0],[-9,-1.,-4],[0,-9,-8]])
#w.baddbmm_(t,t, beta=1, alpha=1)
w.bernoulli_(p=0.5, generator=None)
###Output
_____no_output_____
###Markdown
Here bernoulli_() fills each element of the tensor in place with an independent sample from a Bernoulli(p) distribution; the tensor may have an integral dtype.
###Code
# Example 2 - working
torch.bincount(weights=torch.tensor([2]),input=torch.tensor([11]) ,minlength=1)
###Output
_____no_output_____
###Markdown
Here bincount() counts how many times each non-negative integer value occurs in the 1-D input tensor. The optional weights tensor must have the same length as the input; when it is supplied, each occurrence contributes its weight to the corresponding bin instead of 1. minlength guarantees a minimum number of bins in the result.
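A small check of this behaviour (the values below are chosen purely for illustration): counting the occurrences of each integer, with and without per-element weights.
###Code
# counts of each value from 0 to max(input); e.g. [0, 1, 1, 3] -> tensor([1, 2, 0, 1])
vals = torch.tensor([0, 1, 1, 3])
print(torch.bincount(vals))
# with weights, each occurrence adds its weight to the bin instead of 1
print(torch.bincount(vals, weights=torch.tensor([0.5, 1.0, 1.0, 2.0])))  # -> tensor([0.5, 2.0, 0.0, 2.0])
###Output
_____no_output_____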
###Code
# Example 3 - breaking (to illustrate when it breaks)
#torch.cholesky_solve(input=torch.tensor([[5,6],[8,9]]),input2=torch.tensor([[3,8],[1,8]]),upper=False)
torch.diag_embed(t,offset=0, dim1=-2, dim2=-1)
###Output
_____no_output_____
###Markdown
Creates a tensor whose diagonals of certain 2-D planes (selected by dim1 and dim2) are filled with the values of input. To make it easy to build batches of diagonal matrices, the 2-D planes formed by the last two dimensions of the returned tensor are chosen by default. The argument offset controls which diagonal is filled: when offset = 0 it is the main diagonal, when offset > 0 it is above the main diagonal, and when offset < 0 it is below the main diagonal. The size of the new matrix is calculated so that the specified diagonal has the size of the last input dimension. Note that for offsets other than 0 the order of dim1 and dim2 matters: swapping them is equivalent to changing the sign of offset. Closing comments about when to use this function Function 3 - 1. torch.erfinv, 2. torch.split(), 3. torch.sparse_mask. erfinv computes the inverse error function of each input element. As the name suggests, split() splits the tensor into chunks so that an operation can then be applied to them. sparse_mask filters the values of a dense input tensor using the indices of a sparse mask.
###Code
# Example 1 - working
torch.erfinv(t)
###Output
_____no_output_____
###Markdown
torch.erfinv(input, out=None) → Tensor. Computes the inverse error function of each element of input. The inverse error function is defined in the range (-1, 1).
###Code
# Example 2 - working
#torch.index_fill(t,w,2,dim=1)
#torch.scatter_add(dim=1, index=t, src=w)
torch.split(t,split_size_or_sections=2,dim=0)
###Output
_____no_output_____
###Markdown
Splits the tensor into chunks; each chunk is a view of the original tensor. If split_size_or_sections is an integer, the tensor is split into equally sized chunks (when possible); the last chunk will be smaller if the tensor size along the given dimension dim is not divisible by split_size. If split_size_or_sections is a list, the tensor is split into len(split_size_or_sections) chunks whose sizes along dim follow split_size_or_sections.
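A short illustration (the sizes below are chosen arbitrarily): splitting a 1-D tensor into equal chunks and into explicitly sized sections.
###Code
x = torch.arange(10)
print(torch.split(x, 4))         # chunks of size 4; the last chunk is smaller: (0..3), (4..7), (8..9)
print(torch.split(x, [3, 3, 4])) # explicit section sizes along dim 0
###Output
_____no_output_____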
###Code
# Example 3 - breaking (to illustrate when it breaks)
nnz = 5
dims = [5, 5,2,2]
I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
V = torch.randn(nnz, dims[2], dims[3])
size = torch.Size(dims)
S = torch.sparse_coo_tensor(I,V,size).coalesce()
D = torch.randn(dims)
D.sparse_mask(S)
###Output
_____no_output_____
###Markdown
Returns a new SparseTensor whose values are taken from the dense tensor input at the indices of mask; the values stored in mask itself are ignored. input and mask must have the same shape. Parameters: input (Tensor) – the input dense tensor; mask (SparseTensor) – a sparse tensor whose indices are used to filter input. Closing comments about when to use this function Function 4 - 1. storage_offset(): returns the tensor's offset into its underlying storage. 2. stride: returns, per dimension, the step size used to move through the underlying storage. 3. sum: sums up the elements of the tensor.
###Code
# Example 1 - working
#torch.stft(n_fft=t,hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=True)
t.storage_offset()
###Output
_____no_output_____
###Markdown
Since t was created directly and is not a view into a larger tensor, its offset into the underlying storage is 0, so storage_offset() returns 0.
###Code
# Example 2 - working
t.stride()
###Output
_____no_output_____
###Markdown
The stride of a dimension is the number of elements that have to be skipped in the underlying storage to move to the next element along that dimension. Called without arguments, stride() returns a tuple with the stride of every dimension; called with a dimension index, it returns the stride of that single dimension.
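A small example of how strides relate to the memory layout (the shape below is chosen only for illustration).
###Code
a = torch.zeros(2, 3)
print(a.stride())      # (3, 1): moving one row skips 3 elements, one column skips 1
print(a.stride(0))     # stride of a single dimension
print(a.t().stride())  # transposing swaps the strides without copying: (1, 3)
###Output
_____no_output_____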
###Code
# Example 3 - breaking (to illustrate when it breaks)
torch.sum(t,dtype=None)
###Output
_____no_output_____
###Markdown
Here sum adds up all the elements of the tensor and returns the result as a scalar tensor. Since all elements of t were converted to float, the result is a float as well; the dtype argument can be used to cast the elements to a different dtype before the sum is computed. Closing comments about when to use this function Function 5 - Here we use functions that compute eigenvalues and eigenvectors. By default only the upper triangular part of the (symmetric) input matrix is used.
###Code
# Example 1 - working
torch.symeig(t,eigenvectors=False, upper=True)
###Output
_____no_output_____
###Markdown
This function returns the eigenvalues and eigenvectors of a real symmetric matrix input, or of a batch of real symmetric matrices, represented by a namedtuple (eigenvalues, eigenvectors). It computes all eigenvalues (and, optionally, eigenvectors) of input such that $\text{input} = V \, \text{diag}(e) \, V^T$. The boolean argument eigenvectors determines whether both eigenvectors and eigenvalues are computed, or eigenvalues only: if it is False, only eigenvalues are computed; if it is True, both eigenvalues and eigenvectors are computed. Since the input matrix is assumed to be symmetric, only its upper triangular portion is used by default. If upper is False, the lower triangular portion is used instead.
###Code
# Example 2 - working
torch.symeig(t, eigenvectors=True)
###Output
_____no_output_____
###Markdown
This is the same computation as above, but with eigenvectors=True it returns both the eigenvalues and the eigenvectors.
###Code
# Example 3 - breaking (to illustrate when it breaks
torch.rand(2)
###Output
_____no_output_____
###Markdown
torch.rand(2) prints a 1-D tensor with two random values drawn uniformly from [0, 1). Closing comments about when to use this function Conclusion: summarize what was covered in this notebook, and where to go next. This notebook covered functions of torch and torch.Tensor. Reference Links: provide links to your references and other interesting articles about tensors * Official documentation for `torch.Tensor`: https://pytorch.org/docs/stable/tensors.html* ...
###Code
!pip install jovian --upgrade --quiet
import jovian
jovian.commit()
###Output
_____no_output_____ |
training/TimeSeriesTraining.ipynb | ###Markdown
RNN on Time series dataset. In this notebook we will build a recurrent model on a multi-variate time series dataset. The data: the dataset is a multi-variate time series. Let's do a very short EDA on it:
###Code
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
# load data
def parse(x):
return datetime.strptime(x, '%Y %m %d %H')
dataset_path = '/home/fer/data/formaciones/master/deep-learning-intro/datasets/time_series/pollution.csv'
dataset = pd.read_csv(dataset_path, parse_dates = [['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)
dataset.drop('No', axis=1, inplace=True)
values = dataset.values
# specify columns to plot
groups = [0, 1, 2, 3, 5, 6, 7]
i = 1
# plot each column
plt.figure(figsize=(10,10))
for group in groups:
plt.subplot(len(groups), 1, i)
plt.plot(values[:, group])
plt.title(dataset.columns[group], y=0.5, loc='right')
i += 1
plt.show()
# manually specify column names
dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
dataset.index.name = 'date'
# mark all NA values with 0
dataset['pollution'].fillna(0, inplace=True)
# drop the first 24 hours
dataset = dataset[24:]
# summarize first 5 rows
dataset.head()
###Output
_____no_output_____
###Markdown
This is what we have. It would be worth spending more time reviewing it, but we will focus on the RNN application. Let's first get rid of the categorical feature by using a label encoder (not the best solution, for sure) and scale the now continuous features. Scale the dataset. Hint: use the MinMaxScaler function; a possible completion is sketched right after the cell below.
###Code
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
values = dataset.values
# integer encode direction
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
values = values.astype('float32')
...
###Output
_____no_output_____
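###Markdown
One possible way to complete the scaling step above (a sketch): fit a MinMaxScaler on all features and keep the fitted object, because the later cells expect variables named `scaled` and `scaler`.
###Code
# a minimal sketch of the scaling step; `scaler` and `scaled` are reused further below
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
print(scaled.shape, scaled.min(), scaled.max())
###Output
_____no_output_____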
###Markdown
The next function converts our series data into a supervised learning one, by shifting it and adding timesteps to each row:
###Code
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list or NumPy array.
n_in: Number of lag observations as input (X).
n_out: Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
n_vars = data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
reframed.head()
###Output
_____no_output_____
###Markdown
This way, in our example, we have the feature values for the current timestep and the previous. The idea is to predict the next from the current. So we are interested in predicting $var1(t)$ from $var1(t-1), var2(t-1), \cdots, var8(t-1)$. Let's drop the useless features:
###Code
# drop columns we don't want to predict
reframed.drop(reframed.columns[[9,10,11,12,13,14,15]], axis=1, inplace=True)
reframed.head()
###Output
_____no_output_____
###Markdown
Let's now divide it into training and testing. In order to do that, we can leave the last year as testing. Split the data; a possible split is sketched after the cell below.
###Code
# split into train and test sets
values = reframed.values
...
train = ...
test = ...
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
###Output
_____no_output_____
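###Markdown
A possible completion of the split above (a sketch; the exact cut point depends on how many full years the file contains): since the data is hourly, one year corresponds to roughly 365 * 24 rows, so the last year can be held out as the test set. These lines belong where the `...` placeholders are in the cell above.
###Code
# hold out the last year (365 * 24 hourly rows) for testing -- one possible split, not the only valid one
n_test_hours = 365 * 24
train = values[:-n_test_hours, :]
test = values[-n_test_hours:, :]
###Output
_____no_output_____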
###Markdown
Please note the dataset shape: (N_samples, timesteps, n_features). Model Architecture: we will now build an LSTM network in order to predict the target. - Use an LSTM layer with 50 units. - Use a dense layer with a single unit: the feature we are trying to predict. - Compile it using mae as the loss and adam as the optimizer. - Train it using the test data as validation, not shuffling the data (why?), for 10 epochs with a batch size of 72. - Store the fit inside a history variable, for plotting purposes. Build the model; a possible completion is sketched after the starter cell below.
###Code
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
# design network
...
# fit network
history = ...
###Output
_____no_output_____
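###Markdown
A minimal sketch of one way to complete the cell above, following the architecture described earlier (50 LSTM units, a single-unit dense output, MAE loss with the Adam optimizer, 10 epochs, batch size 72, and no shuffling so the temporal order is preserved):
###Code
# design network -- a sketch of one possible completion
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
# fit network; shuffle=False because the samples are ordered in time
history = model.fit(train_X, train_y, epochs=10, batch_size=72,
                    validation_data=(test_X, test_y), shuffle=False, verbose=2)
###Output
_____no_output_____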
###Markdown
Now let's plot the loss. We only have a few epochs, but we can see the progress:
###Code
# plot history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Finally, let's see how well we did. Keep in mind that the model makes its predictions at the scaled level, so we need to invert the scaling:
###Code
import numpy as np
# make a prediction
yhat = model.predict(test_X)
x_test = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# invert scaling for forecast
inv_yhat = np.concatenate((yhat, x_test[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
y_test = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((y_test, x_test[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
###Output
_____no_output_____
###Markdown
Compute the RMSE and MAE on the test set; a sketch follows the starter cell below.
###Code
from sklearn.metrics import ...
from math import sqrt
# calculate RMSE
rmse = ...
mae = ...
print('Test RMSE: %.3f' % rmse)
print('Test MAE: %.3f' % mae)
###Output
_____no_output_____
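###Markdown
One way to fill in the metrics cell above (a sketch using scikit-learn's mean_squared_error and mean_absolute_error on the de-scaled predictions):
###Code
from sklearn.metrics import mean_squared_error, mean_absolute_error
from math import sqrt
# calculate RMSE and MAE on the de-scaled predictions
rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
mae = mean_absolute_error(inv_y, inv_yhat)
print('Test RMSE: %.3f' % rmse)
print('Test MAE: %.3f' % mae)
###Output
_____no_output_____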
###Markdown
And finally, let's plot the first 100 results:
###Code
plt.figure(figsize=(10,10))
plt.plot(inv_y[0:100], marker='.', label="true")
plt.plot(inv_yhat[0:100], 'r', label="prediction")
plt.ylabel('Value')
plt.xlabel('Time Step')
plt.legend()
plt.show()
###Output
_____no_output_____ |
analysis/notebooks/will-04-03-2022-preprocessing_and_modeling.ipynb | ###Markdown
Dataset pre-processing. This notebook focuses on pre-processing the BNCC dataset in order to train a classification model that predicts the "etapa do conhecimento" (stage of education).
###Code
import html
import pandas as pd
from nltk.tokenize import word_tokenize
from sklearn import (
feature_extraction,
linear_model,
metrics,
model_selection,
preprocessing,
)
from src.cleaning import cleaning
pd.set_option("display.max_rows", 1000)
pd.set_option("display.max_columns", 1000)
pd.set_option("display.width", 1000)
###Output
_____no_output_____
###Markdown
Selecting and filtering the features that will be used in the model
###Code
# Importing Data
df_bncc = pd.read_csv("/home/wilsonfranccadeolveiraneto/Documentos/TERA/bncc-classifier/data/raw/bncc_first_classifier.csv")
# Making a copy of the dataset and visualizing it
df_bncc_copy = df_bncc.copy()
df_bncc_copy.head()
# Dropping columns that will not be used
df_bncc_copy = df_bncc_copy[["id", "question", "name.2"]]
df_bncc_copy.head()
# Renaming dataset columns
df_bncc_copy.columns = ["id", "questions", "target"]
df_bncc_copy.head(3)
# Observing the target classes so we can drop the ones outside the BNCC school levels
df_bncc_copy["target"].value_counts().to_frame()
df_bncc_copy.shape
df_bncc_copy_targets_bncc = df_bncc_copy[df_bncc_copy["target"].isin(["Médio & Pré-Vestibular", "Fundamental II", "Fundamental I"])]
# Observing if the filtering was correctly applied
df_bncc_copy_targets_bncc
# Observing if the filtering was correctly applied by visualizing the clases
df_bncc_copy_targets_bncc["target"].value_counts().to_frame()
# How many observations do we have now?
df_bncc_copy_targets_bncc.shape
# Encoding the target with labels for the classifier
# this procedure can be done with LabelEncoder from scikit-learn, which gives us more control over the pipeline
lb_enc = preprocessing.LabelEncoder()
# fit on the column we want to encode
lb_enc.fit(df_bncc_copy_targets_bncc["target"])
# transform the same column; the same fitted encoder will later be applied to the test split as well as the train split
df_bncc_copy_targets_bncc["target_enc"] = lb_enc.transform(
df_bncc_copy_targets_bncc["target"]
)
df_bncc_copy_targets_bncc
# you can access how the labels was transformed by looking at the classes
lb_enc.classes_
###Output
_____no_output_____
###Markdown
Cleaning the dataset
###Code
# chaining all cleaning steps
df_bncc_copy_targets_bncc["questions_clean"] = (
df_bncc_copy_targets_bncc["questions"]
.astype(str)
.apply(html.unescape)
.apply(lambda x: cleaning.remove_html(x))
.apply(lambda x: x.lower())
.apply(lambda x: cleaning.remove_punctuation_2(x))
.apply(cleaning.remove_italic_quotes)
.apply(cleaning.remove_open_quotes)
.apply(cleaning.remove_end_quotes)
.apply(cleaning.remove_italic_dquotes)
.apply(cleaning.remove_open_dquotes)
.apply(cleaning.remove_quote)
.apply(lambda x: cleaning.remove_pt_stopwords(x))
.apply(lambda x: cleaning.remove_en_stopwords(x))
)
df_bncc_copy_targets_bncc.head(10)
# class to remove frequent and rare words; we can choose how many of each to remove
remove_frq_rare = cleaning.RemoveFrqRare(df=df_bncc_copy_targets_bncc)
remove_frq_rare.calc_frq_words()
remove_frq_rare.calc_rare_words()
bncc_cleaned_df = remove_frq_rare.remove_frq_and_rare()
bncc_cleaned_df.head()
###Output
_____no_output_____
###Markdown
Exporting the dataset
###Code
bncc_cleaned_df = bncc_cleaned_df[["id", "questions_clean", "target_enc"]]
# Exporting
bncc_cleaned_df.to_csv(
"/home/wilsonfranccadeolveiraneto/Documentos/TERA/bncc-classifier/data/curated/df_bncc_model_two_curated_pedro_02032022.csv"
)
###Output
_____no_output_____
###Markdown
Modeling
###Code
# removing registers with zero chars
bncc_cleaned_df["words_count"] = bncc_cleaned_df["questions_clean"].apply(len)
bncc_cleaned_filtered_df = bncc_cleaned_df[bncc_cleaned_df["words_count"] != 0]
bncc_cleaned_filtered_df
###Output
_____no_output_____
###Markdown
I later realized that CountVectorizer does not need the text to be tokenized beforehand: you can pass the raw text and it builds the tokens for the bag of words itself. That is why I am dropping that step below.
###Code
# standard sklearn variable names (X = features, y = target)
X = bncc_cleaned_filtered_df["questions_clean"]
y = bncc_cleaned_filtered_df["target_enc"]
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, random_state=1
)
# bow vect
vectBOW = feature_extraction.text.CountVectorizer()
X_train_trans = vectBOW.fit_transform(X_train)
X_test_trans = vectBOW.transform(X_test)
print(X_train_trans.shape, X_test_trans.shape)
logreg = linear_model.LogisticRegression(class_weight="balanced")
logreg.fit(X_train_trans, y_train)
y_pred_class = logreg.predict(X_test_trans)
print(metrics.classification_report(y_test, y_pred_class))
###Output
precision recall f1-score support
0 0.65 0.78 0.71 4575
1 0.65 0.64 0.65 8338
2 0.77 0.70 0.73 8785
accuracy 0.70 21698
macro avg 0.69 0.71 0.70 21698
weighted avg 0.70 0.70 0.70 21698
|
Prediction_with_HMM.ipynb | ###Markdown
Prediction with HMM
###Code
!pip install hmmlearn
import numpy as np
from hmmlearn import hmm
np.random.seed(42) # to make the code reproducible
print('imports done')
###Output
Requirement already satisfied: hmmlearn in /usr/local/lib/python3.7/dist-packages (0.2.5)
Requirement already satisfied: scipy>=0.19 in /usr/local/lib/python3.7/dist-packages (from hmmlearn) (1.4.1)
Requirement already satisfied: numpy>=1.10 in /usr/local/lib/python3.7/dist-packages (from hmmlearn) (1.19.5)
Requirement already satisfied: scikit-learn>=0.16 in /usr/local/lib/python3.7/dist-packages (from hmmlearn) (0.22.2.post1)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.16->hmmlearn) (1.0.1)
imports done
###Markdown
Data Loading and Splitting
###Code
data = np.load('data.npy')
n_sample = data.shape[0]
flat_data = data.reshape(n_sample, -1)
print('data shape after flattening', flat_data.shape)
n_train = int(0.8 * n_sample )
n_test = n_sample - n_train
train_data = flat_data[:n_train]
test_data = flat_data[n_train:]
print('Training data split', train_data.shape)
print('Testing data split', test_data.shape)
###Output
data shape after flattening (4500, 9)
Training data split (3600, 9)
Testing data split (900, 9)
###Markdown
Model Training
###Code
# model creation and training
model = hmm.GaussianHMM(n_components=32,
covariance_type="full",
n_iter=100,
verbose=True)
model.fit(train_data)
###Output
1 -9825.9681 +nan
2 50783.7443 +60609.7125
3 191823.7693 +141040.0250
4 198193.6123 +6369.8430
5 198193.6130 +0.0007
###Markdown
Prediction with model
###Code
# Prediction function to predict next data point
def predict_next(model, known_data):
state_sequence = model.predict(known_data)
prob_next_step = model.transmat_[state_sequence[-1], :]
t1 = model._generate_sample_from_state(np.argmax(prob_next_step))
t1 = 1/(1 + np.exp(-t1)) # sigmoid to get probablity score like resutls
return t1
# Distance functions to measure distance between target and prediction
def hamming_dist(a, b):
return np.count_nonzero(a!=b)
def euclidian_dist(a, b):
return np.linalg.norm(a-b)
t1 = (predict_next(model, train_data[:5]) > 0.5).astype(np.float)
t1
label = train_data[5]
label
euclidian_dist(t1, label)
hamming_dist(t1, label)
###Output
_____no_output_____
###Markdown
Test Case Generations
###Code
test_idx = np.random.randint(5, n_test, size=100)
start_idx = test_idx-5
test_cases = [test_data[start:end] for start, end in zip(start_idx,test_idx)]
test_labels = test_data[test_idx]
print(len(test_cases))
print(test_labels.shape)
dists = []
for label, a_case in zip(test_labels, test_cases):
t1 = (predict_next(model, a_case) > 0.5).astype(np.float)
e_dist = euclidian_dist(t1, label)
h_dist = hamming_dist(t1, label)
dists.append([e_dist, h_dist])
print('prediction done')
%matplotlib inline
import matplotlib.pyplot as plt
dists = np.array(dists)
print('Average distances:')
d_means = np.mean(dists, axis=0)
print('Euclidian:', d_means[0])
print('Hamming :', d_means[1])
plt.plot(dists)
###Output
_____no_output_____ |
metadataset-notebooks/Figure3-LowLevel.ipynb | ###Markdown
Set up the Model
###Code
# Get the model
model_ft = models.squeezenet1_0(pretrained=True)
# Model to test
model_bottom, model_top = get_model_parts(model_ft, args.model_name)
model_bottom.eval()
model_top.eval()
model_bottom = model_bottom.to(args.device)
model_top = model_top.to(args.device)
###Output
_____no_output_____
###Markdown
Evaluation Methods
###Code
response = requests.get("https://git.io/JJkYN")
class_labels = response.text.split("\n")
def rgb_to_hsv(rgb):
# Translated from source of colorsys.rgb_to_hsv
# r,g,b should be a numpy arrays with values between 0 and 255
# rgb_to_hsv returns an array of floats between 0.0 and 1.0.
rgb = rgb.astype('float')
hsv = np.zeros_like(rgb)
# in case an RGBA array was passed, just copy the A channel
hsv[..., 3:] = rgb[..., 3:]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
maxc = np.max(rgb[..., :3], axis=-1)
minc = np.min(rgb[..., :3], axis=-1)
hsv[..., 2] = maxc
mask = maxc != minc
hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
rc = np.zeros_like(r)
gc = np.zeros_like(g)
bc = np.zeros_like(b)
rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
hsv[..., 0] = np.select(
[r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
return hsv
def hsv_to_rgb(hsv):
# Translated from source of colorsys.hsv_to_rgb
# h,s should be a numpy arrays with values between 0.0 and 1.0
# v should be a numpy array with values between 0.0 and 255.0
# hsv_to_rgb returns an array of uints between 0 and 255.
rgb = np.empty_like(hsv)
rgb[..., 3:] = hsv[..., 3:]
h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
i = (h * 6.0).astype('uint8')
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]
rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)
rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)
rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)
return rgb.astype('uint8')
def shift_hue(arr,hout):
hsv=rgb_to_hsv(arr)
hsv[...,0]=hout
rgb=hsv_to_rgb(hsv)
return rgb
def colorize(image, hue):
arr = np.array(image)
arr_ = shift_hue(arr, hue)
image_ = Image.fromarray(arr_)
return image_
def get_concept_scores_mv_valid(tensor, labels, concept_bank, model_bottom, model_top,
alpha=1e-4, beta=1e-4, n_steps=100,
lr=1e-1, momentum=0.9, enforce_validity=True):
max_margins = concept_bank.margin_info.max
min_margins = concept_bank.margin_info.min
concept_norms = concept_bank.norms
concept_intercepts = concept_bank.intercepts
concepts = concept_bank.bank
concept_names = concept_bank.concept_names.copy()
device = tensor.device
embedding = model_bottom(tensor)
embedding = embedding.detach()
criterion = nn.CrossEntropyLoss()
W = nn.Parameter(torch.zeros(1, concepts.shape[0], device=device), requires_grad=True)
# Normalize the concept vectors
normalized_C = max_margins * concepts / concept_norms
# Compute the current distance of the sample to decision boundaries of SVMs
margins = (torch.matmul(concepts, embedding.T) + concept_intercepts) / concept_norms
# Computing constraints for the concepts scores
W_clamp_max = (max_margins*concept_norms - concept_intercepts - torch.matmul(concepts, embedding.T))
W_clamp_min = (min_margins*concept_norms - concept_intercepts - torch.matmul(concepts, embedding.T))
W_clamp_max = (W_clamp_max / (max_margins * concept_norms)).detach().T
W_clamp_min = (W_clamp_min / (max_margins * concept_norms)).detach().T
if enforce_validity:
W_clamp_max[(margins > 0).T] = 0.
W_clamp_min[(margins < 0).T] = 0.
optimizer = optim.SGD([W], lr=lr, momentum=momentum)
history = []
es = n_steps
for i in range(n_steps):
optimizer.zero_grad()
new_embedding = embedding + torch.matmul(W, normalized_C)
new_out = model_top(new_embedding)
l1_loss = torch.norm(W, dim=1, p=1)
l2_loss = torch.norm(W, dim=1, p=2)
ce_loss = criterion(new_out, labels)
loss = ce_loss + l1_loss*alpha + l2_loss*beta
#print(loss.item(), ce_loss.item(), l1_loss.item(), l2_loss.item())
loss.backward()
pred = new_out.argmax(dim=1).detach().item()
history.append(f"{pred}, {ce_loss.item()}, {l1_loss.item()}, {l2_loss.item()}, {W[0, 0]}, {W.grad[0, 0]}, {W.sum()}")
optimizer.step()
if enforce_validity:
W_projected = torch.where(W < W_clamp_min, W_clamp_min, W)
W_projected = torch.where(W > W_clamp_max, W_clamp_max, W_projected)
W.data = W_projected
W.grad.zero_()
final_emb = embedding + torch.matmul(W, normalized_C)
W = W[0].detach().cpu().numpy().tolist()
concept_scores = dict()
for i, n in enumerate(concept_names):
concept_scores[n] = W[i]
concept_names = sorted(concept_names, key=concept_scores.get, reverse=True)
new_out, orig_out = model_top(final_emb), model_top(embedding)
if (new_out.argmax(dim=1) == labels):
success = True
else:
success = False
return success, concept_scores, concept_names, np.array(W)
img = PIL.Image.open("green_apple.jpeg").convert("RGB")
img_ = PIL.Image.open("green_apple.jpeg").convert("L").convert("RGB")
preds = []
ces = []
images = []
alphas = np.concatenate([np.linspace(0., 0.3, 16), np.linspace(0.3, 0.6, 4)])
#alphas = np.linspace(0.1, 0.5, 15)
labels = torch.tensor(class_labels.index("Granny Smith")).long().view(1).to(args.device)
for alpha in tqdm(alphas):
average_img = PIL.Image.fromarray(np.array(alpha*np.array(img) + (1-alpha)*np.array(img_), dtype=np.uint8))
images.append(average_img)
tensor = data_transforms(average_img).unsqueeze(0).to(args.device)
success, concept_scores, concept_scores_list, W_old = get_concept_scores_mv_valid(tensor, labels,
concept_bank,
model_bottom, model_top,
alpha=1e-2, beta=1e-1, lr=1e-2)
pred = model_top(model_bottom(tensor)).detach().cpu().numpy()[0, class_labels.index("Granny Smith")]
preds.append(pred)
ces.append(concept_scores['greenness'])
plt.imshow(images[0])
plt.axis("off")
plt.savefig("./paper_figures/fig4_gray0.pdf")
plt.savefig("./paper_figures/fig4_gray0.png")
plt.close()
plt.imshow(images[len(images)//2])
plt.axis("off")
plt.savefig("./paper_figures/fig4_gray_half.png")
plt.savefig("./paper_figures/fig4_gray_half.pdf")
plt.close()
plt.imshow(images[-1])
plt.axis("off")
plt.savefig("./paper_figures/fig4_gray_1.pdf")
plt.savefig("./paper_figures/fig4_gray_1.png")
plt.close()
plt.figure(figsize=[7, 5])
plt.plot(np.linspace(0,1,len(alphas)), np.array(ces)[::-1], marker='o', color='green', label='\'Greenness\' CCE')
plt.plot(np.linspace(0,1,len(alphas)), preds[::-1], marker='o', color='black', label='\'Granny Smith\' prob predicted ')
plt.yticks(fontname='Arial', fontsize=18)
plt.xticks(fontname='Arial', fontsize=16)
plt.xlabel('Degree of perturbation', fontname='Arial', fontsize=18)
plt.legend(prop={'family':'Arial', 'size':16}, loc="upper right")
plt.savefig("./paper_figures/fig4_low_level_img.png")
plt.savefig("./paper_figures/fig4_low_level_img.pdf")
###Output
_____no_output_____
###Markdown
50 Images
###Code
from google_images_download import google_images_download #importing the library
response = google_images_download.googleimagesdownload() #class instantiation
arguments = {"keywords":"granny smith apple","limit":25,"print_urls":True, "size": "medium",
"metadata":True} #creating list of arguments
paths = response.download(arguments) #passing the arguments to the function
print(paths)
paths = [os.path.join("./downloads/granny smith apple", f) for f in os.listdir("./downloads/granny smith apple/")]
ces_scores = []
#img_paths = paths[0]['granny smith apple']
img_paths = paths
labels = torch.tensor(class_labels.index("Granny Smith")).long().view(1).to(args.device)
for path in tqdm(img_paths[2:]):
img = PIL.Image.open(path).convert("RGB")
img_ = PIL.Image.open(path).convert("L").convert("RGB")
#plt.imshow(img_)
ces_img = []
alphas = np.concatenate([np.linspace(0., 0.3, 16), np.linspace(0.3, 0.6, 4)])
for alpha in alphas:
average_img = PIL.Image.fromarray(np.array(alpha*np.array(img) + (1-alpha)*np.array(img_), dtype=np.uint8))
tensor = data_transforms(average_img).unsqueeze(0).to(args.device)
success, concept_scores, concept_scores_list, W_old = get_concept_scores_mv_valid(tensor, labels,
concept_bank,
model_bottom, model_top,
alpha=0., beta=1e-2, lr=1.,
enforce_validity=True)
pred = model_top(model_bottom(tensor)).detach().cpu().numpy()[0, class_labels.index("Granny Smith")]
ces_img.append(concept_scores['greenness'])
ces_scores.append(ces_img)
plt.figure(figsize=[7, 5])
ces_normalized = []
for k in range(len(ces_scores)):
img_ces = np.array(ces_scores[k])
normalized_ces = img_ces
ces_normalized.append(normalized_ces)
plt.plot(np.linspace(0, 1, len(alphas)), np.flip(normalized_ces), color='gray', lw=1)
plt.plot(np.linspace(0, 1, len(alphas)), np.mean(np.flip(np.array(ces_normalized), axis=1), axis=0), color='green', lw=4, marker='o')
plt.yticks(fontname='Arial', fontsize=18)
plt.xticks(fontname='Arial', fontsize=16)
plt.ylabel('Greenness CCE', fontname='Arial', fontsize=18)
plt.xlabel('Degree of perturbation', fontname='Arial', fontsize=18)
plt.savefig("./paper_figures/fig4_low_level.png")
plt.savefig("./paper_figures/fig4_low_level.pdf")
###Output
_____no_output_____ |
code/Heart-disease (2).ipynb | ###Markdown
Heart-disease-Prediction-using-Machine-Learning-Algorithms
###Code
#Here I have imported all the essential libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
%matplotlib inline
sns.set_style("whitegrid")
plt.style.use("ggplot")
#Here I load the dataset from the input folder
df = pd.read_csv("C:/Users/Abhishek Nagrecha/Desktop/input/heart.csv")
df.head()
#this is the shape of the used dataset
df.info()
print( "The shape of the data is:",df.shape)
#To display how many patients have got a heart disease
df.target.value_counts()
# df.sex.value_counts()
###Output
_____no_output_____
###Markdown
Dataset Exploration for better understanding
###Code
df.target.value_counts().plot(kind="bar", color=["red", "blue"])
# Checking for the missing values in the dataset
df.isna().sum()
categorical_val = []
continuous_val = []
for column in df.columns:
print('-------------------------')
print(f"{column} : {df[column].unique()}")
if len(df[column].unique()) <= 10:
categorical_val.append(column)
else:
continuous_val.append(column)
print(categorical_val)
print(continuous_val)
#Here I visualize the categorical features in correlation with having a heart disease
plt.figure(figsize=(20, 20))
for i, column in enumerate(categorical_val, 1):
plt.subplot(3, 3, i)
df[df["target"] == 0][column].hist(bins=35, color='blue', label='Heart Disease = NO', alpha=0.6)
df[df["target"] == 1][column].hist(bins=35, color='red', label='Heart Disease = YES', alpha=0.6)
plt.legend()
plt.xlabel(column)
#Here I visualize the continuous features in correlation with having a heart disease
plt.figure(figsize=(20, 20))
for i, column in enumerate(continuous_val, 1):
plt.subplot(3, 3, i)
df[df["target"] == 0][column].hist(bins=35, color='blue', label='Heart Disease = NO', alpha=0.6)
df[df["target"] == 1][column].hist(bins=35, color='red', label='Heart Disease = YES', alpha=0.6)
plt.legend()
plt.xlabel(column)
###Output
_____no_output_____
###Markdown
Data Pre-processing
###Code
# After exploring the dataset, I observed that I need to convert some
# categorical variables into dummy variables and scale all the values
categorical_val.remove('target')
dataset = pd.get_dummies(df, columns = categorical_val)
dataset.head()
print(df.columns)
print(dataset.columns)
from sklearn.preprocessing import MinMaxScaler
m_sc = MinMaxScaler()
col_to_scale = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak']
dataset[col_to_scale] = m_sc.fit_transform(dataset[col_to_scale])
dataset.head()
###Output
_____no_output_____
###Markdown
Applying machine learning algorithms
###Code
#Here I have specified all the scoring metrics that will be used to evaluate the model's performance
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
print("Train Result:\n================================================")
print(f"Accuracy Score: {accuracy_score(y_train, pred) :.2f}")
print("_______________________________________________")
print("Classification Report:", end='')
print(f"\tPrecision Score: {precision_score(y_train, pred) :.2f}")
print(f"\t\t\tRecall Score: {recall_score(y_train, pred) :.2f}")
print(f"\t\t\tF1 score: {f1_score(y_train, pred) :.2f}")
print("_______________________________________________")
print(f"Confusion Matrix: \n {confusion_matrix(y_train, pred)}\n")
elif train==False:
pred = clf.predict(X_test)
print("Test Result:\n================================================")
print(f"Accuracy Score: {accuracy_score(y_test, pred) :.2f}")
print("_______________________________________________")
print("Classification Report:", end='')
print(f"\tPrecision Score: {precision_score(y_test, pred) :.2f}")
print(f"\t\t\tRecall Score: {recall_score(y_test, pred) :.2f}")
print(f"\t\t\tF1 score: {f1_score(y_test, pred) :.2f}")
print("_______________________________________________")
print(f"Confusion Matrix: \n {confusion_matrix(y_test, pred)}\n")
#Here I split the data in a 70:30 ratio
from sklearn.model_selection import train_test_split
X = dataset.drop('target', axis=1)
y = dataset.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(solver='sag')
log_reg.fit(X_train, y_train)
print_score(log_reg, X_train, y_train, X_test, y_test, train=True)
print_score(log_reg, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, log_reg.predict(X_test))
train_score = accuracy_score(y_train, log_reg.predict(X_train))
results_df = pd.DataFrame(data=[["Logistic Regression", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
###Output
_____no_output_____
###Markdown
K-nearest neighbors
###Code
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier()
knn_classifier.fit(X_train, y_train)
print_score(knn_classifier, X_train, y_train, X_test, y_test, train=True)
print_score(knn_classifier, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, knn_classifier.predict(X_test))
train_score = accuracy_score(y_train, knn_classifier.predict(X_train))
results_df = pd.DataFrame(data=[["K Nearest Neighbor", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
###Output
_____no_output_____
###Markdown
Support Vector Machine
###Code
from sklearn.svm import SVC
svm_model = SVC(kernel='poly', gamma=0.1, C=1.0)
svm_model.fit(X_train, y_train)
print_score(svm_model, X_train, y_train, X_test, y_test, train=True)
print_score(svm_model, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, svm_model.predict(X_test))
train_score = accuracy_score(y_train, svm_model.predict(X_train))
results_df = pd.DataFrame(data=[["Support Vector Machine", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning to get better performances
###Code
#tuning the parameters for Logistic regression here
from sklearn.model_selection import RandomizedSearchCV, StratifiedKFold
from scipy.stats import randint, uniform
hyperparameters = {
    'C': uniform(0.0001, 1000),  # continuous range: C must be strictly positive
'penalty': ['l1', 'l2'],
'max_iter': randint(100, 500),
'class_weight': [{1: 0.5, 0: 0.5}, {1: 0.4, 0: 0.6}, {1: 0.6, 0: 0.4}, {1: 0.7, 0: 0.3}, {1: 0.8, 0: 0.2}]
}
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
log_reg = LogisticRegression()
random_search_cv = RandomizedSearchCV(log_reg, hyperparameters, scoring="accuracy", n_jobs=-1, verbose=1, cv=cv, iid=True)
random_search_cv.fit(X_train, y_train)
random_search_cv.best_estimator_
log_reg = LogisticRegression(C=741, solver='liblinear', penalty='l1',
                             class_weight={0: 0.5, 1: 0.5}, fit_intercept=True,
                             intercept_scaling=1, max_iter=197, multi_class='ovr',
                             n_jobs=None, random_state=None, tol=0.0001,
                             verbose=0, warm_start=False)
log_reg.fit(X_train, y_train)
print_score(log_reg, X_train, y_train, X_test, y_test, train=True)
print_score(log_reg, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, log_reg.predict(X_test))
train_score = accuracy_score(y_train, log_reg.predict(X_train))
tuning_results_df = pd.DataFrame(data=[["Logistic Regression", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
tuning_results_df
#tuning the hyperparameters for K nearest neighbor here
hyperparameters = {'n_neighbors': randint(1, 10),
'leaf_size': randint(1, 8),
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'cityblock']
}
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
knn = KNeighborsClassifier()
random_search_cv = RandomizedSearchCV(knn, hyperparameters, scoring="accuracy", n_jobs=-1, verbose=1, cv=cv, iid=True)
random_search_cv.fit(X_train, y_train)
random_search_cv.best_estimator_
knn_classifier = KNeighborsClassifier(n_neighbors=7,algorithm='auto', leaf_size=1, metric='euclidean',
metric_params=None, p=2,
weights='distance')
knn_classifier.fit(X_train, y_train)
print_score(knn_classifier, X_train, y_train, X_test, y_test, train=True)
print_score(knn_classifier, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, knn_classifier.predict(X_test))
train_score = accuracy_score(y_train, knn_classifier.predict(X_train))
results_df = pd.DataFrame(data=[[" K-nearest Neighbor", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
svm_model = SVC(kernel='rbf', gamma=0.1, C=1.0)
hyperparameters = {
"C": [0.001, 0.01,0.1,0.3,0.5,0.7,1,3,5,7,9],
"gamma": randint(0.01, 1),
'kernel': ['linear', 'rbf', 'poly', 'sigmoid'],
'degree': randint(1, 10)
}
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
svm_random = RandomizedSearchCV(svm_model, hyperparameters, n_jobs=-1, cv=cv, verbose=1, scoring="accuracy")
svm_random.fit(X_train, y_train)
svm_model = SVC(C=5, gamma=0.1, kernel='rbf',cache_size=200, class_weight=None,
coef0=0.0, decision_function_shape='ovr',
degree=3,)
svm_model.fit(X_train, y_train)
print_score(svm_model, X_train, y_train, X_test, y_test, train=True)
print_score(svm_model, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, svm_model.predict(X_test))
train_score = accuracy_score(y_train, svm_model.predict(X_train))
results_df = pd.DataFrame(data=[["Support Vector Machine", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
###Output
_____no_output_____ |
notebooks/03-classification.ipynb | ###Markdown
Scenario. Quick recap: in this scenario, 1000 spectra were created; 50% of them had a main peak with SNR varying between 1 and 3 dB. In 5% of the spectra, a spurious peak with intensity ranging from 1 to 3 dB was placed at a random position.
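The pickled dataset loaded below was generated elsewhere in this project. Purely as an illustration of the recipe just described, here is a minimal sketch of a comparable toy generator; the background level, peak shape, and SNR-to-amplitude convention are assumptions (only the peak channel of 50 and FWHM of 3 are taken from the detector settings used further down), not the original generation code.

```python
import numpy as np

def make_toy_spectra(n=1000, n_channels=128, peak_channel=50, fwhm=3.0, seed=0):
    """Illustrative only: flat Poisson background plus Gaussian peaks."""
    rng = np.random.default_rng(seed)
    sigma = fwhm / 2.355                       # FWHM -> Gaussian sigma
    channels = np.arange(n_channels)
    background = 100.0                         # assumed mean background counts
    X = rng.poisson(background, size=(n, n_channels)).astype(float)
    y = np.zeros(n, dtype=int)
    for i in range(n):
        if rng.random() < 0.5:                 # 50% of spectra carry the main peak
            amp = 10 ** (rng.uniform(1, 3) / 10) * np.sqrt(background)
            X[i] += amp * np.exp(-(channels - peak_channel) ** 2 / (2 * sigma ** 2))
            y[i] = 1
        if rng.random() < 0.05:                # 5% get a spurious peak at a random channel
            pos = rng.integers(0, n_channels)
            amp = 10 ** (rng.uniform(1, 3) / 10) * np.sqrt(background)
            X[i] += amp * np.exp(-(channels - pos) ** 2 / (2 * sigma ** 2))
    return X, y
```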
###Code
import gzip, pickle  # imports not shown earlier in this capture
X, y, d = pickle.load(gzip.open('../data/artificial.pickle', 'rb'), encoding='latin1')
###Output
_____no_output_____
###Markdown
Model definition: The idea behind each step in the preprocessing phase is explained in the previous notebooks. Here, I just used a pipeline to stitch everything together.
###Code
import preprocessing  # assumed project-local module providing Cwt and ForestSelect
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
model = make_pipeline(preprocessing.Cwt(),                          # Wavelet transform
                      StandardScaler(),
                      preprocessing.ForestSelect(k=10, trees=100),  # Feature selection via Random Forest
                      linear_model.LogisticRegression())
###Output
_____no_output_____
###Markdown
The model was tested against the conventional methods for evaluating peak presence in gamma-ray spectra (Unidentified Second Difference and Library Correlation). I implemented these algorithms inside sklearn Estimator classes, so that they can be evaluated in the same pipeline using the same class methods as the original sklearn estimators. Cross-validation was repeated k-fold with 3 repetitions and k=10. Results: The performance of the ML model was vastly superior to the traditional methods with regard to aROC values, as well as to every other metric (see below).
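The cross-validation loop itself is handled inside `show_results.summary` below; for reference, an equivalent setup built only from scikit-learn primitives would look roughly like this (a sketch that assumes the `model`, `X`, and `y` defined above):

```python
from sklearn.model_selection import RepeatedKFold, cross_val_score

# 3 repetitions of 10-fold cross-validation, scored by area under the ROC curve
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=0)
scores = cross_val_score(model, X, y, cv=cv, scoring='roc_auc', n_jobs=3)
print(f"aROC: {scores.mean():.3f} +/- {scores.std():.3f}")
```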
###Code
import numpy as np
import pandas as pd
import classic_algorithms, show_results  # assumed project-local modules (not shown in this capture)
clf = [(u'ML model', model),
(u'Second difference', classic_algorithms.SecondDifference(channel=50,
fwhm=3,
tol=1)),
(u'Library correlation', classic_algorithms.LibCorNID(channel=50,
sensitivity=0.8,
fwhm=3,
tol=1))]
results = show_results.summary('Peak Classification',
clf,
X,
y,
cv=True,
n_iter=1,
train_sizes=np.linspace(0.05,1.00,50),
n_jobs=3, # avoid hijacking all cpus
learnGraph=False,
rocGraph=True)
pd.DataFrame(results, columns=show_results.columns).set_index("Method")
###Output
_____no_output_____
###Markdown
However, the accuracy values of the traditional methods are far smaller than their respective aROC values. This indicates that, in contrast to the ML model, the methods' default parameters may need some adjustment. Learning curve: The machine learning model's learning efficiency is also remarkable: it achieves almost the minimum validation error after learning from just a handful of spectra.
###Code
import learning_curve  # assumed project-local plotting helper
learning_curve.plot_learning_curve(model, X, y, cv=5, train_sizes=np.linspace(0.05,1,40))
###Output
_____no_output_____ |
analysis/notebooks/iops_test_rockpro64.ipynb | ###Markdown
IOPS Test Report - Experiment Environment: Part 2, SBC-based Experiment

Set Up Parameters

| Parameter | Value |
|---|---|
| Active Range | 75% |
| Pre Condition 1 | SEQ 128KB W |
| TOIO - TC/QD | TC 2 / QD 16 |
| Duration | 2 x User Capacity |
| Purge Method | None |
| Write Cache | Enabled |

Test Parameters

| Parameter | Value |
|---|---|
| Active Range | 75% |
| Test Stimulus 1 | RND |
| Block Size (Outer Loop) | 1024KB, 128KB, 64KB, 32KB, 16KB, 8KB, 4KB, 512B |
| R/W Mix (Inner Loop) | 100/0, 95/5, 65/35, 50/50, 35/65, 5/95, 0/100 |
| TOIO - TC/QD | TC 2 / QD 16 |
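The profiles plotted below were produced by a separate test runner; the sweep implied by the table above is simply a nested loop over block sizes (outer) and R/W mixes (inner). A purely illustrative sketch (none of these names belong to `plot_iops.py`):

```python
block_sizes = ['1024k', '128k', '64k', '32k', '16k', '8k', '4k', '512']
rw_mixes = ['100/0', '95/5', '65/35', '50/50', '35/65', '5/95', '0/100']

for bs in block_sizes:          # outer loop: block size
    for rw in rw_mixes:         # inner loop: read/write mix
        print(f"run random-I/O workload: bs={bs}, rwmix={rw}, threads=2, queue_depth=16")
```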
###Code
%run 'common/plot_iops.py'
profiles_dirname = 'rockpro64/iops/5'
plot_ss_convergence(profiles_dirname)
plot_ss_measurement_window(profiles_dirname)
plot_measurement_window_tabular(profiles_dirname)
plot_measurement_window_2d(profiles_dirname)
plot_measurement_window_3d(profiles_dirname)
###Output
_____no_output_____ |
docs/notebooks/benchmark.ipynb | ###Markdown
Benchmark
###Code
%pip install --upgrade https://github.com/remifan/commplax/archive/master.zip
%pip install --upgrade https://github.com/remifan/LabPtPTm2/archive/master.zip
%pip install --upgrade https://github.com/remifan/gdbp_study/archive/master.zip
import os
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from functools import partial
import matplotlib.pyplot as plt
from commplax import util
from gdbp import gdbp_base as gb, data as gdat, aux
aux.dl_trained_params() # download pre-trained parameters
loc_trained_params = './trained_params/pretrained'
LP = np.array([-4, -2, -1, 0, 1]) # launched power in dBm
# use Pandas.Dataframe to store the results
df_test_res = pd.DataFrame({c: pd.Series(dtype=t) for c, t in {'ChInd': 'int',
'LPdBm': 'float',
'Model': 'str',
'Q': 'float'}.items()})
# check `gdbp_study-master/src/gdbp_base.py` for model definition
def init_models(data: gdat.Input, **kwargs):
''' make CDC and DBP's derivatives
(all methods has trainable R-filter)
cdc: static D-filter, no NLC
dbp: static D-filter, scalar manually optimized NLC factor
fdbp: static D-filter, static N-filter scaled by manually optimized NLC factor
edbp: static D-filter, tap-by-tap optimizable/trainable N-filter
gdbp: tap-by-tap optimizable/trainable D-filter and N-filter
'''
mode = kwargs.get('mode', 'train')
steps = kwargs.get('steps', 3)
dtaps = kwargs.get('dtaps', 261)
ntaps = kwargs.get('ntaps', 41)
rtaps = kwargs.get('rtaps', 61)
xi = kwargs.get('xi', 1.1) # optimal xi for FDBP
fdbp_init = partial(gb.fdbp_init, data.a, steps=steps)
model_init = partial(gb.model_init, data)
comm_conf = {'mode': mode, 'steps': steps, 'dtaps': dtaps, 'rtaps': rtaps} # common configurations
# init. func.| define model structure parameters and some initial values | define static modules | identifier
cdc = model_init({**comm_conf, 'ntaps': 1, 'init_fn': fdbp_init(xi=0.0)}, [('fdbp_0',)], name='CDC')
dbp = model_init({**comm_conf, 'ntaps': 1, 'init_fn': fdbp_init(xi=0.15)}, [('fdbp_0',)], name='DBP')
fdbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [('fdbp_0',)], name='FDBP')
edbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [('fdbp_0', r'DConv_\d')], name='EDBP')
gdbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [], name='GDBP')
return cdc, dbp, fdbp, edbp, gdbp
def load_data(ch: int):
''' build pairs of datasets for training and testing '''
ds_train = gdat.load(1, LP, ch, 2)
ds_test = gdat.load(2, LP, ch, 1)
return [(ds_tr, ds_te) for ds_tr, ds_te in zip(ds_train, ds_test)]
def sweep_channel(ch: int,
df_test_res=df_test_res,
use_pretrained_params=False,
save_params=False,
save_subdirname='regular_taps',
model_init_kwargs={}):
''' sweep launched power of given channel '''
util.clear_xla_cache() # release JAX's cache to save RAM
# iterate data of target channel
for i, chds in enumerate(tqdm(load_data(ch),
desc=f'sweeping launch power on Ch#{ch}',
leave=False)):
ds_train, ds_test = chds
models_train = init_models(ds_train, **model_init_kwargs)
models_test = init_models(ds_test, mode='test', **model_init_kwargs)
# iterate models
for j, (model_train, model_test) in enumerate(tqdm(zip(models_train, models_test),
desc='iterating models',
leave=False,
total=len(models_train))):
params_file = os.path.join(loc_trained_params,
'snr_vs_lp',
save_subdirname,
'params_%d_%d_%d' % (ch, i, j)) # params_{ch}_{lp}_{mod}
if use_pretrained_params:
params = util.load_variable(params_file)
else:
# use trained params of the 3rd last batch, as tailing samples are corrupted by CD
params_queue = [None] * 3
# iterate the training steps
for _, p, _ in gb.train(model_train, ds_train, n_iter=2000):
params_queue.append(p)
params = params_queue.pop(0)
if save_params:
util.save_variable(params, params_file)
test_Q = gb.test(model_test, params, ds_test)[0].QSq.total
# collect result
df_test_res = df_test_res.append({'ChInd': ch,
'LPdBm': ds_test.a['lpdbm'],
'Model': model_test.name,
'Q': test_Q},
ignore_index=True)
return df_test_res
# it may take a while to finish
kwargs = {'save_subdirname': 'regular_taps',
'use_pretrained_params': True, # use pretrained parameters to save time
'save_params': False} # save trained parameters after training
for ch in tqdm(1 + np.arange(7), desc='sweeping channels'):
df_test_res = sweep_channel(ch, df_test_res, **kwargs)
df_test_res
# save results
df_test_res.to_csv('benchmark_regular_taps.csv', index=False)
###Output
_____no_output_____
###Markdown
Now we visualize the results by manipulating the results table, see [Pandas.Dataframe](https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html) for instructions.
###Code
grp_ch = df_test_res.loc[df_test_res['ChInd'].isin([1, 4])].groupby('ChInd')
for n_ch, g_ch in grp_ch:
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
for n_mod, g_mod in g_ch.groupby('Model', sort=False):
ax.plot(g_mod.LPdBm, g_mod.Q, '-o', label=n_mod)
ax.legend(loc='upper left')
ax.set_title(f'Ch#{n_ch}')
ax.set_xlabel('Launched Power (dBm)')
    ax.set_ylabel('Q-factor (dB)')
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
fmt = ['o', '^', '<', 's', '*']
grp_opt_Q = df_test_res.groupby(['ChInd', 'Model'], as_index=False, sort=False)['Q'] \
.max().groupby('Model', sort=False)
for f, (n, g) in zip(fmt, grp_opt_Q):
ax.plot(g.ChInd, g.Q, f, label=n)
ax.legend()
ax.set_xlabel('channel index')
ax.set_ylabel('Q-factor (dB)')
ax.set_ylim([8.15, 9.5])
ax.set_title(r'Optimal $Q$-factor')
# it may take a while to finish
kwargs = {'save_subdirname': 'few_taps',
'use_pretrained_params': True, # use pretrained parameters to save time
'save_params': False,
'model_init_kwargs': {'dtaps': 221, 'ntaps': 11, 'xi': 0.5}}
df_test_res_ft = sweep_channel(4, df_test_res, **kwargs)
df_test_res_ft
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
for n_mod, g_mod in df_test_res_ft.groupby('Model', sort=False):
ax.plot(g_mod.LPdBm, g_mod.Q, '-o', label=n_mod)
ax.legend(loc='upper left')
ax.set_title(f'Ch4 (fewer taps)')
ax.set_xlabel('Launched Power (dBm)')
ax.set_ylabel('Q-factor (dB)')
###Output
_____no_output_____ |
drug_data/drug_poincare_embed.ipynb | ###Markdown
Helping functions used to parse the XML drug database
###Code
import itertools
import re
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from gensim.models.poincare import PoincareModel
from sklearn.decomposition import PCA

def build_drug_dataframe(root):
names = []
atc_level_five = []
for drug in list(root):
atc_codes = drug.find('{http://www.drugbank.ca}atc-codes')
name = drug.find('{http://www.drugbank.ca}name').text
# hold ATC values collected for each drug
for elem in atc_codes.iter():
try:
atc_five = re.search('[A-Z][0-9]{2}[A-Z]{2}[0-9]{2}', elem.attrib['code'])
if atc_five != None:
atc_level_five.append(atc_five.group(0))
names.append(name)
except KeyError:
pass
return pd.DataFrame({'Name':names, 'ATC5':atc_level_five})
def build_transitive_closure(df):
edges = []
drug_names = df['Name'].unique()
# iterate through drugs
for drug in drug_names:
codes = df[df['Name'] == drug]
# build root to branch verticies for each ACT (asymmetric)
for atc in codes['ATC5']:
edges.append(('DRUG', atc[0]))
edges.append((atc[0], atc[0:3]))
edges.append((atc[0:3], atc[0:4]))
edges.append((atc[0:4], atc[0:5]))
edges.append((atc[0:5], atc[0:]))
# if a drug has multiple ATC codes link them together
if len(codes) > 1:
edges + list(itertools.permutations(codes['ATC5'], 2))
return edges
###Output
_____no_output_____
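To make the edge construction concrete, this is what `build_transitive_closure` returns for a single (made-up) drug carrying one level-5 ATC code; 'B01AE02' is the same code queried in the similarity check further down.

```python
# Illustrative usage only: a one-row toy frame, not DrugBank data.
toy = pd.DataFrame({'Name': ['ExampleDrug'], 'ATC5': ['B01AE02']})
print(build_transitive_closure(toy))
# [('DRUG', 'B'), ('B', 'B01'), ('B01', 'B01A'), ('B01A', 'B01AE'), ('B01AE', 'B01AE02')]
```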
###Markdown
Collect ATC drug codes and build graph
###Code
# TODO: Pull data from S3 instead of local path
path = r'C:\Users\bwesterber\Desktop\Capstone\full database.xml'
tree = ET.parse(path)
root = tree.getroot()
# build dataframe with drug name and ATC codes
df = build_drug_dataframe(root)
# create all edge/vertex pairs
edges = build_transitive_closure(df)
# fit poincare embedding
model = PoincareModel(edges, burn_in = 10, negative = 10, alpha = 1)
model.train(epochs = 50)
# apply embeddings
drug_names = list(df['ATC5'].unique())
vectors = []
drugs = []
for drug in drug_names:
vectors.append(model.kv.word_vec(drug))
drugs.append(drug)
df_embedded = pd.DataFrame(vectors)
# visualize embedding with PCA
pca = PCA(n_components = 2)
pca.fit(df_embedded)
df_transformed_pca = pd.DataFrame(pca.transform(df_embedded))
ATC1 = [x[0] for x in drugs]
df_transformed_pca['ATC1'] = ATC1
for drug_class in set(ATC1):
sample = df_transformed_pca[df_transformed_pca['ATC1'] == drug_class]
plt.scatter(sample[0], sample[1])
plt.title('Poincare Drug Embedding')
plt.xlabel('PC1 ({} %)'.format(np.round(pca.explained_variance_ratio_[0]*100), 2))
plt.ylabel('PC2 ({} %)'.format(np.round(pca.explained_variance_ratio_[1]*100), 2))
# evaluate embedding
print(model.kv.most_similar('B01AE02', topn = 10))
###Output
[('B01AE06', 0.4483974012456041), ('B01AE01', 0.547536720460752), ('B01AE05', 0.9630397152789056), ('B01A', 1.0778636844621843), ('B01AC06', 1.1342419655153235), ('B01AC07', 1.1441062888574294), ('B01AC19', 1.2042339489237033), ('B01AE', 1.242976217592917), ('B01AC27', 1.2805842113465207), ('B01AC26', 1.3721767546315884)]
|
CMA_score.ipynb | ###Markdown
CMA scoreMathieu Bourdenx - Oct. 2020
###Code
import numpy as np
import pandas as pd
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
import scanpy as sc
# Load cluster ID (from Seurat)
clusters = pd.read_excel('../../cluster_identity.xlsx')
###Output
_____no_output_____
###Markdown
Load datasets (very very long - 1hr+)
###Code
#Load full dataset
adata = sc.read_loom('../../cx_rnaAssay.loom')
# Load small dataset containing cell metadata
small_adata = sc.read_loom('./cx_integratedAssay.loom')
# Copy metadata
adata.obs = small_adata.obs
# Create a barcode dataframe
barcode = small_adata.obs
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
# Normalize counts as CPM
sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e6)
# Log transform data
sc.pp.log1p(adata)
###Output
_____no_output_____
###Markdown
CMA score calculation
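Per cell c, the score computed in the next cells is sum_g(z[g, c] * direction[g] * weight[g]) / sum_g(weight[g]). The explicit per-cell loop below is easy to follow but slow on tens of thousands of cells; an equivalent vectorized sketch, assuming the `model_matrix`, `cma_data_zs` (genes x cells, z-scored) and `barcode` objects built in the surrounding cells:

```python
# Same result as the per-cell loop below, expressed as one matrix product.
signed_w = (model_matrix['Direction'] * model_matrix['Weight']).to_numpy()
scores = np.asarray(cma_data_zs).T @ signed_w / model_matrix['Weight'].sum()
barcode['score'] = scores
```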
###Code
# Load matrix file with weight and direction
model_matrix = pd.read_excel('./activation_model.xlsx')
cma_network = adata[:, model_matrix['Gene name Ms']]
cma_data_zs = cma_network.copy().X.todense().T
for i in tqdm(np.arange(cma_data_zs.shape[0])):
µ = np.mean(cma_data_zs[i, :])
sd = np.std(cma_data_zs[i, :])
cma_data_zs[i, :] = (cma_data_zs[i, :] - µ)/sd
for i,j in tqdm(enumerate(barcode.index)):
cell_matrix = model_matrix.copy()
for g in cell_matrix.index:
cell_matrix.loc[g, 'gene_count'] = cma_data_zs[g, i]
cell_matrix['gene_score'] = cell_matrix['gene_count'] * cell_matrix['Direction'] * cell_matrix['Weight']
score = cell_matrix['gene_score'].sum()/np.sum(cell_matrix['Weight'])
barcode.loc[j, 'score'] = score
for i in tqdm(barcode.index):
barcode.loc[i, 'broad.cell.type'] = clusters.loc[int(barcode.loc[i, 'seurat_clusters']), 'broad.cell.type']
plt.figure(figsize=(12, 6))
sns.barplot(data=barcode, x="broad.cell.type", y='score', hue='Condition')
# Calculation of net score to WT 2m
for maj_cell in tqdm(np.unique(barcode['broad.cell.type'])):
µ = np.mean(barcode[barcode['broad.cell.type'] == maj_cell][barcode['Condition'] == 'CX_WT_2m']['score'])
for cell_index in barcode[barcode['broad.cell.type'] == maj_cell].index:
barcode.loc[cell_index, 'net_score_group'] = barcode.loc[cell_index, 'score'] - µ
# Create a new age category to align 6 and 8m
for i in tqdm(barcode.index):
if barcode.loc[i, 'Age'] == '2m':
barcode.loc[i, 'new_age'] = '2m'
elif barcode.loc[i, 'Age'] == '6m':
barcode.loc[i, 'new_age'] = '8m'
elif barcode.loc[i, 'Age'] == '8m':
barcode.loc[i, 'new_age'] = '8m'
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "excit."], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/excitatory_net.png', dpi=300)
plt.show()
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "inhib."], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/inhib_net.png', dpi=300)
plt.show()
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "astro."], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/astro_net.png', dpi=300)
plt.show()
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "microglia"], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/microglia_net.png', dpi=300)
plt.show()
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "oligo."], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/oligo_net.png', dpi=300)
plt.show()
###Output
_____no_output_____
###Markdown
Plots with 3 groups
###Code
def make_plots(cellpop):
fig, ax = plt.subplots(figsize=(4,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == cellpop], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO", 'PD'])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
plt.legend(bbox_to_anchor=(1,1))
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots_3groups/{}_netscore.png'.format(cellpop), dpi=300)
plt.show()
cell_to_plot = ['excit.', "oligo.", 'astro.', 'microglia', 'inhib.', 'OPCs']
for i in cell_to_plot:
make_plots(cellpop=i)
###Output
_____no_output_____
###Markdown
CMA component heatmap *Trial function on excitatory neurons*
###Code
neuron_matrix = np.zeros((18, 6))
wt_2m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'WT'][barcode['new_age'] == '2m'].index)
wt_8m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'WT'][barcode['new_age'] == '8m'].index)
ko_2m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'L2AKO'][barcode['new_age'] == '2m'].index)
ko_8m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'L2AKO'][barcode['new_age'] == '8m'].index)
pd_2m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'PD'][barcode['new_age'] == '2m'].index)
pd_8m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'PD'][barcode['new_age'] == '8m'].index)
for rank in tqdm(np.arange(neuron_matrix.shape[0])):
neuron_matrix[rank, 0] = np.mean(cma_network[wt_2m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 1] = np.mean(cma_network[wt_8m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 2] = np.mean(cma_network[ko_2m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 3] = np.mean(cma_network[ko_8m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 4] = np.mean(cma_network[pd_2m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 5] = np.mean(cma_network[pd_8m_index, :].X.todense()[:, rank])
neuron_matrix_zs = neuron_matrix.copy()
for i in np.arange(neuron_matrix_zs.shape[0]):
µ = np.mean(neuron_matrix_zs[i, :])
sd = np.std(neuron_matrix_zs[i, :])
neuron_matrix_zs[i, :] = (neuron_matrix_zs[i, :] - µ) / sd
plt.figure(figsize=(6, 6))
plt.imshow(neuron_matrix_zs, cmap='viridis', vmin=-1, vmax=1)
plt.colorbar(shrink=.5, label='Gene z score')
plt.yticks(np.arange(18), model_matrix['Gene name'])
plt.ylim(17.5, -0.5)
plt.xticks(np.arange(6), ['WT 2m', 'WT 8m', 'KO 2m', 'KO 8m', 'PD 2m', 'PD 8m'], rotation='vertical')
plt.savefig('./heatmaps_3groups/ex_neurons.png', dpi=300)
plt.savefig('./heatmaps_3groups/ex_neurons.pdf')
plt.show()
neuron_matrix_2group = neuron_matrix[:, :-2]
for i in np.arange(neuron_matrix_2group.shape[0]):
µ = np.mean(neuron_matrix_2group[i, :])
sd = np.std(neuron_matrix_2group[i, :])
neuron_matrix_2group[i, :] = (neuron_matrix_2group[i, :] - µ) / sd
plt.figure(figsize=(6, 6))
plt.imshow(neuron_matrix_2group, cmap='viridis', vmin=-1, vmax=1)
plt.colorbar(shrink=.5, label='Gene z score')
plt.yticks(np.arange(18), model_matrix['Gene name'])
plt.ylim(17.5, -0.5)
plt.xticks(np.arange(4), ['WT 2m', 'WT 8m', 'KO 2m', 'KO 8m'], rotation='vertical')
plt.savefig('./heatmaps/ex_neurons.png', dpi=300)
plt.savefig('./heatmaps/ex_neurons.pdf')
plt.show()
###Output
_____no_output_____
###Markdown
*All cells function*
###Code
def make_heatmaps(cellpop):
# Prepare empty matrix
matrix = np.zeros((18, 6))
#Find cell indices for each condition
wt_2m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'WT'][barcode['new_age'] == '2m'].index)
wt_8m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'WT'][barcode['new_age'] == '8m'].index)
ko_2m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'L2AKO'][barcode['new_age'] == '2m'].index)
ko_8m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'L2AKO'][barcode['new_age'] == '8m'].index)
pd_2m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'PD'][barcode['new_age'] == '2m'].index)
pd_8m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'PD'][barcode['new_age'] == '8m'].index)
#Calculate mean per gene for every condition
for rank in tqdm(np.arange(matrix.shape[0])):
matrix[rank, 0] = np.mean(cma_network[wt_2m_index, :].X.todense()[:, rank])
matrix[rank, 1] = np.mean(cma_network[wt_8m_index, :].X.todense()[:, rank])
matrix[rank, 2] = np.mean(cma_network[ko_2m_index, :].X.todense()[:, rank])
matrix[rank, 3] = np.mean(cma_network[ko_8m_index, :].X.todense()[:, rank])
matrix[rank, 4] = np.mean(cma_network[pd_2m_index, :].X.todense()[:, rank])
matrix[rank, 5] = np.mean(cma_network[pd_8m_index, :].X.todense()[:, rank])
#Perform z-scoring on each row
matrix_zs = matrix.copy()
for i in np.arange(matrix_zs.shape[0]):
µ = np.mean(matrix_zs[i, :])
sd = np.std(matrix_zs[i, :])
matrix_zs[i, :] = (matrix_zs[i, :] - µ) / sd
#Plot heatmap including all conditions
plt.figure(figsize=(6, 6))
plt.imshow(matrix_zs, cmap='viridis', vmin=-1, vmax=1)
plt.colorbar(shrink=.5, label='Gene z score')
plt.yticks(np.arange(18), model_matrix['Gene name'])
plt.ylim(17.5, -0.5)
plt.xticks(np.arange(6), ['WT 2m', 'WT 8m', 'KO 2m', 'KO 8m', 'PD 2m', 'PD 8m'], rotation='vertical')
plt.savefig('./heatmaps_3groups/{}.png'.format(cellpop), dpi=300)
plt.savefig('./heatmaps_3groups/{}.pdf'.format(cellpop))
plt.show()
#Perform z-scoring on only 2 groups
matrix_2group = matrix[:, :-2]
for i in np.arange(matrix_2group.shape[0]):
µ = np.mean(matrix_2group[i, :])
sd = np.std(matrix_2group[i, :])
matrix_2group[i, :] = (matrix_2group[i, :] - µ) / sd
cell_to_plot = ['excit.', "oligo.", 'astro.', 'microglia', 'inhib.', 'OPCs']
for i in cell_to_plot:
make_heatmaps(cellpop=i)
###Output
/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:7: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
import sys
/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:8: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:10: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
# Remove the CWD from sys.path while we load stuff.
/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:11: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
# This is added back by InteractiveShellApp.init_path()
/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:13: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
del sys.path[0]
/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:14: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
0%|          | 0/18 [00:00<?, ?it/s]/opt/anaconda3/lib/python3.7/site-packages/anndata/core/anndata.py:1239: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead
  if is_categorical(df_full[k]):
[... the same anndata FutureWarning repeats for each of the 18 genes; intermediate tqdm progress lines omitted ...]
100%|██████████| 18/18 [00:03<00:00, 5.68it/s]
|
30_3_ttest,_ANOVA,_resampling.ipynb | ###Markdown
###Code
import numpy as np
import pandas as pd
from scipy import stats
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
###Output
_____no_output_____
###Markdown
T-tests: one-sample t-test. First, generate the data from a normal distribution.
###Code
mean = 100
standard_deviation = 18
n_samples = 78
theoretic_standard_error = standard_deviation / np.sqrt(n_samples)
print(f'mean = {mean:.4f},standard error = {theoretic_standard_error:.4f}')
np.random.seed(12345)
samples = np.random.normal(loc = mean, scale = standard_deviation, size = n_samples)
fig,ax = plt.subplots(figsize = (8,4))
_ = ax.hist(samples,bins = 5,alpha = 0.7,color = 'blue')
_ = ax.axvline(samples.mean(),linestyle = '--',color = 'black',alpha = 1.,label = f'mean = {samples.mean():.4f}')
_ = ax.axvspan(samples.mean() - samples.std()/np.sqrt(n_samples),
samples.mean() + samples.std() / np.sqrt(n_samples),
color = 'red', alpha = 0.7,label = f'standard error = {samples.std() / np.sqrt(n_samples):.4f}')
_ = ax.legend()
stats.ttest_1samp(samples,100,)
mean = 106
standard_deviation = 18
n_samples = 78
theoretic_standard_error = standard_deviation / np.sqrt(n_samples)
print(f'mean = {mean:.4f},standard error = {theoretic_standard_error:.4f}')
np.random.seed(12345)
samples = np.random.normal(loc = mean, scale = standard_deviation, size = n_samples)
fig,ax = plt.subplots(figsize = (8,4))
_ = ax.hist(samples,bins = 5,alpha = 0.7,color = 'blue')
_ = ax.axvline(samples.mean(),linestyle = '--',color = 'black',alpha = 1.,label = f'mean = {samples.mean():.4f}')
_ = ax.axvspan(samples.mean() - samples.std()/np.sqrt(n_samples),
samples.mean() + samples.std() / np.sqrt(n_samples),
color = 'red', alpha = 0.7,label = f'standard error = {samples.std() / np.sqrt(n_samples):.4f}')
_ = ax.legend()
stats.ttest_1samp(samples,100,)
###Output
_____no_output_____
###Markdown
Generate from a non-normal distribution: exponential $$P(X \leq x) = 1 - e^{-\lambda x}$$$$E[X] = 1/\lambda$$
###Code
mean = 117
n_samples = 78
np.random.seed(12345)
samples = np.random.exponential(scale = mean, size = n_samples)
fig,ax = plt.subplots(figsize = (8,4))
_ = ax.hist(samples,bins = 5,alpha = 0.7,color = 'blue')
_ = ax.axvline(samples.mean(),linestyle = '--',color = 'black',alpha = 1.,label = f'mean = {samples.mean():.4f}')
_ = ax.axvspan(samples.mean() - samples.std()/np.sqrt(n_samples),
samples.mean() + samples.std() / np.sqrt(n_samples),
color = 'red', alpha = 0.7,label = f'standard error = {samples.std() / np.sqrt(n_samples):.4f}')
_ = ax.legend()
stats.ttest_1samp(samples,100)
###Output
_____no_output_____
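###Markdown
A quick supplementary aside: `np.random.exponential` is parameterised by the scale $1/\lambda$, so `scale = mean` draws samples whose expected value is `mean`. A large sample makes this, and the CDF value $P(X \leq 1/\lambda) = 1 - e^{-1}$, easy to check:
###Code
big_sample = np.random.exponential(scale = 117, size = 100000)
print(f'empirical mean = {big_sample.mean():.2f} (theory: 117)')
print(f'empirical P(X <= 117) = {np.mean(big_sample <= 117):.3f} (theory: {1 - np.exp(-1):.3f})')
###Output
_____no_output_____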
###Markdown
[generate from more distributions if you like](https://docs.scipy.org/doc/numpy-1.14.1/reference/routines.random.html) Paired 2-sample t-test: generate from normal distributions with roughly equal variance; the two groups must have the same sample size.
###Code
sample_mean1 = 0.52
sample_mean2 = 0.75
sample_sd1 = 0.15
sample_sd2 = 0.19
sample_size1 = 30
sample_size2 = 30
np.random.seed(12345)
samples1 = np.random.normal(sample_mean1,sample_sd1,sample_size1)
samples2 = np.random.normal(sample_mean2,sample_sd2,sample_size2)
fig,ax = plt.subplots(figsize = (8,4))
_ = ax.hist(samples1,color = 'red',label='sample1',alpha = 0.4)
_ = ax.hist(samples2,color = 'blue',label='sample2',alpha = 0.4)
_ = ax.axvline(samples1.mean(),linestyle = '--',color = 'red',alpha = 1.,label = f'mean = {samples1.mean():.4f}')
_ = ax.axvspan(samples1.mean() - samples1.std()/np.sqrt(sample_size1),
samples1.mean() + samples1.std() / np.sqrt(sample_size1),
color = 'red', alpha = 0.7,label = f'standard error = {samples1.std() / np.sqrt(sample_size1):.4f}')
_ = ax.axvline(samples2.mean(),linestyle = '--',color = 'blue',alpha = 1.,label = f'mean = {samples2.mean():.4f}')
_ = ax.axvspan(samples2.mean() - samples2.std()/np.sqrt(sample_size2),
samples2.mean() + samples2.std() / np.sqrt(sample_size2),
color = 'blue', alpha = 0.7,label = f'standard error = {samples2.std() / np.sqrt(sample_size2):.4f}')
_ = ax.legend()
stats.ttest_rel(samples1,samples2,)
###Output
_____no_output_____
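###Markdown
A supplementary sanity check: a paired t-test is mathematically the same as a one-sample t-test on the within-pair differences, which is often a helpful way to think about it:
###Code
pair_diffs = samples1 - samples2
print(stats.ttest_rel(samples1, samples2))
print(stats.ttest_1samp(pair_diffs, 0))
###Output
_____no_output_____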
###Markdown
Independent 2-sample t-test: generate from normal distributions with roughly equal variance and unequal sample sizes.
###Code
sample_mean1 = 0.52
sample_mean2 = 0.75
sample_sd1 = 0.15
sample_sd2 = 0.19
sample_size1 = 30
sample_size2 = 60 # only change from above
np.random.seed(12345)
samples1 = np.random.normal(sample_mean1,sample_sd1,sample_size1)
samples2 = np.random.normal(sample_mean2,sample_sd2,sample_size2)
fig,ax = plt.subplots(figsize = (8,4))
_ = ax.hist(samples1,color = 'red',label='sample1',alpha = 0.4)
_ = ax.hist(samples2,color = 'blue',label='sample2',alpha = 0.4)
_ = ax.axvline(samples1.mean(),linestyle = '--',color = 'red',alpha = 1.,label = f'mean = {samples1.mean():.4f}')
_ = ax.axvspan(samples1.mean() - samples1.std()/np.sqrt(sample_size1),
samples1.mean() + samples1.std() / np.sqrt(sample_size1),
color = 'red', alpha = 0.7,label = f'standard error = {samples1.std() / np.sqrt(sample_size1):.4f}')
_ = ax.axvline(samples2.mean(),linestyle = '--',color = 'blue',alpha = 1.,label = f'mean = {samples2.mean():.4f}')
_ = ax.axvspan(samples2.mean() - samples2.std()/np.sqrt(sample_size2),
samples2.mean() + samples2.std() / np.sqrt(sample_size2),
color = 'blue', alpha = 0.7,label = f'standard error = {samples2.std() / np.sqrt(sample_size2):.4f}')
_ = ax.legend()
stats.ttest_ind(samples1,samples2,equal_var = True)
###Output
_____no_output_____
###Markdown
Generate from normal distributions with unequal variance and unequal sample sizes (Welch's t-test).
###Code
sample_mean1 = 0.52
sample_mean2 = 1.03
sample_sd1 = 0.15 * 5
sample_sd2 = 0.15 # unequal variances (sd1 is 5x larger)
sample_size1 = 30
sample_size2 = 60 # unequal sample sizes
np.random.seed(12345)
samples1 = np.random.normal(sample_mean1,sample_sd1,sample_size1)
samples2 = np.random.normal(sample_mean2,sample_sd2,sample_size2)
fig,ax = plt.subplots(figsize = (8,4))
_ = ax.hist(samples1,color = 'red',label='sample1',alpha = 0.4)
_ = ax.hist(samples2,color = 'blue',label='sample2',alpha = 0.4)
_ = ax.axvline(samples1.mean(),linestyle = '--',color = 'red',alpha = 1.,label = f'mean = {samples1.mean():.4f}')
_ = ax.axvspan(samples1.mean() - samples1.std()/np.sqrt(sample_size1),
samples1.mean() + samples1.std() / np.sqrt(sample_size1),
color = 'red', alpha = 0.7,label = f'standard error = {samples1.std() / np.sqrt(sample_size1):.4f}')
_ = ax.axvline(samples2.mean(),linestyle = '--',color = 'blue',alpha = 1.,label = f'mean = {samples2.mean():.4f}')
_ = ax.axvspan(samples2.mean() - samples2.std()/np.sqrt(sample_size2),
samples2.mean() + samples2.std() / np.sqrt(sample_size2),
color = 'blue', alpha = 0.7,label = f'standard error = {samples2.std() / np.sqrt(sample_size2):.4f}')
_ = ax.legend()
stats.ttest_ind(samples1,samples2,equal_var = False)
###Output
_____no_output_____
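###Markdown
A small supplementary comparison: with unequal variances and unequal sample sizes, the pooled-variance (Student) and Welch versions of the test can disagree noticeably on the same data:
###Code
print('Student, equal_var=True :', stats.ttest_ind(samples1, samples2, equal_var = True))
print('Welch, equal_var=False:', stats.ttest_ind(samples1, samples2, equal_var = False))
###Output
_____no_output_____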
###Markdown
ANOVA: one-way ANOVA
###Code
from sklearn.datasets import load_iris
data,groups = load_iris(return_X_y=True)
print(data[:,0],groups)
df = pd.DataFrame(np.vstack([data[:,0],groups]).T,columns = ['measures','groups'])
df
df['x'] = 0
sns.barplot(x = 'x',
y = 'measures',
hue = 'groups',
data = df,
)
df[df['groups'] == 0]['measures'].values
stats.f_oneway(df[df['groups'] == 0]['measures'].values,
df[df['groups'] == 1]['measures'].values,
df[df['groups'] == 2]['measures'].values,)
import statsmodels.api as sm
from statsmodels.formula.api import ols
mod = ols('measures ~ C(groups)',
data=df).fit()
aov_table = sm.stats.anova_lm(mod, typ=2)
esq_sm = aov_table['sum_sq'][0]/(aov_table['sum_sq'][0]+aov_table['sum_sq'][1])
aov_table['EtaSq'] = [esq_sm, 'NaN']
print(aov_table)
###Output
sum_sq df F PR(>F) EtaSq
C(groups) 63.212133 2.0 119.264502 1.669669e-31 0.618706
Residual 38.956200 147.0 NaN NaN NaN
###Markdown
multiple comparison
###Code
pair_t = mod.t_test_pairwise('C(groups)')
print(pair_t.result_frame)
###Output
coef std err t ... Conf. Int. Upp. pvalue-hs reject-hs
1.0-0.0 0.930 0.102958 9.032819 ... 1.133469 1.776357e-15 True
2.0-0.0 1.582 0.102958 15.365506 ... 1.785469 0.000000e+00 True
2.0-1.0 0.652 0.102958 6.332686 ... 0.855469 2.765638e-09 True
[3 rows x 8 columns]
###Markdown
2-way ANOVA
###Code
df = pd.read_csv("https://raw.githubusercontent.com/Opensourcefordatascience/Data-sets/master/crop_yield.csv")
df.head()
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels.stats.multicomp
"Yield ~ C(Fert) + C(Water) + C(Fert):C(Water)"
model = ols('Yield ~ C(Fert)*C(Water)', df).fit() # with interaction
print(f"Overall model F({model.df_model: .0f},{model.df_resid: .0f}) = {model.fvalue: .3f}, p = {model.f_pvalue: .4f}")
print()
print(model.summary())
res = sm.stats.anova_lm(model, typ= 2)
print(res)
model2 = ols('Yield ~ C(Fert)+ C(Water)', df).fit() # without interaction
print(f"Overall model F({model2.df_model: .0f},{model2.df_resid: .0f}) = {model2.fvalue: .3f}, p = {model2.f_pvalue: .4f}")
print()
print(model2.summary())
res2 = sm.stats.anova_lm(model2, typ= 2)
print(res2)
###Output
Overall model F( 2, 17) = 5.430, p = 0.0150
OLS Regression Results
==============================================================================
Dep. Variable: Yield R-squared: 0.390
Model: OLS Adj. R-squared: 0.318
Method: Least Squares F-statistic: 5.430
Date: Sun, 24 Nov 2019 Prob (F-statistic): 0.0150
Time: 16:52:02 Log-Likelihood: -51.772
No. Observations: 20 AIC: 109.5
Df Residuals: 17 BIC: 112.5
Df Model: 2
Covariance Type: nonrobust
===================================================================================
coef std err t P>|t| [0.025 0.975]
-----------------------------------------------------------------------------------
Intercept 32.6800 1.353 24.153 0.000 29.825 35.535
C(Fert)[T.B] -3.7200 1.562 -2.381 0.029 -7.016 -0.424
C(Water)[T.Low] -3.5600 1.562 -2.279 0.036 -6.856 -0.264
==============================================================================
Omnibus: 1.169 Durbin-Watson: 2.736
Prob(Omnibus): 0.557 Jarque-Bera (JB): 0.820
Skew: -0.081 Prob(JB): 0.664
Kurtosis: 2.022 Cond. No. 3.19
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
sum_sq df F PR(>F)
C(Fert) 69.192 1.0 5.669070 0.029228
C(Water) 63.368 1.0 5.191895 0.035887
Residual 207.488 17.0 NaN NaN
###Markdown
multiple comparison
###Code
mc = statsmodels.stats.multicomp.MultiComparison(df['Yield'], df['Fert'])
mc_results = mc.tukeyhsd()
print(mc_results)
mc = statsmodels.stats.multicomp.MultiComparison(df['Yield'], df['Water'])
mc_results = mc.tukeyhsd()
print(mc_results)
###Output
Multiple Comparison of Means - Tukey HSD, FWER=0.05
===================================================
group1 group2 meandiff p-adj lower upper reject
---------------------------------------------------
High Low -3.56 0.0574 -7.2436 0.1236 False
---------------------------------------------------
###Markdown
Resampling: remember this?
###Code
mean = 117
n_samples = 78
np.random.seed(12345)
samples = np.random.exponential(scale = mean, size = n_samples)
fig,ax = plt.subplots(figsize = (8,4))
_ = ax.hist(samples,bins = 5,alpha = 0.7,color = 'blue')
_ = ax.axvline(samples.mean(),linestyle = '--',color = 'black',alpha = 1.,label = f'mean = {samples.mean():.4f}')
_ = ax.axvspan(samples.mean() - samples.std()/np.sqrt(n_samples),
samples.mean() + samples.std() / np.sqrt(n_samples),
color = 'red', alpha = 0.7,label = f'standard error = {samples.std() / np.sqrt(n_samples):.4f}')
_ = ax.legend()
print(stats.ttest_1samp(samples,100))
n_sim = 1000
results = []
for _ in range(n_sim):
resampled = np.random.choice(samples,
size = n_samples,
replace = True, #
)
resampled_mean = resampled.mean()
results.append(resampled_mean)
results = np.array(results)
fig,ax = plt.subplots(figsize = (8,6))
_ = ax.hist(results,color = 'blue',bins = 25,alpha = 0.4)
_ = ax.axvline(100,linestyle = '--',color = 'black',label = 'null value we compare against (100)')
_ = ax.axvline(mean,linestyle = '--',color = 'red',label = f'true mean = {mean:.4f}')
_ = ax.axvline(results.mean(),linestyle = '--',color = 'blue',label = f'average of resampled means = {results.mean():.4f}')
_ = ax.axvline(samples.mean(),linestyle = '--',color = 'yellow',label = f'sample mean = {samples.mean():.4f}')
_ = ax.legend()
p = (np.sum(results < mean) + 1) / (n_sim + 1)
p / 2
###Output
_____no_output_____
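###Markdown
The same resampled means can also be summarised as a percentile bootstrap confidence interval, which is often easier to interpret than a p-value. A minimal supplementary sketch using the `results` array computed above:
###Code
ci_low, ci_high = np.percentile(results, [2.5, 97.5])
print(f'95% percentile bootstrap CI for the mean: [{ci_low:.4f}, {ci_high:.4f}]')
print('is the null value 100 inside the CI?', ci_low <= 100 <= ci_high)
###Output
_____no_output_____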
###Markdown
and this?
###Code
sample_mean1 = 0.52
sample_mean2 = 1.03
sample_sd1 = 0.15 * 5
sample_sd2 = 0.15 #
sample_size1 = 30
sample_size2 = 60 #
np.random.seed(12345)
samples1 = np.random.normal(sample_mean1,sample_sd1,sample_size1)
samples2 = np.random.normal(sample_mean2,sample_sd2,sample_size2)
fig,ax = plt.subplots(figsize = (8,4))
_ = ax.hist(samples1,color = 'red',label='sample1',alpha = 0.4)
_ = ax.hist(samples2,color = 'blue',label='sample2',alpha = 0.4)
_ = ax.axvline(samples1.mean(),linestyle = '--',color = 'red',alpha = 1.,label = f'mean = {samples1.mean():.4f}')
_ = ax.axvspan(samples1.mean() - samples1.std()/np.sqrt(sample_size1),
samples1.mean() + samples1.std() / np.sqrt(sample_size1),
color = 'red', alpha = 0.7,label = f'standard error = {samples1.std() / np.sqrt(sample_size1):.4f}')
_ = ax.axvline(samples2.mean(),linestyle = '--',color = 'blue',alpha = 1.,label = f'mean = {samples2.mean():.4f}')
_ = ax.axvspan(samples2.mean() - samples2.std()/np.sqrt(sample_size2),
samples2.mean() + samples2.std() / np.sqrt(sample_size2),
color = 'blue', alpha = 0.7,label = f'standard error = {samples2.std() / np.sqrt(sample_size2):.4f}')
_ = ax.legend()
print(stats.ttest_ind(samples1,samples2,equal_var=False))
np.random.seed(12345)
diffs = []
for _ in range(100):
group1_ = np.random.choice(samples1,size = 1000, replace = True)
group2_ = np.random.choice(samples2,size = 1000, replace = True)
diff = group1_.mean() - group2_.mean()
diffs.append(diff)
diffs = np.array(diffs)
plt.hist(diffs)
mean_difference_between_samples = samples1.mean() - samples2.mean()
pool = np.concatenate([samples1,samples2])
np.random.seed(12345)
results = []
for _ in range(n_sim):
np.random.shuffle(pool)
new_samples1 = pool[:samples1.shape[0]]
new_samples2 = pool[samples1.shape[0]:]
new_mean_difference_between_samples = new_samples1.mean() - new_samples2.mean()
results.append(new_mean_difference_between_samples)
results = np.array(results)
fig,ax = plt.subplots(figsize = (8,6))
_ = ax.hist(results,bins = 25,alpha = 0.4,color = 'blue')
_ = ax.axvline(mean_difference_between_samples,linestyle = '--', color = 'black',label = 'experiment difference')
_ = ax.legend()
p = (np.sum(results < mean_difference_between_samples) + 1) / (n_sim + 1)
p / 2
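# Supplementary note: the conventional two-sided permutation p-value counts shuffled
# differences at least as extreme, in absolute value, as the observed difference.
p_two_sided = (np.sum(np.abs(results) >= np.abs(mean_difference_between_samples)) + 1) / (n_sim + 1)
print(p_two_sided)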
np.random.seed(12345)
responses = np.random.choice([0,1],size = 25, )
corrects = np.random.choice([0,1],size = 25)
performance = np.sum(responses == corrects) / len(responses)
print(performance)
import pandas as pd
df = pd.DataFrame(np.vstack([responses,corrects]).T,columns = ['response','corrects'])
df
np.arange(len(responses))
range(len(responses))
res = []
for _ in range(100):
idx = np.random.choice(np.arange(len(responses)),size = 1000, replace = True)
reps = responses[idx]
corr = corrects[idx]
res.append( np.sum(reps == corr) / 1000)
res = np.array(res)
plt.hist(res,alpha = 0.3)
plt.axvline(res.mean())
###Output
_____no_output_____ |
02 - First Steps/0205 - The Interfaces of matplotlib.ipynb | ###Markdown
IntroductionWhen I first started learning matplotlib, it seemed as if there was an infinite number of ways to do the same set of tasks. Searching for tutorials could present you with a collection of lessons, each achieving roughly the same goal, but doing so in a slightly different manner each time. I was being productive with matplotlib, but I didn't feel like I was getting any closer to really understanding how the library worked. The reason for my uneasiness was largely due to the fact that matplotlib has three different interfaces to choose from, each with its own set of pros and cons and special use cases.In this lesson, we'll discuss the reason for the existence of each interface. We'll learn how to choose the right interface for the job. And, finally, we'll see an example of each interface in action.Personally, I feel it's easiest to work from the top to the bottom, so we'll work our way inward from the interface that offers the highest-level of abstraction to the lowest. With that in mind, we'll begin by exploring the pylab interface. pylabIf you remember at the beginning of the course, I mentioned that matplotlib was originally created to make Python a viable alternative to Matlab. Given this goal, the author, John Hunter, set out to create an interface that would very closely match that of the Matlab language. The interface he created was called pylab and it provided a nearly one-to-one mapping of the procedurally-based, and stateful, Matlab interface. The major benefits to this interface is that it made it possible for Matlab devotees to make the switch to Python with relative ease. Though the interface has since been deprecated in favor of the pyplot interface, given that it puts everything you need right at your fingertips, and is less verbose than the other interfaces, I would argue that if you want to just pop into a python interpreter and do a quick "one off", interactive EDA session, it's still a good fit for the job.The main problem, however, with the pylab interface is that it imports everything into the global namespace. This can cause issues with other user defined, or imported, functions eclipsing matplotlib functionality. It also obscures your code since it's not immediately obvious whether a function call comes from matplotlib or, for example, its dependent library, NumPy. For this reason, the pyplot module is now considered to be the canonical way to interactively explore data with matplotlib. pyplotThe idea behind the pyplot interface is that, even though the approach taken by pylab doesn’t follow good software engineering practices, users, nonetheless, still need a lightweight way to interact with matplotlib. The difference between pylab and pyplot is that pylab imports everything it uses into the global namespace making everything seem a bit “magical”, whereas pyplot makes it explicit where each function used in a script comes from. The pyplot approach leads to much easier to understand, and therefore, more maintainable code. As such, the pyplot interface is the preferred way to interactively explore a data set, and is now the interface used in the majority of tutorials that you'll find online. Also, just recently, the matplotlib documentation was overhauled and now, pretty consistently, uses pyplot everywhere.Where the pyplot interface breaks down, however, is when you need more control over how your plots are created. 
pyplot provides a state-machine-like interface that purposefully obscures away the details of what classes are being instantiated and which instances are being modified with each function call. This is great when doing exploratory data analysis, but can be a bit limiting when writing scripts to process large amounts of data, or when embedding matplotlib into an application. In either of these cases, you'll need to drop down into matplotlib's object-oriented API. The Object-Oriented API: The pylab and pyplot interfaces are simply lightweight abstractions built atop matplotlib's set of classes for creating graphics. Calling a function like `plot` from either interface will first check for existing objects to modify, and then create them as needed. If you need more control over when classes are instantiated and how they're modified, however, then you're going to need to use the object-oriented API. Examples: Now that you understand the impetus behind each interface, its pros and cons, and when to use it, it's time to get a little taste of each one in action. We'll start with the Object-Oriented API and work our way up to the highest level of abstraction so you can easily see what each layer adds to the previous one. Now, one note before we continue: you can safely ignore the code in this first cell; it's here mainly just to make sure that our plots look consistent across each example.
###Code
%matplotlib inline
# Tweaking the 'inline' config a bit to make sure each bit of
# code below displays the same plot.
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = [4, 4]
mpl.rcParams['figure.subplot.left'] = 0
mpl.rcParams['figure.subplot.bottom'] = 0
mpl.rcParams['figure.subplot.right'] = 1
mpl.rcParams['figure.subplot.top'] = 1
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('retina')
# The current version of NumPy available from conda is issuing a warning
# message that some behavior will change in the future when used with the
# current version of matplotlib available from conda. This cell just keeps
# that warning from being displayed.
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
###Output
_____no_output_____
###Markdown
Object-Oriented API
###Code
from IPython.display import display_png
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import numpy as np
# Define the size of the figure to prevent the spread
# of the data from looking eliptical
fig = Figure(figsize=(5, 5))
# We've chosen the Agg canvas to render PNG output
canvas = FigureCanvasAgg(fig)
# Generate some radom (normally distributed) data using the NumPy library
x = np.random.randn(1000)
y = np.random.randn(1000)
# Create a new Axes object using the subplot function from the Figure object
ax = fig.add_subplot(111)
# Set the x and y axis limits to 4 standard deviations from the mean
ax.set_xlim([-4, 4])
ax.set_ylim([-4, 4])
# Call the Axes method hist to generate the histogram; hist creates a
# sequence of Rectangle artists for each histogram bar and adds them
# to the Axes container. Here "100" means create 100 bins.
#ax.hist(x, 100)
ax.scatter(x, y)
# Decorate the figure with a title and save it.
ax.set_title('Normally distributed data with $\mu=0, \sigma=1$')
# Display the figure as PNG
display_png(fig);
###Output
_____no_output_____
###Markdown
The Scripting Interface (pyplot)
###Code
import matplotlib.pyplot as plt
import numpy as np
x = np.random.randn(1000)
y = np.random.randn(1000)
# The creation of Figure and Axes objects is taken care of for us
plt.figure(figsize=(5, 5))
plt.scatter(x, y)
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.title('Normally distributed data with $\mu=0, \sigma=1$');
###Output
_____no_output_____
###Markdown
The MATLAB Interface (pylab)
###Code
from pylab import *
# Even functions from the inner modules of NumPy are
# made to be global
x = randn(1000)
y = randn(1000)
figure(figsize=(5, 5))
scatter(x, y)
xlim(-4, 4)
ylim(-4, 4)
title('Normally distributed data with $\mu=0, \sigma=1$');
###Output
_____no_output_____ |
courses/calculus/derivatives.ipynb | ###Markdown
$$\frac{\partial}{\partial x} ||x||_2^2 = 2x, x \in \mathbb{R}^n$$
###Code
import numpy as np
import torch
import metrics # assumed to be a local helper module providing tdist (distance between two arrays)

x = np.random.randn(14)
tx = torch.tensor(x, requires_grad=True)
y = x@x
ty = torch.dot(tx, tx)
ty.backward()
print(y)
print(ty.data.numpy())
print(metrics.tdist(y, ty.data.numpy()))
dx = 2 * x
dx_sol = tx.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
###Output
[ 0.94597166 -1.36285176 0.48487899 -3.40147127 1.50628567 -3.06944268
0.01025416 -0.24045534 -1.61396376 5.74363879 -1.19564584 0.94491399
2.19191224 -2.4303376 ]
[ 0.94597166 -1.36285176 0.48487899 -3.40147127 1.50628567 -3.06944268
0.01025416 -0.24045534 -1.61396376 5.74363879 -1.19564584 0.94491399
2.19191224 -2.4303376 ]
0.0
###Markdown
$$\frac{\partial}{\partial x} ||x||_1 = sign(x), x \in \mathbb{R}^n$$
###Code
x = np.random.randn(14)
tx = torch.tensor(x, requires_grad=True)
y = np.linalg.norm(x, ord=1)
ty = torch.norm(tx, p=1)
ty.backward()
print(y)
print(ty.data.numpy())
print(metrics.tdist(y, ty.data.numpy()))
dx = np.sign(x)
dx_sol = tx.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
###Output
[ 1. -1. 1. -1. -1. 1. 1. 1. 1. 1. -1. -1. -1. -1.]
[ 1. -1. 1. -1. -1. 1. 1. 1. 1. 1. -1. -1. -1. -1.]
0.0
###Markdown
$$\frac{\partial}{\partial x} \sum_{i=1}^n x_i = \mathbb{1}, x \in \mathbb{R}^n$$
###Code
x = np.random.randn(14)
tx = torch.tensor(x, requires_grad=True)
y = np.sum(x)
ty = torch.sum(tx)
ty.backward()
print(y)
print(ty.data.numpy())
print(metrics.tdist(y, ty.data.numpy()))
dx = np.ones((x.shape[0]))
dx_sol = tx.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
###Output
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
0.0
###Markdown
$$x, y \in \mathbb{R}^n$$$$\frac{\partial x^Ty}{\partial x} = y$$$$\frac{\partial x^Ty}{\partial y} = x$$
###Code
x = np.random.randn(14)
y = np.random.randn(14)
tx = torch.tensor(x, requires_grad=True)
ty = torch.tensor(y, requires_grad=True)
z = x @ y
tz = torch.dot(tx, ty)
tz.backward()
print(z)
print(tz.data.numpy())
print(metrics.tdist(z, tz.data.numpy()))
dx = y
dx_sol = tx.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
dy = x
dy_sol = ty.grad.data.numpy()
print(dy)
print(dy_sol)
print(metrics.tdist(dy, dy_sol))
###Output
[ 0.1597877 -0.71626359 0.05052283 -0.14333741 0.94357539 0.35764423
-0.0834492 0.6778061 0.55606037 0.22271946 -1.52898548 1.02921118
-1.16625876 -1.00956165]
[ 0.1597877 -0.71626359 0.05052283 -0.14333741 0.94357539 0.35764423
-0.0834492 0.6778061 0.55606037 0.22271946 -1.52898548 1.02921118
-1.16625876 -1.00956165]
0.0
[ 1.33583134 0.31866529 -0.33759525 -0.58526828 -0.11491994 2.24181779
-3.14741652 0.53513589 0.23249044 0.86761195 -1.14821271 2.11434424
1.00094276 -0.051415 ]
[ 1.33583134 0.31866529 -0.33759525 -0.58526828 -0.11491994 2.24181779
-3.14741652 0.53513589 0.23249044 0.86761195 -1.14821271 2.11434424
1.00094276 -0.051415 ]
0.0
###Markdown
$$x \in \mathbb{R}^n, \space M \in \mathbb{R}^{n*n} \text{ symmetric}$$$$\frac{\partial x^TMx}{\partial x} = 2Mx$$
###Code
x = np.random.randn(3)
M = np.random.randn(3, 3)
M = M.T @ M
tx = torch.tensor(x, requires_grad=True)
tM = torch.tensor(M, requires_grad=True)
z = x @ M @ x
tz = torch.matmul(torch.matmul(tx, tM), tx)
tz.backward()
dx = 2 * M @ x
print(dx)
print(tx.grad.data.numpy())
print(metrics.tdist(dx, tx.grad.data.numpy()))
###Output
[-6.50427942 17.90265975 21.71056981]
[-6.50427942 17.90265975 21.71056981]
0.0
###Markdown
$$z = c * x, \space x \in \mathbb{R}^n, c \in \mathbb{R}$$$$\frac{\partial E}{\partial x} = \frac{\partial E}{\partial z} * c$$$$\frac{\partial E}{\partial c} = \frac{\partial E}{\partial z}^T x$$
###Code
x = np.random.randn(14)
c = np.array(2.3)
z = c * x
e = z.T @ z
tx = torch.tensor(x, requires_grad=True)
tc = torch.tensor(c, requires_grad=True)
tz = tc * tx
te = torch.dot(tz, tz)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
###Output
69.14040081119921
69.14040081119921
0.0
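###Markdown
The cell above only checks the forward value $E$; a supplementary check of the analytic gradients against autograd, following the same pattern as the other cells:
###Code
dz = 2 * z
dx = dz * c
dc = dz @ x
print(metrics.tdist(dx, tx.grad.data.numpy()))
print(metrics.tdist(dc, tc.grad.data.numpy()))
###Output
_____no_output_____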
###Markdown
$$z = x^Ty, \space x, y \in \mathbb{R}^n, z \in \mathbb{R}$$$$\frac{\partial E}{\partial x} = \frac{\partial E}{\partial z} * y$$$$\frac{\partial E}{\partial y} = \frac{\partial E}{\partial z} * x$$
###Code
x = np.random.randn(14)
y = np.random.randn(14)
z = x @ y
e = z**2
tx = torch.tensor(x, requires_grad=True)
ty = torch.tensor(y, requires_grad=True)
tz = torch.dot(tx, ty)
te = tz**2
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dz = 2 * z
dx = dz * y
dy = dz * x
dx_sol = tx.grad.data.numpy()
dy_sol = ty.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
print(dy)
print(dy_sol)
print(metrics.tdist(dy, dy_sol))
###Output
[ 0.64162316 -0.25024783 0.58195254 0.5249406 0.19211156 -0.27328717
-0.58695596 0.41147809 -0.54707873 -0.54306029 0.64884119 0.62286809
0.01604418 -0.10489789]
[ 0.64162316 -0.25024783 0.58195254 0.5249406 0.19211156 -0.27328717
-0.58695596 0.41147809 -0.54707873 -0.54306029 0.64884119 0.62286809
0.01604418 -0.10489789]
2.311859930211523e-15
[ 0.37152175 -0.61625417 -0.22857998 0.08432851 -0.58186175 -0.50892006
0.00678927 -0.03410433 -0.10686017 -0.24079192 -0.46981833 -0.33330914
1.29597924 -0.26675607]
[ 0.37152175 -0.61625417 -0.22857998 0.08432851 -0.58186175 -0.50892006
0.00678927 -0.03410433 -0.10686017 -0.24079192 -0.46981833 -0.33330914
1.29597924 -0.26675607]
2.442545479692608e-15
###Markdown
$$z = Xy, \space X \in \mathbb{R}^{n*m}, y \in \mathbb{R}^m, z \in \mathbb{R}^n$$$$\frac{\partial E}{\partial X} = \frac{\partial E}{\partial z} y^T$$$$\frac{\partial E}{\partial y} = X^T \frac{\partial E}{\partial z}$$
###Code
X = np.random.randn(7, 3)
y = np.random.randn(3)
z = X @ y
e = z @ z
tX = torch.tensor(X, requires_grad=True)
ty = torch.tensor(y, requires_grad=True)
tz = torch.matmul(tX, ty)
te = torch.dot(tz, tz)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dz = 2 * z
dX = np.outer(dz, y)
dy = X.T @ dz
dX_sol = tX.grad.data.numpy()
dy_sol = ty.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
print(dy)
print(dy_sol)
print(metrics.tdist(dy, dy_sol))
###Output
[[-0.04438874 -0.63716949 -1.34684281]
[-0.02115185 -0.30362016 -0.6417894 ]
[-0.05135561 -0.7371741 -1.55823159]
[ 0.02244455 0.32217592 0.6810124 ]
[-0.15305772 -2.19703727 -4.64407646]
[ 0.12999258 1.86595311 3.94423393]
[ 0.12162297 1.74581325 3.69028343]]
[[-0.04438874 -0.63716949 -1.34684281]
[-0.02115185 -0.30362016 -0.6417894 ]
[-0.05135561 -0.7371741 -1.55823159]
[ 0.02244455 0.32217592 0.6810124 ]
[-0.15305772 -2.19703727 -4.64407646]
[ 0.12999258 1.86595311 3.94423393]
[ 0.12162297 1.74581325 3.69028343]]
0.0
[-6.97979932 9.63875043 5.67732131]
[-6.97979932 9.63875043 5.67732131]
1.7763568394002505e-15
###Markdown
$$z = y^TX, \space X \in \mathbb{R}^{n*m}, y \in \mathbb{R}^n, z \in \mathbb{R}^m$$$$\frac{\partial E}{\partial X} = y \frac{\partial E}{\partial z}^T$$$$\frac{\partial E}{\partial y} = X \frac{\partial E}{\partial z}$$
###Code
X = np.random.randn(7, 3)
y = np.random.randn(7)
z = y @ X
e = z @ z
tX = torch.tensor(X, requires_grad=True)
ty = torch.tensor(y, requires_grad=True)
tz = torch.matmul(ty, tX)
te = torch.dot(tz, tz)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dz = 2 * z
dX = np.outer(y, dz)
dy = X @ dz
dX_sol = tX.grad.data.numpy()
dy_sol = ty.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
print(dy)
print(dy_sol)
print(metrics.tdist(dy, dy_sol))
###Output
[[-0.63119366 0.28804921 1.16363545]
[ 3.28079586 -1.49721187 -6.04830275]
[-0.74990752 0.34222502 1.38249007]
[ 0.53283534 -0.24316276 -0.98230721]
[-1.34046736 0.61173073 2.47121515]
[ 0.22914034 -0.10456964 -0.42243109]
[ 1.98750493 -0.9070104 -3.66405959]]
[[-0.63119366 0.28804921 1.16363545]
[ 3.28079586 -1.49721187 -6.04830275]
[-0.74990752 0.34222502 1.38249007]
[ 0.53283534 -0.24316276 -0.98230721]
[-1.34046736 0.61173073 2.47121515]
[ 0.22914034 -0.10456964 -0.42243109]
[ 1.98750493 -0.9070104 -3.66405959]]
2.622130794118103e-16
[-7.06392399 -2.60272907 4.7734634 1.26648965 -0.64150134 6.32889909
-4.70270526]
[-7.06392399 -2.60272907 4.7734634 1.26648965 -0.64150134 6.32889909
-4.70270526]
2.220446049250313e-16
###Markdown
$$Z = XY, \space X \in \mathbb{R}^{n*m}, Y \in \mathbb{R}^{m*p}, Z \in \mathbb{R}^{n*p}$$$$\frac{\partial E}{\partial X} = \frac{\partial E}{\partial Z}Y^T$$$$\frac{\partial E}{\partial Y} = X^T \frac{\partial E}{\partial Z}$$
###Code
X = np.random.randn(7, 3)
Y = np.random.randn(3, 2)
Z = X @ Y
Z_flat = Z.reshape(-1)
e = Z_flat @ Z_flat
tX = torch.tensor(X, requires_grad=True)
tY = torch.tensor(Y, requires_grad=True)
tZ = torch.matmul(tX, tY)
tZ_flat = tZ.view(-1)
te = torch.dot(tZ_flat, tZ_flat)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dZ_flat = 2 * Z_flat
dZ = dZ_flat.reshape(Z.shape[0], Z.shape[1])
dX = dZ @ Y.T
dY = X.T @ dZ
dX_sol = tX.grad.data.numpy()
dY_sol = tY.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
print(dY)
print(dY_sol)
print(metrics.tdist(dY, dY_sol))
###Output
[[ 3.97871846 -6.33975355 -4.23418738]
[-0.39306208 -3.00703587 -0.39979006]
[ 4.05223587 -5.1486296 -4.01785362]
[-4.14463001 6.98619505 4.49678062]
[-1.82270442 2.523923 1.85408595]
[-0.64980715 -4.20649064 -0.4887448 ]
[ 0.94733823 -2.78377223 -1.295082 ]]
[[ 3.97871846 -6.33975355 -4.23418738]
[-0.39306208 -3.00703587 -0.39979006]
[ 4.05223587 -5.1486296 -4.01785362]
[-4.14463001 6.98619505 4.49678062]
[-1.82270442 2.523923 1.85408595]
[-0.64980715 -4.20649064 -0.4887448 ]
[ 0.94733823 -2.78377223 -1.295082 ]]
2.180535678842061e-15
[[ -5.90633464 -11.19726466]
[-10.5344012 -0.37380538]
[-12.84078589 10.21902099]]
[[ -5.90633464 -11.19726466]
[-10.5344012 -0.37380538]
[-12.84078589 10.21902099]]
1.9868029630580076e-15
###Markdown
$$Z = X^TX, \space X \in \mathbb{R}^{n*m}, Z \in \mathbb{R}^{m*m}$$$$\frac{\partial E}{\partial X} = X(\frac{\partial E}{\partial Z} + \frac{\partial E}{\partial Z}^T)$$
###Code
X = np.random.randn(5, 3)
Z = X.T @ X
Z_flat = Z.reshape(-1)
e = Z_flat @ Z_flat
tX = torch.tensor(X, requires_grad=True)
tZ = torch.matmul(torch.transpose(tX, 1, 0), tX)
tZ_flat = tZ.view(-1)
te = torch.dot(tZ_flat, tZ_flat)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dZ_flat = 2 * Z_flat
dZ = dZ_flat.reshape(Z.shape[0], Z.shape[1])
dX = X @ (dZ + dZ.T)
dX_sol = tX.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
###Output
[[ -6.53808255 24.78188913 -32.65780906]
[ -68.32951866 39.72331895 114.64315097]
[ -61.11703755 52.95722259 101.65674081]
[ 62.48972191 -29.36472574 -122.19888141]
[ -39.63881961 -0.17154525 47.63986001]]
[[ -6.53808255 24.78188913 -32.65780906]
[ -68.32951866 39.72331895 114.64315097]
[ -61.11703755 52.95722259 101.65674081]
[ 62.48972191 -29.36472574 -122.19888141]
[ -39.63881961 -0.17154525 47.63986001]]
2.1334777765716796e-14
###Markdown
$Z_I = f(X_I)$, with $Z$ and $X$ tensors of same size, $f: \mathbb{R} \rightarrow \mathbb{R}$$$\frac{\partial E}{\partial X_I} = \frac{\partial E}{\partial Z_I} * f'(X_I)$$
###Code
X = np.random.randn(5, 3)
Z = np.cos(X)
Z_flat = Z.reshape(-1)
e = Z_flat @ Z_flat
tX = torch.tensor(X, requires_grad=True)
tZ = torch.cos(tX)
tZ_flat = tZ.view(-1)
te = torch.dot(tZ_flat, tZ_flat)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dZ_flat = 2 * Z_flat
dZ = dZ_flat.reshape(Z.shape[0], Z.shape[1])
dX = dZ * (-np.sin(X))
dX_sol = tX.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
###Output
[[ 0.37743023 0.97076711 -0.16627778]
[-0.17436211 -0.68908609 0.9419013 ]
[-0.56618871 -0.2965124 0.99917863]
[ 0.87913338 -0.08090861 -0.32402193]
[ 0.90011796 0.22875516 -0.75883033]]
[[ 0.37743023 0.97076711 -0.16627778]
[-0.17436211 -0.68908609 0.9419013 ]
[-0.56618871 -0.2965124 0.99917863]
[ 0.87913338 -0.08090861 -0.32402193]
[ 0.90011796 0.22875516 -0.75883033]]
0.0
###Markdown
$Z_I = f(X_I, Y_I)$, with $Z$, $X$and $Y$ tensors of same size, $f: \mathbb{R}*\mathbb{R} \rightarrow \mathbb{R}$$$\frac{\partial E}{\partial X_I} = \frac{\partial E}{\partial Z_I} * \frac{\partial f(X_I, Y_I)}{\partial X_I}$$$$\frac{\partial E}{\partial Y_I} = \frac{\partial E}{\partial Z_I} * \frac{\partial f(X_I, Y_I)}{\partial Y_I}$$
###Code
X = np.random.rand(7, 3) + 0.1
Y = np.random.randn(7, 3)
Z = np.power(X, Y)
Z_flat = Z.reshape(-1)
e = Z_flat @ Z_flat
tX = torch.tensor(X, requires_grad=True)
tY = torch.tensor(Y, requires_grad=True)
tZ = torch.pow(tX, tY)
tZ_flat = tZ.view(-1)
te = torch.dot(tZ_flat, tZ_flat)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dZ_flat = 2 * Z_flat
dZ = dZ_flat.reshape(Z.shape[0], Z.shape[1])
dX = dZ * Y * np.power(X, Y-1)
dY = dZ * np.log(X) * np.power(X, Y)
dX_sol = tX.grad.data.numpy()
dY_sol = tY.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
print(dY)
print(dY_sol)
print(metrics.tdist(dY, dY_sol))
###Output
[[ 0.67332577 -1.9056877 -14.16141866]
[ -2.62874314 1.04251959 -0.24685522]
[ -2.97332623 -32.53156119 -12.38202496]
[ -2.45148253 -1.3856159 -4.30861926]
[ 1.50858889 -2.81216764 0.25862449]
[ -1.13230957 1.1170135 2.31469228]
[ 2.66040311 -0.55871038 0.28672347]]
[[ 0.67332577 -1.9056877 -14.16141866]
[ -2.62874314 1.04251959 -0.24685522]
[ -2.97332623 -32.53156119 -12.38202496]
[ -2.45148253 -1.3856159 -4.30861926]
[ 1.50858889 -2.81216764 0.25862449]
[ -1.13230957 1.1170135 2.31469228]
[ 2.66040311 -0.55871038 0.28672347]]
0.0
[[ 2.47169302e-02 8.84477685e-02 -2.44620876e+00]
[-1.57985735e+00 -2.29210742e-01 -5.42199613e-01]
[-4.77586116e+00 -7.77662821e+00 -9.49341509e+00]
[-1.24734811e+00 -1.34867052e+00 -2.38055701e+00]
[ 1.97672305e-01 2.92962373e-04 -6.65333391e-02]
[-8.63374000e-01 -2.96900600e-01 -1.22098813e-01]
[ 6.24425701e-02 -1.53363054e-01 -7.81627557e-02]]
[[ 2.47169302e-02 8.84477685e-02 -2.44620876e+00]
[-1.57985735e+00 -2.29210742e-01 -5.42199613e-01]
[-4.77586116e+00 -7.77662821e+00 -9.49341509e+00]
[-1.24734811e+00 -1.34867052e+00 -2.38055701e+00]
[ 1.97672305e-01 2.92962373e-04 -6.65333391e-02]
[-8.63374000e-01 -2.96900600e-01 -1.22098813e-01]
[ 6.24425701e-02 -1.53363054e-01 -7.81627557e-02]]
9.156185367235439e-16
###Markdown
Every tensor sum over an axis can be transformed into a 3D-tensor sum on axis 1, using only reshape. $$X \in \mathbb{R}^{m * n * p}, Y \in \mathbb{R}^{m * p}$$$Y$ is the sum of $X$ over axis $1$.$$Y_{ik} = \sum_{j=1}^n X_{ijk}$$$$\frac{\partial E}{\partial X_{ijk}} = \frac{\partial E}{\partial Y_{ik}}$$
###Code
def prod(x):
res = 1
for v in x: res *= v
return res
def sum_axis(X, axis):
shape3 = (prod(X.shape[:axis]), X.shape[axis], prod(X.shape[axis+1:]))
final_shape = X.shape[:axis] + X.shape[axis+1:]
return np.sum(X.reshape(shape3), axis=1).reshape(final_shape)
X = np.random.randn(2, 4, 3, 7)
s = [sum_axis(X, i) for i in range(4)]
tX = torch.tensor(X, requires_grad = True)
s_sol = [torch.sum(tX, i) for i in range(4)]
for i in range(4):
print(s[i].shape)
print(s_sol[i].data.numpy().shape)
print(metrics.tdist(s[i], s_sol[i].data.numpy()))
def my_expand_dims3(x, size):
y = np.empty((x.shape[0], size, x.shape[1]))
for i in range(x.shape[0]):
for j in range(size):
for k in range(x.shape[1]):
y[i, j, k] = x[i, k]
return y
def dsum_axis(X, axis, dout):
dout = dout.reshape((prod(X.shape[:axis]), prod(X.shape[axis+1:])))
return my_expand_dims3(dout, X.shape[axis]).reshape(X.shape)
a = np.array([[1, 2, 3], [4, 5, 6]])
a2 = my_expand_dims3(a, 2)
print(a2)
for i in range(4):
ds = 2 * s[i]
dX = dsum_axis(X, i, ds)
si_flat = s_sol[i].view(-1)
tz = torch.dot(si_flat, si_flat)
tz.backward()
dX_sol = tX.grad.data.numpy()
print(dX.shape)
print(dX_sol.shape)
print(metrics.tdist(dX, dX_sol))
tX.grad.data.zero_()
###Output
(2, 4, 3, 7)
(2, 4, 3, 7)
0.0
(2, 4, 3, 7)
(2, 4, 3, 7)
0.0
(2, 4, 3, 7)
(2, 4, 3, 7)
0.0
(2, 4, 3, 7)
(2, 4, 3, 7)
0.0
|
guides/ipynb/sequential_model_mine.ipynb | ###Markdown
The Sequential model. **Author:** [fchollet](https://twitter.com/fchollet) **Date created:** 2020/04/12 **Last modified:** 2020/04/12 **Description:** Complete guide to the Sequential model. Setup
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
###Output
_____no_output_____
###Markdown
When to use a Sequential model: A `Sequential` model is appropriate for **a plain stack of layers** where each layer has **exactly one input tensor and one output tensor**. Schematically, the following `Sequential` model:
###Code
# Define Sequential model with 3 layers
model = keras.Sequential(
[
layers.Dense(2, activation="relu", name="layer1"),
layers.Dense(3, activation="relu", name="layer2"),
layers.Dense(4, name="layer3"),
]
)
# Call model on a test input
x = tf.ones((3, 3))
y = model(x)
###Output
_____no_output_____
###Markdown
is equivalent to this function:
###Code
# Create 3 layers
layer1 = layers.Dense(2, activation="relu", name="layer1")
layer2 = layers.Dense(3, activation="relu", name="layer2")
layer3 = layers.Dense(4, name="layer3")
# Call layers on a test input
x = tf.ones((3, 3))
y = layer3(layer2(layer1(x)))
###Output
_____no_output_____
###Markdown
A Sequential model is **not appropriate** when:
- Your model has multiple inputs or multiple outputs
- Any of your layers has multiple inputs or multiple outputs
- You need to do layer sharing
- You want non-linear topology (e.g. a residual connection, a multi-branch model)
Creating a Sequential model: You can create a Sequential model by passing a list of layers to the Sequential constructor:
###Code
model = keras.Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(3, activation="relu"),
layers.Dense(4),
]
)
###Output
_____no_output_____
###Markdown
Its layers are accessible via the `layers` attribute:
###Code
model.layers
###Output
_____no_output_____
###Markdown
You can also create a Sequential model incrementally via the `add()` method:
###Code
model = keras.Sequential()
model.add(layers.Dense(2, activation="relu"))
model.add(layers.Dense(3, activation="relu"))
model.add(layers.Dense(4))
###Output
_____no_output_____
###Markdown
Note that there's also a corresponding `pop()` method to remove layers: a Sequential model behaves very much like a list of layers.
###Code
model.pop()
print(len(model.layers)) # 2
###Output
2
###Markdown
Also note that the Sequential constructor accepts a `name` argument, just like any layer or model in Keras. This is useful to annotate TensorBoard graphs with semantically meaningful names.
###Code
model = keras.Sequential(name="my_sequential")
model.add(layers.Dense(2, activation="relu", name="layer1"))
model.add(layers.Dense(3, activation="relu", name="layer2"))
model.add(layers.Dense(4, name="layer3"))
###Output
_____no_output_____
###Markdown
Specifying the input shape in advance: Generally, all layers in Keras need to know the shape of their inputs in order to be able to create their weights. So when you create a layer like this, initially, it has no weights:
###Code
layer = layers.Dense(3)
layer.weights # Empty
###Output
_____no_output_____
###Markdown
It creates its weights the first time it is called on an input, since the shape of the weights depends on the shape of the inputs:
###Code
# Call layer on a test input
x = tf.ones((1, 4))
y = layer(x)
layer.weights # Now it has weights, of shape (4, 3) and (3,)
###Output
_____no_output_____
###Markdown
Naturally, this also applies to Sequential models. When you instantiate a Sequential model without an input shape, it isn't "built": it has no weights (and calling `model.weights` results in an error stating just this). The weights are created when the model first sees some input data:
###Code
model = keras.Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(3, activation="relu"),
layers.Dense(4),
]
) # No weights at this stage!
# At this point, you can't do this:
# model.weights
# You also can't do this:
# model.summary()
# Call the model on a test input
x = tf.ones((1, 4))
y = model(x)
print("Number of weights after calling the model:", len(model.weights)) # 6
###Output
Number of weights after calling the model: 6
###Markdown
Once a model is "built", you can call its `summary()` method to display itscontents:
###Code
model.summary()
###Output
Model: "sequential_4"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_7 (Dense) (1, 2) 10
_________________________________________________________________
dense_8 (Dense) (1, 3) 9
_________________________________________________________________
dense_9 (Dense) (1, 4) 16
=================================================================
Total params: 35
Trainable params: 35
Non-trainable params: 0
_________________________________________________________________
###Markdown
However, it can be very useful when building a Sequential model incrementally to be able to display the summary of the model so far, including the current output shape. In this case, you should start your model by passing an `Input` object to your model, so that it knows its input shape from the start:
###Code
model = keras.Sequential()
model.add(keras.Input(shape=(4,)))
model.add(layers.Dense(2, activation="relu"))
model.summary()
###Output
Model: "sequential_5"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_10 (Dense) (None, 2) 10
=================================================================
Total params: 10
Trainable params: 10
Non-trainable params: 0
_________________________________________________________________
###Markdown
Note that the `Input` object is not displayed as part of `model.layers`, since it isn't a layer:
###Code
model.layers
###Output
_____no_output_____
###Markdown
A simple alternative is to just pass an `input_shape` argument to your first layer:
###Code
model = keras.Sequential()
model.add(layers.Dense(2, activation="relu", input_shape=(4,)))
model.summary()
###Output
Model: "sequential_6"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_11 (Dense) (None, 2) 10
=================================================================
Total params: 10
Trainable params: 10
Non-trainable params: 0
_________________________________________________________________
###Markdown
Models built with a predefined input shape like this always have weights (even before seeing any data) and always have a defined output shape. In general, it's a recommended best practice to always specify the input shape of a Sequential model in advance if you know what it is. A common debugging workflow: `add()` + `summary()`. When building a new Sequential architecture, it's useful to incrementally stack layers with `add()` and frequently print model summaries. For instance, this enables you to monitor how a stack of `Conv2D` and `MaxPooling2D` layers is downsampling image feature maps:
###Code
model = keras.Sequential()
model.add(keras.Input(shape=(250, 250, 3))) # 250x250 RGB images
model.add(layers.Conv2D(32, 5, strides=2, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
# Can you guess what the current output shape is at this point? Probably not.
# Let's just print it:
model.summary()
# The answer was: (40, 40, 32), so we can keep downsampling...
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(2))
# And now?
model.summary()
# Now that we have 4x4 feature maps, time to apply global max pooling.
model.add(layers.GlobalMaxPooling2D())
# Finally, we add a classification layer.
model.add(layers.Dense(10))
###Output
Model: "sequential_7"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 123, 123, 32) 2432
_________________________________________________________________
conv2d_1 (Conv2D) (None, 121, 121, 32) 9248
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 40, 40, 32) 0
=================================================================
Total params: 11,680
Trainable params: 11,680
Non-trainable params: 0
_________________________________________________________________
Model: "sequential_7"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 123, 123, 32) 2432
_________________________________________________________________
conv2d_1 (Conv2D) (None, 121, 121, 32) 9248
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 40, 40, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 38, 38, 32) 9248
_________________________________________________________________
conv2d_3 (Conv2D) (None, 36, 36, 32) 9248
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 12, 12, 32) 0
_________________________________________________________________
conv2d_4 (Conv2D) (None, 10, 10, 32) 9248
_________________________________________________________________
conv2d_5 (Conv2D) (None, 8, 8, 32) 9248
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 32) 0
=================================================================
Total params: 48,672
Trainable params: 48,672
Non-trainable params: 0
_________________________________________________________________
###Markdown
Very practical, right? What to do once you have a model: Once your model architecture is ready, you will want to:
- Train your model, evaluate it, and run inference. See our [guide to training & evaluation with the built-in loops](/guides/training_with_built_in_methods/)
- Save your model to disk and restore it. See our [guide to serialization & saving](/guides/serialization_and_saving/).
- Speed up model training by leveraging multiple GPUs. See our [guide to multi-GPU and distributed training](https://keras.io/guides/distributed_training/).
Feature extraction with a Sequential model: Once a Sequential model has been built, it behaves like a [Functional API model](/guides/functional_api/). This means that every layer has an `input` and `output` attribute. These attributes can be used to do neat things, like quickly creating a model that extracts the outputs of all intermediate layers in a Sequential model:
###Code
initial_model = keras.Sequential(
[
keras.Input(shape=(250, 250, 3)),
layers.Conv2D(32, 5, strides=2, activation="relu"),
layers.Conv2D(32, 3, activation="relu"),
layers.Conv2D(32, 3, activation="relu"),
]
)
feature_extractor = keras.Model(
inputs=initial_model.inputs,
outputs=[layer.output for layer in initial_model.layers],
)
# Call feature extractor on test input.
x = tf.ones((1, 250, 250, 3))
features = feature_extractor(x)
for _ in features:
print(_.shape)
###Output
(1, 123, 123, 32)
(1, 121, 121, 32)
(1, 119, 119, 32)
###Markdown
Here's a similar example that only extracts features from one layer:
###Code
initial_model = keras.Sequential(
[
keras.Input(shape=(250, 250, 3)),
layers.Conv2D(32, 5, strides=2, activation="relu"),
layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"),
layers.Conv2D(32, 3, activation="relu"),
]
)
feature_extractor = keras.Model(
inputs=initial_model.inputs,
outputs=initial_model.get_layer(name="my_intermediate_layer").output,
)
# Call feature extractor on test input.
x = tf.ones((1, 250, 250, 3))
features = feature_extractor(x)
for _ in features:
print(_.shape)
###Output
(121, 121, 32)
|
_build/html/_sources/materials/01-Introduction.ipynb | ###Markdown
Introduction to Python
**Important Note**: This course was originally designed and developed by [Tom Donoghue](https://tomdonoghue.github.io/). While lectures, assignments, exams and coding labs will be altered from the original run of the course in Fall 2018, *tons* of credit for this course is due to Tom for his awesome work getting this course off the ground. The PDF slides from the start of the first class are available here: https://cogs18.github.io/assets/intro/01_welcome.pdf
Logistics
- Who:
  - Instructor: Shannon Ellis
  - TA: Shivani, Holly, & Sam
  - IAs: Andrew, Donovan, Ethan, Matthew, Suzy, & Xiaoxuan (Andrina)
- Where:
  - Lectures MWF @ 8AM (Zoom/Peterson Hall 110)
  - Coding Lab Sections (Zoom/CSB 115; Wed 9AM-5PM)
- Course Website: https://cogs18.github.io
- Campuswire Page: https://campuswire.com/p/G9193CB28
Expectations & Approach
- Goal: to learn practical programming in Python
- How: hands-on, community driven, skills based course, assignment & project driven
- Lectures & Lab Sections will be used for interactive activities
- Assignments, coding labs & a final project will be designed to get you coding
Why Learn Computation?
- Computation is how things are done
- Computation is the foundation of much of the modern world
What is Python
- Python is a programming language
- It is a way to do computation
- It gets the computer to do the work for you
- Python is an ecosystem
- It is a culture of practice for computation
What does Python look like
###Code
variable_name = "variable name."
print(variable_name)
a = 1
b = 2
c = a + b
print(c)
###Output
3
|
examples/Logica_example_Life_expectency.ipynb | ###Markdown
Logica example: Top of countries by life expectancy Install and import Logica
###Code
!pip install logica
from logica import colab_logica
from google.colab import auth
auth.authenticate_user()
colab_logica.SetProject('YOUR_PROJECT_ID')
###Output
_____no_output_____
###Markdown
Countries with highest life expectancy among countries with area above 25k.We join the life expectancy table with countries area table, filter by country size and order by life expectancy.
###Code
%%logica TopLife
# Defining short names for public tables.
LifeExpectancy(..r) :-
`bigquery-public-data.census_bureau_international.mortality_life_expectancy`(..r);
CountryArea(..r) :-
`bigquery-public-data.census_bureau_international.country_names_area`(..r);
# Extracting the data.
@OrderBy(TopLife, "life_expectancy desc");
@Limit(TopLife, 20);
TopLife(country_name:, life_expectancy:, country_area:) :-
LifeExpectancy(country_name:, life_expectancy:, year: 2016),
CountryArea(country_name:, country_area:),
country_area > 25000;
###Output
_____no_output_____ |
OLCI/12_OLCI_spatial_interrogation.ipynb | ###Markdown
OLCI spatial plotting, quality control and data interrogation Version: 2.0 Date: 10/04/2019 Author: Ben Loveday and Hayley Evers-King (Plymouth Marine Laboratory) Credit: This code was developed for EUMETSAT under contracts for the Copernicus programme. License: This code is offered as free-to-use in the public domain, with no warranty. The aim of this code is to introduce you to Python and to simply import a netCDF file into your Python workspace, conduct some basic operations, and plot an image. In this case, we will be using a level-2 OLCI image, but the script can be easily adapted to plot any netCDF variable. The first step in any Python code is usually to import libraries that you will need. Libraries are usually code modules that perform specific tasks or provide specific capability (e.g. statistical analysis or plotting routines). In this case we will import the xarray library for handling netCDF files, the numpy library which will help to conduct various operations on the data, and the matplotlib plotting library to generate some images. We will also import the os library, which allows Python access to some command-line-esque capability like 'list directory', as well as the Python library that governs the reporting of warnings (so that we can turn them off here, and make the code run without being so 'noisy').
###Code
%matplotlib inline
# libraries are imported here, and we can import any library with an alias that allows us easy access to them later.
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Usually we also define functions at the top of a Python script. Functions are routines that can be called elsewhere in our script and perform a specific task. Typically we would use a function to take care of any process that we are going to perform more than once. The box below defines a function that will mask our data according to quality flags. We will call this function later on.
###Code
def flag_data_fast(flags_we_want, flag_names, flag_values, flag_data, flag_type='WQSF'):
flag_bits = np.uint64()
if flag_type == 'SST':
flag_bits = np.uint8()
elif flag_type == 'WQSF_lsb':
flag_bits = np.uint32()
for flag in flags_we_want:
try:
flag_bits = flag_bits | flag_values[flag_names.index(flag)]
except:
print(flag + " not present")
return (flag_data & flag_bits) > 0
###Output
_____no_output_____
###Markdown
Now we will start our script proper. To run this script, you will need to point it to where your data is. If you keep your scripts and files in the same folder, you will not need to set the full path to the data file. However, you may want to store things in different places, so it is good practice to be specific. To help find your data, please complete the MYPATH variable below with the output generated by the /Configuration_Testing/Data_Path_Checker.ipynb Jupyter notebook in the Configuration_Testing folder.
###Code
# e.g. MYPATH = os.path.join("C:/","Users","me","Desktop")
MYPATH = "<please insert your path from Data_Path_Checker.ipynb here, removing the quotes and chevrons>"
input_root = os.path.join(MYPATH,'OLCI_test_data')
input_path = 'S3A_OL_2_WRR____20180203T061351_20180203T065737_20180204T113446_2626_027_248______MAR_O_NT_002.SEN3'
file_name_chl = 'chl_nn.nc'
###Output
_____no_output_____
###Markdown
We'll quickly check, in the next box, that your data path is ok and that the data file exists.
###Code
# quick path length check (some windows versions have a problem with long file paths)
if len(os.path.join(input_root,input_path,file_name_chl)) > 259 \
or len(os.path.join(input_root,input_path,file_name_chl)) > 248:
print('Beware, your path name is quite long. Consider moving your data to a new directory')
else:
print('Path length name seems fine')
if os.path.exists(os.path.join(input_root,input_path,file_name_chl)):
print('Found the required data file')
else:
print('Data file missing. Please check your path and file name')
###Output
_____no_output_____
###Markdown
We read the file using functions from the xarray (alias "xr") library. Note that to use a library in Python you use the imported alias followed by a dot, and then the function you want (e.g. xr.open_dataset).
###Code
OLCI_file = xr.open_dataset(os.path.join(input_root,input_path,file_name_chl))
###Output
_____no_output_____
###Markdown
To access a variable you can use the following command, where the name of the variable you are interested in, follows the hash. If you remove the hash in the following box, put the cursor after the dot and hit 'tab' you will be presented with a list of all of the variables and methods associated with the OLCI_file object. Python is an 'object orientatated' language, which means that all objects have relevant methods associated with them. note: If you want to run all this code in one go, remember to put the hash back at the start of this line beforehand.
###Code
#OLCI_file.
###Output
_____no_output_____
###Markdown
So, lets load in some data, and then close our data file
###Code
CHL = OLCI_file.CHL_NN.data
OLCI_file.close()
###Output
_____no_output_____
###Markdown
You can look at the variables in your workspace in interactive python environments (like this, or ipython) by typing 'whos'. This will tell you the name of the variable, it's type and then information on it, such as its size and shape.
###Code
#whos
###Output
_____no_output_____
###Markdown
Lets take a look at our data..
###Code
plt.imshow(CHL);
###Output
_____no_output_____
###Markdown
This is not the prettiest plot - Python can do much better. For a start, we may wish to look at a smaller area. We'll do this now, using the relevant indexes for the area of data you wish to use.
###Code
row1=4000
row2=8000
col1=0
col2=3000
CHL_subset = CHL[row1:row2, col1:col2]
plt.imshow(CHL_subset);
###Output
_____no_output_____
###Markdown
You will notice a few problems with displaying plots like this. Firstly - they don't look very pretty (the colour scheme is not ideal, it is hard to see the coastline, and you can't differentiate the land from cloud), and secondly - the axes don't provide any information on the location (other than within the array) and there is no colour bar. To make a better plot we will need to add a few more tools to the libraries/modules we've currently imported. Below are a few lines of code to import cartopy (which will help us make a better, map-based plot) and a few other tools to tweak how the data is displayed. The Cartopy module allows us to use map projections to display data in a geographically relevant way. For those that are familiar with Python, Cartopy has largely replaced the Basemap library.
###Code
import cartopy.crs as ccrs
import cartopy.feature as cfeature
land_resolution = '50m'
land_poly = cfeature.NaturalEarthFeature('physical', 'land', land_resolution,
edgecolor='k',
facecolor=cfeature.COLORS['land'])
###Output
_____no_output_____
###Markdown
We will also need to load other data to make the plot - the longitude and latitude data associated with each pixel of the chlorophyll data. This data can be found in the geo_coordinates.nc file, within each S3 OLCI L2 folder. We load this in a very similar way to how we loaded the chlorophyll data, just with different file and variable names. The data path remains the same, referring the folder that contains all the netcdf files.
###Code
file_name_geo = 'geo_coordinates.nc'
GEO_file = xr.open_dataset(os.path.join(input_root,input_path,file_name_geo))
LAT = GEO_file.variables['latitude'][:]
LON = GEO_file.variables['longitude'][:]
LAT_subset = LAT[row1:row2, col1:col2]
LON_subset = LON[row1:row2, col1:col2]
GEO_file.close()
###Output
_____no_output_____
###Markdown
Then we need to initialise the map we will use for plotting. The important things to choose here are:1. the projection you wish to use (this may depend on your region of interest, particularly if you are looking at polar data; more information about the different projections is available here: https://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html)2. The limits of your map (by default, this will be set to your data limits)3. The resolution of the map coastline.See more information about the options for Cartopy here: https://scitools.org.uk/cartopy/docs/latest/ (If you don't have cartopy installed, you can type "conda install -c scitools/label/archive cartopy" in your command prompt, with the anaconda distribution). We start by defining a figure (line 1 below) and then defining a map projection (line 2). All mapping instructions are taken care of using our map object 'm'. Now we make the plot (this may take some time to draw!)
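For reference on point 1 above, this is how the projection choice could be swapped. A minimal sketch, assuming you wanted a polar view instead of the PlateCarree projection used below (NorthPolarStereo and set_extent are standard Cartopy calls; the extent values are just an example):

```python
import cartopy.crs as ccrs
import matplotlib.pyplot as plt

# Example only: a north-polar stereographic view of latitudes 60-90 N
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent([-180, 180, 60, 90], crs=ccrs.PlateCarree())  # lon/lat box to display
ax.coastlines(resolution='50m')
plt.show()
```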
###Code
fig1 = plt.figure(figsize=(20, 20), dpi=300)
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
f1 = plt.pcolormesh(LON, LAT, np.ma.masked_invalid(CHL), shading='flat', vmin=np.log10(0.01), vmax=np.log10(50), cmap=plt.cm.viridis)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07, ticks=[np.log10(0.01), np.log10(0.1),np.log10(0.5), np.log10(1),np.log10(3),np.log10(10),np.log10(50)])
cbar.ax.set_xticklabels(['0.01','0.1','0.5','1','3','10','50'], fontsize=20)
cbar.set_label('Chlorophyll, mg.m$^{-3}$', fontsize=20)
plt.title('OLCI [CHL_NN] mg.m$^{-3}$', fontsize=20);
plt.show()
###Output
_____no_output_____
###Markdown
You can also save the figure using the code below (this will save in the folder where you are running the code, if you want to save elsewhere you need to specify the path).
###Code
fig2 = plt.figure(figsize=(20, 20), dpi=300)
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
f1 = plt.pcolormesh(LON_subset,LAT_subset,np.ma.masked_invalid(CHL_subset), shading='flat', vmin=np.log10(0.01), vmax=np.log10(50), cmap=plt.cm.viridis)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07, ticks=[np.log10(0.01), np.log10(0.1),np.log10(0.5), np.log10(1),np.log10(3),np.log10(10),np.log10(50)])
cbar.ax.set_xticklabels(['0.01','0.1','0.5','1','3','10','50'], fontsize=20)
cbar.set_label('Chlorophyll, mg.m$^{-3}$', fontsize=20)
plt.title('OLCI [CHL_NN] mg.m$^{-3}$', fontsize=20);
plt.show()
fig2.savefig('OLCI_CHL_spatial_demo_no_flags.png', bbox_inches='tight')
###Output
_____no_output_____
###Markdown
However, this data is not flag masked. This means that we may have data that is subject to glint, or cloud, or a variety of other conditions that variously undermine quality. So, lets apply some flags. We are going to flag extensively, removing all data that corresponds to the following conditions...
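As a reminder of how the bit-flag test inside the flag_data_fast function works, here is a tiny standalone illustration; the flag names and values below are invented for the example (the real ones are read from the wqsf.nc file in the next cell):

```python
import numpy as np

# Hypothetical flags: each flag occupies a single bit
flag_names = ['CLOUD', 'LAND', 'GLINT']
flag_values = [1, 2, 4]                              # 0b001, 0b010, 0b100
flag_data = np.array([0, 1, 6, 4], dtype=np.uint64)  # per-pixel flag words

# Combine the bits of the flags we want to mask out
flags_we_want = ['CLOUD', 'GLINT']
flag_bits = np.uint64(0)
for flag in flags_we_want:
    flag_bits = flag_bits | np.uint64(flag_values[flag_names.index(flag)])

mask = (flag_data & flag_bits) > 0                   # True where any wanted flag is set
print(mask)                                          # [False  True  True  True]
```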
###Code
flags_we_want = ['CLOUD', 'CLOUD_AMBIGUOUS', 'CLOUD_MARGIN', 'INVALID', 'COSMETIC', 'SATURATED', 'SUSPECT',
'HISOLZEN', 'HIGHGLINT', 'SNOW_ICE', 'AC_FAIL', 'WHITECAPS', 'ANNOT_ABSO_D', 'ANNOT_MIXR1',
'ANNOT_DROUT', 'ANNOT_TAU06', 'RWNEG_O2', 'RWNEG_O3', 'RWNEG_O4', 'RWNEG_O5', 'RWNEG_O6',
'RWNEG_O7', 'RWNEG_O8']
file_name_flags = 'wqsf.nc'
FLAG_file = xr.open_dataset(os.path.join(input_root,input_path,file_name_flags))
# get all the flag names
flag_names = FLAG_file['WQSF'].flag_meanings.split(' ')
# get all the flag bit values
flag_vals = FLAG_file['WQSF'].flag_masks
# get the flag field itself
FLAGS = FLAG_file.variables['WQSF'].data
FLAG_file.close()
# make the flag mask using the function we defined above "flag_data_fast"
flag_mask = flag_data_fast(flags_we_want, flag_names, flag_vals, FLAGS, flag_type='WQSF')
flag_mask = flag_mask.astype(float)
flag_mask[flag_mask == 0.0] = np.nan
# subset the flag mask
FLAG_subset = flag_mask[row1:row2, col1:col2]
###Output
_____no_output_____
###Markdown
And now we apply the flag data to our data and plot again...
###Code
CHL_subset[np.isfinite(FLAG_subset)] = np.nan
fig3 = plt.figure(figsize=(20, 20), dpi=300)
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
f1 = plt.pcolormesh(LON_subset,LAT_subset,np.ma.masked_invalid(CHL_subset), shading='flat', vmin=np.log10(0.01), vmax=np.log10(50), cmap=plt.cm.viridis)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07, ticks=[np.log10(0.01), np.log10(0.1),np.log10(0.5), np.log10(1),np.log10(3),np.log10(10),np.log10(50)])
cbar.ax.set_xticklabels(['0.01','0.1','0.5','1','3','10','50'], fontsize=20)
cbar.set_label('Chlorophyll, mg.m$^{-3}$', fontsize=20)
plt.title('OLCI [CHL_NN] mg.m$^{-3}$', fontsize=20);
plt.show()
fig3.savefig('OLCI_CHL_spatial_demo_flags.png', bbox_inches='tight')
###Output
_____no_output_____ |
Convolutional_Neural_Networks_Assignment.ipynb | ###Markdown
Assignment Load a pretrained network from TensorFlow Hub, [ResNet50](https://tfhub.dev/google/imagenet/resnet_v1_50/classification/1) - a 50 layer deep network trained to recognize [1000 objects](https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt). Starting usage:

```python
module = hub.Module("https://tfhub.dev/google/imagenet/resnet_v1_50/classification/1")
height, width = hub.get_expected_image_size(module)
images = ...  # A batch of images with shape [batch_size, height, width, 3].
logits = module(images)  # Logits with shape [batch_size, num_classes].
```

Apply it to classify the images downloaded below (images from a search for animals in national parks):
###Code
!pip install google_images_download
from google_images_download import google_images_download
response = google_images_download.googleimagesdownload()
arguments = {"keywords": "animal national park", "limit": 20,
"print_urls": True}
absolute_image_paths = response.download(arguments)
absolute_image_paths
# Imports
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
# https://pillow.readthedocs.io/en/stable/
from PIL import Image, ImageOps
import tensorflow as tf
import tensorflow_hub as hub
# Imports needed for the prediction cell further down (ResNet50 and its pre/post-processing helpers)
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
# Resize all images to 224x224 pixels
image_path_list = absolute_image_paths['animal national park']
def resize_image(filename, new_width=256, new_height=256):
pil_image = Image.open(filename)
pil_image = ImageOps.fit(pil_image, (new_width, new_height), Image.ANTIALIAS)
pil_image_rgb = pil_image.convert('RGB')
pil_image_rgb.save(filename, format='JPEG', quality=90)
for path in image_path_list:
resize_image(path, 224, 224)
# Show images
def show_images(images_list):
plt.figure()
for i, path in enumerate(images_list):
if path != faulty_img:
plt.subplot(5,5, i+1)
plt.imshow(np.asarray(Image.open(path)))
#plt.title(path)
plt.grid(False)
plt.yticks([])
plt.xticks([])
plt.show()
# 'faulty_img' is assumed to hold the path of any corrupt download; None means keep everything
faulty_img = None
show_images(image_path_list)
# Making Predictions
def process_img_path(img_path):
return image.load_img(img_path, target_size=(224, 224))
def img_classifier(img):
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
model = ResNet50(weights='imagenet')
features = model.predict(x)
results = decode_predictions(features, top=3)[0]
return results
predictions = []
for path in image_path_list:
if path != faulty_img:
result = img_classifier(process_img_path(path))
predictions.append(result)
print(result)
for path, prediction in zip(image_path_list, predictions):
for _, animal, prob in prediction:
print(f'{animal}: {prob:.3f}')
plt.subplot()
plt.imshow(np.asarray(Image.open(path)))
plt.grid(False)
plt.yticks([])
plt.xticks([])
plt.show()
print()
###Output
_____no_output_____
###Markdown
Report both the most likely estimated class for any image, and also investigate (a) images where the classifier isn't that certain (the best estimate is low), and (b) images where the classifier fails.Answer (in writing in the notebook) the following - "What sorts of images do CNN classifiers do well with? What sorts do they not do so well? And what are your hypotheses for why?" Resources and Stretch GoalsStretch goals- Enhance your code to use classes/functions and accept terms to search and classes to look for in recognizing the downloaded images (e.g. download images of parties, recognize all that contain balloons)- Check out [other available pretrained networks](https://tfhub.dev), try some and compare- Image recognition/classification is somewhat solved, but *relationships* between entities and describing an image is not - check out some of the extended resources (e.g. [Visual Genome](https://visualgenome.org/)) on the topic- Transfer learning - using images you source yourself, [retrain a classifier](https://www.tensorflow.org/hub/tutorials/image_retraining) with a new category- (Not CNN related) Use [piexif](https://pypi.org/project/piexif/) to check out the metadata of images passed in to your system - see if they're from a national park! (Note - many images lack GPS metadata, so this won't work in most cases, but still cool)Resources- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) - influential paper (introduced ResNet)- [YOLO: Real-Time Object Detection](https://pjreddie.com/darknet/yolo/) - an influential convolution based object detection system, focused on inference speed (for applications to e.g. self driving vehicles)- [R-CNN, Fast R-CNN, Faster R-CNN, YOLO](https://towardsdatascience.com/r-cnn-fast-r-cnn-faster-r-cnn-yolo-object-detection-algorithms-36d53571365e) - comparison of object detection systems- [Common Objects in Context](http://cocodataset.org/) - a large-scale object detection, segmentation, and captioning dataset- [Visual Genome](https://visualgenome.org/) - a dataset, a knowledge base, an ongoing effort to connect structured image concepts to language
###Code
###Output
_____no_output_____ |
Example5.ipynb | ###Markdown
Author: Adrian Swartz Date: 3/29/2019 Solutions to Andrew Ng's coursera problems Example 5: Regularized Linear Regression and Bias vs. Variance Andrew Ng's course is designed for the solutions to be worked through in Octave (similar to matlab). However, I am going to do these problems in Python (within a Jupyter notebook) and demonstrate how various approaches can be implemented in Python. To some degree, I have already touched on model evaluation and bias/variance in the Logistic Regression examples. This notebook is structured as follows:[Regularized Linear Regression and Bias vs. Variance](Part1)1. [Simple Linear Regression with Regularization](1) 1a. [Exploratory Data Analysis](eda) 1b. [Preprocess the Data](prep) 1c. [Regularized Cost and Gradient Functions for Linear Regression](funcs) 1d. [Optimize the (Linear) Model Parameters](opt) 1e. [Model Evaluation](me) 2. [Linear Regression with Regularization and Non-linear Features](2) 2a. [Non-Linear Feature Generation](fg) 2b. [Optimize the (Linear) Model Parameters](opt2) 2c. [Model Evaluation](me2) 3. [Linear Regression with Regularization and Non-linear Features with sklearn](3) 3a. [Non-Linear Feature Generation](fg1) 3b. [Regularized Linear Regression with sklearn](sklearn) 4. [Bias vs. Variance](4)***For this exercise, we will import several Python packages: NumPy (numerical Python for matrix manipulations and advanced math), pandas (DataFrames offer a useful platform for importing data and performing exploratory data analysis), matplotlib and seaborn for data visualization.
###Code
# import standard packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
cp=sns.color_palette()
# import a few special packages we'll use for this example
from scipy.io import loadmat
from sklearn.preprocessing import PolynomialFeatures
from scipy.optimize import minimize
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
###Output
_____no_output_____
###Markdown
Regularized Linear Regression and Bias vs. Variance Prompt: Implement regularized linear regression and use it to study models with different bias-variance properties. In the first half of the exercise, you will implement regularized linear regression to predict the amount of water flowing out of a dam using the change of water level in a reservoir. In the second half, you will go through some diagnostics for debugging learning algorithms and examine the effects of bias vs. variance.***OK. Let's dive into the data and begin building the framework for regularized linear regression. Exploratory Data Analysis
###Code
data = loadmat('ex5/ex5data1.mat')
data
X_train = data['X']
X_val = data['Xval']
X_test = data['Xtest']
y_train = data['y']
y_val = data['yval']
y_test = data['ytest']
fig, ax = plt.subplots(figsize=(8,6))
plt.scatter(X_train, y_train)
plt.scatter(X_test, y_test, color=cp[3])
plt.scatter(X_val, y_val, marker='x', color='black')
ax.set_xlabel('Change in Water Level')
ax.set_ylabel('Water Flow')
X_train.shape, X_test.shape, X_val.shape
y_train.shape, y_test.shape, y_val.shape
###Output
_____no_output_____
###Markdown
There's hardly any data, but because it's well sampled (can see that from the graph), this will work well enough for our purposes of implementing regularized linear regression and playing around with validation techniques. Preprocess the Data
###Code
poly = PolynomialFeatures(1)
# setting =1 leaves the original data (no new non-linear features)
# However, PolynomialFeatures will add the bias term for us
X_train = np.matrix(data['X'])
X_test = np.matrix(data['Xtest'])
X_val = np.matrix(data['Xval'])
X_train = poly.fit_transform(X_train)
X_test = poly.fit_transform(X_test)
X_val = poly.fit_transform(X_val)
y_train = np.matrix(data['y'])
y_test = np.matrix(data['ytest'])
y_val = np.matrix(data['yval'])
X_train.shape, X_test.shape, X_val.shape
X_train[:5] # checking that the bias term is added.
#Initialize theta - needs to be an array to work with scipy.optimize functions
theta = np.zeros(X_train.shape[1])
theta.shape
###Output
_____no_output_____
###Markdown
Regularized Cost and Gradient Functions for Linear Regression
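Before coding it up, here is the quantity the two functions below implement; a sketch with $m$ training examples, regularization strength $\lambda$ (called reg_scale in the code), and the bias term $\theta_0$ excluded from the penalty:

$$J(\theta) = \frac{1}{2m}\sum_{i=1}^{m}\left(\theta^{T}x^{(i)} - y^{(i)}\right)^{2} + \frac{\lambda}{2m}\sum_{j=1}^{n}\theta_{j}^{2}$$

$$\frac{\partial J}{\partial \theta_{j}} = \frac{1}{m}\sum_{i=1}^{m}\left(\theta^{T}x^{(i)} - y^{(i)}\right)x_{j}^{(i)} + \frac{\lambda}{m}\theta_{j} \;\; (j \geq 1), \qquad \frac{\partial J}{\partial \theta_{0}} = \frac{1}{m}\sum_{i=1}^{m}\left(\theta^{T}x^{(i)} - y^{(i)}\right)x_{0}^{(i)}$$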
###Code
def Regularized_Cost(theta, X, y, reg_scale):
""" Calculates the regularized cost function for logistic regression
input
theta: a numpy array of length n; the model parameters
X: an m x n matrix; instances x features
y: an m x 1 matrix; target variable
reg_scale: a float; strength of the regularization
output
cost: the sum of the ordinary least squares over all the data
"""
theta = np.matrix(theta)
M = len(X)
cost = np.sum( np.power(((X * theta.T) - y), 2) ) / (2*M)
cost += (reg_scale * np.sum(np.power(theta[:,1:theta.shape[1]], 2)) / (2 * M))
return cost
Regularized_Cost(theta, X_train, y_train, 1)
def Regularized_Gradient(theta, X, y, reg_scale):
""" Calculates the regularized gradient of the logistic regression cost function
input
theta: a 1 x n matrix; the model parameters
X: an m x n matrix; instances x features
y: an m x 1 matrix; target variable
reg_scale: a float; strength of the regularization
output
gradient: a numpy array of floats of lenth n
"""
M=len(X)
theta=np.matrix(theta)
parameters = int(theta.shape[1])
grad = np.zeros(parameters)
error = (X * theta.T) - y
for i in range(parameters):
if (i == 0):
grad[i] = np.sum(np.multiply(error, np.matrix(X[:,i]).T)) / M
else:
grad[i] = (np.sum(np.multiply(error, np.matrix(X[:,i]).T)) / M)
grad[i] += (reg_scale * theta[:,i] / M)
return grad
Regularized_Gradient(theta, X_train, y_train, 1)
###Output
_____no_output_____
###Markdown
Optimize the (Linear) Model Parameters
###Code
result = minimize(fun=Regularized_Cost, x0=theta, args=(X_train,y_train,1), method='TNC', \
jac=Regularized_Gradient, options={'maxiter':1000})
result
#Generate the predictions
x = np.matrix(np.linspace(-60,60, num=100)).T
x_ = poly.fit_transform(x)
y_pred = (x_ * np.matrix(result.x).T)
x.shape, y_pred.shape
fig, ax = plt.subplots(figsize=(10,6))
plt.scatter(data['X'], data['y']) # plot the data
plt.scatter(data['Xtest'], data['ytest'], color=cp[3])
plt.scatter(data['Xval'], data['yval'], marker='x', color='black')
plt.plot(x, y_pred, color=cp[3]) # plot the fit
ax.set_xlabel('Change in Water Level')
ax.set_ylabel('Water Flow')
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
y_pred = (X_train * np.matrix(result.x).T)
mae = mean_absolute_error(y_train, y_pred)
mse = mean_squared_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
print('Training Error')
print('Mean Absolute Error: {:.4}; Mean Squared Error: {:.4}; R^2 score: {:.4}'.format(mae, mse, r2))
print(' ')
y_pred_test = (X_test * np.matrix(result.x).T)
mae = mean_absolute_error(y_test, y_pred_test)
mse = mean_squared_error(y_test, y_pred_test)
r2 = r2_score(y_test, y_pred_test)
print('Test Error')
print('Mean Absolute Error: {:.4}; Mean Squared Error: {:.4}; R^2 score: {:.4}'.format(mae, mse, r2))
print(' ')
y_pred_val = (X_val * np.matrix(result.x).T)
mae = mean_absolute_error(y_val, y_pred_val)
mse = mean_squared_error(y_val, y_pred_val)
r2 = r2_score(y_val, y_pred_val)
print('Val Error')
print('Mean Absolute Error: {:.4}; Mean Squared Error: {:.4}; R^2 score: {:.4}'.format(mae, mse, r2))
print(' ')
###Output
Val Error
Mean Absolute Error: 5.926; Mean Squared Error: 58.87; R^2 score: 0.6358
###Markdown
Linear Regression with Regularization and Non-linear Features In this section I will add non-linear features up to fourth order using sklearn's PolynomialFeatures. Non-Linear Feature Generation
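As a quick illustration of what `PolynomialFeatures` produces for a single input column (a minimal sketch, separate from the dam data used below):

```python
from sklearn.preprocessing import PolynomialFeatures
import numpy as np

# A degree-4 expansion of one feature x gives [1, x, x^2, x^3, x^4]
poly_demo = PolynomialFeatures(4)
print(poly_demo.fit_transform(np.array([[2.0], [3.0]])))
# [[ 1.  2.  4.  8. 16.]
#  [ 1.  3.  9. 27. 81.]]
```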
###Code
poly = PolynomialFeatures(4) # for 4th order polynomial feature generation
X_train = np.matrix(data['X'])
X_test = np.matrix(data['Xtest'])
X_val = np.matrix(data['Xval'])
X_train = poly.fit_transform(X_train)
X_test = poly.fit_transform(X_test)
X_val = poly.fit_transform(X_val)
y_train = np.matrix(data['y'])
y_test = np.matrix(data['ytest'])
y_val = np.matrix(data['yval'])
#Initialize theta - needs to be an array to work with scipy.optimize functions
theta = np.zeros(X_train.shape[1])
theta.shape
result = minimize(fun=Regularized_Cost, x0=theta, args=(X_train,y_train,1), method='TNC', \
jac=Regularized_Gradient, options={'maxiter':50000})
result
#Generate the predictions
x = np.matrix(np.linspace(-60,60, num=100)).T
x_ = poly.fit_transform(x)
y_pred = (x_ * np.matrix(result.x).T)
fig, ax = plt.subplots(figsize=(10,6))
plt.scatter(data['X'], data['y']) # plot the data
plt.scatter(data['Xtest'], data['ytest'], color=cp[3])
plt.scatter(data['Xval'], data['yval'], marker='x', color='black')
plt.plot(x, y_pred, color='red') # plot the fit
ax.set_xlabel('Change in Water Level')
ax.set_ylabel('Water Flow')
###Output
_____no_output_____
###Markdown
Definitely overfitting! Model Evaluation
###Code
y_pred = (X_train * np.matrix(result.x).T)
mae = mean_absolute_error(y_train, y_pred)
mse = mean_squared_error(y_train, y_pred)
r2 = r2_score(y_train, y_pred)
print('Training Error')
print('Mean Absolute Error: {:.4}; Mean Squared Error: {:.4}; R^2 score: {:.4}'.format(mae, mse, r2))
print(' ')
y_pred_test = (X_test * np.matrix(result.x).T)
mae = mean_absolute_error(y_test, y_pred_test)
mse = mean_squared_error(y_test, y_pred_test)
r2 = r2_score(y_test, y_pred_test)
print('Test Error')
print('Mean Absolute Error: {:.4}; Mean Squared Error: {:.4}; R^2 score: {:.4}'.format(mae, mse, r2))
print(' ')
y_pred_val = (X_val * np.matrix(result.x).T)
mae = mean_absolute_error(y_val, y_pred_val)
mse = mean_squared_error(y_val, y_pred_val)
r2 = r2_score(y_val, y_pred_val)
print('Val Error')
print('Mean Absolute Error: {:.4}; Mean Squared Error: {:.4}; R^2 score: {:.4}'.format(mae, mse, r2))
print(' ')
###Output
Val Error
Mean Absolute Error: 5.257; Mean Squared Error: 41.45; R^2 score: 0.7436
###Markdown
The overfitting is pretty clear now. The model has a much lower MSE for the training data, but performs poorly when applied to the test and val sets. Linear Regression with Regularization and Non-linear Features with sklearn In this section I will add non-linear features up to fourth order using sklearn's PolynomialFeatures. Non-Linear Feature Generation
###Code
poly = PolynomialFeatures(4) # for 4th order polynomial feature generation
X_train = np.matrix(data['X'])
X_test = np.matrix(data['Xtest'])
X_val = np.matrix(data['Xval'])
X_train = poly.fit_transform(X_train)
X_test = poly.fit_transform(X_test)
X_val = poly.fit_transform(X_val)
y_train = np.matrix(data['y'])
y_test = np.matrix(data['ytest'])
y_val = np.matrix(data['yval'])
#Initialize theta - needs to be an array to work with scipy.optimize functions
theta = np.zeros(X_train.shape[1])
theta.shape
###Output
_____no_output_____
###Markdown
Regularized Linear Regression with sklearn `LinearRegression` in sklearn does not include regularization. Instead, we have to explicitly call either [Ridge](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.htmlsklearn.linear_model.Ridge) (L2) or [Lasso](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.htmlsklearn.linear_model.Lasso) (L1) functions. See the documentation for more information. The first parameter for these objects is `alpha` which is the same as $\lambda$ in Andrew Ng's course. `alpha` is the true regularization strength, while $C$ (which appears in LogisticRegression, SVM, and other classification techniques) is the inverse regularization strength (i.e. $C$ = 1/$\lambda$).
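A quick numeric restatement of that inverse relationship (purely illustrative):

```python
# C (used by LogisticRegression/SVM) is the inverse of alpha (used by Ridge/Lasso)
C = 0.01            # a strongly regularized classifier setting
alpha = 1 / C       # the equivalent strength expressed as alpha
print(alpha)        # 100.0
```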
###Code
#unregularized ordinary least squares linear regression
linreg = LinearRegression(fit_intercept=False).fit(X_train, y_train)
# Ridge regression with regulatization strength, alpha=1
rdg_1 = Ridge(alpha=1.0, fit_intercept=False).fit(X_train, y_train)
# Ridge regression with regulatization strength, alpha=1
rdg_100000 = Ridge(alpha=100000, fit_intercept=False).fit(X_train, y_train)
# Ridge regression with regulatization strength, alpha=1
rdg_p001 = Ridge(alpha=.001, fit_intercept=False).fit(X_train, y_train)
#Generate the predictions
x = np.matrix(np.linspace(-60,60, num=100)).T
x_ = poly.fit_transform(x)
linreg_pred = (x_ * np.matrix(linreg.coef_).T)
rdg_1_pred = (x_ * np.matrix(rdg_1.coef_).T)
rdg_100000_pred = (x_ * np.matrix(rdg_100000.coef_).T)
rdg_p001_pred = (x_ * np.matrix(rdg_p001.coef_).T)
x.shape, linreg_pred.shape
fig, ax = plt.subplots(figsize=(10,6))
plt.scatter(data['X'], data['y'], label='training data') # plot the data
plt.scatter(data['Xtest'], data['ytest'], color=cp[3], label='test data')
plt.scatter(data['Xval'], data['yval'], marker='x', color='black', label='val data')
plt.plot(x, linreg_pred, label='LinearRegression')
plt.plot(x, rdg_p001_pred, label='Ridge, alpha=0.001')
plt.plot(x, rdg_1_pred, label='Ridge, alpha=1')
plt.plot(x, rdg_100000_pred, label='Ridge, alpha=100000')
ax.set_xlabel('Change in Water Level')
ax.set_ylabel('Water Flow')
plt.legend()
# Lasso regression with regulatization strength, alpha=1
lso_1 = Lasso(alpha=1.0, fit_intercept=False).fit(X_train, y_train)
# Lasso regression with regulatization strength, alpha=1
lso_100000 = Lasso(alpha=100000, fit_intercept=False).fit(X_train, y_train)
# Lasso regression with regulatization strength, alpha=1
lso_p001 = Lasso(alpha=.001, fit_intercept=False).fit(X_train, y_train)
#Generate the predictions
x = np.matrix(np.linspace(-60,60, num=100)).T
x_ = poly.fit_transform(x)
lso_1_pred = (x_ * np.matrix(lso_1.coef_).T)
lso_100000_pred = (x_ * np.matrix(lso_100000.coef_).T)
lso_p001_pred = (x_ * np.matrix(lso_p001.coef_).T)
fig, ax = plt.subplots(figsize=(10,6))
plt.scatter(data['X'], data['y'], label='training data') # plot the data
plt.scatter(data['Xtest'], data['ytest'], color=cp[3], label='test data')
plt.scatter(data['Xval'], data['yval'], marker='x', color='black', label='val data')
plt.plot(x, linreg_pred, label='LinearRegression')
plt.plot(x, lso_p001_pred, label='Lasso, alpha=0.001')
plt.plot(x, lso_1_pred, label='Lasso, alpha=1')
plt.plot(x, lso_100000_pred, label='Lasso, alpha=100000')
ax.set_xlabel('Change in Water Level')
ax.set_ylabel('Water Flow')
plt.legend()
###Output
_____no_output_____
###Markdown
We can see from these two graphs that Lasso (L1) and Ridge (L2) regularizations provide quite different results. Lasso applies an absolute-value (L1) penalty to the coefficients and can reduce feature weights to exactly zero. For this reason, Lasso can be more effective when features are highly correlated (although you can't choose which one it reduces the weight for). From the graph you can see that for very strong regularization, Lasso does a better job of suppressing the third- and fourth-order components (a quick coefficient check follows below). Bias vs. Variance Learning Curves Let's use sklearn's capabilities for this. I'm unlikely to ever want to implement this manually for more complex models, so might as well spend the time working with sklearn.
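Before moving on to learning curves, here is the quick check of the sparsity point above; a sketch that reuses the `rdg_100000` and `lso_100000` models fit in the earlier cells:

```python
# Strong L1 drives most of the higher-order weights to exactly zero,
# while strong L2 only shrinks them towards zero.
print('Ridge, alpha=1e5:', np.round(rdg_100000.coef_, 4))
print('Lasso, alpha=1e5:', np.round(lso_100000.coef_, 4))
```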
###Code
X = np.matrix(np.vstack((data['X'], data['Xtest'], data['Xval'])))
y = np.matrix(np.vstack((data['y'], data['ytest'], data['yval'])))
X.shape, y.shape
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.01, 1.0, 10)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
poly = PolynomialFeatures(1)
# setting =1 leaves the original data (no new non-linear features)
# However, PolynomialFeatures will add the bias term for us
X_poly = poly.fit_transform(X)
title = "Linear Regression, Order 1"
# Cross validation with 40 shuffle-split iterations; more splits give smoother mean test and train
# score curves, each time with 30% of the data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=40, test_size=0.3, random_state=0)
estimator = LinearRegression(fit_intercept=False)
plot_learning_curve(estimator, title, X_poly, y, ylim=(0.3, 1), cv=cv, n_jobs=4)
poly = PolynomialFeatures(2)
# degree-2 polynomial features (PolynomialFeatures also adds the bias term for us)
X_poly = poly.fit_transform(X)
title = "Linear Regression, Order 2"
# Cross validation with 40 shuffle-split iterations; more splits give smoother mean test and train
# score curves, each time with 30% of the data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=40, test_size=0.3, random_state=0)
estimator = LinearRegression(fit_intercept=False)
plot_learning_curve(estimator, title, X_poly, y, ylim=(0.3, 1), cv=cv, n_jobs=4)
poly = PolynomialFeatures(3)
# degree-3 polynomial features (PolynomialFeatures also adds the bias term for us)
X_poly = poly.fit_transform(X)
title = "Linear Regression, Order 3"
# Cross validation with 40 shuffle-split iterations; more splits give smoother mean test and train
# score curves, each time with 30% of the data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=40, test_size=0.3, random_state=0)
estimator = LinearRegression(fit_intercept=False)
plot_learning_curve(estimator, title, X_poly, y, ylim=(0.3, 1), cv=cv, n_jobs=4)
poly = PolynomialFeatures(4)
# degree-4 polynomial features (PolynomialFeatures also adds the bias term for us)
X_poly = poly.fit_transform(X)
title = "Linear Regression, Order 4"
# Cross validation with 40 shuffle-split iterations; more splits give smoother mean test and train
# score curves, each time with 30% of the data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=40, test_size=0.3, random_state=0)
estimator = LinearRegression(fit_intercept=False)
plot_learning_curve(estimator, title, X_poly, y, ylim=(0.3, 1), cv=cv, n_jobs=4)
poly = PolynomialFeatures(8)
# degree-8 polynomial features (PolynomialFeatures also adds the bias term for us)
X_poly = poly.fit_transform(X)
title = "Linear Regression, Order 8"
# Cross validation with 40 shuffle-split iterations; more splits give smoother mean test and train
# score curves, each time with 30% of the data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=40, test_size=0.3, random_state=0)
estimator = LinearRegression(fit_intercept=False)
plot_learning_curve(estimator, title, X_poly, y, ylim=(0.3, 1), cv=cv, n_jobs=4)
###Output
_____no_output_____
###Markdown
Model Complexity
###Code
poly_orders = range(1,7)
X_train = data['X']
X_val = data['Xval']
X_test = data['Xtest']
y_train = data['y']
y_val = data['yval']
y_test = data['ytest']
accs_train=[]
accs_test=[]
for p in poly_orders:
poly = PolynomialFeatures(p)
X_train_poly = poly.fit_transform(X_train)
X_test_poly = poly.fit_transform(X_test)
linreg = LinearRegression(fit_intercept=False).fit(X_train_poly, y_train)
accs_train.append(linreg.score(X_train_poly, y_train))
accs_test.append(linreg.score(X_test_poly, y_test))
fig, ax = plt.subplots()
plt.plot(poly_orders, accs_train, '-o')
plt.plot(poly_orders, accs_test, '-o')
ax.set_ylim((.5,1))
ax.set_xlabel('Feature Polynomial Order')
ax.set_ylabel('R$^2$ score')
ax.set_title('Model Complexity Curve')
###Output
_____no_output_____ |
Devel/devel_Split_Plot.ipynb | ###Markdown
First Let's Plot My Results compared to Jack's (N.B. WL_FAST and WL_TLAG are the column headings for Jack's data)
###Code
fig,axs = plt.subplots(2, 2,sharex='col',figsize=(10,10))
plt.subplot(221)
plt.errorbar(data['BAZ'],data['FAST'],yerr=data['DFAST'],fmt='o',elinewidth=0.5)
plt.ylabel('Fast Direction (deg)')
plt.ylim([-90,90])
plt.yticks(np.arange(-90,91,30))
plt.title('My Fast')
plt.subplot(223)
plt.errorbar(data['BAZ'],data['WL_FAST'],yerr=data['WL_DFAST'],fmt='ro',elinewidth=0.5)
plt.ylim([-90,90])
plt.yticks(np.arange(-90,91,30))
plt.title('Jacks Fast')
plt.xlabel('Back Azimuth')
plt.ylabel('Fast Direction (deg)')
plt.subplot(222)
plt.errorbar(data['BAZ'],data['TLAG'],yerr=data['DTLAG'],fmt='o',elinewidth=0.5)
plt.ylabel('Tlag (s)')
plt.ylim([0,4])
plt.title('My Lag')
plt.subplot(224)
plt.errorbar(data['BAZ'],data['WL_TLAG'],yerr=data['WL_DTLAG'],fmt='ro',elinewidth=0.5)
plt.ylim([0,4])
plt.ylabel('Tlag (s)')
plt.xlabel('Back Azimuth')
plt.title('Jacks Lag')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Now we have made the first simple plot, let's try to interrogate the data some more. Let's try to determine the average fast (circular avg) and tlag for my and Jack's measurements, and the difference between individual measurements
###Code
import scipy as scp # Need scipy for the circular mean function
mean_fast = scp.stats.circmean(data['FAST'],high =180, low =-180)
mean_tlag = scp.mean(data['TLAG'])
mean_wl_fast = scp.stats.circmean(data['WL_FAST'],high =180, low =-180)
mean_wl_tlag = scp.mean(data['WL_TLAG'])
print('{:04.2f}, {:-4.2f} is the mean of my measurements'.format(mean_fast,mean_tlag))
print('{:04.2f}, {:-4.2f} is the mean of Jacks measurements'.format(mean_wl_fast,mean_wl_tlag))
fig2,ax2 = plt.subplots(2,1,sharex='col',figsize = (10,10))
plt.subplot(2,1,1)
plt.plot(data.index,data['FAST'],'x')
plt.plot(data.index,data['WL_FAST'],'rx')
plt.ylabel('Fast Direction')
plt.xlabel('Event No.')
plt.subplot(2,1,2)
plt.plot(data.index,data['TLAG'],'x')
plt.plot(data.index,data['WL_TLAG'],'rx')
plt.show()
data.index
t_diff = abs(data['TLAG'] - data['WL_TLAG'])
plt.hist(t_diff,bins=[0,0.25,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.25,2.5,2.75,3.0])#,'bx')
plt.xlabel('Difference in Lat Times (s)')
plt.ylabel('Frequency')
import splitwavepy as sw
with open('/Users/ja17375/Scripts/Python/Splitting_Codes/SKS_Splitting/Eigm_Files/Eigm_NEW_summary.txt','r') as reader:
eigs = []
for line in reader.readlines():
e = line.strip().split('\n')[-1] # Strip off newline characters
eig_load = sw.load('/Users/ja17375/Scripts/Python/Splitting_Codes/SKS_Splitting/Eigm_Files/{}'.format(e))
eigs.append(eig_load) #Creates a list of eigm objects
reader.close()
stacked = sw.measure.Stack(eigs)
stacked.wolfe_silver()
type(axs[1,1])
list(result.values())[1]
result['distance']
dup = JACK['DATE'].duplicated()
dup.count()
JACK[(JACK['TLAG']<1.0)]['TLAG']
###Output
_____no_output_____
###Markdown
The below code fragment can be used to match my splitting observations to Jack's and produce (and save) a new dataframe containing both measurements
###Code
# In order to be able to compare my splitting to jacks, I need to make the date information be the same format.
# For now I will do this conversion here. In the future it may be more useful to directly report dates in Julday format
#
Joe_Jul = [0 for row in range(len(JOSEPH))] # list that will hold the converted dates
# a = []
nans = []
# First lets add some new columns to house the matched splitting data.
JACK_FAST = np.zeros(shape=(len(JOSEPH),1))
JACK_DFAST = JACK_FAST.copy()
JACK_TLAG = JACK_FAST.copy()
JACK_DTLAG = JACK_FAST.copy()
for i in range(0,len(JOSEPH)):
    if not math.isnan(JOSEPH['YEAR'][i]): # If I was not able to attempt a measurement this will be a nan. So we want to exclude these rows
#Create a UTCDateTime object d for each of my observations
d = obspy.core.UTCDateTime(int(JOSEPH['YEAR'][i]),int(JOSEPH['MON'][i]),int(JOSEPH['DAY'][i]),int(JOSEPH['HOUR'][i]),int(JOSEPH['MIN'][i]))#,JOSEPH['SEC'][i])
#Now use the UTCDateTime object to return the dates in the same format used by jack (YYYYJJJ)
Joe_Jul[i] = int(str(d.year)+str(d.julday).zfill(3))
match = (JACK[(JACK['DATE'] == Joe_Jul[i])])
if len(match) != 1:
            raise Exception('More than one Julian Day match for {}'.format(jul))
else:
pass
# Append the index of the row to the variable a. This allows me to later slice the required rows of Jacks data.
# This is the most logical way to do this in a iPython environment but perhaps could be made more efficient if/when
# This is combined into the rest of the module
# a.append(int(match.index.values.tolist()[0]))
b = int(match.index.values.tolist()[0])
(JACK_FAST[i],JACK_DFAST[i],JACK_TLAG[i],JACK_DTLAG[i]) = (JACK['FAST'][b],JACK['DFAST'][b],JACK['TLAG'][b],JACK['DTLAG'][b])
else:
(Joe_Jul[i],JACK_FAST[i],JACK_DFAST[i],JACK_TLAG[i],JACK_DTLAG[i]) = math.nan,math.nan,math.nan,math.nan,math.nan
nans.append(i)
#Do nothing, there is no splitting measurement. Passing a nan into UTCDatetime throws an error.
#Now we have set up Jack's data to be added in the right place, let's add it
comb = JOSEPH.assign(DATE = Joe_Jul, JACK_FAST = JACK_FAST,JACK_DFAST = JACK_DFAST,JACK_TLAG=JACK_TLAG,JACK_DTLAG=JACK_DTLAG)
# Now extract (and slightly re-order) all the columns that I want right now
comparison = comb[['STAT','DATE','FAST','DFAST','TLAG','DTLAG','JACK_FAST','JACK_DFAST','JACK_TLAG','JACK_DTLAG','QUAL']]
comparison=comparison.drop(nans)
comparison.to_csv('Joseph_Jack_splitting_comp.txt',sep= ' ')
comparison[(comparison['QUAL']== 'a')]
import splitwavepy as sw
sw.load('/Users/ja17375/Scripts/Python/Splitting_Codes/SKS_Splitting/Eigm_Files/NEW_2005_02_15_14_42_23.eigm').plot()
pwd
st = obspy.read('./Data/NEW_2007_07_22_10_49_39_*.sac')
st[0].stats
# In this fragment I now want to see if my recorded events match with Jacks. Each of the 7 character date strings should
# be unique so testing for equality should yield a maximum of 1 match.
a = [] # list to hold the indicies of the rows from Jacks data that match
for jul in Joe_Jul:
# The variable match holds the row of Jack's data which match a given julian date string
match = (JACK[(JACK['DATE'] == jul)])
if len(match) != 1:
        raise Exception('More than one Julian Day match for {}'.format(jul))
else:
pass
# Append the index of the row to the variable a. This allows me to later slice the required rows of Jacks data.
# This is the most logical way to do this in a iPython environment but perhaps could be made more efficient if/when
# This is combined into the rest of the module
a.append(int(match.index.values.tolist()[0]))
if len(a) == len(Joe_Jul): #Tests to see if I have found matches for all of my observations
print("All Julian Days Match")
else:
pass
a
JACK.iloc[a,:][['FAST','DFAST','TLAG','DTLAG']] #iloc is a pandas attribute that splices rows given interger positions [row,column]
tst = [1,5,80]
JACK.iloc[match.index[:],:]
JOSEPH_NONAN[['FAST','DFAST','TLAG','DTLAG']]
math.isnan(JOSEPH['YEAR'][1])
###Output
_____no_output_____ |
PTT/Web Scraping.ipynb | ###Markdown
Requests: build various HTTP requests to fetch the data we want from the web server. Beautiful Soup: parse the HTML page (used to extract the data we need from the page).
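A minimal sketch of how the two libraries fit together for this board. The `over18` cookie shortcut and the `div.title a` selector are assumptions based on the page markup shown in the output further down; the cells below do the age-check step explicitly with a POST instead:

```python
import requests
from bs4 import BeautifulSoup

# Fetch the board index, then pull out the article titles and links
resp = requests.get('https://www.ptt.cc/bbs/Gossiping/index.html',
                    cookies={'over18': '1'})   # assumed age-check cookie
soup = BeautifulSoup(resp.text, 'html.parser')
for a in soup.select('div.title a'):
    print(a.text, a.get('href'))
```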
###Code
import pandas as pd
import numpy as np
import os
import requests
from bs4 import BeautifulSoup
###Output
_____no_output_____
###Markdown
Step 1: Paste in the target URL
###Code
Target_URL = 'https://www.ptt.cc/bbs/Gossiping/index.html'
###Output
_____no_output_____
###Markdown
Step 2: Customise the HTTP request headers to avoid being restricted when visiting the site. (Optional)
###Code
URL_headers = { 'user-agent':'Mozilla/5.0(Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36' }
###Output
_____no_output_____
###Markdown
Step 3: Set a request timeout so we don't wait too long for the site to respond. (Optional)
###Code
time = 0.5
###Output
_____no_output_____
###Markdown
Step 4: Fetch the page data. 4-1 Set the cookie (if necessary): before entering this site, the user must first click the 'Are you over 18?' button. Open the browser developer tools (F12) and observe the packet that clicking 'Over 18' sends to the server. 4-2 Check the status of the server's response to the request. 4-3 Fetch the data.
###Code
Cookie_URL = 'https://www.ptt.cc/ask/over18?from=%2Fbbs%2FGossiping%2Findex.html'
# Session() stores the cookies received from the requests we send and sends them along with subsequent requests.
r = requests.Session( )
# Set the cookie payload
payload = { 'from':'/bbs/Gossiping/index.html', 'yes' : 'yes' }
# post( ) sends a POST request
r1 = r.post( Cookie_URL, data = payload )
# Check the status of the web server's response to the post( ) request
print( '網頁伺服器回應 post( ) 請求的狀態:' )
try:
if r1.status_code == 200 :
print( '請求成功!' )
        r1.raise_for_status( ) # Get details of any request error!
except requests.exceptions.RequestException as ex1 :
print( 'HTTP 請求錯誤 : ' + str( ex1 ) )
except requests.exceptions.HTTPError as ex2 :
print( 'HTTP 回應錯誤 : ' + str( ex2 ) )
except requests.exceptions.Timeout as ex3 :
print( 'Timeout 錯誤 : ' + str( ex3 ) )
except requests.exceptions.ConnectionError as ex4 :
print( '網路連線錯誤 : ' + str( ex4 ) )
print( '---------------------' '\n' )
# get( ) sends the HTTP request
r2 = r.get( Target_URL, timeout = time )
# Check the status of the web server's response to the get( ) request
print( '網頁伺服器回應 get( ) 請求的請求狀態:' )
try:
if r2.status_code == 200 :
print( '請求成功!' )
        r2.raise_for_status( ) # Get details of any request error!
except requests.exceptions.RequestException as ex1 :
print( 'HTTP 請求錯誤 : ' + str( ex1 ) )
except requests.exceptions.HTTPError as ex2 :
print( 'HTTP 回應錯誤 : ' + str( ex2 ) )
except requests.exceptions.Timeout as ex3 :
print( 'Timeout 錯誤 : ' + str( ex3 ) )
except requests.exceptions.ConnectionError as ex4 :
print( '網路連線錯誤 : ' + str( ex4 ) )
print( '---------------------' '\n' )
print( '網頁使用的編碼:' + str( r2.encoding ) + '\n' '---------------------' '\n' )
print( '網址:' )
print( r2.url )
print( '---------------------' '\n' )
print( '\n' '網頁原始碼: ' '\n' )
print( r2.text )
###Output
網頁伺服器回應 post( ) 請求的狀態:
請求成功!
---------------------
網頁伺服器回應 get( ) 請求的請求狀態:
請求成功!
---------------------
網頁使用的編碼:utf-8
---------------------
網址:
https://www.ptt.cc/bbs/Gossiping/index.html
---------------------
網頁原始碼:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>看板 Gossiping 文章列表 - 批踢踢實業坊</title>
<link rel="stylesheet" type="text/css" href="//images.ptt.cc/bbs/v2.25/bbs-common.css">
<link rel="stylesheet" type="text/css" href="//images.ptt.cc/bbs/v2.25/bbs-base.css" media="screen">
<link rel="stylesheet" type="text/css" href="//images.ptt.cc/bbs/v2.25/bbs-custom.css">
<link rel="stylesheet" type="text/css" href="//images.ptt.cc/bbs/v2.25/pushstream.css" media="screen">
<link rel="stylesheet" type="text/css" href="//images.ptt.cc/bbs/v2.25/bbs-print.css" media="print">
</head>
<body>
<div id="topbar-container">
<div id="topbar" class="bbs-content">
<a id="logo" href="/bbs/">批踢踢實業坊</a>
<span>›</span>
<a class="board" href="/bbs/Gossiping/index.html"><span class="board-label">看板 </span>Gossiping</a>
<a class="right small" href="/about.html">關於我們</a>
<a class="right small" href="/contact.html">聯絡資訊</a>
</div>
</div>
<div id="main-container">
<div id="action-bar-container">
<div class="action-bar">
<div class="btn-group btn-group-dir">
<a class="btn selected" href="/bbs/Gossiping/index.html">看板</a>
<a class="btn" href="/man/Gossiping/index.html">精華區</a>
</div>
<div class="btn-group btn-group-paging">
<a class="btn wide" href="/bbs/Gossiping/index1.html">最舊</a>
<a class="btn wide" href="/bbs/Gossiping/index39287.html">‹ 上頁</a>
<a class="btn wide disabled">下頁 ›</a>
<a class="btn wide" href="/bbs/Gossiping/index.html">最新</a>
</div>
</div>
</div>
<div class="r-list-container action-bar-margin bbs-screen">
<div class="search-bar">
<form type="get" action="search" id="search-bar">
<input class="query" type="text" name="q" value="" placeholder="搜尋文章⋯">
</form>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511349.A.7CE.html">Re: [問卦] 8+9真的很容易交到女友嗎?</a>
</div>
<div class="meta">
<div class="author">kiske011</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+8%2B9%E7%9C%9F%E7%9A%84%E5%BE%88%E5%AE%B9%E6%98%93%E4%BA%A4%E5%88%B0%E5%A5%B3%E5%8F%8B%E5%97%8E%EF%BC%9F">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Akiske011">搜尋看板內 kiske011 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511358.A.4CB.html">[問卦] 有沒有拜一輩子關公卻被媽祖拖夢的八卦?</a>
</div>
<div class="meta">
<div class="author">eateat14</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E6%9C%89%E6%B2%92%E6%9C%89%E6%8B%9C%E4%B8%80%E8%BC%A9%E5%AD%90%E9%97%9C%E5%85%AC%E5%8D%BB%E8%A2%AB%E5%AA%BD%E7%A5%96%E6%8B%96%E5%A4%A2%E7%9A%84%E5%85%AB%E5%8D%A6%EF%BC%9F">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Aeateat14">搜尋看板內 eateat14 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511365.A.F04.html">Re: [新聞] 郭台銘選總統 工商界不看好</a>
</div>
<div class="meta">
<div class="author">Xanatos01</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E9%83%AD%E5%8F%B0%E9%8A%98%E9%81%B8%E7%B8%BD%E7%B5%B1+%E5%B7%A5%E5%95%86%E7%95%8C%E4%B8%8D%E7%9C%8B%E5%A5%BD">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3AXanatos01">搜尋看板內 Xanatos01 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
(本文已被刪除) [dispptt]
</div>
<div class="meta">
<div class="author">-</div>
<div class="article-menu">
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511459.A.0ED.html">[問卦] 睡在神壇會比較容易被神明託夢嗎?</a>
</div>
<div class="meta">
<div class="author">Mews</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%9D%A1%E5%9C%A8%E7%A5%9E%E5%A3%87%E6%9C%83%E6%AF%94%E8%BC%83%E5%AE%B9%E6%98%93%E8%A2%AB%E7%A5%9E%E6%98%8E%E8%A8%97%E5%A4%A2%E5%97%8E%EF%BC%9F">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3AMews">搜尋看板內 Mews 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511468.A.EDD.html">Re: [問卦] 看到中國網民笑台灣人迷信不能反駁很難過</a>
</div>
<div class="meta">
<div class="author">hips</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%9C%8B%E5%88%B0%E4%B8%AD%E5%9C%8B%E7%B6%B2%E6%B0%91%E7%AC%91%E5%8F%B0%E7%81%A3%E4%BA%BA%E8%BF%B7%E4%BF%A1%E4%B8%8D%E8%83%BD%E5%8F%8D%E9%A7%81%E5%BE%88%E9%9B%A3%E9%81%8E">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Ahips">搜尋看板內 hips 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">3</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511486.A.9B3.html">[問卦] 大腕有什麼必點的菜嗎</a>
</div>
<div class="meta">
<div class="author">twinklebykk</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E5%A4%A7%E8%85%95%E6%9C%89%E4%BB%80%E9%BA%BC%E5%BF%85%E9%BB%9E%E7%9A%84%E8%8F%9C%E5%97%8E">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Atwinklebykk">搜尋看板內 twinklebykk 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511486.A.05B.html">[新聞] 北韓寧邊核能中心又有新動靜</a>
</div>
<div class="meta">
<div class="author">KK10305</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E5%8C%97%E9%9F%93%E5%AF%A7%E9%82%8A%E6%A0%B8%E8%83%BD%E4%B8%AD%E5%BF%83%E5%8F%88%E6%9C%89%E6%96%B0%E5%8B%95%E9%9D%9C">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3AKK10305">搜尋看板內 KK10305 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511502.A.8DF.html">[新聞] 立委提案 放寬自行車可合法載幼童</a>
</div>
<div class="meta">
<div class="author">sheisonmybed</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E7%AB%8B%E5%A7%94%E6%8F%90%E6%A1%88+%E6%94%BE%E5%AF%AC%E8%87%AA%E8%A1%8C%E8%BB%8A%E5%8F%AF%E5%90%88%E6%B3%95%E8%BC%89%E5%B9%BC%E7%AB%A5">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Asheisonmybed">搜尋看板內 sheisonmybed 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">7</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511514.A.281.html">[新聞] 台海緊張!2美眾議員密訪愛國者飛彈 蔡英</a>
</div>
<div class="meta">
<div class="author">semih</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E5%8F%B0%E6%B5%B7%E7%B7%8A%E5%BC%B5%212%E7%BE%8E%E7%9C%BE%E8%AD%B0%E5%93%A1%E5%AF%86%E8%A8%AA%E6%84%9B%E5%9C%8B%E8%80%85%E9%A3%9B%E5%BD%88+%E8%94%A1%E8%8B%B1">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Asemih">搜尋看板內 semih 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">7</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511533.A.905.html">[新聞] 中國在南海填海造島 菲律賓埋怨美國未制止</a>
</div>
<div class="meta">
<div class="author">MagicMoney</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E4%B8%AD%E5%9C%8B%E5%9C%A8%E5%8D%97%E6%B5%B7%E5%A1%AB%E6%B5%B7%E9%80%A0%E5%B3%B6+%E8%8F%B2%E5%BE%8B%E8%B3%93%E5%9F%8B%E6%80%A8%E7%BE%8E%E5%9C%8B%E6%9C%AA%E5%88%B6%E6%AD%A2">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3AMagicMoney">搜尋看板內 MagicMoney 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">2</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511535.A.F64.html">[新聞] 一撞就碎!中國工人控訴工地安全帽像蛋殼</a>
</div>
<div class="meta">
<div class="author">qwe93582</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E4%B8%80%E6%92%9E%E5%B0%B1%E7%A2%8E%EF%BC%81%E4%B8%AD%E5%9C%8B%E5%B7%A5%E4%BA%BA%E6%8E%A7%E8%A8%B4%E5%B7%A5%E5%9C%B0%E5%AE%89%E5%85%A8%E5%B8%BD%E5%83%8F%E8%9B%8B%E6%AE%BC">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Aqwe93582">搜尋看板內 qwe93582 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511546.A.527.html">[問卦] 為什麼水是乳白色?</a>
</div>
<div class="meta">
<div class="author">thewtf</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%82%BA%E4%BB%80%E9%BA%BC%E6%B0%B4%E6%98%AF%E4%B9%B3%E7%99%BD%E8%89%B2%EF%BC%9F">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Athewtf">搜尋看板內 thewtf 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
(本文已被刪除) [zcliu026]
</div>
<div class="meta">
<div class="author">-</div>
<div class="article-menu">
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511602.A.42B.html">Re: [新聞] 郭台銘霸氣宣布參選2020 BBC:若當總統</a>
</div>
<div class="meta">
<div class="author">feng19890809</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E9%83%AD%E5%8F%B0%E9%8A%98%E9%9C%B8%E6%B0%A3%E5%AE%A3%E5%B8%83%E5%8F%83%E9%81%B82020%E3%80%80BBC%EF%BC%9A%E8%8B%A5%E7%95%B6%E7%B8%BD%E7%B5%B1">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Afeng19890809">搜尋看板內 feng19890809 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511607.A.6BF.html">Re: [問卦] 癌症3期,救還是不救? 要400萬喔</a>
</div>
<div class="meta">
<div class="author">maniaque</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%99%8C%E7%97%873%E6%9C%9F%EF%BC%8C%E6%95%91%E9%82%84%E6%98%AF%E4%B8%8D%E6%95%91%3F+%E8%A6%81400%E8%90%AC%E5%96%94">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Amaniaque">搜尋看板內 maniaque 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">5</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511631.A.54D.html">[問卦] 爸爸頭腦比電腦好</a>
</div>
<div class="meta">
<div class="author">haudai</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%88%B8%E7%88%B8%E9%A0%AD%E8%85%A6%E6%AF%94%E9%9B%BB%E8%85%A6%E5%A5%BD">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Ahaudai">搜尋看板內 haudai 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">6</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511663.A.8F3.html">[問卦] 現在有誰唱現場跟錄音室一樣</a>
</div>
<div class="meta">
<div class="author">oooooooo8</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%8F%BE%E5%9C%A8%E6%9C%89%E8%AA%B0%E5%94%B1%E7%8F%BE%E5%A0%B4%E8%B7%9F%E9%8C%84%E9%9F%B3%E5%AE%A4%E4%B8%80%E6%A8%A3">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Aoooooooo8">搜尋看板內 oooooooo8 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">2</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511699.A.C4C.html">[新聞] 驚人內幕曝光! 鮪魚洗澡裸照外流風暴擴</a>
</div>
<div class="meta">
<div class="author">badbadook</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E9%A9%9A%E4%BA%BA%E5%85%A7%E5%B9%95%E6%9B%9D%E5%85%89%EF%BC%81+%E9%AE%AA%E9%AD%9A%E6%B4%97%E6%BE%A1%E8%A3%B8%E7%85%A7%E5%A4%96%E6%B5%81%E9%A2%A8%E6%9A%B4%E6%93%B4">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Abadbadook">搜尋看板內 badbadook 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511702.A.82B.html">Re: [問卦] 馬祖哪裡好玩</a>
</div>
<div class="meta">
<div class="author">mm5566t</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E9%A6%AC%E7%A5%96%E5%93%AA%E8%A3%A1%E5%A5%BD%E7%8E%A9">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Amm5566t">搜尋看板內 mm5566t 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-list-sep"></div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1550749445.A.B40.html">[公告] 八卦板板規(2019.02.21)</a>
</div>
<div class="meta">
<div class="author">seabox</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%85%AC%E5%91%8A%5D+%E5%85%AB%E5%8D%A6%E6%9D%BF%E6%9D%BF%E8%A6%8F%282019.02.21%29">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Aseabox">搜尋看板內 seabox 的文章</a></div>
</div>
</div>
<div class="date"> 2/21</div>
<div class="mark">!</div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f3">98</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1554085856.A.7DF.html">[公告] 四月份置底閒聊區^Q^</a>
</div>
<div class="meta">
<div class="author">RS5566</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%85%AC%E5%91%8A%5D+%E5%9B%9B%E6%9C%88%E4%BB%BD%E7%BD%AE%E5%BA%95%E9%96%92%E8%81%8A%E5%8D%80%5EQ%5E">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3ARS5566">搜尋看板內 RS5566 的文章</a></div>
</div>
</div>
<div class="date"> 4/01</div>
<div class="mark">M</div>
</div>
</div>
</div>
<div class="bbs-screen bbs-footer-message">本網站已依台灣網站內容分級規定處理。此區域為限制級,未滿十八歲者不得瀏覽。</div>
</div>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-32365737-1', {
cookieDomain: 'ptt.cc',
legacyCookieDomain: 'ptt.cc'
});
ga('send', 'pageview');
</script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script src="//images.ptt.cc/bbs/v2.25/bbs.js"></script>
</body>
</html>
###Markdown
Step 5 : Use BeautifulSoup to parse the HTML fetched from the page with the `html.parser` parser.
###Code
soup = BeautifulSoup( r2.text, "html.parser" )
print( soup )
###Output
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<meta content="width=device-width, initial-scale=1" name="viewport"/>
<title>看板 Gossiping 文章列表 - 批踢踢實業坊</title>
<link href="//images.ptt.cc/bbs/v2.25/bbs-common.css" rel="stylesheet" type="text/css"/>
<link href="//images.ptt.cc/bbs/v2.25/bbs-base.css" media="screen" rel="stylesheet" type="text/css"/>
<link href="//images.ptt.cc/bbs/v2.25/bbs-custom.css" rel="stylesheet" type="text/css"/>
<link href="//images.ptt.cc/bbs/v2.25/pushstream.css" media="screen" rel="stylesheet" type="text/css"/>
<link href="//images.ptt.cc/bbs/v2.25/bbs-print.css" media="print" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="topbar-container">
<div class="bbs-content" id="topbar">
<a href="/bbs/" id="logo">批踢踢實業坊</a>
<span>›</span>
<a class="board" href="/bbs/Gossiping/index.html"><span class="board-label">看板 </span>Gossiping</a>
<a class="right small" href="/about.html">關於我們</a>
<a class="right small" href="/contact.html">聯絡資訊</a>
</div>
</div>
<div id="main-container">
<div id="action-bar-container">
<div class="action-bar">
<div class="btn-group btn-group-dir">
<a class="btn selected" href="/bbs/Gossiping/index.html">看板</a>
<a class="btn" href="/man/Gossiping/index.html">精華區</a>
</div>
<div class="btn-group btn-group-paging">
<a class="btn wide" href="/bbs/Gossiping/index1.html">最舊</a>
<a class="btn wide" href="/bbs/Gossiping/index39287.html">‹ 上頁</a>
<a class="btn wide disabled">下頁 ›</a>
<a class="btn wide" href="/bbs/Gossiping/index.html">最新</a>
</div>
</div>
</div>
<div class="r-list-container action-bar-margin bbs-screen">
<div class="search-bar">
<form action="search" id="search-bar" type="get">
<input class="query" name="q" placeholder="搜尋文章⋯" type="text" value=""/>
</form>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511349.A.7CE.html">Re: [問卦] 8+9真的很容易交到女友嗎?</a>
</div>
<div class="meta">
<div class="author">kiske011</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+8%2B9%E7%9C%9F%E7%9A%84%E5%BE%88%E5%AE%B9%E6%98%93%E4%BA%A4%E5%88%B0%E5%A5%B3%E5%8F%8B%E5%97%8E%EF%BC%9F">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Akiske011">搜尋看板內 kiske011 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511358.A.4CB.html">[問卦] 有沒有拜一輩子關公卻被媽祖拖夢的八卦?</a>
</div>
<div class="meta">
<div class="author">eateat14</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E6%9C%89%E6%B2%92%E6%9C%89%E6%8B%9C%E4%B8%80%E8%BC%A9%E5%AD%90%E9%97%9C%E5%85%AC%E5%8D%BB%E8%A2%AB%E5%AA%BD%E7%A5%96%E6%8B%96%E5%A4%A2%E7%9A%84%E5%85%AB%E5%8D%A6%EF%BC%9F">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Aeateat14">搜尋看板內 eateat14 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511365.A.F04.html">Re: [新聞] 郭台銘選總統 工商界不看好</a>
</div>
<div class="meta">
<div class="author">Xanatos01</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E9%83%AD%E5%8F%B0%E9%8A%98%E9%81%B8%E7%B8%BD%E7%B5%B1+%E5%B7%A5%E5%95%86%E7%95%8C%E4%B8%8D%E7%9C%8B%E5%A5%BD">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3AXanatos01">搜尋看板內 Xanatos01 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
(本文已被刪除) [dispptt]
</div>
<div class="meta">
<div class="author">-</div>
<div class="article-menu">
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511459.A.0ED.html">[問卦] 睡在神壇會比較容易被神明託夢嗎?</a>
</div>
<div class="meta">
<div class="author">Mews</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%9D%A1%E5%9C%A8%E7%A5%9E%E5%A3%87%E6%9C%83%E6%AF%94%E8%BC%83%E5%AE%B9%E6%98%93%E8%A2%AB%E7%A5%9E%E6%98%8E%E8%A8%97%E5%A4%A2%E5%97%8E%EF%BC%9F">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3AMews">搜尋看板內 Mews 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511468.A.EDD.html">Re: [問卦] 看到中國網民笑台灣人迷信不能反駁很難過</a>
</div>
<div class="meta">
<div class="author">hips</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%9C%8B%E5%88%B0%E4%B8%AD%E5%9C%8B%E7%B6%B2%E6%B0%91%E7%AC%91%E5%8F%B0%E7%81%A3%E4%BA%BA%E8%BF%B7%E4%BF%A1%E4%B8%8D%E8%83%BD%E5%8F%8D%E9%A7%81%E5%BE%88%E9%9B%A3%E9%81%8E">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Ahips">搜尋看板內 hips 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">3</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511486.A.9B3.html">[問卦] 大腕有什麼必點的菜嗎</a>
</div>
<div class="meta">
<div class="author">twinklebykk</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E5%A4%A7%E8%85%95%E6%9C%89%E4%BB%80%E9%BA%BC%E5%BF%85%E9%BB%9E%E7%9A%84%E8%8F%9C%E5%97%8E">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Atwinklebykk">搜尋看板內 twinklebykk 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511486.A.05B.html">[新聞] 北韓寧邊核能中心又有新動靜</a>
</div>
<div class="meta">
<div class="author">KK10305</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E5%8C%97%E9%9F%93%E5%AF%A7%E9%82%8A%E6%A0%B8%E8%83%BD%E4%B8%AD%E5%BF%83%E5%8F%88%E6%9C%89%E6%96%B0%E5%8B%95%E9%9D%9C">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3AKK10305">搜尋看板內 KK10305 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511502.A.8DF.html">[新聞] 立委提案 放寬自行車可合法載幼童</a>
</div>
<div class="meta">
<div class="author">sheisonmybed</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E7%AB%8B%E5%A7%94%E6%8F%90%E6%A1%88+%E6%94%BE%E5%AF%AC%E8%87%AA%E8%A1%8C%E8%BB%8A%E5%8F%AF%E5%90%88%E6%B3%95%E8%BC%89%E5%B9%BC%E7%AB%A5">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Asheisonmybed">搜尋看板內 sheisonmybed 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">7</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511514.A.281.html">[新聞] 台海緊張!2美眾議員密訪愛國者飛彈 蔡英</a>
</div>
<div class="meta">
<div class="author">semih</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E5%8F%B0%E6%B5%B7%E7%B7%8A%E5%BC%B5%212%E7%BE%8E%E7%9C%BE%E8%AD%B0%E5%93%A1%E5%AF%86%E8%A8%AA%E6%84%9B%E5%9C%8B%E8%80%85%E9%A3%9B%E5%BD%88+%E8%94%A1%E8%8B%B1">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Asemih">搜尋看板內 semih 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">7</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511533.A.905.html">[新聞] 中國在南海填海造島 菲律賓埋怨美國未制止</a>
</div>
<div class="meta">
<div class="author">MagicMoney</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E4%B8%AD%E5%9C%8B%E5%9C%A8%E5%8D%97%E6%B5%B7%E5%A1%AB%E6%B5%B7%E9%80%A0%E5%B3%B6+%E8%8F%B2%E5%BE%8B%E8%B3%93%E5%9F%8B%E6%80%A8%E7%BE%8E%E5%9C%8B%E6%9C%AA%E5%88%B6%E6%AD%A2">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3AMagicMoney">搜尋看板內 MagicMoney 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">2</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511535.A.F64.html">[新聞] 一撞就碎!中國工人控訴工地安全帽像蛋殼</a>
</div>
<div class="meta">
<div class="author">qwe93582</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E4%B8%80%E6%92%9E%E5%B0%B1%E7%A2%8E%EF%BC%81%E4%B8%AD%E5%9C%8B%E5%B7%A5%E4%BA%BA%E6%8E%A7%E8%A8%B4%E5%B7%A5%E5%9C%B0%E5%AE%89%E5%85%A8%E5%B8%BD%E5%83%8F%E8%9B%8B%E6%AE%BC">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Aqwe93582">搜尋看板內 qwe93582 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511546.A.527.html">[問卦] 為什麼水是乳白色?</a>
</div>
<div class="meta">
<div class="author">thewtf</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%82%BA%E4%BB%80%E9%BA%BC%E6%B0%B4%E6%98%AF%E4%B9%B3%E7%99%BD%E8%89%B2%EF%BC%9F">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Athewtf">搜尋看板內 thewtf 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
(本文已被刪除) [zcliu026]
</div>
<div class="meta">
<div class="author">-</div>
<div class="article-menu">
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511602.A.42B.html">Re: [新聞] 郭台銘霸氣宣布參選2020 BBC:若當總統</a>
</div>
<div class="meta">
<div class="author">feng19890809</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E9%83%AD%E5%8F%B0%E9%8A%98%E9%9C%B8%E6%B0%A3%E5%AE%A3%E5%B8%83%E5%8F%83%E9%81%B82020%E3%80%80BBC%EF%BC%9A%E8%8B%A5%E7%95%B6%E7%B8%BD%E7%B5%B1">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Afeng19890809">搜尋看板內 feng19890809 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511607.A.6BF.html">Re: [問卦] 癌症3期,救還是不救? 要400萬喔</a>
</div>
<div class="meta">
<div class="author">maniaque</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%99%8C%E7%97%873%E6%9C%9F%EF%BC%8C%E6%95%91%E9%82%84%E6%98%AF%E4%B8%8D%E6%95%91%3F+%E8%A6%81400%E8%90%AC%E5%96%94">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Amaniaque">搜尋看板內 maniaque 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">5</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511631.A.54D.html">[問卦] 爸爸頭腦比電腦好</a>
</div>
<div class="meta">
<div class="author">haudai</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%88%B8%E7%88%B8%E9%A0%AD%E8%85%A6%E6%AF%94%E9%9B%BB%E8%85%A6%E5%A5%BD">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Ahaudai">搜尋看板內 haudai 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">6</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511663.A.8F3.html">[問卦] 現在有誰唱現場跟錄音室一樣</a>
</div>
<div class="meta">
<div class="author">oooooooo8</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E7%8F%BE%E5%9C%A8%E6%9C%89%E8%AA%B0%E5%94%B1%E7%8F%BE%E5%A0%B4%E8%B7%9F%E9%8C%84%E9%9F%B3%E5%AE%A4%E4%B8%80%E6%A8%A3">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Aoooooooo8">搜尋看板內 oooooooo8 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">2</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511699.A.C4C.html">[新聞] 驚人內幕曝光! 鮪魚洗澡裸照外流風暴擴</a>
</div>
<div class="meta">
<div class="author">badbadook</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E6%96%B0%E8%81%9E%5D+%E9%A9%9A%E4%BA%BA%E5%85%A7%E5%B9%95%E6%9B%9D%E5%85%89%EF%BC%81+%E9%AE%AA%E9%AD%9A%E6%B4%97%E6%BE%A1%E8%A3%B8%E7%85%A7%E5%A4%96%E6%B5%81%E9%A2%A8%E6%9A%B4%E6%93%B4">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Abadbadook">搜尋看板內 badbadook 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-ent">
<div class="nrec"></div>
<div class="title">
<a href="/bbs/Gossiping/M.1555511702.A.82B.html">Re: [問卦] 馬祖哪裡好玩</a>
</div>
<div class="meta">
<div class="author">mm5566t</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%95%8F%E5%8D%A6%5D+%E9%A6%AC%E7%A5%96%E5%93%AA%E8%A3%A1%E5%A5%BD%E7%8E%A9">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Amm5566t">搜尋看板內 mm5566t 的文章</a></div>
</div>
</div>
<div class="date"> 4/17</div>
<div class="mark"></div>
</div>
</div>
<div class="r-list-sep"></div>
<div class="r-ent">
<div class="nrec"><span class="hl f2">1</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1550749445.A.B40.html">[公告] 八卦板板規(2019.02.21)</a>
</div>
<div class="meta">
<div class="author">seabox</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%85%AC%E5%91%8A%5D+%E5%85%AB%E5%8D%A6%E6%9D%BF%E6%9D%BF%E8%A6%8F%282019.02.21%29">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3Aseabox">搜尋看板內 seabox 的文章</a></div>
</div>
</div>
<div class="date"> 2/21</div>
<div class="mark">!</div>
</div>
</div>
<div class="r-ent">
<div class="nrec"><span class="hl f3">98</span></div>
<div class="title">
<a href="/bbs/Gossiping/M.1554085856.A.7DF.html">[公告] 四月份置底閒聊區^Q^</a>
</div>
<div class="meta">
<div class="author">RS5566</div>
<div class="article-menu">
<div class="trigger">⋯</div>
<div class="dropdown">
<div class="item"><a href="/bbs/Gossiping/search?q=thread%3A%5B%E5%85%AC%E5%91%8A%5D+%E5%9B%9B%E6%9C%88%E4%BB%BD%E7%BD%AE%E5%BA%95%E9%96%92%E8%81%8A%E5%8D%80%5EQ%5E">搜尋同標題文章</a></div>
<div class="item"><a href="/bbs/Gossiping/search?q=author%3ARS5566">搜尋看板內 RS5566 的文章</a></div>
</div>
</div>
<div class="date"> 4/01</div>
<div class="mark">M</div>
</div>
</div>
</div>
<div class="bbs-screen bbs-footer-message">本網站已依台灣網站內容分級規定處理。此區域為限制級,未滿十八歲者不得瀏覽。</div>
</div>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-32365737-1', {
cookieDomain: 'ptt.cc',
legacyCookieDomain: 'ptt.cc'
});
ga('send', 'pageview');
</script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script src="//images.ptt.cc/bbs/v2.25/bbs.js"></script>
</body>
</html>
###Markdown
Step 6 : Use the `select()` method to pick out the data we want from the HTML page.
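The same approach works for any other part of the listing. As a minimal sketch (not part of the original tutorial, with variable names chosen only for illustration), the author IDs could be pulled out of the `<div class="author">` tags shown in the HTML above:

```python
# Sketch: select the author of each post from the page parsed in Step 5.
# Assumes `soup` is the BeautifulSoup object created above.
tag_author = soup.select('div.meta div.author')   # one <div class="author"> per post
authors = [tag.text for tag in tag_author]        # e.g. ['kiske011', 'eateat14', ...]
print(authors)
```

The cell below applies the same idea to the article titles.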
###Code
# Example: extract the article titles
# Take the <a> tag inside each <div class="title">
#     .........
# </div> block and store them in tag_title
tag_title = soup.select( 'div.title a' )
print( tag_title )
###Output
[<a href="/bbs/Gossiping/M.1555511349.A.7CE.html">Re: [問卦] 8+9真的很容易交到女友嗎?</a>, <a href="/bbs/Gossiping/M.1555511358.A.4CB.html">[問卦] 有沒有拜一輩子關公卻被媽祖拖夢的八卦?</a>, <a href="/bbs/Gossiping/M.1555511365.A.F04.html">Re: [新聞] 郭台銘選總統 工商界不看好</a>, <a href="/bbs/Gossiping/M.1555511459.A.0ED.html">[問卦] 睡在神壇會比較容易被神明託夢嗎?</a>, <a href="/bbs/Gossiping/M.1555511468.A.EDD.html">Re: [問卦] 看到中國網民笑台灣人迷信不能反駁很難過</a>, <a href="/bbs/Gossiping/M.1555511486.A.9B3.html">[問卦] 大腕有什麼必點的菜嗎</a>, <a href="/bbs/Gossiping/M.1555511486.A.05B.html">[新聞] 北韓寧邊核能中心又有新動靜</a>, <a href="/bbs/Gossiping/M.1555511502.A.8DF.html">[新聞] 立委提案 放寬自行車可合法載幼童</a>, <a href="/bbs/Gossiping/M.1555511514.A.281.html">[新聞] 台海緊張!2美眾議員密訪愛國者飛彈 蔡英</a>, <a href="/bbs/Gossiping/M.1555511533.A.905.html">[新聞] 中國在南海填海造島 菲律賓埋怨美國未制止</a>, <a href="/bbs/Gossiping/M.1555511535.A.F64.html">[新聞] 一撞就碎!中國工人控訴工地安全帽像蛋殼</a>, <a href="/bbs/Gossiping/M.1555511546.A.527.html">[問卦] 為什麼水是乳白色?</a>, <a href="/bbs/Gossiping/M.1555511602.A.42B.html">Re: [新聞] 郭台銘霸氣宣布參選2020 BBC:若當總統</a>, <a href="/bbs/Gossiping/M.1555511607.A.6BF.html">Re: [問卦] 癌症3期,救還是不救? 要400萬喔</a>, <a href="/bbs/Gossiping/M.1555511631.A.54D.html">[問卦] 爸爸頭腦比電腦好</a>, <a href="/bbs/Gossiping/M.1555511663.A.8F3.html">[問卦] 現在有誰唱現場跟錄音室一樣</a>, <a href="/bbs/Gossiping/M.1555511699.A.C4C.html">[新聞] 驚人內幕曝光! 鮪魚洗澡裸照外流風暴擴</a>, <a href="/bbs/Gossiping/M.1555511702.A.82B.html">Re: [問卦] 馬祖哪裡好玩</a>, <a href="/bbs/Gossiping/M.1550749445.A.B40.html">[公告] 八卦板板規(2019.02.21)</a>, <a href="/bbs/Gossiping/M.1554085856.A.7DF.html">[公告] 四月份置底閒聊區^Q^</a>]
###Markdown
Step 7 : Print the URL and title of each article.
###Code
for title in tag_title:
print( title["href"], title.text )
###Output
/bbs/Gossiping/M.1555511349.A.7CE.html Re: [問卦] 8+9真的很容易交到女友嗎?
/bbs/Gossiping/M.1555511358.A.4CB.html [問卦] 有沒有拜一輩子關公卻被媽祖拖夢的八卦?
/bbs/Gossiping/M.1555511365.A.F04.html Re: [新聞] 郭台銘選總統 工商界不看好
/bbs/Gossiping/M.1555511459.A.0ED.html [問卦] 睡在神壇會比較容易被神明託夢嗎?
/bbs/Gossiping/M.1555511468.A.EDD.html Re: [問卦] 看到中國網民笑台灣人迷信不能反駁很難過
/bbs/Gossiping/M.1555511486.A.9B3.html [問卦] 大腕有什麼必點的菜嗎
/bbs/Gossiping/M.1555511486.A.05B.html [新聞] 北韓寧邊核能中心又有新動靜
/bbs/Gossiping/M.1555511502.A.8DF.html [新聞] 立委提案 放寬自行車可合法載幼童
/bbs/Gossiping/M.1555511514.A.281.html [新聞] 台海緊張!2美眾議員密訪愛國者飛彈 蔡英
/bbs/Gossiping/M.1555511533.A.905.html [新聞] 中國在南海填海造島 菲律賓埋怨美國未制止
/bbs/Gossiping/M.1555511535.A.F64.html [新聞] 一撞就碎!中國工人控訴工地安全帽像蛋殼
/bbs/Gossiping/M.1555511546.A.527.html [問卦] 為什麼水是乳白色?
/bbs/Gossiping/M.1555511602.A.42B.html Re: [新聞] 郭台銘霸氣宣布參選2020 BBC:若當總統
/bbs/Gossiping/M.1555511607.A.6BF.html Re: [問卦] 癌症3期,救還是不救? 要400萬喔
/bbs/Gossiping/M.1555511631.A.54D.html [問卦] 爸爸頭腦比電腦好
/bbs/Gossiping/M.1555511663.A.8F3.html [問卦] 現在有誰唱現場跟錄音室一樣
/bbs/Gossiping/M.1555511699.A.C4C.html [新聞] 驚人內幕曝光! 鮪魚洗澡裸照外流風暴擴
/bbs/Gossiping/M.1555511702.A.82B.html Re: [問卦] 馬祖哪裡好玩
/bbs/Gossiping/M.1550749445.A.B40.html [公告] 八卦板板規(2019.02.21)
/bbs/Gossiping/M.1554085856.A.7DF.html [公告] 四月份置底閒聊區^Q^
|
.ipynb_checkpoints 2/08_InputOutput-checkpoint.ipynb | ###Markdown
Please download the new class notes.

Step 1 : Navigate to the directory where your files are stored.
Open a terminal. Using `cd`, navigate to *inside* the ILAS_Python_for_everyone folder on your computer.

Step 3 : Update the course notes by downloading the changes.
In the terminal type:
>`git add -A`
>`git commit -m "commit"`
>`git fetch upstream`
>`git merge -X theirs upstream/master`

Reading Input and Generating and Storing Output
- Reading and Writing Text Files
    - Writing Files
    - Reading Files
    - Appending Files
    - Automatically Closing Files using `with`
    - Reading and Editing Files
    - Binary Files ("Pickling" Data)
- Plotting
    - Matplotlib
    - Line and Scatter Plots
        - Adding Labels, Legend and Title
        - Setting the Axis Limits
    - Multiple Plots
    - Saving a Plot
    - Importing Data from Delimited Files
    - Bar Charts and Histograms
        - Bar Charts
        - Histograms
    - Visualising 2D Arrays
- Summary
- Test-Yourself Exercises

Lesson Goal

Read information from external files, and store output from your programs as:
- text files
- graphs or plots.

Fundamental programming concepts
- Importing and exporting data from your Python programs.

Until now, we have only considered data that remains within the Python program. When we run a program, variables are created, used, and no longer exist when the program finishes or is terminated. We cannot access the information from outside of the Python program. It can be useful to access data created by a program.

In a game we might want to:
- store a player's score to continue playing the game later
- store a leaderboard of top scores

For an assignment or project it can be useful to store output from your program as:
- a graph
- text

In last week's class we learnt to import files containing data in the form of:
- sound
- images

Reading and Writing Text Files

Python has a collection of easy to use functions for reading and writing files:
- `open()`
- `read()`
- `write()`
- `close()`

Before a file can be read or written to, it must be opened using the `open()` function.

__Function arguments:__
1. Path to the file (filename and location)
2. The *mode specifier* with which to open the file:
    - `r` : open an existing file to read.
    - `w` : open a file to write to. If no file exists: creates a new file. If the file exists: over-writes previous contents.
    - `a` : open a file to write to. If no file exists: creates a new file. If the file exists: appends text to the end of the file.
    - `r+` : open a text file to read from __or__ write to. The file must already exist; writing over-writes existing characters from the current position.
    - `w+` : open a text file to read from __or__ write to. If no file exists: creates a new file. If the file exists: over-writes previous contents.
    - `a+` : open a text file to read from __or__ write to. If no file exists: creates a new file. If the file exists: appends text to the end of the file.

Once the file is open, it creates a *file object*. We can interrogate the file for various details, including whether it is open or closed:
- `name` : Name of the opened file
- `mode` : Mode specifier with which the file was opened
- `closed` : Boolean True or False
- `readable( )` : Read permission, Boolean True or False
- `writable( )` : Write permission, Boolean True or False

__Example__

Once a file has been opened, its contents can be read, overwritten, or added to, depending on the mode specifier used to open it. We are going to open an example file as if we were going to *write* some data to it. The mode specifier is `w`. (A short sketch contrasting the different mode specifiers follows below; the cells after it work through the example.)
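As a preview, here is a minimal sketch (not part of the original notes) contrasting the `w`, `a` and `r` modes. The file name `modes_demo.txt` is only an illustration:

```python
# Sketch (not part of the original notes): how the main mode specifiers differ.
f = open("sample_data/modes_demo.txt", "w")   # 'w' creates the file / over-writes it
f.write("first line\n")
f.close()

f = open("sample_data/modes_demo.txt", "a")   # 'a' appends to the end of the file
f.write("second line\n")
f.close()

f = open("sample_data/modes_demo.txt", "r")   # 'r' reads an existing file
print(f.read())                               # prints: first line, then second line
f.close()
```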
###Code
file = open("sample_data/my_file.txt", "w" )
###Output
_____no_output_____
###Markdown
Once the file is open, we can *interrogate* it for information.
###Code
# Interrogate the file:
print( "File Name:", file.name )
print( "Open Mode:", file.mode )
print( "Readable:", file.readable())
print( "Writable:", file.writable())
###Output
File Name: sample_data/my_file.txt
Open Mode: w
Readable: False
Writable: True
###Markdown
An open file must always be closed again by calling the `close()` method. Let's first write a function to show us whether a file is open or closed.
###Code
# Write a function to determine the file's status:
def get_status( f ):
if ( f.closed != False ) :
return "Closed"
else :
return "Open"
###Output
_____no_output_____
###Markdown
Why do we need to close a file?
1. Python does not automatically close the file. Leaving it open means that you risk overwriting information.
1. Closing the file saves any changes to it.
1. Depending on your computer's operating system, you may not be able to open a file simultaneously for reading and writing.

__Example:__ If a program attempts to open a file that is already open (has not been closed), an error may be generated.

Let's try out our function.
###Code
print( "File Status:" , get_status(file))
file.close()
print( "File Status:" , get_status(file))
###Output
File Status: Open
File Status: Closed
###Markdown
__Try it yourself__
1. Save the previous 4 code cells' contents to a file, `interrogate_file.py`.
1. Correct the following line to specify the path to `my_file.txt` *relative* to the location of `interrogate_file.py`:
>`file = open("sample_data/my_file.txt", "w" )`
1. Run `interrogate_file.py` from the terminal to see the file get created and display information about `my_file.txt`.

(A sketch of one possible `interrogate_file.py` follows below.)
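A minimal sketch of what `interrogate_file.py` could look like, assembled from the cells above. The relative path is an assumption and may need adjusting for your own directory layout:

```python
# interrogate_file.py -- sketch of one possible solution
file = open("sample_data/my_file.txt", "w")

# Interrogate the file:
print("File Name:", file.name)
print("Open Mode:", file.mode)
print("Readable:", file.readable())
print("Writable:", file.writable())

# Report the file's status before and after closing it:
def get_status(f):
    if f.closed != False:
        return "Closed"
    else:
        return "Open"

print("File Status:", get_status(file))
file.close()
print("File Status:", get_status(file))
```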
###Code
poem = 'I never saw a man who looked\nWith such a wistful eye\n'
print(poem)
###Output
I never saw a man who looked
With such a wistful eye
###Markdown
A string may be *concatenated* (stuck together end to end) using the `+=` operator. This allows us to build the string up over the course of the program.
###Code
poem += "Upon that little tent of blue\n"
poem += "Which prisoners call the sky\n"
print(poem)
###Output
I never saw a man who looked
With such a wistful eye
Upon that little tent of blue
Which prisoners call the sky
Upon that little tent of blue
Which prisoners call the sky
###Markdown
Create an object to __write__ the string to a file:
###Code
file = open("sample_data/poem.txt", "w" )
###Output
_____no_output_____
###Markdown
Write the string to the file, then close it.
###Code
file.write(poem)
file.close()
###Output
_____no_output_____
###Markdown
You can open the file using a text editor to confirm its contents.

Reading Files

Create an object to __read__ the string from the containing file. (Note: as we have finished using the object name `file` to open `poem.txt` for writing, we can recycle it to open the file for reading.)
###Code
file = open("sample_data/poem.txt", "r" )
###Output
_____no_output_____
###Markdown
Read everything in the file:
###Code
print(file.read())
file.close()
###Output
I never saw a man who looked
With such a wistful eye
Upon that little tent of blue
Which prisoners call the sky
###Markdown
Iteration can be used to quickly and efficiently read file contents line by line. In a `for` loop, the lines of a file are treated like the elements of a list, i.e. in the example below, `file` and `line` can be replaced with any variable name.
###Code
file = open("sample_data/poem.txt", "r" )
for line in file:
print(line, end="")
file.close()
###Output
I never saw a man who looked
With such a wistful eye
Upon that little tent of blue
Which prisoners call the sky
###Markdown
We can also look at individual words of a file using `split()`:
###Code
file = open("sample_data/poem.txt", "r" )
print(file.read().split())
file.close()
###Output
['I', 'never', 'saw', 'a', 'man', 'who', 'looked', 'With', 'such', 'a', 'wistful', 'eye', 'Upon', 'that', 'little', 'tent', 'of', 'blue', 'Which', 'prisoners', 'call', 'the', 'sky']
###Markdown
The items are returned as a list, so we can use a for loop to print each word in a more readable format.
###Code
file = open("sample_data/poem.txt", "r" )
for word in file.read().split() :
print(word, end=" ")
file.close()
###Output
I never saw a man who looked With such a wistful eye Upon that little tent of blue Which prisoners call the sky
###Markdown
What does this code do?
>`print(word, end=" ")`

Note that the default ending for each print statement is changed to a space; otherwise each word prints on a new line.

`split` is also useful when we use the `input` function.
###Code
user_input = input("enter 3 words : ")
###Output
enter 3 words : hemma f th
###Markdown
The words are treated as a single string, e.g. element 0 is the first character:
###Code
print(user_input[0])
###Output
_____no_output_____
###Markdown
`split` allows us to access the individual words.
###Code
for word in user_input.split() :
print(word, end=" ")
###Output
_____no_output_____
###Markdown
Appending Files

*Add* data to a file (rather than overwriting it).
###Code
file = open( "sample_data/poem.txt" , "a" )
file.write( "(by Oscar Wilde)" )
###Output
_____no_output_____
###Markdown
Notice that if you open `poem.txt` in a text editor at this stage, your changes have not appeared yet. Run:
###Code
file.close()
###Output
_____no_output_____
###Markdown
The changes should now have been saved and will appear if you re-open the file in a text editor. This means you can update the same file by writing data to it at multiple points in your program.
###Code
# File append 1
file = open("sample_data/appended_file.csv", "a" )
file.write('word\n')
file.close()
# File append 2
file = open("sample_data/appended_file.csv", "a" )
file.write('1, 3, 5\n')
file.close()
# File append 3
file = open("sample_data/appended_file.csv", "a" )
file.write('word\n')
file.write('1, 3, 5')
file.close()
###Output
_____no_output_____
###Markdown
__Try it yourself__
Practise writing and reading data by:
- Creating a Python file, `write_and_read.py`.
- Using `write_and_read.py` to produce a .txt file, `w_and_r.txt`, in the sample_data directory.
- Editing `write_and_read.py` and running the file to write a string to `w_and_r.txt`.
- Editing `write_and_read.py` to read and print the contents of `w_and_r.txt`.

Automatically Closing Files Using `with`

It is good practice to use the Python `with` keyword to group file operations within a block. This ensures the file is properly closed after the operations in the block have been completed, even if an exception or error occurs within the block.

Example : `with`

Take an example string...
###Code
twister = "How can a clam cram in a clean cream can?"
###Output
_____no_output_____
###Markdown
In the `with` block:
- Write `twister` into a file.
- Display the file’s current status.
###Code
with open("sample_data/twister.txt" , "w" ) as file :
file.write(twister)
print(f"File Now Closed?: {file.closed}")
print(f"File Now Closed?: {file.closed}")
###Output
File Now Closed?: False
File Now Closed?: True
###Markdown
Reading and Editing Files

We will now study some more functions to read and edit a file that has been opened with a mode specifier that allows reading:
- `read()`
- `seek()`
- `tell()`
- `truncate()`

`read()`
We studied `read()` earlier in today's class. By default `read()` will read the entire contents of the file from the beginning (index 0) to the end (index -1). An optional integer argument can be used to specify *how many characters to read*.

`seek()`
The position within a file:
- from which to read, or
- at which to write

can be selected using `seek()`. An integer argument is used to specify *how many characters to offset* the position from the start of the file.

`tell()`
The current position within a file can be returned as an integer location index at any time using `tell()`.

`truncate()`
Deletes all characters after the current position.

__Example :__ `read()`, `seek()`, `tell()` and `truncate()`. Let's first write some data to a file:
###Code
with open("sample_data/twister.txt" , "w" ) as file :
file.write("How can a clam cram in a clean cream can?")
###Output
_____no_output_____
###Markdown
Now we are going to read and edit the data. We first:
- open the file using a mode specifier to read *and* write
- print the current position in the file:
###Code
file = open("sample_data/twister.txt" , "r+")
print("Position In File Now:", file.tell())
###Output
Position In File Now: 0
###Markdown
The position changes once the file has been read.
###Code
# reading the file moves the position
print(file.read())
print("Position In File Now:", file.tell())
###Output
How can a clam cram in a clean cream can?
Position In File Now: 41
###Markdown
Let's move to a specific position, 31 characters into the file: *(This looks for a specific __position__. In next week's class we will learn to look for a specific __word__.)*
###Code
# seek
file.seek( 31 )
print("Position In File Now:", file.tell())
###Output
Position In File Now: 31
###Markdown
Position 31 is just before the words `'cream can?'`. We can see this by reading the file at this point.
###Code
print(file.read())
###Output
cream can?
###Markdown
We are going to replace `'cream can?'` with `'caravan?'`. We are now at the end of the file, so let's go back to position 31 and write `'caravan?'`.
###Code
# write starting at new position
file.seek( 31 )
file.write("caravan?")
###Output
_____no_output_____
###Markdown
What does the whole file look like now? We can go back to position 0 and read the file to check:
###Code
# return to position 0 and print the file contents
file.seek( 0 )
print(file.read())
###Output
How can a clam cram in a clean caravan?n?
###Markdown
`'cream can?'` has been *overwritten* with `'caravan?'`. However, `'cream can?'` contains more letters than `'caravan?'`, so the extra letters are not overwritten. We can use `truncate()` to remove any trailing characters:
###Code
print(len('caravan?'))
file.seek( 31 + len('caravan?'))
# truncate deletes all characters from current position onwards
file.truncate()
###Output
8
###Markdown
What does the whole file look like now? We can go back to position 0 and read the file to check:
###Code
# return to position 0 and print the file contents
file.seek( 0 )
print(file.read())
###Output
How can a clam cram in a clean caravan?
###Markdown
Finally, remember to close the file:
###Code
file.close()
###Output
_____no_output_____
###Markdown
The full example, using `with`:
###Code
# write file
with open("sample_data/twister.txt" , "w" ) as file :
file.write("How can a clam cram in a clean cream can?")
# read and edit
with open("sample_data/twister.txt" , "r+" ) as file :
# tell
print("Position In File Now:", file.tell())
# reading the file moves the position
print(file.read())
print("Position In File Now:", file.tell())
# seek
file.seek( 31 )
print("Position In File Now:", file.tell())
# write starting at new position
file.write("caravan?")
# truncate deletes all characters from current position onwards
file.truncate()
    # move back to start position
file.seek( 0 )
print("Position In File Now:", file.tell())
# print updated string
print(file.read())
###Output
Position In File Now: 0
How can a clam cram in a clean cream can?
Position In File Now: 41
Position In File Now: 31
Position In File Now: 0
How can a clam cram in a clean caravan?
###Markdown
Alternatively, if we know the word to replace we can use the function `replace()`:
###Code
with open("sample_data/twister.txt" , "w+" ) as file :
# write contents
file.write("How can a clam cram in a clean cream can?")
# go to start of file
file.seek( 0 )
# create a new string with the words replaced
data = file.read().replace("cream can", "caravan")
# go to start of file
file.seek( 0 )
# over-write the old string
file.write(data)
file.truncate()
# go to start of file
file.seek( 0 )
print(file.read())
###Output
How can a clam cram in a clean caravan?
###Markdown
For larger files, a more practical approach is to loop through the file line by line and replace any instances of the target word. Note that `replace()` returns a new string rather than changing the line in place, so the updated lines must be written back to the file.
###Code
with open("sample_data/twister.txt" , "w+" ) as file :
    # write contents
    file.write("How can a clam cram in a clean cream can?")

    # go to start of file
    file.seek( 0 )

    # replace any instances in each line of the file
    # (replace() returns a new string, so collect the updated lines)
    new_lines = [line.replace("cream can", "caravan") for line in file]

    # go back to the start and write the updated lines back
    file.seek( 0 )
    file.writelines(new_lines)
    file.truncate()

    # go to start of file and print the result
    file.seek( 0 )
    print(file.read())
###Output
How can a clam cram in a clean caravan?
###Markdown
A maximum file size can be specified when using `truncate()`:
###Code
with open("sample_data/twister.txt" , "w+" ) as file :
# write contents
file.write("How can a clam cram in a clean cream can?")
# set maximum length of string
file.truncate(10)
# go to start of file
file.seek( 0 )
print(file.read())
###Output
How can a
###Markdown
Importing Data from Delimited Text Files

To read data from text files such as .csv, .txt, .dat, we can use `numpy.loadtxt`.

__Remember__
The file to be loaded must be in the same directory as your Python program. Otherwise you must specify the full path to the data file, using / to separate directory names. The filename (or path plus filename) needs to be between "" quotation marks.

To import the data from `sample_data/sample_data.dat`:
>`0.000 1.053 2.105 3.158 4.211`
>`74.452 48.348 68.733 59.796 54.123`
###Code
import numpy as np
A = np.loadtxt('sample_data/sample_data.dat')
print(A)
print(type(A))
###Output
[[ 1.053 2.105 3.158 4.211 6.065]
[48.348 68.733 59.796 54.123 74.452]]
<class 'numpy.ndarray'>
###Markdown
Purely numerical data is imported as a Numpy array. This is a data structure for storing numerical values. Individual elements, rows or columns can be returned using their index:
###Code
print(A[0][1]) # row 0, col 1
print(A[0, :]) # row 0
print(A[:, 1]) # col 1
###Output
2.105
[1.053 2.105 3.158 4.211 6.065]
[ 2.105 68.733]
###Markdown
The __delimiter__ should (sometimes) be specified. This tells Python how to separate the data into the individual elements of an array. The default delimiter is a space. (Data separated by spaces will automatically be assigned different indices.)

The __data type__ should (sometimes) be specified. This tells Python how to interpret each element of the array. The default data type is `float`. If your data contains items that cannot be expressed as a float, importing it will cause an error unless the data-type is specified. Mixed data types can be imported as `string` values.

Data in `sample_data/sample_student_data.txt`:
- __delimiter__ : tab (`\t`)
- __data type__ : string (float in some columns)

```Python
Subject	Sex	DOB	Height	Weight	BP
(ID)	M/F	dd/mm/yy	m	kg	mmHg
JW-1	M	19/12/1995	1.82	92.4	119/76
JW-2	M	11/01/1996	1.77	80.9	114/73
JW-3	F	02/10/1995	1.68	69.7	124/79
JW-6	M	06/07/1995	1.72	75.5	110/60
JW-7	F	28/03/1996	1.66	72.4	-
JW-9	F	11/12/1995	1.78	82.1	115/75
JW-10	F	07/04/1996	1.6	45	-/-
JW-11	M	22/08/1995	1.72	77.2	97/63
JW-12	M	23/05/1996	1.83	88.9	105/70
JW-14	F	12/01/1996	1.56	56.3	108/72
JW-15	F	01/06/1996	1.64	65	99/67
JW-16	M	10/09/1995	1.63	73	131/84
JW-17	M	17/02/1996	1.67	89.8	101/76
JW-18	M	31/07/1996	1.66	75.1	-/-
JW-19	F	30/10/1995	1.59	67.3	103/69
JW-22	F	09/03/1996	1.7	45	119/80
JW-23	M	15/05/1995	1.97	89.2	124/82
JW-24	F	01/12/1995	1.66	63.8	100/78
JW-25	F	25/10/1995	1.63	64.4	-/-
JW-26	M	17/04/1996	1.69	55	121/82
```
###Code
np.loadtxt('sample_data/sample_student_data.txt', delimiter="\t", dtype=str)
###Output
_____no_output_____
###Markdown
Regions can be selected, for example to select only numerical data.

`skiprows` skips the first n lines. `usecols` specifies which columns to read, with 0 being the first.

`usecols = (1, 4, 5)` : extracts the 2nd, 5th and 6th columns.
`usecols = (3, 4)` : extracts the 4th and 5th columns (height and weight data).
###Code
students = np.loadtxt('sample_data/sample_student_data.txt', skiprows=9, usecols=(3,4))
print(students)
###Output
[[ 1.78 82.1 ]
[ 1.6 45. ]
[ 1.72 77.2 ]
[ 1.83 88.9 ]
[ 1.56 56.3 ]
[ 1.64 65. ]
[ 1.63 73. ]
[ 1.67 89.8 ]
[ 1.66 75.1 ]
[ 1.59 67.3 ]
[ 1.7 45. ]
[ 1.97 89.2 ]
[ 1.66 63.8 ]
[ 1.63 64.4 ]
[ 1.69 55. ]]
###Markdown
You can now use the imported data as a regular Numpy array.
###Code
import matplotlib.pyplot as plt

# Use indexing to select which ROWS and which COLUMNS to plot
plt.plot(students[:, 0], students[:, 1], 'rx', label="students")
# Legend
plt.legend(loc='best', fontsize=12)
# Axes labels
plt.xlabel('Height (m)', fontsize=12)
plt.ylabel('Weight (kg)', fontsize=12)
# Title
plt.title("Height to weight relationship for students in class A", fontsize=18);
###Output
_____no_output_____ |
nbs/dev-07-prediction-confidence-and-intervals.ipynb | ###Markdown
Prediction & Confidence Intervals [](https://notebooks.gesis.org/binder/v2/gh/AyrtonB/Merit-Order-Effect/main?filepath=nbs%2Fdev-07-prediction-confidence-and-intervals.ipynb)This notebook outlines the calculation of the prediction and confidence intervals for the GB and DE price MOE models Imports
###Code
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from moepy import lowess, eda
from moepy.surface import PicklableFunction
from ipypb import track
###Output
_____no_output_____
###Markdown
Great BritainWe'll start by loading and cleaning the data for GB
###Code
df_EI = eda.load_EI_df('../data/raw/electric_insights.csv')
df_EI_model = df_EI[['day_ahead_price', 'demand', 'solar', 'wind']].dropna()
s_price = df_EI_model['day_ahead_price']
s_dispatchable = df_EI_model['demand'] - df_EI_model[['solar', 'wind']].sum(axis=1)
###Output
_____no_output_____
###Markdown
We'll then calculate the estimate for the 68% prediction interval
###Code
def get_pred_intvl(low_q_fp, high_q_fp):
"""Calculates the prediction interval between the low and high quantile models specified"""
smooth_dates_low = pickle.load(open(low_q_fp, 'rb'))
smooth_dates_high = pickle.load(open(high_q_fp, 'rb'))
x_pred = np.linspace(3, 61, 581)
dt_pred = pd.date_range('2009-01-01', '2020-12-31', freq='1D')
df_pred_low = smooth_dates_low.predict(x_pred=x_pred, dt_pred=dt_pred)
df_pred_low.index = np.round(df_pred_low.index, 1)
df_pred_high = smooth_dates_high.predict(x_pred=x_pred, dt_pred=dt_pred)
df_pred_high.index = np.round(df_pred_high.index, 1)
df_pred_intvl = df_pred_high - df_pred_low
return df_pred_intvl
%%time
df_pred_68pct_intvl_GB = get_pred_intvl('../data/models/DAM_price_GB_p16.pkl', '../data/models/DAM_price_GB_p84.pkl')
df_pred_68pct_intvl_GB.head()
###Output
Wall time: 11.4 s
###Markdown
We can see that we get some quantile crossing at the extreme ends of the dispatch curve which is why some of our 68% interval values are negative, to counter this we'll weight our prediction interval by how often that part of the dispatch curve is where the price clears at.
###Code
s_pred_idx_weight = s_dispatchable.round(1).value_counts().sort_index()
dispatchable_gen_idxs = sorted(list(set(s_pred_idx_weight.index).intersection(df_pred_68pct_intvl_GB.index)))
pred_68pct_intvl = np.average(df_pred_68pct_intvl_GB.mean(axis=1).loc[dispatchable_gen_idxs], weights=s_pred_idx_weight.loc[dispatchable_gen_idxs])
print(f'The 68% prediction interval for GB is {round(pred_68pct_intvl, 2)} £/MWh')
###Output
The 68% prediction interval for GB is 16.32 £/MWh
###Markdown
We'll use our bootstrapping helper function to calculate the confidence interval of the GB model
###Code
%%capture
center_dts = pd.date_range(s_price.index.min(), s_price.index.max(), freq='3MS') + pd.Timedelta(days=45)
all_conf_intvl_95pct = []
for center_dt in track(center_dts):
s_price_subset = s_price[center_dt-pd.Timedelta(days=45):center_dt+pd.Timedelta(days=45)]
s_dispatchable_subset = s_dispatchable[center_dt-pd.Timedelta(days=45):center_dt+pd.Timedelta(days=45)]
df_bootstrap = lowess.bootstrap_model(s_price_subset.values, s_dispatchable_subset.values, num_runs=100, frac=0.3, num_fits=10)
conf_intvl_95pct = df_bootstrap.replace(0, np.nan).quantile([0.025, 0.975], axis=1).diff().dropna(how='all').mean(axis=1).iloc[0]
all_conf_intvl_95pct += [conf_intvl_95pct]
conf_intvl_95pct_GB = np.array(all_conf_intvl_95pct).mean()
print(f'The 95% confidence interval for GB is {round(conf_intvl_95pct_GB, 2)} £/MWh')
###Output
The 95% confidence interval for GB is 1.03 £/MWh
###Markdown
GermanyWe'll start by loading and cleaning the data for DE
###Code
%%time
df_DE = eda.load_DE_df('../data/raw/energy_charts.csv', '../data/raw/ENTSOE_DE_price.csv')
df_DE_model = df_DE[['price', 'demand', 'Solar', 'Wind']].dropna()
s_DE_price = df_DE_model['price']
s_DE_demand = df_DE_model['demand']
s_DE_dispatchable = df_DE_model['demand'] - df_DE_model[['Solar', 'Wind']].sum(axis=1)
###Output
Wall time: 1.72 s
###Markdown
We'll then calculate the estimate for the 68% prediction interval
###Code
%%time
df_pred_68pct_intvl_DE = get_pred_intvl('../data/models/DAM_price_DE_p16.pkl', '../data/models/DAM_price_DE_p84.pkl')
s_pred_idx_weight = s_DE_dispatchable.round(1).value_counts().sort_index()
dispatchable_gen_idxs = sorted(list(set(s_pred_idx_weight.index).intersection(df_pred_68pct_intvl_DE.index)))
pred_68pct_intvl = np.average(df_pred_68pct_intvl_DE.mean(axis=1).loc[dispatchable_gen_idxs], weights=s_pred_idx_weight.loc[dispatchable_gen_idxs])
print(f'The 68% prediction interval for DE is {round(pred_68pct_intvl, 2)} EUR/MWh')
###Output
The 68% prediction interval for DE is 13.79 EUR/MWh
Wall time: 1.5 s
###Markdown
We'll use our bootstrapping helper function to calculate the confidence interval of the GB model
###Code
%%capture
center_dts = pd.date_range(s_DE_price.index.min(), s_DE_price.index.max(), freq='3MS') + pd.Timedelta(days=45)
all_conf_intvl_95pct = []
for center_dt in track(center_dts):
s_price_subset = s_DE_price[center_dt-pd.Timedelta(days=45):center_dt+pd.Timedelta(days=45)]
s_dispatchable_subset = s_DE_dispatchable[center_dt-pd.Timedelta(days=45):center_dt+pd.Timedelta(days=45)]
df_bootstrap = lowess.bootstrap_model(s_price_subset.values, s_dispatchable_subset.values, num_runs=100, frac=0.3, num_fits=10)
conf_intvl_95pct = df_bootstrap.replace(0, np.nan).quantile([0.025, 0.975], axis=1).diff().dropna(how='all').mean(axis=1).iloc[0]
all_conf_intvl_95pct += [conf_intvl_95pct]
conf_intvl_95pct_DE = np.array(all_conf_intvl_95pct).mean()
print(f'The 95% confidence interval for DE is {round(conf_intvl_95pct_DE, 2)} EUR/MWh')
###Output
The 95% confidence interval for DE is 1.69 EUR/MWh
|
modulo1/desafios/Desafio_aula3_modulo1.ipynb | ###Markdown
###Code
def mostrar_paleta_de_cores(cmaps):
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
def plot_color_gradients(cmap_category, cmap_list):
# Create figure and adjust figure height to number of colormaps
nrows = len(cmap_list)
figh = 0.35 + 0.15 + (nrows + (nrows - 1) * 0.1) * 0.22
fig, axs = plt.subplots(nrows=nrows + 1, figsize=(6.4, figh))
fig.subplots_adjust(top=1 - 0.35 / figh, bottom=0.15 / figh,
left=0.2, right=0.99)
axs[0].set_title(cmap_category + ' colormaps', fontsize=14)
for ax, name in zip(axs, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
ax.text(-0.01, 0.5, name, va='center', ha='right', fontsize=10,
transform=ax.transAxes)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axs:
ax.set_axis_off()
for cmap_category, cmap_list in cmaps.items():
plot_color_gradients(cmap_category, cmap_list)
plt.show()
def cria_a_coluna_regiao(df):
nome_regioes = {'1': 'Norte', '2': 'Nordeste', '3': 'Sudeste', '4': 'Sul', '5': 'Centro-Oeste'}
df["Regiao"] = list(map(lambda estado_index: nome_regioes[estado_index[0]] , df.index))
milhao = 1e6
###Output
_____no_output_____
###Markdown
Challenges for lesson 3 of module 1
###Code
import pandas as pd
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
# pandas configuration
pd.options.display.float_format ="{:.2f}".format
uri = "https://raw.githubusercontent.com/alura-cursos/agendamento-hospitalar/main/dados/A160324189_28_143_208.csv"
dados = pd.read_csv(uri,
encoding="ISO-8859-1",
skiprows = 3, sep=";",
skipfooter=12,
thousands=".",
decimal=",")
colunas_usaveis = dados.mean().index.tolist()
colunas_usaveis.insert(0, "Unidade da Federação")
dados_usaveis = dados[colunas_usaveis]
dados_usaveis = dados_usaveis.set_index("Unidade da Federação")
dados_usaveis = dados_usaveis.drop("Total", axis=1)
###Output
_____no_output_____
###Markdown
Challenge 01: Choose a more suitable color palette from matplotlib.
###Code
cmaps = OrderedDict()
cmaps['Qualitative'] = ['Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']
mostrar_paleta_de_cores(cmaps)
dados_usaveis.T.plot(figsize=(12, 8), cmap = cmaps['Qualitative'][11])
plt.legend(title= "Estados", loc ='lower right', bbox_to_anchor=(1.25, 0.05))
plt.show()
###Output
_____no_output_____
###Markdown
Challenge 02: Add a column showing the region (Norte, Nordeste, Sul, Sudeste or Centro-Oeste) of each state.
###Code
dados_usaveis_com_regiao = dados_usaveis.copy()
# solution 01
regiao = []
for id in dados_usaveis.index:
if id.startswith("1"):
regiao.append("Norte")
elif id.startswith("2"):
regiao.append("Nordeste")
elif id.startswith("3"):
regiao.append("Sudeste")
elif id.startswith("4"):
regiao.append("Sul")
elif id.startswith("5"):
regiao.append("Centro-Oeste")
dados_usaveis_com_regiao["Regiao"] = regiao
dados_usaveis_com_regiao2 = dados_usaveis.copy()
# solution 02
nome_regioes = {'1': 'Norte', '2': 'Nordeste', '3': 'Sudeste', '4': 'Sul', '5': 'Centro-Oeste'}
dados_usaveis_com_regiao["Regiao"] = list(map(lambda estado_index: nome_regioes[estado_index[0]] , dados_usaveis.index))
dados_usaveis_com_regiao2 = dados_usaveis_com_regiao[["Regiao"] + dados_usaveis.columns[:-1].tolist()] # placing the region as the first column
dados_usaveis_com_regiao.head(2)
###Output
_____no_output_____
###Markdown
Challenge 03: Format the chart of monthly costs for the 5 states, making it pleasant to look at ("bonitão", as Gui puts it)
###Code
dados_usaveis["Total"] = dados_usaveis.sum(axis=1)
ordenado_por_total = dados_usaveis.sort_values("Total", ascending=False)/milhao
ordenado_por_total = ordenado_por_total.drop("Total", axis=1)
colunas_interessadas = ordenado_por_total.columns[6:]
ordenado_por_total = ordenado_por_total[colunas_interessadas]
ordenado_por_total.head(5)
ax = ordenado_por_total.head(5).T.plot(figsize=(12, 8))
ax.set_xlim(0, 150)
ax.set_ylim(0, 350)
ax.set_title("Serie histórica de gastos de saúde por Estados", fontsize=20)
ax.set_ylabel("Custo (Milhões R$)", fontsize=18)
ax.grid(color='silver', linestyle='--')
# removing the leading number from the state name
nomes = ordenado_por_total.head(5).index
nomes = [ nome[3:] for nome in nomes ]
ax.legend(title=None, ncol = 1, fontsize=12, labels = nomes, shadow = True
, facecolor = "silver", edgecolor = 'silver' )
plt.show()
###Output
_____no_output_____
###Markdown
Challenge 04: Add your own state to the 5 states plotted previously. Mine is Rio de Janeiro, so I picked a different one instead, in this case the state of Bahia.
###Code
ordenado_por_total.head(5).T.plot(figsize=(12, 8), ls ="--")
ax = ordenado_por_total.loc["29 Bahia"].T.plot(figsize=(12, 8),
color="black",
lw=2)
ax.set_xlim(0, 150)
ax.set_ylim(0, 350)
ax.set_title("Serie histórica de gastos de saúde por Estados", fontsize=20)
ax.set_ylabel("Custo (Milhões R$)", fontsize=18)
ax.grid(color='silver', linestyle='--')
# removing the leading number from the state name
nomes = ordenado_por_total.head(5).index.tolist() + ["29 Bahia"]
nomes = [ nome[3:] for nome in nomes ]
ax.legend(title=None, ncol = 2, fontsize=12, labels = nomes, shadow = True
, facecolor = "silver", edgecolor = 'silver' )
plt.show()
###Output
_____no_output_____
###Markdown
Challenge 06: Plot the cost chart only for the states of the Southeast (Sudeste) region and check whether the 2013/Feb peaks behaved similarly in all the other states of the region.
###Code
regiao_sudeste = dados_usaveis_com_regiao.query("Regiao == 'Sudeste'").drop("Regiao", axis=1)
#dados_usaveis_com_regiao[dados_usaveis_com_regiao["Regiao"] == "Sudeste"].drop("Regiao", axis=1) # alternative option
colunas_interessadas = ordenado_por_total.columns[5:]
regiao_sudeste = regiao_sudeste[colunas_interessadas]/ milhao
ax = regiao_sudeste.loc[:,'2012/Jan':'2014/Jan'].T.plot(figsize=(12, 8))
ax.set_xlim(0, 24)
ax.set_ylim(0, 350)
#
labels = regiao_sudeste.loc[:,'2012/Jan':'2014/Jan'].columns
x = range(0, len(labels))
plt.xticks(x, labels, rotation=90)
#ax.set_xticklabels(rotation=45)
#ax.set_xticklabels(xlabels, rotation=45)
ax.set_title("Serie histórica de gastos de saúde por Estados", fontsize=20)
ax.set_ylabel("Custo (Milhões R$)", fontsize=18)
ax.grid(color='silver', linestyle='--')
# removing the leading number from the state name
nomes = regiao_sudeste.head(5).index
nomes = [ nome[3:] for nome in nomes ]
ax.legend(title=None, ncol = 1, fontsize=12, labels = nomes, shadow = True
, facecolor = "silver", edgecolor = 'silver' )
plt.show()
###Output
_____no_output_____
###Markdown
The chart shows only one really noticeable peak in the period from Jan/2012 to Jan/2014. That peak belongs to the state of Rio de Janeiro, in Jan/2013. The peaks in the state of São Paulo seem to have a completely different nature from the one in Rio de Janeiro. Challenge 07: Add your chosen state again, make the chart informative, and draw conclusions about your states compared with the others. Draw your conclusions and share them with us.
###Code
ax = regiao_sudeste.loc[:,'2012/Jan':'2014/Jan'].T.plot(figsize=(12, 8), ls ="--")
ax = ordenado_por_total.loc["29 Bahia",'2012/Jan':'2014/Jan'].T.plot(figsize=(12, 8),
color="black",
lw=2)
ax.set_xlim(0, 24)
ax.set_ylim(0, 350)
#
labels = regiao_sudeste.loc[:,'2012/Jan':'2014/Jan'].columns
x = range(0, len(labels))
plt.xticks(x, labels, rotation=90)
ax.set_title("Serie histórica de gastos de saúde por Estados", fontsize=20)
ax.set_ylabel("Custo (Milhões R$)", fontsize=18)
ax.grid(color='silver', linestyle='--')
# removing the leading number from the state name
nomes = regiao_sudeste.index.tolist() + ["29 Bahia"]
nomes = [ nome[3:] for nome in nomes ]
ax.legend(title=None, ncol = 2, fontsize=12, labels = nomes, shadow = True
, facecolor = "silver", edgecolor = 'silver' )
plt.show()
###Output
_____no_output_____ |
ICCT/ENG/examples/03/FD-21_Ball_and_Beam.ipynb | ###Markdown
Control design for a Ball and Beam system The following example is a control design task for a ball and beam system. The structure consists of a ball or cylinder rolling atop a straight beam, which is rotated either by a direct position input or through a driving mechanism. The objective is to control the ball's position. (Figures: direct drive; drive through a mechanism.) While the system is non-linear, and thus falls outside the reach of classical control, after linearization and a set of simplifications it is still possible to control it near its steady state. Keep in mind, though, that results involving larger movements will violate these assumptions. The linearized motion equations are: $$\left(\frac{J}{r^2}+m\right)\cdot\ddot x=-m\cdot g\cdot\alpha\qquad\left(\frac{J}{r^2}+m\right)\cdot\ddot x=-\frac{m\cdot g\cdot d}{L}\cdot\varphi$$Where:$$J=\frac{2}{5}m\cdot r^2$$After the Laplace transformation of the differential equations, the transfer functions can be expressed as:$$G_{dir}(s)=-\frac{m\cdot g}{\left(\frac{J}{r^2}+m\right)\cdot s^2}\qquad G_{mech}(s)=-\frac{m\cdot g\cdot d}{L\cdot\left(\frac{J}{r^2}+m\right)\cdot s^2}$$Your task is to choose a controller type and tune it to an acceptable level of performance! First, choose a system model! Toggle between different realistic models with randomly preselected values (buttons *Model 1* - *Model 6*). By clicking the *Preset* button, default, valid, predetermined controller parameters are set and cannot be tuned further. The two types are formally equivalent due to the simplifications.
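As a quick sanity check of the model above, the following sketch (not part of the original interactive cells) builds the drive-mechanism transfer function with the python-control package and prints its poles; the parameter values are placeholders, and the sign is dropped since it only encodes the chosen positive direction of the angle.
```python
# Minimal sketch of the linearized model, with illustrative parameter values only
import control as ct

m, r = 0.1, 0.05            # ball mass [kg] and radius [m]
d, L = 0.05, 0.5            # lever arm [m] and beam length [m]
g = 9.81                    # gravitational acceleration [m/s^2]
J = 2 / 5 * m * r**2        # moment of inertia of a solid ball

G_mech = ct.tf([m * g * d], [L * (J / (r * r) + m), 0, 0])   # X(s)/phi(s) for the drive mechanism
print(G_mech)
print(ct.pole(G_mech))      # double pole at s = 0: an ideal double integrator
```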
###Code
# Model selector buttons
typeSelect = w.ToggleButtons(
options=[('Direct drive', 0), ('Drive mechanism', 1),],
description='System: ')
display(typeSelect)
# System parameters
g = 9.81 # m/s^2 - gravitational acceleration
# Figure definition
fig1, ((f1_ax1), (f1_ax2)) = plt.subplots(2, 1)
fig1.set_size_inches((9.8, 5))
fig1.set_tight_layout(True)
f1_line1, = f1_ax1.plot([], [])
f1_line2, = f1_ax2.plot([], [])
f1_ax1.grid(which='both', axis='both', color='lightgray')
f1_ax2.grid(which='both', axis='both', color='lightgray')
f1_ax1.autoscale(enable=True, axis='both', tight=True)
f1_ax2.autoscale(enable=True, axis='both', tight=True)
f1_ax1.set_title('Bode magnitude plot', fontsize=11)
f1_ax1.set_xscale('log')
f1_ax1.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=10)
f1_ax1.set_ylabel(r'$A\/$[dB]', labelpad=0, fontsize=10)
f1_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
f1_ax2.set_title('Bode phase plot', fontsize=11)
f1_ax2.set_xscale('log')
f1_ax2.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=10)
f1_ax2.set_ylabel(r'$\phi\/$[°]', labelpad=0, fontsize=10)
f1_ax2.tick_params(axis='both', which='both', pad=0, labelsize=8)
def build_base_model(m, r, d, L, type_select):
J=2/5*m*r*r
if type_select:
W_sys = c.tf([m*g*d], [L*(J/(r*r)+m), 0, 0])
else:
W_sys = c.tf([m*g], [J/(r*r)+m, 0, 0])
print('System transfer function:')
print(W_sys)
# System analysis
poles = c.pole(W_sys) # Poles
print('System poles:\n')
print(poles)
global f1_line1, f1_line2
f1_ax1.lines.remove(f1_line1)
f1_ax2.lines.remove(f1_line2)
mag, phase, omega = c.bode_plot(W_sys, Plot=False) # Bode-plot
f1_line1, = f1_ax1.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')
f1_line2, = f1_ax2.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')
f1_ax1.relim()
f1_ax2.relim()
f1_ax1.autoscale_view()
f1_ax2.autoscale_view()
def update_sliders(index, model):
global m_slider, r_slider, d_slider, L_slider
mval = [0.05, 0.05, 0.1, 0.1, 0.5, 0.5, 0.25]
rval = [0.01, 0.05, 0.05, 0.1, 0.1, 0.15, 0.075]
dval = [0.025, 0.01, 0.05, 0.2, 0.2, 0.4, 0.2]
Lval = [0.1, 0.1, 0.5, 1, 2, 2, 1]
m_slider.value = mval[index]
r_slider.value = rval[index]
d_slider.value = dval[index]
L_slider.value = Lval[index]
if index == -1:
m_slider.disabled = True;
r_slider.disabled = True;
d_slider.disabled = True;
L_slider.disabled = True;
else:
m_slider.disabled = False;
r_slider.disabled = False;
if model == 0:
d_slider.disabled = False;
L_slider.disabled = False;
else:
d_slider.disabled = True;
L_slider.disabled = True;
# GUI widgets
typeSelect2 = w.ToggleButtons(
options=[('Model 1', 0), ('Model 2', 1), ('Model 3', 2), ('Model 4', 3), ('Model 5', 4), ('Model 6', 5),
('Preset', -1)],
value=-1, description='System: ', layout=w.Layout(width='60%'))
m_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=1, description='m [kg] :', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
r_slider = w.FloatLogSlider(value=0.1, base=10, min=-3, max=0, description='r [m] :', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
d_slider = w.FloatLogSlider(value=0.1, base=10, min=-3, max=0, description='d [m] :', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
L_slider = w.FloatLogSlider(value=0, base=10, min=-1, max=2, description='L [m] :', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
input_data = w.interactive_output(build_base_model, {'m':m_slider, 'r':r_slider, 'd':d_slider, 'L':L_slider,
'type_select':typeSelect})
input_data2 = w.interactive_output(update_sliders, {'index':typeSelect2, 'model':typeSelect})
display(typeSelect2, input_data2)
display(w.HBox([w.VBox([m_slider, r_slider], layout=w.Layout(width='45%')),
w.VBox([d_slider, L_slider], layout=w.Layout(width='45%'))]), input_data)
###Output
_____no_output_____
###Markdown
Due to the massive simplifications, the system is reduced to an ideal double integrator. Select an appropriate controller configuration! Which one is the best for your system? Why? Set up your controller for the fastest settling time with no overshoot! You can turn each of the I and D components on or off, and if D is active, you can also apply the first-order filter, based on the derivative time constant.
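For reference, the interactive cell below assembles the controller as a parallel PID with a first-order filter on the derivative term; a stripped-down sketch of that composition with python-control is shown here, with arbitrary example gains (an illustration of the structure only, not a tuned design).
```python
# Sketch of the parallel PID structure with a filtered derivative term (illustrative gains)
import control as ct

Kp, Ti, Td, Fd = 100, 0.1, 0.05, 5            # proportional gain, time constants, filter ratio
W_P = ct.tf([Kp], [1])                        # P term: Kp
W_I = ct.tf([Kp / Ti], [1, 0])                # I term: Kp / (Ti * s)
W_D = ct.tf([Kp * Td, 0], [Td / Fd, 1])       # D term: Kp * Td * s / ((Td / Fd) * s + 1)
W_PID = ct.parallel(W_P, W_I, W_D)
print(W_PID)
```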
###Code
# PID ball balancer
fig2, ((f2_ax1, f2_ax2, f2_ax3), (f2_ax4, f2_ax5, f2_ax6)) = plt.subplots(2, 3)
fig2.set_size_inches((9.8, 5))
fig2.set_tight_layout(True)
f2_line1, = f2_ax1.plot([], [])
f2_line2, = f2_ax2.plot([], [])
f2_line3, = f2_ax3.plot([], [])
f2_line4, = f2_ax4.plot([], [])
f2_line5, = f2_ax5.plot([], [])
f2_line6, = f2_ax6.plot([], [])
f2_ax1.grid(which='both', axis='both', color='lightgray')
f2_ax2.grid(which='both', axis='both', color='lightgray')
f2_ax3.grid(which='both', axis='both', color='lightgray')
f2_ax4.grid(which='both', axis='both', color='lightgray')
f2_ax5.grid(which='both', axis='both', color='lightgray')
f2_ax6.grid(which='both', axis='both', color='lightgray')
f2_ax1.autoscale(enable=True, axis='both', tight=True)
f2_ax2.autoscale(enable=True, axis='both', tight=True)
f2_ax3.autoscale(enable=True, axis='both', tight=True)
f2_ax4.autoscale(enable=True, axis='both', tight=True)
f2_ax5.autoscale(enable=True, axis='both', tight=True)
f2_ax6.autoscale(enable=True, axis='both', tight=True)
f2_ax1.set_title('Closed loop step response', fontsize=9)
f2_ax1.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax1.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax1.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax2.set_title('Nyquist diagram', fontsize=9)
f2_ax2.set_xlabel(r'Re', labelpad=0, fontsize=8)
f2_ax2.set_ylabel(r'Im', labelpad=0, fontsize=8)
f2_ax2.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax3.set_title('Bode magnitude plot', fontsize=9)
f2_ax3.set_xscale('log')
f2_ax3.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=8)
f2_ax3.set_ylabel(r'$A\/$[dB]', labelpad=0, fontsize=8)
f2_ax3.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax4.set_title('Closed loop impulse response', fontsize=9)
f2_ax4.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax4.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax4.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax5.set_title('Load transfer step response', fontsize=9)
f2_ax5.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax5.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax5.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax6.set_title('Bode phase plot', fontsize=9)
f2_ax6.set_xscale('log')
f2_ax6.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=8)
f2_ax6.set_ylabel(r'$\phi\/$[°]', labelpad=0, fontsize=8)
f2_ax6.tick_params(axis='both', which='both', pad=0, labelsize=6)
def position_control(Kp, Ti, Td, Fd, Ti0, Td0, m, r, d, L, type_select):
J=2/5*m*r*r
if type_select:
W_sys = c.tf([m*g*d], [L*(J/(r*r)+m), 0, 0])
else:
W_sys = c.tf([m*g], [J/(r*r)+m, 0, 0])
# PID Controller
P = Kp # Proportional term
I = Kp / Ti # Integral term
D = Kp * Td # Derivative term
Td_f = Td / Fd # Derivative term filter
W_PID = c.parallel(c.tf([P], [1]),
c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),
c.tf([D * Td0, 0], [Td_f * Td0, 1])) # PID controller in time constant format
W_open = c.series(W_PID, W_sys) # Open loop
W_closed = c.feedback(W_open, 1, -1) # Closed loop with negative feedback
if type_select: # Disturbance transfer
W_load = c.feedback (c.tf([1], [J/r/r+m, 0, 0]), c.series(c.tf([m*g*d], [L]), W_PID), -1)
else:
W_load = c.feedback (c.tf([1], [J/r/r+m, 0, 0]), c.series(c.tf([m*g], [1]), W_PID), -1)
# Display
global f2_line1, f2_line2, f2_line3, f2_line4, f2_line5, f2_line6
f2_ax1.lines.remove(f2_line1)
f2_ax2.lines.remove(f2_line2)
f2_ax3.lines.remove(f2_line3)
f2_ax4.lines.remove(f2_line4)
f2_ax5.lines.remove(f2_line5)
f2_ax6.lines.remove(f2_line6)
tout, yout = c.step_response(W_closed)
f2_line1, = f2_ax1.plot(tout, yout, lw=1, color='blue')
_, _, ob = c.nyquist_plot(W_open, Plot=False) # Small resolution plot to determine bounds
real, imag, freq = c.nyquist_plot(W_open, omega=np.logspace(np.log10(ob[0]), np.log10(ob[-1]), 1000), Plot=False)
f2_line2, = f2_ax2.plot(real, imag, lw=1, color='blue')
mag, phase, omega = c.bode_plot(W_open, Plot=False)
f2_line3, = f2_ax3.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')
f2_line6, = f2_ax6.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')
tout, yout = c.impulse_response(W_closed)
f2_line4, = f2_ax4.plot(tout, yout, lw=1, color='blue')
tout, yout = c.step_response(W_load)
f2_line5, = f2_ax5.plot(tout, yout, lw=1, color='blue')
f2_ax1.relim()
f2_ax2.relim()
f2_ax3.relim()
f2_ax4.relim()
f2_ax5.relim()
f2_ax6.relim()
f2_ax1.autoscale_view()
f2_ax2.autoscale_view()
f2_ax3.autoscale_view()
f2_ax4.autoscale_view()
f2_ax5.autoscale_view()
f2_ax6.autoscale_view()
def update_controller(index):
global Kp_slider, Ti_slider, Td_slider, Fd_slider, Ti_button, Td_button
if index == -1:
Kp_slider.value = 100
Td_slider.value = 0.05
Fd_slider.value = 5
Ti_button.value = False
Td_button.value = True
Kp_slider.disabled = True
Ti_slider.disabled = True
Td_slider.disabled = True
Fd_slider.disabled = True
Ti_button.disabled = True
Td_button.disabled = True
else:
Kp_slider.disabled = False
Ti_slider.disabled = False
Td_slider.disabled = False
Fd_slider.disabled = False
Ti_button.disabled = False
Td_button.disabled = False
# GUI widgets
Kp_slider = w.FloatLogSlider(value=2, base=10, min=-3, max=5, description='Kp:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
Ti_slider = w.FloatLogSlider(value=0.0035, base=10, min=-4, max=1, description='', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
Td_slider = w.FloatLogSlider(value=0.25, base=10, min=-4, max=1, description='', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
Fd_slider = w.FloatLogSlider(value=1, base=10, min=0, max=3, description='', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
Ti_button = w.ToggleButton(value=False, description='Ti',
layout=w.Layout(width='auto', flex='1 1 0%'))
Td_button = w.ToggleButton(value=True, description='Td',
layout=w.Layout(width='auto', flex='1 1 0%'))
input_data = w.interactive_output(position_control, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,
'Fd': Fd_slider, 'Ti0' : Ti_button, 'Td0': Td_button,
'm':m_slider, 'r':r_slider,
'd':d_slider, 'L':L_slider, 'type_select':typeSelect})
w.interactive_output(update_controller, {'index': typeSelect2})
display(w.HBox([Kp_slider, Ti_button, Ti_slider, Td_button, Td_slider, Fd_slider]), input_data)
###Output
_____no_output_____
###Markdown
In the following simulation, you can observe the movement of your system based on your controller setup. You can create reference signals and even apply some disturbance and see how the system reacts.Is your configuration suitable for signal-following? Readjust your controller so that it can follow a sine wave acceptably!(The animations are scaled to fit the frame through the whole simulation. Because of this, unstable solutions might not seem to move until the very last second.)
###Code
# Simulation
anim_fig = plt.figure()
anim_fig.set_size_inches((9.8, 6))
anim_fig.set_tight_layout(True)
anim_ax1 = anim_fig.add_subplot(211)
anim_ax2 = anim_ax1.twinx()
frame_count=1000
l1 = anim_ax1.plot([], [], lw=1, color='blue')
l2 = anim_ax1.plot([], [], lw=2, color='red')
l3 = anim_ax2.plot([], [], lw=1, color='grey')
line1 = l1[0]
line2 = l2[0]
line3 = l3[0]
anim_ax1.legend(l1+l2+l3, ['Reference [m]', 'Output [m]', 'Load [N]'], loc=1)
anim_ax1.set_title('Time response simulation', fontsize=12)
anim_ax1.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=10)
anim_ax1.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=10)
anim_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
anim_ax2.set_ylabel(r'$F\/$[N]', labelpad=0, fontsize=10)
anim_ax2.tick_params(axis='both', which='both', pad=0, labelsize=8)
anim_ax1.grid(which='both', axis='both', color='lightgray')
T_plot = []
X_plot = []
D_plot = []
R_plot = []
P_plot = []
# Scene data
scene_ax = anim_fig.add_subplot(212)
scene_ax.set_xlim((-4.75, 4.75))
scene_ax.set_ylim((-1.5, 1.5))
scene_ax.axis('off')
rotation_transform = transforms.Affine2D()
scene_ax.add_patch(patches.Polygon(np.stack(([-3.5, -3.5, -0.5, 0.5, 3.5, 3.5, -3.5],
[0.25, 0.1, -0.25, -0.25, 0.1, 0.25, 0.25])).T,
fill = True, lw=1, ec='black', fc='lightgray', zorder=5,
transform=rotation_transform + scene_ax.transData))
scene_ax.add_patch(patches.Polygon(np.stack(([-0.7, -0.7, 0.7, 0.7, 0.25, -0.25, -0.7],
[-1.1, -1.4, -1.4, -1.1, 0, 0, -1.1])).T,
fill = True, lw=1, ec='black', fc='darkgoldenrod', zorder=0))
scene_ax.add_patch(patches.Circle((0, 0), fill=True, radius=0.03, ec='black', fc='gray', lw=1, zorder=20))
ball = patches.Circle((0, 0.5), fill=True, radius=0.25, ec='black', fc='orange', lw=1,
zorder=5, transform=rotation_transform + scene_ax.transData)
gleam = patches.Wedge((0, 0.5), 0.2, fill=True, width=0.075, theta1=215, theta2=235, lw=0,
ec='white', fc='white', zorder=10, transform=rotation_transform + scene_ax.transData)
scene_ax.add_patch(ball)
scene_ax.add_patch(gleam)
center_drive_belt, = scene_ax.plot([-0.42, -0.15, 0.15, 0.42], [-0.8, 0.05, 0.05, -0.8], color='black', lw=3, zorder=10)
center_drive_1 = patches.Circle((0, 0), fill=True, radius=0.18, ec='black', fc='lawngreen', lw=1, zorder=15)
center_drive_2 = patches.Circle((0, -0.85), fill=True, radius=0.45, ec='black', fc='lawngreen', lw=1, zorder=15)
center_drive_shaft = patches.Circle((0, -0.85), fill=True, radius=0.03, ec='black', fc='gray', lw=1, zorder=25)
center_drive_mark_1 = patches.Wedge((0, -0.85), 0.40, theta1=260, theta2=280, width=0.32,
fill=True, ec='black', fc='white', lw=1, zorder=20)
center_drive_mark_2 = patches.Wedge((0, -0.85), 0.40, theta1=80, theta2=100, width=0.32,
fill=True, ec='black', fc='white', lw=1, zorder=20)
scene_ax.add_patch(center_drive_1)
scene_ax.add_patch(center_drive_2)
scene_ax.add_patch(center_drive_shaft)
scene_ax.add_patch(center_drive_mark_1)
scene_ax.add_patch(center_drive_mark_2)
wheel_transform = transforms.Affine2D()
drive_rod_outline, = scene_ax.plot([3.5, 3.4], [-0.85, 0.175], color='black', solid_capstyle='round', lw=8, zorder=15,
visible=False)
drive_rod, = scene_ax.plot([3.5, 3.4], [-0.85, 0.175], color='deepskyblue', solid_capstyle='round', lw=6, zorder=20,
visible=False)
drive_wheel_rod_outline, = scene_ax.plot([3.05, 3.5], [-0.85, -0.85], color='black', solid_capstyle='round', lw=12, zorder=0,
visible=False)
drive_wheel_rod, = scene_ax.plot([3.05, 3.5], [-0.85, -0.85], color='cyan', solid_capstyle='round', lw=10, zorder=10,
visible=False)
drive_motor_grate, = scene_ax.plot([2.5, 2.5, 2.5833, 2.5833, 2.6666, 2.6666, 2.75],
[-1, -1.25, -1, -1.25, -1, -1.25, -1],
color='black', solid_capstyle='round', lw=1, zorder=5, visible=False)
drive_rod_p1 = patches.Circle((3.4, 0.175), fill=True, radius=0.03, ec='black', fc='gray', lw=1, zorder=25,
transform=rotation_transform + scene_ax.transData, visible=False)
drive_rod_p2 = patches.Circle((3.5, -0.85), fill=True, radius=0.03, ec='black', fc='gray', lw=1, zorder=25,
transform=wheel_transform + scene_ax.transData, visible=False)
drive_wheel = patches.Circle((3.05, -0.85), fill=True, radius=0.25, ec='black', fc='cyan', lw=1, zorder=5, visible=False)
drive_wheel_p = patches.Circle((3.05, -0.85), fill=True, radius=0.03, ec='black', fc='gray', lw=1, zorder=25, visible=False)
drive_motor = patches.Polygon(np.stack(([2.3, 3.2, 3.2, 2.6, 2.3, 2.3],
[-1.4, -1.4, -0.7, -0.7, -0.85, -1.4])).T,
fill = True, lw=1, ec='black', fc='firebrick', zorder=0, visible=False)
scene_ax.add_patch(drive_rod_p1)
scene_ax.add_patch(drive_rod_p2)
scene_ax.add_patch(drive_wheel)
scene_ax.add_patch(drive_wheel_p)
scene_ax.add_patch(drive_motor)
x_arrow = scene_ax.arrow(0, 0.05, 0, 0.15, ec='black', fc='blue', head_width=0.1,
length_includes_head=True, lw=1, fill=True, zorder=10,
transform=rotation_transform + scene_ax.transData)
r_arrow = scene_ax.arrow(0, 0.05, 0, 0.15, ec='black', fc='red', head_width=0.1,
length_includes_head=True, lw=1, fill=True, zorder=10,
transform=rotation_transform + scene_ax.transData)
base_arrow = x_arrow.xy
rot_pos = []
ball_pos = []
ref_pos = []
ball_rot = []
sys_type = 0
#Simulation function
def simulation(Kp, Ti, Td, Fd, Ti0, Td0, m, r, d, L, type_select, T, dt, X, Xf, Xa, Xo, F, Ff, Fa, Fo):
# Controller
P = Kp # Proportional term
I = Kp / Ti # Integral term
D = Kp * Td # Derivative term
Td_f = Td / Fd # Derivative term filter
W_PID = c.parallel(c.tf([P], [1]),
c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),
c.tf([D * Td0, 0], [Td_f * Td0, 1])) # PID controller
# System
J=2/5*m*r*r
if type_select:
W_sys = c.tf([m*g*d], [L*(J/(r*r)+m), 0, 0])
else:
W_sys = c.tf([m*g], [J/(r*r)+m, 0, 0])
# Model
W_open = c.series(W_PID, W_sys) # Open loop
W_closed = c.feedback(W_open, 1, -1) # Closed loop with negative feedback
if type_select: # Disturbance transfer
W_s1 = c.tf([m*g*d], [L])
else:
W_s1 = c.tf([m*g], [1])
W_s2 = c.tf([1], [J/r/r+m, 0, 0])
W_load = c.feedback(W_s2, c.series(W_PID, W_s1), -1)
W_cont_sys = c.feedback(W_PID, W_sys, -1) # Control signal (angle) system component
W_cont_load = c.feedback(c.series(W_s2, c.negate(W_PID)), W_s1, 1) # Control signal (angle) load component
# Reference and disturbance signals
T_sim = np.arange(0, T, dt, dtype=np.float64)
if X == 0: # Constant reference
X_sim = np.full_like(T_sim, Xa * Xo)
elif X == 1: # Sine wave reference
X_sim = (np.sin(2 * np.pi * Xf * T_sim) + Xo) * Xa
elif X == 2: # Square wave reference
X_sim = (np.sign(np.sin(2 * np.pi * Xf * T_sim)) + Xo) * Xa
if F == 0: # Constant load
F_sim = np.full_like(T_sim, Fa * Fo)
elif F == 1: # Sine wave load
F_sim = (np.sin(2 * np.pi * Ff * T_sim) + Fo) * Fa
elif F == 2: # Square wave load
F_sim = (np.sign(np.sin(2 * np.pi * Ff * T_sim)) + Fo) * Fa
elif F == 3: # Noise form load
F_sim = np.interp(T_sim, np.linspace(0, T, int(T * Ff) + 2),
np.random.normal(loc=(Fo * Fa), scale=Fa, size=int(T * Ff) + 2))
# System response
Tx, youtx, xoutx = c.forced_response(W_closed, T_sim, X_sim)
Tf, youtf, xoutf = c.forced_response(W_load, T_sim, F_sim)
R_sim = np.nan_to_num(youtx + youtf)
Tcx, youtcx, xoutcx = c.forced_response(W_cont_sys, T_sim, X_sim)
Tcf, youtcf, xoutcf = c.forced_response(W_cont_load, T_sim, F_sim)
P_sim = np.nan_to_num(youtcx + youtcf)
# Display
XR_max = max(np.amax(np.absolute(np.concatenate((X_sim, R_sim)))), Xa)
F_max = max(np.amax(np.absolute(F_sim)), Fa)
P_max = np.amax(np.absolute(P_sim))
anim_ax1.set_xlim((0, T))
anim_ax1.set_ylim((-1.2 * XR_max, 1.2 * XR_max))
anim_ax2.set_ylim((-1.5 * F_max, 1.5 * F_max))
global T_plot, X_plot, F_plot, R_plot, P_plot, rot_pos, ball_pos, ref_pos, ball_rot, sys_type
T_plot = np.linspace(0, T, frame_count, dtype=np.float32)
X_plot = np.interp(T_plot, T_sim, X_sim)
F_plot = np.interp(T_plot, T_sim, F_sim)
R_plot = np.interp(T_plot, T_sim, R_sim)
P_plot = np.interp(T_plot, T_sim, P_sim)
rot_pos = P_plot / P_max * -10 # The constant sets the apparent maximal tilt of the animation in degrees
ball_pos = R_plot / XR_max * 3.4
ref_pos = X_plot / XR_max * 3.4
ball_rot = ball_pos / np.pi * -360
sys_type = type_select
def anim_init():
line1.set_data([], [])
line2.set_data([], [])
line3.set_data([], [])
ball.set_center((0, 0.5))
gleam.set_center((0, 0.5))
gleam.set_theta1(215)
gleam.set_theta2(235)
center_drive_mark_1.set_theta1(260)
center_drive_mark_1.set_theta2(280)
center_drive_mark_2.set_theta1(80)
center_drive_mark_2.set_theta2(100)
drive_rod_outline.set_data([3.5, 3.4], [-0.85, 0.175])
drive_rod.set_data([3.5, 3.4], [-0.85, 0.175])
drive_wheel_rod_outline.set_data([3.05, 3.5], [-0.85, -0.85])
drive_wheel_rod.set_data([3.05, 3.5], [-0.85, -0.85])
x_arrow.set_xy(base_arrow)
r_arrow.set_xy(base_arrow)
rotation_transform.clear()
wheel_transform.clear()
if sys_type:
center_drive_1.set_visible(False)
center_drive_2.set_visible(False)
center_drive_shaft.set_visible(False)
center_drive_belt.set_visible(False)
center_drive_mark_1.set_visible(False)
center_drive_mark_2.set_visible(False)
drive_rod.set_visible(True)
drive_rod_outline.set_visible(True)
drive_wheel_rod.set_visible(True)
drive_wheel_rod_outline.set_visible(True)
drive_wheel.set_visible(True)
drive_motor.set_visible(True)
drive_rod_p1.set_visible(True)
drive_rod_p2.set_visible(True)
drive_wheel_p.set_visible(True)
drive_motor_grate.set_visible(True)
else:
center_drive_1.set_visible(True)
center_drive_2.set_visible(True)
center_drive_shaft.set_visible(True)
center_drive_belt.set_visible(True)
center_drive_mark_1.set_visible(True)
center_drive_mark_2.set_visible(True)
drive_rod.set_visible(False)
drive_rod_outline.set_visible(False)
drive_wheel_rod.set_visible(False)
drive_wheel_rod_outline.set_visible(False)
drive_wheel.set_visible(False)
drive_motor.set_visible(False)
drive_rod_p1.set_visible(False)
drive_rod_p2.set_visible(False)
drive_wheel_p.set_visible(False)
drive_motor_grate.set_visible(False)
return (line1, line2, line3, ball, gleam, x_arrow, r_arrow, center_drive_1, center_drive_2,
center_drive_shaft, center_drive_belt, center_drive_mark_1, center_drive_mark_2,
drive_rod_outline, drive_rod, drive_wheel_rod_outline, drive_wheel_rod, drive_wheel, drive_motor,
drive_rod_p1, drive_rod_p2, drive_wheel_p, drive_motor_grate,)
def animate(i):
line1.set_data(T_plot[0:i], X_plot[0:i])
line2.set_data(T_plot[0:i], R_plot[0:i])
line3.set_data(T_plot[0:i], F_plot[0:i])
ball.set_center((ball_pos[i], 0.5))
gleam.set_center((ball_pos[i], 0.5))
gleam.set_theta1(215 + ball_rot[i])
gleam.set_theta2(235 + ball_rot[i])
if sys_type:
center_drive_1.set_visible(False)
center_drive_2.set_visible(False)
center_drive_shaft.set_visible(False)
center_drive_belt.set_visible(False)
center_drive_mark_1.set_visible(False)
center_drive_mark_2.set_visible(False)
drive_rod.set_visible(True)
drive_rod_outline.set_visible(True)
drive_wheel_rod.set_visible(True)
drive_wheel_rod_outline.set_visible(True)
drive_wheel.set_visible(True)
drive_motor.set_visible(True)
drive_rod_p1.set_visible(True)
drive_rod_p2.set_visible(True)
drive_wheel_p.set_visible(True)
drive_motor_grate.set_visible(True)
else:
center_drive_1.set_visible(True)
center_drive_2.set_visible(True)
center_drive_shaft.set_visible(True)
center_drive_belt.set_visible(True)
center_drive_mark_1.set_visible(True)
center_drive_mark_2.set_visible(True)
drive_rod.set_visible(False)
drive_rod_outline.set_visible(False)
drive_wheel_rod.set_visible(False)
drive_wheel_rod_outline.set_visible(False)
drive_wheel.set_visible(False)
drive_motor.set_visible(False)
drive_rod_p1.set_visible(False)
drive_rod_p2.set_visible(False)
drive_wheel_p.set_visible(False)
drive_motor_grate.set_visible(False)
center_drive_mark_1.set_theta1(260 + rot_pos[i] / 2.5)
center_drive_mark_1.set_theta2(280 + rot_pos[i] / 2.5)
center_drive_mark_2.set_theta1(80 + rot_pos[i] / 2.5)
center_drive_mark_2.set_theta2(100 + rot_pos[i] / 2.5)
x_arrow.set_xy(base_arrow + [ref_pos[i], 0])
r_arrow.set_xy(base_arrow + [ball_pos[i], 0])
rotation_transform.clear().rotate_deg_around(0, 0, rot_pos[i])
wheel_transform.clear().rotate_deg_around(3.05, -0.85, rot_pos[i] * 9)
drive_rod_outline.set_data(np.stack((wheel_transform.transform_point([3.5, -0.85]),
rotation_transform.transform_point([3.4, 0.175]))).T)
drive_rod.set_data(np.stack((wheel_transform.transform_point([3.5, -0.85]),
rotation_transform.transform_point([3.4, 0.175]))).T)
drive_wheel_rod_outline.set_data(np.stack(([3.05, -0.85], wheel_transform.transform_point([3.5, -0.85]))).T)
drive_wheel_rod.set_data(np.stack(([3.05, -0.85], wheel_transform.transform_point([3.5, -0.85]))).T)
return (line1, line2, line3, ball, gleam, x_arrow, r_arrow, center_drive_1, center_drive_2,
center_drive_shaft, center_drive_belt, center_drive_mark_1, center_drive_mark_2,
drive_rod_outline, drive_rod, drive_wheel_rod_outline, drive_wheel_rod, drive_wheel, drive_motor,
drive_rod_p1, drive_rod_p2, drive_wheel_p, drive_motor_grate,)
anim = animation.FuncAnimation(anim_fig, animate, init_func=anim_init,
frames=frame_count, interval=10, blit=True,
repeat=True)
# Controllers
T_slider = w.FloatLogSlider(value=10, base=10, min=-0.7, max=1, step=0.01,
description='Duration [s]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
dt_slider = w.FloatLogSlider(value=0.1, base=10, min=-3, max=-1, step=0.01,
description='Timestep [s]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
X_type = w.Dropdown(options=[('Constant', 0), ('Sine', 1), ('Square', 2)], value=1,
description='Reference: ', continuous_update=False, layout=w.Layout(width='auto', flex='3 3 auto'))
Xf_slider = w.FloatLogSlider(value=0.5, base=10, min=-2, max=2, step=0.01,
description='Frequency [Hz]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Xa_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=2, step=0.01,
description='Amplitude [m]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Xo_slider = w.FloatSlider(value=0, min=-10, max=10, description='Offset/Ampl:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
F_type = w.Dropdown(options=[('Constant', 0), ('Sine', 1), ('Square', 2), ('Noise', 3)], value=2,
description='Disturbance: ', continuous_update=False, layout=w.Layout(width='auto', flex='3 3 auto'))
Ff_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=2, step=0.01,
description='Frequency [Hz]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Fa_slider = w.FloatLogSlider(value=0.1, base=10, min=-2, max=2, step=0.01,
description='Amplitude [N]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Fo_slider = w.FloatSlider(value=0, min=-10, max=10, description='Offset/Ampl:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
input_data = w.interactive_output(simulation, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,
'Fd': Fd_slider, 'Ti0' : Ti_button, 'Td0': Td_button,
'm':m_slider, 'r':r_slider, 'd':d_slider, 'L':L_slider,
'type_select':typeSelect,
'T': T_slider, 'dt': dt_slider,
'X': X_type, 'Xf': Xf_slider, 'Xa': Xa_slider, 'Xo': Xo_slider,
'F': F_type, 'Ff': Ff_slider, 'Fa': Fa_slider, 'Fo': Fo_slider})
display(w.HBox([w.HBox([T_slider, dt_slider], layout=w.Layout(width='25%')),
w.Box([], layout=w.Layout(width='5%')),
w.VBox([X_type, w.HBox([Xf_slider, Xa_slider, Xo_slider])], layout=w.Layout(width='30%')),
w.Box([], layout=w.Layout(width='5%')),
w.VBox([F_type, w.HBox([Ff_slider, Fa_slider, Fo_slider])], layout=w.Layout(width='30%'))],
layout=w.Layout(width='100%', justify_content='center')), input_data)
###Output
_____no_output_____ |
Coursera/Machine Learning Engineering in Productioon (MLOps)/C2_W1_Lab_1_TFDV_Exercise.ipynb | ###Markdown
Ungraded Lab: TFDV ExerciseIn this notebook, you will get to practice using [TensorFlow Data Validation (TFDV)](https://cloud.google.com/solutions/machine-learning/analyzing-and-validating-data-at-scale-for-ml-using-tfx), an open-source Python package from the [TensorFlow Extended (TFX)](https://www.tensorflow.org/tfx) ecosystem. TFDV helps to understand, validate, and monitor production machine learning data at scale. It provides insight into some key questions in the data analysis process such as:* What are the underlying statistics of my data?* What does my training dataset look like?* How does my evaluation and serving datasets compare to the training dataset?* How can I find and fix data anomalies?The figure below summarizes the usual TFDV workflow:As shown, you can use TFDV to compute descriptive statistics of the training data and generate a schema. You can then validate new datasets (e.g. the serving dataset from your customers) against this schema to detect and fix anomalies. This helps prevent the different types of skew. That way, you can be confident that your model is training on or predicting data that is consistent with the expected feature types and distribution.This ungraded exercise demonstrates useful functions of TFDV at an introductory level as preparation for this week's graded programming exercise. Specifically, you will:- **Generate and visualize statistics from a dataset**- **Detect and fix anomalies in an evaluation dataset**Let's begin! Package Installation and Imports
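To keep the big picture in mind while going through the individual steps, here is a condensed sketch of that workflow using the same TFDV calls exercised one at a time in the rest of this notebook; `train_df` and `eval_df` are the dataframes prepared in the cells below.
```python
# Bird's-eye view of the TFDV workflow used in this lab (the cells below do each step in detail)
train_stats = tfdv.generate_statistics_from_dataframe(train_df)              # 1. compute statistics
schema = tfdv.infer_schema(statistics=train_stats)                           # 2. infer a schema from them
eval_stats = tfdv.generate_statistics_from_dataframe(eval_df)                # 3. statistics for new data
anomalies = tfdv.validate_statistics(statistics=eval_stats, schema=schema)   # 4. validate against the schema
tfdv.display_anomalies(anomalies)                                            # 5. inspect what was flagged
```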
###Code
import tensorflow as tf
import tensorflow_data_validation as tfdv
import pandas as pd
from sklearn.model_selection import train_test_split
from util import add_extra_rows
from tensorflow_metadata.proto.v0 import schema_pb2
print('TFDV Version: {}'.format(tfdv.__version__))
print('Tensorflow Version: {}'.format(tf.__version__))
###Output
TFDV Version: 0.24.1
Tensorflow Version: 2.3.1
###Markdown
Download the datasetYou will be working with the [Census Income Dataset](http://archive.ics.uci.edu/ml/datasets/Census+Income), a dataset that can be used to predict if an individual earns more than or less than 50k US Dollars annually. The summary of attribute names with descriptions/expected values is shown below and you can read more about it [in this data description file.](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names)* **age**: continuous.* **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.* **fnlwgt**: continuous.* **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.* **education-num**: continuous.* **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.* **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.* **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.* **race**: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.* **sex**: Female, Male.* **capital-gain**: continuous.* **capital-loss**: continuous.* **hours-per-week**: continuous.* **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.Let's load the dataset and split it into training and evaluation sets. We will not shuffle them for consistent results in this demo notebook but you should otherwise in real projects.
###Code
# Read in the training and evaluation datasets
df = pd.read_csv('data/adult.data', skipinitialspace=True)
# Split the dataset. Do not shuffle for this demo notebook.
train_df, eval_df = train_test_split(df, test_size=0.2, shuffle=False)
###Output
_____no_output_____
###Markdown
Let's see the first few columns of the train and eval sets.
###Code
# Preview the train set
train_df.head()
# Preview the eval set
eval_df.head()
###Output
_____no_output_____
###Markdown
From these few columns, you can get a first impression of the data. You will notice that most are strings and integers. There are also columns that are mostly zeroes. In the next sections, you will see how to use TFDV to aggregate and process this information so you can inspect it more easily. Adding extra rowsTo demonstrate how TFDV can detect anomalies later, you will add a few extra rows to the evaluation dataset. These are either malformed or have values that will trigger certain alarms later in this notebook. The code to add these can be seen in the `add_extra_rows()` function of `util.py` found in your Jupyter workspace. You can look at it later and even modify it after you've completed the entire exercise. For now, let's just execute the function and add the rows that we've defined by default.
###Code
# add extra rows
eval_df = add_extra_rows(eval_df)
# preview the added rows
eval_df.tail(4)
###Output
_____no_output_____
###Markdown
Generate and visualize training dataset statistics You can now compute and visualize the statistics of your training dataset. TFDV accepts three input formats: TensorFlow’s TFRecord, Pandas Dataframe, and CSV file. In this exercise, you will feed in the Pandas Dataframes you generated from the train-test split. You can compute your dataset statistics by using the [`generate_statistics_from_dataframe()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_dataframe) method. Under the hood, it distributes the analysis via [Apache Beam](https://beam.apache.org/) which allows it to scale over large datasets.The results returned by this step for numerical and categorical data are summarized in this table:| Numerical Data | Categorical Data ||:-:|:-:||Count of data records|Count of data records|% of missing data records|% of missing data records||Mean, std, min, max|unique records||% of zero values|Avg string length|
###Code
# Generate training dataset statistics
train_stats = tfdv.generate_statistics_from_dataframe(train_df)
###Output
_____no_output_____
###Markdown
Once you've generated the statistics, you can easily visualize your results with the [`visualize_statistics()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics) method. This shows a [Facets interface](https://pair-code.github.io/facets/) and is very useful to spot if you have a high amount of missing data or high standard deviation. Run the cell below and explore the different settings in the output interface (e.g. Sort by, Reverse order, Feature search).
###Code
# Visualize training dataset statistics
tfdv.visualize_statistics(train_stats)
###Output
_____no_output_____
###Markdown
Infer data schema Next step is to create a data schema to describe your train set. Simply put, a schema describes standard characteristics of your data such as column data types and expected data value range. The schema is created on a dataset that you consider as reference, and can be reused to validate other incoming datasets.With the computed statistics, TFDV allows you to automatically generate an initial version of the schema using the [`infer_schema()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema) method. This returns a Schema [protocol buffer](https://developers.google.com/protocol-buffers) containing the result. As mentioned in the [TFX paper](http://stevenwhang.com/tfx_paper.pdf) (Section 3.3), the results of the schema inference can be summarized as follows:* The expected type of each feature.* The expected presence of each feature, in terms of a minimum count and fraction of examples that must containthe feature.* The expected valency of the feature in each example, i.e.,minimum and maximum number of values.* The expected domain of a feature, i.e., the small universe ofvalues for a string feature, or range for an integer feature.Run the cell below to infer the training dataset schema.
###Code
# Infer schema from the computed statistics.
schema = tfdv.infer_schema(statistics=train_stats)
# Display the inferred schema
tfdv.display_schema(schema)
###Output
_____no_output_____
###Markdown
Generate and visualize evaluation dataset statistics The next step after generating the schema is to now look at the evaluation dataset. You will begin by computing its statistics then compare it with the training statistics. It is important that the numerical and categorical features of the evaluation data belongs roughly to the same range as the training data. Otherwise, you might have distribution skew that will negatively affect the accuracy of your model.TFDV allows you to generate both the training and evaluation dataset statistics side-by-side. You can use the [`visualize_statistics()`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics) function and pass additional parameters to overlay the statistics from both datasets (referenced as left-hand side and right-hand side statistics). Let's see what these parameters are:- `lhs_statistics`: Required parameter. Expects an instance of `DatasetFeatureStatisticsList `.- `rhs_statistics`: Expects an instance of `DatasetFeatureStatisticsList ` to compare with `lhs_statistics`.- `lhs_name`: Name of the `lhs_statistics` dataset.- `rhs_name`: Name of the `rhs_statistics` dataset.
###Code
# Generate evaluation dataset statistics
eval_stats = tfdv.generate_statistics_from_dataframe(eval_df)
# Compare training with evaluation
tfdv.visualize_statistics(
lhs_statistics=eval_stats,
rhs_statistics=train_stats,
lhs_name='EVAL_DATASET',
rhs_name='TRAIN_DATASET'
)
###Output
_____no_output_____
###Markdown
We encourage you to observe the results generated and toggle the menus to practice manipulating the visualization (e.g. sort by missing/zeroes). You'll notice that TFDV detects the malformed rows we introduced earlier. First, the `min` and `max` values of the `age` row shows `0` and `1000`, respectively. We know that those values do not make sense if we're talking about working adults. Secondly, the `workclass` row in the Categorical Features says that `0.02%` of the data is missing that particular attribute. Let's drop these rows to make the data more clean.
###Code
# filter the age range
eval_df = eval_df[eval_df['age'] > 16]
eval_df = eval_df[eval_df['age'] < 91]
# drop missing values
eval_df.dropna(inplace=True)
###Output
_____no_output_____
###Markdown
You can then compute the statistics again and see the difference in the results.
###Code
# Generate evaluation dataset statistics
eval_stats = tfdv.generate_statistics_from_dataframe(eval_df)
# Compare training with evaluation
tfdv.visualize_statistics(
lhs_statistics=eval_stats,
rhs_statistics=train_stats,
lhs_name='EVAL_DATASET',
rhs_name='TRAIN_DATASET'
)
###Output
_____no_output_____
###Markdown
Calculate and display evaluation anomalies You can use your reference schema to check for anomalies such as new values for a specific feature in the evaluation data. Detected anomalies can either be considered a real error that needs to be cleaned, or depending on your domain knowledge and the specific case, they can be accepted. Let's detect and display evaluation anomalies and see if there are any problems that need to be addressed.
###Code
# Check evaluation data for errors by validating the evaluation dataset statistics using the reference schema
anomalies = tfdv.validate_statistics(statistics=eval_stats, schema=schema)
# Visualize anomalies
tfdv.display_anomalies(anomalies)
###Output
_____no_output_____
###Markdown
Revising the Schema As shown in the results above, TFDV is able to detect the remaining irregularities we introduced earlier. The short and long descriptions tell us what was detected. As expected, there are string values for `race`, `native-country` and `occupation` that are not found in the domain of the training set schema (you might see a different result if the shuffling of the datasets was applied). What you decide to do about the anomalies depends on your domain knowledge of the data. If an anomaly indicates a data error, then the underlying data should be fixed. Otherwise, you can update the schema to include the values in the evaluation dataset. TFDV provides a set of utility methods and parameters that you can use for revising the inferred schema. This [reference](https://www.tensorflow.org/tfx/data_validation/anomalies) lists the types of anomalies and the parameters that you can edit, but we'll focus only on a couple here.- You can relax the minimum fraction of values that must come from the domain of a particular feature (as described by `ENUM_TYPE_UNEXPECTED_STRING_VALUES` in the [reference](https://www.tensorflow.org/tfx/data_validation/anomalies)):```pythontfdv.get_feature(schema, 'feature_column_name').distribution_constraints.min_domain_mass = [a float between 0.0 and 1.0]```- You can add a new value to the domain of a particular feature:```pythontfdv.get_domain(schema, 'feature_column_name').value.append('string')```Let's use these in the next section. Fix anomalies in the schema Let's say that we want to accept the string anomalies reported as valid. If you want to tolerate a fraction of missing values from the evaluation dataset, you can do it like this:
###Code
# Relax the minimum fraction of values that must come from the domain for the feature `native-country`
country_feature = tfdv.get_feature(schema, 'native-country')
country_feature.distribution_constraints.min_domain_mass = 0.9
# Relax the minimum fraction of values that must come from the domain for the feature `occupation`
occupation_feature = tfdv.get_feature(schema, 'occupation')
occupation_feature.distribution_constraints.min_domain_mass = 0.9
###Output
_____no_output_____
###Markdown
If you want to be rigid and instead add only valid values to the domain, you can do it like this:
###Code
# Add new value to the domain of the feature `race`
race_domain = tfdv.get_domain(schema, 'race')
race_domain.value.append('Asian')
###Output
_____no_output_____
###Markdown
In addition, you can also restrict the range of a numerical feature. This will let you know of invalid values without having to inspect it visually (e.g. the invalid `age` values earlier).
###Code
# Restrict the range of the `age` feature
tfdv.set_domain(schema, 'age', schema_pb2.IntDomain(name='age', min=17, max=90))
# Display the modified schema. Notice the `Domain` column of `age`.
tfdv.display_schema(schema)
###Output
_____no_output_____
###Markdown
With these revisions, running the validation should now show no anomalies.
###Code
# Validate eval stats after updating the schema
updated_anomalies = tfdv.validate_statistics(eval_stats, schema)
tfdv.display_anomalies(updated_anomalies)
###Output
_____no_output_____
###Markdown
Examining dataset slicesTFDV also allows you to analyze specific slices of your dataset. This is particularly useful if you want to inspect if a feature type is well-represented in your dataset. Let's walk through an example where we want to compare the statistics for male and female participants. First, you will use the [`get_feature_value_slicer`](https://github.com/tensorflow/data-validation/blob/master/tensorflow_data_validation/utils/slicing_util.pyL48) method from the `slicing_util` to get the features you want to examine. You can specify that by passing a dictionary to the `features` argument. If you want to get the entire domain of a feature, then you can map the feature name with `None` as shown below. This means that you will get slices for both `Male` and `Female` entries. This returns a function that can be used to extract the said feature slice.
###Code
from tensorflow_data_validation.utils import slicing_util
slice_fn = slicing_util.get_feature_value_slicer(features={'sex': None})
###Output
_____no_output_____
###Markdown
With the slice function ready, you can now generate the statistics. You need to tell TFDV that you need statistics for the features you set and you can do that through the `slice_functions` argument of [`tfdv.StatsOptions`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/StatsOptions). Let's prepare that in the cell below. Notice that you also need to pass in the schema.
###Code
# Declare stats options
slice_stats_options = tfdv.StatsOptions(schema=schema,
slice_functions=[slice_fn],
infer_type_from_schema=True)
###Output
_____no_output_____
###Markdown
You will then pass these options to the `generate_statistics_from_csv()` method. As of writing, generating sliced statistics only works for CSVs so you will need to convert the Pandas dataframe to a CSV. Passing the `slice_stats_options` to `generate_statistics_from_dataframe()` will not produce the expected results.
###Code
# Convert dataframe to CSV since `slice_functions` works only with `tfdv.generate_statistics_from_csv`
CSV_PATH = 'slice_sample.csv'
train_df.to_csv(CSV_PATH)
# Calculate statistics for the sliced dataset
sliced_stats = tfdv.generate_statistics_from_csv(CSV_PATH, stats_options=slice_stats_options)
###Output
_____no_output_____
###Markdown
With that, you now have the statistics for the set slice. These are packed into a `DatasetFeatureStatisticsList` protocol buffer. You can see the dataset names below. The first element in the list (i.e. index=0) is named `All_Examples` which just contains the statistics for the entire dataset. The next two elements (i.e. named `sex_Male` and `sex_Female`) are the datasets that contain the stats for the slices. It is important to note that these datasets are of the type: `DatasetFeatureStatistics`. You will see why this is important after the cell below.
###Code
print(f'Datasets generated: {[sliced.name for sliced in sliced_stats.datasets]}')
print(f'Type of sliced_stats elements: {type(sliced_stats.datasets[0])}')
###Output
Datasets generated: ['All Examples', 'sex_Male', 'sex_Female']
Type of sliced_stats elements: <class 'tensorflow_metadata.proto.v0.statistics_pb2.DatasetFeatureStatistics'>
###Markdown
You can then visualize the statistics as before to examine the slices. An important caveat is that `visualize_statistics()` accepts a `DatasetFeatureStatisticsList` type instead of `DatasetFeatureStatistics`. Thus, at least for this version of TFDV, you will need to convert it to the correct type.
###Code
from tensorflow_metadata.proto.v0.statistics_pb2 import DatasetFeatureStatisticsList
# Convert `Male` statistics (index=1) to the correct type and get the dataset name
male_stats_list = DatasetFeatureStatisticsList()
male_stats_list.datasets.extend([sliced_stats.datasets[1]])
male_stats_name = sliced_stats.datasets[1].name
# Convert `Female` statistics (index=2) to the correct type and get the dataset name
female_stats_list = DatasetFeatureStatisticsList()
female_stats_list.datasets.extend([sliced_stats.datasets[2]])
female_stats_name = sliced_stats.datasets[2].name
# Visualize the two slices side by side
tfdv.visualize_statistics(
lhs_statistics=male_stats_list,
rhs_statistics=female_stats_list,
lhs_name=male_stats_name,
rhs_name=female_stats_name
)
###Output
_____no_output_____ |
Modulo3/Clase11/StructureLearning.ipynb | ###Markdown
Structure Learning> In the last two sessions we studied how to estimate the parameters of both Bayesian Networks and Markov Networks. We made a strong assumption that we know the network structure in advance.>> In this session we will tackle the task of learning in situations when we don't know the structure of the Bayesian network in advance.> **Objectives:**> - To understand the maximum likelihood score for structure learning in Bayesian Networks.> - To study the BIC score for structure learning in Bayesian Networks.> **References:**> - Probabilistic Graphical Models: Principles and Techniques, By Daphne Koller and Nir Friedman. Ch. 18.> - Mastering Probabilistic Graphical Models Using Python, By Ankur Ankan and Abinash Panda. Ch. 5.> - Probabilistic Graphical Models Specialization, offered through Coursera. Prof. Daphne Koller.___ 1. Overview 1.1. Why would we be interested in learning a structure?- To learn a model for new queries, when the domain expertise is not enough.- For structure discovery, when inferring network structure is a goal itself. 1.2. Importance of accurate structure Let's assume that the true model for some situation is the following:
###Code
from IPython.display import Image
Image("figures/AccurateStructure.png")
###Output
_____no_output_____
###Markdown
*What happens if an arc is missing?*
###Code
Image("figures/AccurateStructure1.png")
###Output
_____no_output_____
###Markdown
- The model encodes incorrect independencies.- The correct distribution $P^*$ cannot be learned.- However, it could generalize better :) *What happens if an arc is added?*
###Code
Image("figures/AccurateStructure2.png")
###Output
_____no_output_____
###Markdown
- The model encodes spurious dependencies.- It can still learn the correct distribution $P^*$.- More parameters to learn.- In general, leads to worse generalization. 1.3. Score-based learningTo carry out structure learning, we define a score that evaluates how well a structure matches the data:
###Code
Image("figures/ScoreBasedLearning.png")
###Output
_____no_output_____
###Markdown
Then, we search for the structure that maximizes the score. 2. Likelihood score 2.1. IntroductionThe idea behind the likelihood score is to find the structure $(\mathcal{G}, \theta)$ that maximizes the likelihood:$$\mathrm{score}_L (\mathcal{G}: \mathcal{D}) = l((\hat{\theta}, \mathcal{G}): \mathcal{D}) = \log \mathcal{L}((\hat{\theta}, \mathcal{G}): \mathcal{D})$$ where $\hat{\theta} = \theta_{MLE}$ is the MLE of the parameters given $\mathcal{G}$ and $\mathcal{D}$. **Example:**Consider the two random variables $X$ and $Y$. First, consider the graph structure $\mathcal{G}_0:$
###Code
Image("figures/Example1.png")
###Output
_____no_output_____
###Markdown
The likelihood score is:$$\mathrm{score}_L(\mathcal{G}_0: \mathcal{D}) = \sum_{d=1}^{M} \left(\log\hat{\theta}_{x[d]} + \log\hat{\theta}_{y[d]}\right)$$ Now, consider the graph structure $\mathcal{G}_1:$
###Code
Image("figures/Example2.png")
###Output
_____no_output_____
###Markdown
The likelihood score is:$$\mathrm{score}_L(\mathcal{G}_1: \mathcal{D}) = \sum_{d=1}^{M} \left(\log\hat{\theta}_{x[d]} + \log\hat{\theta}_{y[d]|x[d]}\right)$$ Now, let's compute the difference between them:\begin{align}\mathrm{score}_L(\mathcal{G}_1: \mathcal{D}) - \mathrm{score}_L(\mathcal{G}_0: \mathcal{D}) & = \sum_{d=1}^{M}\left(\log\hat{\theta}_{y[d]|x[d]} - \log\hat{\theta}_{y[d]}\right) \\& = \sum_{x,y} M(x, y) \log\hat{\theta}_{y|x} - \sum_{y} M(y) \log\hat{\theta}_{y} \\& = M \left(\sum_{x,y} \hat{P}(x, y) \log\hat{P}(y| x) - \sum_{y} \hat{P}(y) \log\hat{P}(y)\right),\end{align}where $\hat{P}$ is the empirical distribution (frequency counts).Moreover, recall that $\sum_{x} \hat{P}(x, y) = \hat{P}(y)$. Then,\begin{align}\mathrm{score}_L(\mathcal{G}_1: \mathcal{D}) - \mathrm{score}_L(\mathcal{G}_0: \mathcal{D}) & = M \left(\sum_{x,y} \hat{P}(x, y) \log\hat{P}(y| x) - \sum_{x,y} \hat{P}(x,y) \log\hat{P}(y)\right) \\& = M \sum_{x,y} \hat{P}(x, y) \left(\log\hat{P}(y| x) - \log\hat{P}(y)\right) \\& = M \sum_{x,y} \hat{P}(x, y) \log\frac{\hat{P}(y| x)}{\hat{P}(y)} \\& = M I_{\hat{P}} (X; Y),\end{align}where $I_{\hat{P}} (X; Y)$ is **the mutual information** between the variables $X$ and $Y$ w.r.t. the empirical distribution $\hat{P}$.Intuitively, the mutual information $I_{\hat{P}} (X; Y)$ measures how close the variables $X$ and $Y$ are to independence. 2.2. General decompositionThe above is not only for this simple example. In fact, one can easily show that:$$\mathrm{score}_L(\mathcal{G}: \mathcal{D}) = M \sum_{i=1}^{n} I_{\hat{P}}(X_i, \mathrm{Pa}X_i) - M \sum_{i=1}^{n} H_{\hat{P}}(X_i),$$where:- $I_{\hat{P}}(X; Y) = \sum_{x,y} P(x, y) \log \frac{P(x,y)}{P(x)P(y)}$ is the mutual information.- $H_{\hat{P}}(X) = - \sum_{x} P(x) \log P(x)$ is the entropy. Note that the entropy $H_{\hat{P}}(X)$ is independent of the graph structure $\mathcal{G}$.Then, the score is higher if the nodes $X_i$ are correlated with their parents. **Limitations:**Following the above, note that the difference between having an arc and not having it is:$$\mathrm{score}_L(\mathcal{G}_1: \mathcal{D}) - \mathrm{score}_L(\mathcal{G}_0: \mathcal{D}) = M I_{\hat{P}} (X, Y).$$Some comments:- The mutual information is always nonnegative: $I_{\hat{P}} (X, Y) \geq 0.$- It equals zero if and only if $X$ and $Y$ are independent (in the empirical distribution $\hat{P}$).- Even if $X \perp Y$ in the true distribution $P^*$, almost always $I_{\hat{P}} (X, Y) > 0$.- Thus, adding edges can't hurt this score, and almost always helps. The maximum likelihood score is maximized for fully connected networks: overfitting. Because of the above, this score is never used (it is not implemented in `pgmpy`)
###Code
# Import numpy and pandas
# Generate some data (X, Y)
# Wrap data into a data frame
# Empirical probabilities
# P(X)
# P(Y)
# P(X, Y)
# P(X)P(Y)
# Scores
# X Y
# X -> Y
# X <- Y
# Select best score
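# --- A minimal sketch of the comparison outlined above (added for illustration). ---
# --- The data is synthetic and the variable names are our own choice, not part of the course material. ---
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
x = rng.randint(0, 2, size=1000)
y = (x + rng.binomial(1, 0.3, size=1000)) % 2     # Y is a noisy copy of X, so X and Y are dependent
data = pd.DataFrame({'X': x, 'Y': y})
M = len(data)

# Empirical probabilities
p_xy = pd.crosstab(data['X'], data['Y'], normalize=True)   # P(X, Y)
p_x = data['X'].value_counts(normalize=True)               # P(X)
p_y = data['Y'].value_counts(normalize=True)               # P(Y)

# Mutual information I(X; Y) under the empirical distribution
mi = 0.0
for i in p_xy.index:
    for j in p_xy.columns:
        if p_xy.loc[i, j] > 0:
            mi += p_xy.loc[i, j] * np.log(p_xy.loc[i, j] / (p_x[i] * p_y[j]))

# score(G1) - score(G0) = M * I(X; Y) >= 0, so the connected structures always win under this score
print('M * I(X;Y) =', M * mi)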
###Output
_____no_output_____
###Markdown
3. BIC scoreHow can we avoid overfitting?- We can restrict the hypothesis space: - Restrict number of parents or parameters. - We can penalize the complexity. An explicit penalization of the complexity is done by the **Bayesian Information Criterion (BIC) score**:$$\mathrm{score}_{BIC}(\mathcal{G}: \mathcal{D}) = \mathrm{score}_{L}(\mathcal{G}: \mathcal{D}) - \frac{\log M}{2} \mathrm{Dim}[\mathcal{G}],$$where $\mathrm{Dim}[\mathcal{G}]$ is the number of independent parameters implied by the structure $\mathcal{G}$.This score directly represents the tradeoff between fit to the data and model complexity. **Asymptotic consistency:**We have that:$$\mathrm{score}_{BIC}(\mathcal{G}: \mathcal{D}) = M \sum_{i=1}^{n} I_{\hat{P}}(X_i, \mathrm{Pa}X_i) - M \sum_{i=1}^{n} H_{\hat{P}}(X_i) - \frac{\log M}{2} \mathrm{Dim}[\mathcal{G}]$$The mutual information term grows linearly with $M$ whereas the complexity penalty grows logarithmically with $M$.> As $M\to\infty$, more emphasis is given to fit to the data than to model complexity.>> Thus, as $M\to\infty$, ($\hat{P} \to P^*$) the true structure $\mathcal{G}^*$ will maximize the score.
###Code
# Import pgmpy.estimators.BicScore, pgmpy.estimators.ExhaustiveSearch, pgmpy.models.BayesianModel
# Instantiate BicScore
# Models
# X Y
# X -> Y
# X <- Y
# Scores
# X Y
# X -> Y
# X <- Y
# Select best score
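# --- A minimal sketch (added for illustration); it reuses the synthetic `data` frame sketched
# --- in the previous cell. Note: in newer pgmpy versions `BayesianModel` is called
# --- `BayesianNetwork`, so the import may need adjusting.
from pgmpy.estimators import BicScore
from pgmpy.models import BayesianModel

bic = BicScore(data)

# Candidate structures
m_indep = BayesianModel()
m_indep.add_nodes_from(['X', 'Y'])       # X  Y  (no edge)
m_xy = BayesianModel([('X', 'Y')])       # X -> Y
m_yx = BayesianModel([('Y', 'X')])       # X <- Y

# BIC scores (higher is better)
scores = {'X  Y': bic.score(m_indep), 'X -> Y': bic.score(m_xy), 'X <- Y': bic.score(m_yx)}
print(scores)

# Select best score
print('Best structure:', max(scores, key=scores.get))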
###Output
_____no_output_____
###Markdown
The search space of BNs is exponential in the number of variables, and the BIC scoring function allows for local maxima. The first property makes exhaustive search intractable for all but very small networks; the second prevents efficient local optimization algorithms from always finding the optimal structure. Thus, identifying the ideal structure is often not tractable. Despite this bad news, heuristic search strategies often yield good results. If only a few nodes are involved (read: fewer than 5), ExhaustiveSearch can be used to compute the score for every DAG and return the best-scoring one:
###Code
# Exhaustive search
# Print all scores and edges
# Best model
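# --- A minimal sketch (added for illustration), again using the synthetic `data` from above. ---
from pgmpy.estimators import ExhaustiveSearch

es = ExhaustiveSearch(data, scoring_method=BicScore(data))

# Print all scores and edges (with only two variables there are just three DAGs)
for score, dag in es.all_scores():
    print(score, list(dag.edges()))

# Best model
best_model = es.estimate()
print(list(best_model.edges()))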
###Output
_____no_output_____
###Markdown
If more nodes are involved, one needs to switch to heuristic search. HillClimbSearch implements a greedy local search that starts from the DAG `start` (default: a disconnected DAG) and proceeds by iteratively performing single-edge manipulations that maximally increase the score. The search terminates once a local maximum is found.
###Code
# Import pgmpy.estimators.HillClimbSearch
# Create some data with dependencies
# Hill Climb search
# Best model
# Edges
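# --- A minimal sketch (added for illustration). Depending on the pgmpy version, the scoring
# --- method is passed either to the constructor or to `estimate`; the call below follows the newer API.
from pgmpy.estimators import HillClimbSearch
import numpy as np
import pandas as pd

# Create some data with dependencies: B and C are noisy copies of A, H is independent noise
values = pd.DataFrame(np.random.randint(0, 2, size=(2000, 4)), columns=list('ABCH'))
values['B'] = values['A'] ^ np.random.binomial(1, 0.1, size=2000)
values['C'] = values['A'] ^ np.random.binomial(1, 0.1, size=2000)

# Hill Climb search
hc = HillClimbSearch(values)
best_model = hc.estimate(scoring_method=BicScore(values))

# Edges
print(list(best_model.edges()))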
###Output
_____no_output_____ |
notebooks/Step_by_Step.ipynb | ###Markdown
Step by Step First, let's have a look at the 'test.pb' graph with TensorBoard. Generate the event directory for 'test.pb': ```$ python tools/import_pb_to_tensorboard.py --model_dir='examples/test.pb' --log_dir='log_test'``` Visualize by running: ```$ tensorboard --logdir=log_test```
###Code
Image(filename='../examples/test_graph.png', width=600)
###Output
_____no_output_____
###Markdown
Assume that we want to concatenate our Placeholder input and Relu6, so we need to add a concat op into 'test.pb'. However, 'test.pb' is a binary file, which is difficult for humans to read, let alone edit. To work around this, we can convert it into 'test.pbtxt', a text format that is much easier to inspect and edit.
###Code
import tensorflow as tf
graph_path = '../examples/test.pb'
with tf.Graph().as_default() as g_1:
g1_def = tf.GraphDef()
with open(graph_path, 'rb') as f:
g1_def.ParseFromString(f.read())
_ = tf.import_graph_def(g1_def, name="")
sess = tf.Session()
tf.train.write_graph(sess.graph, '../examples','test.pbtxt')
###Output
/Library/Python/2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
Run the code above and open test.pbtxt: ```$ vim examples/test.pbtxt``` We will see something like the following:```node { name: "Placeholder" op: "Placeholder" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "shape" value { shape { dim { size: -1 } dim { size: 256 } dim { size: 256 } dim { size: 3 } } } }}node { name: "Const" op: "Const" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "value" ......``` Next, we also want to know what the 'concat' op looks like in the '.pbtxt' format, so we can make a small demo for it as follows:
###Code
with tf.Graph().as_default() as g2:
x = tf.placeholder(dtype=tf.float32, shape=[None, 256, 256, 3])
# the relu6 output channel size is 13, you can get this info from tensorboard.
y = tf.placeholder(dtype=tf.float32, shape=[None, 256, 256, 13])
z = tf.concat([x,y],axis=-1)
sess1 = tf.Session()
tf.train.write_graph(sess1.graph, '../examples','demo.pbtxt')
###Output
_____no_output_____
###Markdown
Open 'demo.pbtxt' and you will see something like the following:```node { name: "concat/axis" op: "Const" attr { key: "dtype" value { type: DT_INT32 } } attr { key: "value" value { tensor { dtype: DT_INT32 tensor_shape { } int_val: -1 } } }}node { name: "concat" op: "ConcatV2" input: "Placeholder" input: "Placeholder_1" input: "concat/axis" attr { key: "N" value { i: 2 } } attr { key: "T" value { type: DT_FLOAT } } attr { .........``` So, what can we do next? It is quite clear: we just copy the 'concat' op definition into 'test.pbtxt' and modify its inputs, and then we get our target graph file. Here is how the relevant part of 'test.pbtxt' should look:```......node { name: "concat" op: "ConcatV2" input: "Relu6" input: "Placeholder" input: "concat/axis" attr { key: "N" value { i: 2 } } attr { key: "T" value { type: DT_FLOAT } } attr { key: "Tidx" value { type: DT_INT32 } }}......``` Finally, we need to convert 'test.pbtxt' back into a binary 'test.pb' file; to tell it apart from the original, I will rename the new one 'test_cliped.pb'.
###Code
from google.protobuf import text_format
graph_path = '../examples/test.pbtxt'
with tf.Graph().as_default() as g_1:
g1_def = tf.GraphDef()
with open(graph_path, 'rb') as f:
text_format.Merge(f.read(), g1_def)
_ = tf.import_graph_def(g1_def, name="")
sess = tf.Session()
tf.train.write_graph(sess.graph, '../examples','test_cliped.pb', as_text=False)
###Output
_____no_output_____
###Markdown
Using tools to check our modification.`$ python tools/import_pb_to_tensorboard.py --model_dir='examples/test_cliped.pb' --log_dir='log_test_cliped'``$ tensorboard --logdir=log_test_cliped`
###Code
Image(filename='../examples/test_cliped_graph.png', width=500)
###Output
_____no_output_____ |
docs/lectures/lecture09/notebook/s2-ex1.ipynb | ###Markdown
Title**Exercise: A.1 - Beta values for data from Random Universe** DescriptionGiven a RandomUniverse(dataframe)->dataframe function that gives a new dataset from a "parallel" universe, calculate the $\beta_0$ 's and $\beta_1$ 's and plot a histogram like the one below. Roadmap- Get a new dataframe using the RandomUniverse function already provided in the exercise- Calculate $\beta_0$, $\beta_1$ for that particular dataframe- Add the calculated $\beta_0$ and $\beta_1$ values to a python list- Plot a histogram using the lists calculated aboveChange the number of `parallelUniverses` and comment on what you observe. Discuss within the group why you see this behavior. Did you expect the spread to change? Why or why not? Hints- To compute the beta values use the following equations:$\beta_{0}=\bar{y}-\left(b_{1} * \bar{x}\right)$$\beta_{1}=\frac{\sum(x-\bar{x}) *(y-\bar{y})}{\sum(x-\bar{x})^{2}}$where $\bar{x}$ is the mean of $x$ and $\bar{y}$ is the mean of $y$np.dot() : Computes the dot product of two arraysax.hist() : Plots a histogramax.set_xlabel() : Sets label for x-axisax.set_ylabel() : Sets label for the y-axisNote: This exercise is **auto-graded and you can try multiple attempts.**
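As a quick, standalone illustration of the two formulas above (using made-up numbers rather than the Advertising data), the computation looks like this:

```python
import numpy as np

# Illustrative data (not the Advertising dataset)
x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([2.1, 3.9, 6.2, 8.1, 9.8])

xmean, ymean = x.mean(), y.mean()

# beta1 = sum((x - xbar) * (y - ybar)) / sum((x - xbar)^2)
beta1 = np.dot(x - xmean, y - ymean) / ((x - xmean) ** 2).sum()

# beta0 = ybar - beta1 * xbar
beta0 = ymean - beta1 * xmean
print(beta0, beta1)
```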
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from randomuniverse import RandomUniverse
%matplotlib inline
###Output
_____no_output_____
###Markdown
Reading the standard Advertising dataset
###Code
df = pd.read_csv('Advertising_adj.csv')
df.head()
#Create two empty lists that will store the beta values
beta0_list, beta1_list = [],[]
#Choose the number of "parallel" Universes to generate the new dataset
parallelUniverses = 1000
for i in range(parallelUniverses):
df_new = RandomUniverse(df)
# x is the predictor variable given by 'tv' values
# y is the response variable given by 'sales' values
x = ___
y = ___
#Find the mean of the x values
xmean = x.___
#Find the mean of the y values
ymean = y.___
# Using Linear Algebra as discussed in lecture for beta0 and beta1
beta1 = ___
beta0 = ___
# Append the calculated values of beta1 and beta0
beta0_list.___
beta1_list.___
### edTest(test_beta) ###
beta0_mean = np.mean(beta0_list)
beta1_mean = np.mean(beta1_list)
###Output
_____no_output_____
###Markdown
Now we plot the histograms of the $\beta_0$ and $\beta_1$ values.
###Code
# plot histograms of the beta0 and beta1 values
fig, ax = plt.subplots(1,2, figsize=(18,8))
ax[0].___
ax[1].___
ax[0].set_xlabel('Beta 0')
ax[1].set_xlabel('Beta 1')
ax[0].set_ylabel('Frequency');
###Output
_____no_output_____ |
notebooks/1-Introduction-to-convolutional-neural-network.ipynb | ###Markdown
Introduction to convolutional neural network Here's what we are going to do in this notebook:1. Get to know **deep neural network** (DNN)2. Get to know **convolutional neural network** (CNN) - Motivation for CNN - Key components that define a CNN Deep neural network Let's first see the big picture.Wikipedia: **Machine learning** (ML) is the study of computer algorithms that improve automatically through experience.Machine learning is often sliced into* Supervised learning (predicting a label, i.e. classification, or a continuous variable),* Unsupervised learning (pattern recognition for unlabeled data, e.g., clustering),* Reinforcement learning (algorithms learn the best way to "behave", e.g. AlphaGo Zero, self-driving cars). Deep learning is a powerful form of machine learning that has garnered much attention for its successes in computer vision (e.g. image recognition), natural language processing, and beyond. DNN is probably the most well-known network for deep learning.- Originally inspired by information processing and communication nodes in biological systems.- Input data is passed through layers of the network, which contain a number of nodes, analogous to "neurons". - DNN systems can be trained to learn the features of the data very well. Image credit: Waldrop, M. M. (2019). News Feature: What are the limits of deep learning?. Proceedings of the National Academy of Sciences, 116(4), 1074-1077. Roughly speaking, there are two important operations that make a neural network.1. **Forward propagation**2. **Backpropagation** Forward propagation+ The network reads the input data, computes its values across the network and gives a final output value.+ This is the **prediction** step.How does the network compute an output value?Let's see what happens in a single-layer network when it does one prediction.1. Inputs: a vector of numbers.2. Weights: each node has its own weight.3. Weighted sum: as the name suggests, a weighted sum of the inputs.4. Activation: the weighted sum is "activated" through a (usually nonlinear) activation function, e.g. a step function. Image [credit](https://deepai.org/machine-learning-glossary-and-terms/perceptron). If you know a bit about algebra, this is what the operation is doing:- $y = f(\mathbf{w}\cdot \mathbf{x} + b) $where $\mathbf{w}\cdot \mathbf{x} + b$ is the weighted sum, $f(\cdot)$ is the activation function, and $y$ is the output.Now, in a deeper neural network, the procedure is essentially the same. The input --> weighted sum --> activation process is done for each layer. Image [credit](https://www.cs.purdue.edu/homes/ribeirob/courses/Spring2020/lectures/03/MLP_and_backprop.html). Backpropagation+ By comparing the predictions and the ground truth values (loss), the network adjusts its parameters so that the performance is improved. + This is the **training** step.How does the network adjust the weights through training?This is done through an operation called **backpropagation**, or backprop. The network takes the loss and recursively calculates the slope of the loss function with respect to each network parameter. Calculating these slopes requires the use of the chain rule from calculus; you can read more about it [here](https://sebastianraschka.com/faq/docs/backprop-arbitrary.html).An optimization algorithm is then used to update the network parameters using the gradient information until the performance cannot be improved anymore. One commonly used optimizer is stochastic gradient descent.
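To make the forward pass concrete, here is a tiny NumPy sketch of the single-neuron computation $y = f(\mathbf{w}\cdot \mathbf{x} + b)$ described above (the numbers are made up for illustration):

```python
import numpy as np

x = np.array([0.5, -1.2, 3.0])   # inputs
w = np.array([0.8, 0.1, -0.4])   # weights (one per input)
b = 0.2                          # bias

def relu(z):
    return np.maximum(0, z)      # a common nonlinear activation

weighted_sum = np.dot(w, x) + b  # w . x + b
y = relu(weighted_sum)           # f(w . x + b)
print(weighted_sum, y)
```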
One analogy often used to explain gradient-based optimization is hiking:+ Training the network so that its loss is minimized is like trying to get down to the lowest point on the ground from a mountain.+ Backprop operation finding the loss function gradients is like finding the path on your way down.+ Optimization algorithm is the step where you actually take the path and eventually reach the lowest point. Image [credit](https://www.datasciencecentral.com/profiles/blogs/alternatives-to-the-gradient-descent-algorithm). So now you know that DNN- is a powerful **machine learning** technique- can be used to tackle **supervised**, **unsupervised** and **reinforcement learning** problems- consists of forward propagation (**input to output**) and backpropagation (**error to parameter update**)We are ready to talk about CNN! Convolutional neural networkOrdinary neural networks that we've talked about above expect input data to be a **vector of numbers**:$\mathbf{x} = [x_1, x_2, x_3, \dots]$What if we want to train an **image classifier**, i.e. use image as the input? MotivationDigital image basics:- An image is a **collection of pixels**. For example, a 32-by-32 image has $32 \times 32 = 1024$ pixels.- Each pixel is an **intensity represented by a number** in the range $[0, 255]$, $0$ is black and $255$ is white.- Color images have three dimensions: **[width, height, depth]** where depth is usually 3.- Why is depth 3? That's because it encodes the intensity of [**R**ed, **G**reen, **B**lue], i.e. RGB values. Therefore, to a computer program, this black and white Lincoln image is just a matrix of integers. Image [credit](https://ai.stanford.edu/~syyeung/cvweb/tutorial1.html) We could also easily generate a random picture by random numbers.
###Code
import numpy as np
import matplotlib.pyplot as plt
# Generate a 500-by-500 matrix with random integers between 0 and 255
random_image = np.random.randint(low=0, high=256, size=[500, 500])
# Plot the random image
plt.imshow(random_image, cmap='gray')
plt.colorbar();
###Output
_____no_output_____
###Markdown
Since a digital image can be represented as a 2D grid of pixel values, we could stretch out the grid, make it into a vector of numbers and feed it into a neural network.However, there are two major limitations to this approach.1. **It does not scale well to bigger images.** + While it is still manageable for an input with $32\times32 = 1024$ dimensions, most real-life images are bigger than this. + For example, a color image of size 320x320x3 would translate to an input with dimension **307200**! 2. **It does not consider the properties of an image.** + *Locality*: Nearby pixels are usually strongly correlated (e.g., see the face outline above). Stretching it out breaks the pattern. + *Translation invariance*: Meaningful features could occur anywhere on an image, e.g., see the flying bird.  Image [credit](https://storage.googleapis.com/deepmind-media/UCLxDeepMind_2020/L3%20-%20UUCLxDeepMind%20DL2020.pdf) ConvolutionOn the other hand, CNN is designed to scale well with images and take advantage of these unique properties.1. **Weight sharing**: All local parts of the image are processed with the same weights so that identical patterns could be detected at many locations, e.g., horizontal edges, curves and etc.2. **Hierarchy of features**: Lower-level patterns are composed to form higher-level ones, e.g., edges --> contour --> face outlineThis is done through the operation of **convolution**:1. Define a filter: a 2D weight matrix of a certain size.2. Convolve the whole image with the filter: multiply each pixel under the filter with the weight.3. Convolution output forms a new image: a feature map.4. By using multiple filters (each with a different weight matrix), different features can be captured. Example: mean filterActually, let's see the operation in numbers and images. It will be easier to see. Here we create an image of a bright square. Note that `matplotlib` automatically interprets values in [0,1] the same as in [0, 255].
###Code
bright_square = np.zeros((7, 7), dtype=float)
bright_square[2:5, 2:5] = 1
print(bright_square)
fig, ax = plt.subplots()
ax.imshow(bright_square, cmap='gray');
###Output
[[0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0.]
[0. 0. 1. 1. 1. 0. 0.]
[0. 0. 1. 1. 1. 0. 0.]
[0. 0. 1. 1. 1. 0. 0.]
[0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0.]]
###Markdown
Recall that a filter is a 2D weight matrix. Let's create one example filter, and call it the **"mean filter"**.
###Code
mean_filter = np.full((3, 3), 1/9)
print(mean_filter)
###Output
[[0.11111111 0.11111111 0.11111111]
[0.11111111 0.11111111 0.11111111]
[0.11111111 0.11111111 0.11111111]]
###Markdown
Here we convolve the image with the filter and print out both the original and the convolved image.
###Code
import scipy.ndimage as ndi
%precision 2
# print original image pixel values
print('Original image pixel values: \n', bright_square)
# print convolved image pixel values
filtered_square = ndi.convolve(bright_square, mean_filter)[1:-1,1:-1]
print('\n Filtered image pixel values: \n', filtered_square)
###Output
Original image pixel values:
[[0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0.]
[0. 0. 1. 1. 1. 0. 0.]
[0. 0. 1. 1. 1. 0. 0.]
[0. 0. 1. 1. 1. 0. 0.]
[0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0.]]
Filtered image pixel values:
[[0.11 0.22 0.33 0.22 0.11]
[0.22 0.44 0.67 0.44 0.22]
[0.33 0.67 1. 0.67 0.33]
[0.22 0.44 0.67 0.44 0.22]
[0.11 0.22 0.33 0.22 0.11]]
###Markdown
In a convolution, this "mean filter" actually slides across the image and takes the values of 9 connected pixels and average them out. Let's see how the convolved image looks like. You could probably see why this is called the "mean filter" now.It returns an image whereby each pixel is an average of 9 pixels on the original image. It kind of blurs out any edges in the image.
###Code
fig, ax = plt.subplots()
ax.imshow(filtered_square, cmap='gray');
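# A different filter picks out different features from the same image. For example, this
# simple vertical-edge kernel (an illustrative example, not part of the original notebook)
# highlights the left/right borders of the bright square instead of blurring it:
edge_filter = np.array([[1, 0, -1],
                        [1, 0, -1],
                        [1, 0, -1]])
edge_response = ndi.convolve(bright_square, edge_filter)
fig, ax = plt.subplots()
ax.imshow(edge_response, cmap='gray');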
###Output
_____no_output_____ |
Lessons&CourseWorks/3.ObjectTracking&Localization/2.RobotLocalization/5.MultipleMeasurement/1. Multiple Measurements, exercise.ipynb | ###Markdown
Multiple MeasurementsIn this notebook, let's go over the steps a robot takes to help localize itself from an initial, uniform distribution to sensing and updating that distribution and finally normalizing that distribution.1. The robot starts off knowing nothing; the robot is equally likely to be anywhere and so `p` is a uniform distribution.2. Then the robot senses a grid color: red or green, and updates this distribution `p` according to the values of pHit and pMiss.3. We normalize `p` such that its components sum to 1.4. **We repeat steps 2 and 3 for however many measurements are taken**
###Code
# importing resources
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
A helper function for visualizing a distribution.
###Code
def display_map(grid, bar_width=1):
if(len(grid) > 0):
x_labels = range(len(grid))
plt.bar(x_labels, height=grid, width=bar_width, color='b')
plt.xlabel('Grid Cell')
plt.ylabel('Probability')
plt.ylim(0, 1) # range of 0-1 for probability values
plt.title('Probability of the robot being at each cell in the grid')
plt.xticks(np.arange(min(x_labels), max(x_labels)+1, 1))
plt.show()
else:
print('Grid is empty')
###Output
_____no_output_____
###Markdown
QUIZ: Measure TwiceBelow is the normalized sense function; add code that can loop over multiple measurements, now in a *list* `measurements`. Add to this code so that it updates the probability twice and gives the posterior distribution after both measurements are incorporated. Make sure that your code allows for any sequence of measurements, whether two or more measurements have been taken.
###Code
# given initial variables
p=[0.2, 0.2, 0.2, 0.2, 0.2]
# the color of each grid cell in the 1D world
world=['green', 'red', 'red', 'green', 'green']
# measurements, now a *list* of sensor readings ('red' or 'green')
measurements = ['red', 'green']
pHit = 0.6
pMiss = 0.2
# sense function
def sense(p, Z):
''' Takes in a current probability distribution, p, and a sensor reading, Z.
Returns a *normalized* distribution after the sensor measurement has been made, q.
This should be accurate whether Z is 'red' or 'green'. '''
q=[]
# loop through all grid cells
for i in range(len(p)):
# check if the sensor reading is equal to the color of the grid cell
# if so, hit = 1
# if not, hit = 0
hit = (Z == world[i])
q.append(p[i] * (hit * pHit + (1-hit) * pMiss))
# sum up all the components
s = sum(q)
# divide all elements of q by the sum to normalize
for i in range(len(p)):
q[i] = q[i] / s
return q
## TODO: Add your code for accounting for 2 sensor measurements, here
## Grab and print out the resulting distribution, p
for measurement in measurements:
    p = sense(p, measurement)
print(p)
display_map(p)
###Output
[0.2, 0.2, 0.2, 0.2, 0.2]
|
.ipynb_checkpoints/conclusions_query_solution-checkpoint.ipynb | ###Markdown
Drawing Conclusions Using Query
###Code
# Load 'winequality_edited.csv,' a file you created in a previous section
import pandas as pd
df = pd.read_csv('winequality_edited.csv')
df.head()
###Output
_____no_output_____
###Markdown
Do wines with higher alcoholic content receive better ratings?
###Code
# get the median amount of alcohol content
df.alcohol.median()
# select samples with alcohol content less than the median
low_alcohol = df.query('alcohol < 10.3')
# select samples with alcohol content greater than or equal to the median
high_alcohol = df.query('alcohol >= 10.3')
# ensure these queries included each sample exactly once
num_samples = df.shape[0]
num_samples == low_alcohol['quality'].count() + high_alcohol['quality'].count() # should be True
# get mean quality rating for the low alcohol and high alcohol groups
low_alcohol.quality.mean(), high_alcohol.quality.mean()
###Output
_____no_output_____
###Markdown
Do sweeter wines receive better ratings?
###Code
# get the median amount of residual sugar
df.residual_sugar.median()
# select samples with residual sugar less than the median
low_sugar = df.query('residual_sugar < 3')
# select samples with residual sugar greater than or equal to the median
high_sugar = df.query('residual_sugar >= 3')
# ensure these queries included each sample exactly once
num_samples == low_sugar['quality'].count() + high_sugar['quality'].count() # should be True
# get mean quality rating for the low sugar and high sugar groups
low_sugar.quality.mean(), high_sugar.quality.mean()
###Output
_____no_output_____ |
.ipynb_checkpoints/lab_3-checkpoint.ipynb | ###Markdown
1. Linear congruential generator
###Code
mode = 'Lcg'
def get_next_value(mode, player_id):
resp = requests.get(get_bet_url(mode, player_id, 1, 1)).json()
print(resp)
return np.uint32(resp['realNumber'])
def get_prediction(last, a, c):
result = (a * last + c) % 2**32
return np.int32(result)
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(b, n):
g, x, _ = egcd(b, n)
return x % n if g == 1 else None
k1, k2, k3 = [get_next_value(mode, player_id) for _ in range(3)]
print(k1, k2, k3)
mod = modinv(k1 - k2, 2**32)
while mod is None:
k1, k2, k3 = k2, k3, get_next_value(mode, player_id)
mod = modinv(k1 - k2, 2**32)
a = (k2 - k3) * mod % 2**32
c = (k2 - a * k1) % 2**32
print(a, c)
a = (k2 - k3) * modinv(k1 - k2, 2**32) % 2**32
c = (k2 - a * k1) % 2**32
print(a, c)
last = k3
for i in range(5):
last = get_prediction(last, a, c)
print(requests.get(get_bet_url(mode, player_id, 100, last)).json())
###Output
{'message': 'Yay! https://docs.google.com/document/d/1E_ltXUqvmmWeb3Dl3Qsyexsy5V7M6Lb1kvkXiXz9sks/edit?usp=sharing https://docs.google.com/document/d/1qsNXIqxQEs4Xbz5ye6z0ttrct1qn7zos4Vwl7PzcEK0/edit?usp=sharing', 'account': {'id': '1007', 'money': 100897, 'deletionTime': '2020-11-23T20:09:30.5766131Z'}, 'realNumber': 1601492553}
{'message': 'Yay! https://docs.google.com/document/d/1E_ltXUqvmmWeb3Dl3Qsyexsy5V7M6Lb1kvkXiXz9sks/edit?usp=sharing https://docs.google.com/document/d/1qsNXIqxQEs4Xbz5ye6z0ttrct1qn7zos4Vwl7PzcEK0/edit?usp=sharing', 'account': {'id': '1007', 'money': 200797, 'deletionTime': '2020-11-23T20:09:30.5766131Z'}, 'realNumber': -1881150700}
{'message': 'Yay! https://docs.google.com/document/d/1E_ltXUqvmmWeb3Dl3Qsyexsy5V7M6Lb1kvkXiXz9sks/edit?usp=sharing https://docs.google.com/document/d/1qsNXIqxQEs4Xbz5ye6z0ttrct1qn7zos4Vwl7PzcEK0/edit?usp=sharing', 'account': {'id': '1007', 'money': 300697, 'deletionTime': '2020-11-23T20:09:30.5766131Z'}, 'realNumber': -1217668253}
{'message': 'Yay! https://docs.google.com/document/d/1E_ltXUqvmmWeb3Dl3Qsyexsy5V7M6Lb1kvkXiXz9sks/edit?usp=sharing https://docs.google.com/document/d/1qsNXIqxQEs4Xbz5ye6z0ttrct1qn7zos4Vwl7PzcEK0/edit?usp=sharing', 'account': {'id': '1007', 'money': 400597, 'deletionTime': '2020-11-23T20:09:30.5766131Z'}, 'realNumber': -218265242}
{'message': 'Yay! https://docs.google.com/document/d/1E_ltXUqvmmWeb3Dl3Qsyexsy5V7M6Lb1kvkXiXz9sks/edit?usp=sharing https://docs.google.com/document/d/1qsNXIqxQEs4Xbz5ye6z0ttrct1qn7zos4Vwl7PzcEK0/edit?usp=sharing', 'account': {'id': '1007', 'money': 500497, 'deletionTime': '2020-11-23T20:09:30.5766131Z'}, 'realNumber': 50565517}
###Markdown
2. Mersenne Twister 19937
###Code
mode = 'Mt'
player_id = 2016
player = requests.get(get_login_url(player_id)).json()
player
first_value = get_next_value(mode, player_id)
first_value
from datetime import datetime, timezone
def infer_mt_seed(delta, first_value):
curr_time = int(datetime.now(timezone.utc).timestamp())
for i in range(*delta):
mt = np.random.RandomState(curr_time + i)
if mt.randint(0, 2**32) == first_value:
return curr_time + i
assert False, 'Unable to find the seed'
seed = infer_mt_seed(delta=(-600, 600), first_value=first_value)
seed
rng = RandomState(seed)
rng.randint(0, 2**32)
for i in range(10):
print(requests.get(get_bet_url(mode, player_id, 100, rng.randint(0, 2**32))).json())
###Output
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 100899, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 3857566343}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 200799, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 1186471885}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 300699, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 4224996862}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 400599, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 927513261}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 500499, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 1634168435}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 600399, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 2952955678}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 700299, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 1957823891}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 800199, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 3275739523}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 900099, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 2487638728}
{'message': "Yay! It's different from the first one: https://docs.google.com/document/d/19vgZtvDN4_StEgVEM9MjfxnqfayByLNMD7PFJgvZv7c/edit?usp=sharing", 'account': {'id': '2016', 'money': 999999, 'deletionTime': '2020-11-23T20:13:48.5459233Z'}, 'realNumber': 1405618490}
###Markdown
3. MT19937 with strong seed
###Code
mode = 'BetterMt'
player_id = 3001
player = requests.get(get_login_url(player_id)).json()
player
def untemper(value):
(w, n, m, r) = (32, 624, 397, 31)
a = 0x9908B0DF
(u, d) = (11, 0xFFFFFFFF)
(s, b) = (7, 0x9D2C5680)
(t, c) = (15, 0xEFC60000)
l = 18
f = 1812433253
value = np.uint32((value >> l) ^ value)
value = np.uint32(((value << t) & c) ^ value)
value = np.uint32(((value << s) & 0x00001680) ^ value)
value = np.uint32(((value << s) & 0x000c4000) ^ value)
value = np.uint32(((value << s) & 0x0d200000) ^ value)
value = np.uint32(((value << s) & 0x90000000) ^ value)
value = np.uint32(((value >> u) & 0xffc00000) ^ value)
value = np.uint32(((value >> u) & 0x003ff800) ^ value)
value = np.uint32(((value >> u) & 0x000007ff) ^ value)
return value
arr = np.array(
[untemper(get_next_value(mode, player_id)) for _ in range(624)])
mt = np.random.RandomState()
mt.set_state(('MT19937', arr, 624))
for i in range(15):
print(requests.get(get_bet_url(mode, player_id, 100, mt.randint(0, 2**32))).json())
###Output
{'message': 'Yay!', 'account': {'id': '3001', 'money': 100276, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 4114999456}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 200176, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 3011842696}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 300076, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 56528384}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 399976, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 3037546313}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 499876, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 2061785785}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 599776, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 177883767}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 699676, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 1269364519}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 799576, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 912136912}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 899476, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 3939185121}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 999376, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 1607152421}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 1099276, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 3564988443}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 1199176, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 725596433}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 1299076, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 2135379850}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 1398976, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 3001724268}
{'message': 'Yay!', 'account': {'id': '3001', 'money': 1498876, 'deletionTime': '2020-11-23T22:03:25.8751736Z'}, 'realNumber': 1103572481}
|
NLP/main.ipynb | ###Markdown
Spam Detection System Objective Build a spam detection classifier. To do so, train on the **train_data.csv** dataset and classify the SMS column of the **validation_data.csv** dataset as "ok" or "blocked". **ANSWER**: validation_data_com_LABEL.csv Specific Goals - Code quality.- Procedures (how the data was prepared/processed, the algorithms used, and the rationale for the choices made).- The classification accuracy will also be evaluated, but it is not a fundamental criterion in this analysis. Report Summary 1. Data Collection and Initial Analysis2. Exploratory Data Analysis3. Modeling and ML Application4. Optimization Search5. Final Remarks Dependencies
###Code
#Manipulation and visualization libs
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#Text processing libs
import urlextract
import re
from collections import Counter
#NLP libs
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
#Preprocessing libs
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
#ML libs
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.neural_network import MLPClassifier
#Metrics libs
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
###Output
_____no_output_____
###Markdown
1. Data Collection and Initial Analysis
###Code
#Load the training data
df = pd.read_csv("Resources/train_data.csv", encoding='utf-8')
df.head()
#Load the validation data
df_va = pd.read_csv("Resources/validation_data.csv", encoding='utf-8')
df_va.head()
#Number of comments
df.shape[0]
#Labels found
df['LABEL'].unique()
#Check whether there is any missing data
df.isnull().values.any()
#Check the class balance
df['LABEL'].value_counts()
###Output
_____no_output_____
###Markdown
2. Exploratory Data Analysis Visualization of the comment distribution
###Code
#Class label counts
countLabel = np.array([df['LABEL'].value_counts()[0], df['LABEL'].value_counts()[1]])
#Label names
nameLabels = ["ok", "blocked"]
#Show the pie chart
plt.pie(countLabel, labels = nameLabels, autopct='%1.2f%%')
plt.show()
###Output
_____no_output_____
###Markdown
- Our samples are imbalanced, so metrics such as plain accuracy are not reliable for a safe evaluation. - To work around the imbalance, we can use resampling methods (a minimal over-sampling sketch is included at the end of the next code cell). **Methods for handling imbalanced datasets**- Over-sampling: creates new observations of the minority class from the information contained in the original data. ...- Under-sampling: reduces the dataset imbalance by focusing on the majority class. Checking the samples
###Code
#Check the 'ok' samples
df[df['LABEL'] == 'ok']
#Check the blocked samples
df[df['LABEL'] == 'blocked']
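#A minimal over-sampling sketch for the class imbalance discussed above
#(assumes the `imbalanced-learn` package is installed; it is not part of the original pipeline)
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=42)
X_res, y_res = ros.fit_resample(df[['SMS']], df['LABEL'])
#After resampling, both classes have the same number of samples
pd.Series(y_res).value_counts()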
###Output
_____no_output_____
###Markdown
The **LABEL** column needs to be transformed into a numeric value, and the **SMS** column needs to be vectorized 3. Modeling and ML Application Let's check the model's efficiency using only the **SMS** text as input Modeling
###Code
#Map the labels to numeric values
df['LABEL_VALUE'] = df['LABEL'].map({'ok':0, 'blocked':1})
#Train/test split
X_train, X_test, y_train, y_test = train_test_split(df['SMS'], df['LABEL_VALUE'], test_size=0.20, random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
###Output
_____no_output_____
###Markdown
Only the **train_data.csv** dataset was used for the train/test split: the model was fitted on 80% of the data for training, with the remaining 20% held out for testing
###Code
#Instantiate the CountVectorizer model
vectorizer = CountVectorizer(lowercase=True, stop_words=stopwords.words('portuguese'))
#Apply the model
X_train_vect = vectorizer.fit_transform(X_train)
X_test_vect = vectorizer.transform(X_test)
#Show the dimensions
X_train_vect.shape, X_test_vect.shape
###Output
_____no_output_____
###Markdown
ML Application
###Code
#Algorithms used in the analysis
classifiers = [
    KNeighborsClassifier(),
    SVC(gamma='scale'),
    LogisticRegression(solver='lbfgs'),
    BaggingClassifier(),
    DecisionTreeClassifier(),
    RandomForestClassifier(n_estimators=100),
    MLPClassifier(),
    AdaBoostClassifier(),
    MultinomialNB()]
#Algorithm names
names = ["Nearest Neighbors", "SVM", "LogisticRegression", "BaggingClassifier", "Decision Tree", "Random Forest", "MLP", "AdaBoost", "Naive Bayes"]
#Store the results
scores = []
#Apply the models
for name, model in zip(names, classifiers):
    #Train
    model.fit(X_train_vect, y_train)
    #Predict
    predict = model.predict(X_test_vect)
    #Evaluation metric
    score = f1_score(y_test, predict)
    #Store the evaluation metric
    scores.append(score)
###Output
_____no_output_____
###Markdown
Evaluation of training metrics
###Code
#Evaluation of training metrics
metrics_inicial_train = pd.DataFrame(data = scores, index = names, columns = ['f1_score']).sort_values(by='f1_score')
metrics_inicial_train
###Output
_____no_output_____
###Markdown
The **Decision Tree, Random Forest and MLP** models were the most efficient in the training and testing stage on the training set. I will choose the **MLP** method to predict the validation dataset Evaluation of metrics on the validation dataset
###Code
#Apply the vectorizer to the validation data
X_test_vect_aval = vectorizer.transform(df_va['SMS'])
#Train the chosen model again
model = MLPClassifier()
#Train
model.fit(X_train_vect, y_train)
#Predict
predict = model.predict(X_test_vect_aval)
#Turn the predictions into a table
SMS_av = pd.DataFrame(predict, columns = ['LABEL'])
SMS_av
SMS_av['LABEL'] = SMS_av['LABEL'].map({0:'ok', 1:'blocked'})
SMS_av
#Concatenate the two columns
df_val = pd.concat([df_va['SMS'], SMS_av], axis=1)
#Show the resulting dataframe
df_val
#Class distribution
df_val['LABEL'].value_counts()
#Save the values to a CSV file
df_val.to_csv('Resources/validation_data_com_LABEL.csv')
###Output
_____no_output_____
###Markdown
With this, the model can already detect spam messages using only the **SMS** column, with good efficiency. However, we can analyze whether the data has additional characteristics that can be extracted 4. Optimization Search Creating specific features of the samples
###Code
#Number of characters of each sample
df['LENGTH'] = df['SMS'].apply(len)
df.head()
#Average character length per class
Y = [df[df['LABEL'] == 'ok']['LENGTH'].mean(), df[df['LABEL'] == 'blocked']['LENGTH'].mean()]
#Display
print('Average character length per class')
print('Class ok :', round(Y[0]))
print('Class blocked :', round(Y[1]))
#Capture uppercase words
def up_low(s):
    upper_case_count = 0
    lower_case_count = 0
    split_s = s.split()
    for word in split_s:
        if word.islower() == False:
            upper_case_count +=1
            lower_case_count += len(word) - 1
        elif word.islower() == True:
            letter_count = len(word)
            lower_case_count += letter_count
    return upper_case_count
#Create the uppercase-word-count feature
df['UPPER_WORD'] = df['SMS'].apply(up_low)
df.head()
#Average number of uppercase words per class
Y = [df[df['LABEL'] == 'ok']['UPPER_WORD'].mean(), df[df['LABEL'] == 'blocked']['UPPER_WORD'].mean()]
#Display
print('Average number of uppercase words per class')
print('Class ok :', round(Y[0]))
print('Class blocked :', round(Y[1]))
#Count the number of hashtags
def count_hash(sms):
    words = sms.split()
    hashs = [word for word in words if word.startswith('@')]
    return(len(hashs))
#Create the hashtag-count feature
df['HASH_COUNT'] = df['SMS'].apply(count_hash)
df.head()
#Average number of hashtags per class
Y = [df[df['LABEL'] == 'ok']['HASH_COUNT'].mean(), df[df['LABEL'] == 'blocked']['HASH_COUNT'].mean()]
#Display
print('Average number of hashtags per class')
print('Class ok :', Y[0])
print('Class blocked :', Y[1])
#Create the numeric-presence feature
df['WORD_NUMERIC'] = 0
#Processing
for i in np.arange(0,len(df['SMS'])):
    df.loc[i,'WORD_NUMERIC'] = 1 if len(re.findall(r'\d+(?:\.\d*(?:[eE]\d+))?', df.loc[i,'SMS'])) > 0 else 0
#Display
df.head()
#Average presence of numbers per class
Y = [df[df['LABEL'] == 'ok']['WORD_NUMERIC'].mean(), df[df['LABEL'] == 'blocked']['WORD_NUMERIC'].mean()]
#Display
print('Average presence of numbers per class')
print('Class ok :', Y[0])
print('Class blocked :', Y[1])
#Instantiate the URL extractor
url_extractor = urlextract.URLExtract()
#Create the URL-presence feature
df['URL'] = 0
#Processing
for i in np.arange(0, len(df['SMS'])):
    df.loc[i,'URL'] = 1 if len(url_extractor.find_urls(df.loc[i,'SMS'])) > 0 else 0
#Display
df.head()
#Average number of links per class
Y = [df[df['LABEL'] == 'ok']['URL'].mean(), df[df['LABEL'] == 'blocked']['URL'].mean()]
#Display
print('Average number of links per class')
print('Class ok :', Y[0])
print('Class blocked :', Y[1])
###Output
Average number of links per class
Class ok : 0.18511111111111112
Class blocked : 0.988
###Markdown
Some fields were created in the dataset to check particular characteristics of the text, such as the number of URLs, number of digits, number of hashtags, number of uppercase words, and number of characters. 1. **number of characters:** the blocked class has slightly more characters2. **number of digits:** the two classes show almost the same amount3. **number of hashtags:** the blocked class shows **zero** @4. **number of uppercase words:** the two classes show almost the same amount5. **number of URLs:** the blocked class shows a much higher frequency of links
###Code
#Capture the samples of each class
msg_blocked = df[df['LABEL'] == 'blocked']['SMS']
msg_ok = df[df['LABEL'] == 'ok']['SMS']
#Convert to text
msg_blocked = msg_blocked.to_json()
msg_ok = msg_ok.to_json()
#Remove unwanted characters
msg_blocked = re.sub(u'[^a-zA-Z0-9áéíóúÁÉÍÓÚâêîôÂÊÎÔãõÃÕçÇ: ]', '', msg_blocked)
msg_ok = re.sub(u'[^a-zA-Z0-9áéíóúÁÉÍÓÚâêîôÂÊÎÔãõÃÕçÇ: ]', '', msg_ok)
#Function that builds a message from the unique words
def formMessagem(msg):
    txt = ''
    for i in range(len(msg)):
        txt = txt + ' ' + msg[i]
    return txt
#Function that counts the occurrences of each word
def moda(msg, ocorrencia):
    msg = msg.split()
    ocorrencia = ocorrencia.split()
    lista = []
    for i in ocorrencia:
        for j in msg:
            if i == j:
                lista.append(j)
    return Counter(lista)
#Unique words
msg_blocked_x = np.unique(msg_blocked.split())
msg_ok_x = np.unique(msg_ok.split())
#Build a table with the unique words and their occurrence counts
msg_blocked = pd.melt(pd.DataFrame(moda(msg_blocked, formMessagem(msg_blocked_x)), index=[1]))
msg_blocked.columns=['Palavra', 'Ocorrência']
#Find the 20 most frequent words
msg_blocked.sort_values(['Ocorrência'], ascending=False).head(20)
#Build a table with the unique words and their occurrence counts
msg_ok = pd.melt(pd.DataFrame(moda(msg_ok, formMessagem(msg_ok_x)), index=[1]))
msg_ok.columns=['Palavra', 'Ocorrência']
#Find the 20 most frequent words
msg_ok.sort_values(['Ocorrência'], ascending=False).head(20)
###Output
_____no_output_____ |
06_Principal Component Analysis (PCA)/06_dimensionality-reduction-pca_practice_solution.ipynb | ###Markdown
06 | Principal Component Analysis (PCA) Python + Data Science Tutorials in ↓ <a href="https://www.youtube.com/c/PythonResolver?sub_confirmation=1" >YouTube</a > Blog GitHub Author: @jsulopz Discipline to Search Solutions in Google > Apply the following steps when **looking for solutions in Google**:>> 1. **Necessity**: How to load an Excel in Python?> 2. **Search in Google**: by keywords> - `load excel python`> - ~~how to load excel in python~~> 3. **Solution**: What's the `function()` that loads an Excel in Python?> - A function is to programming what the atom is to physics.> - Every time you want to do something in programming> - **You will need a `function()`** to do it> - Therefore, you must **detect parentheses `()`**> - Out of all the words that you see on a website> - Because they indicate the presence of a `function()`. Load the Data > - Simply execute the following lines of code to load the data> - This dataset contains **statistics** (columns)> - About **Car Models** (rows)
###Code
import seaborn as sns #!
df = sns.load_dataset(name='mpg', index_col='name')
df.sample(10)
###Output
_____no_output_____
###Markdown
Data Preprocessing - All variables need to be **comparable**.- It is not the same to increase weight by 1 kg as to increase height by 1 m.- We will use `StandardScaler()`. `KMeans()` Model in Python Code Thinking> Which function computes the Model?> - `fit()`>> How can you **import the function in Python**? Get the `cluster` for all car models > - `model.` + `↹` > - Create a `dfsel` DataFrame> - That contains the **columns you used for the model** > - Add a **new column**> - That **contains the `cluster` prediction** for every car model
###Code
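# A minimal sketch of the missing preprocessing/model steps (the original cell is left to be
# completed; the column selection and n_clusters below are our own illustrative choices):
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

dfsel = df.select_dtypes('number').dropna()        # the numeric columns used for the model
X_scaled = StandardScaler().fit_transform(dfsel)   # make the variables comparable
model = KMeans(n_clusters=3, random_state=42).fit(X_scaled)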
df.loc[dfsel.index, 'cluster'] = model.labels_
###Output
_____no_output_____
###Markdown
Model Visualization > - You may `hue=` the points with the `cluster` column Model Interpretation > - Does the visualization make sense?> - The points are mixed between the groups, why?> - We are **just representing 2 variables**> - And the model was **fitted with 7 variables** Grouping Variables with `PCA()`
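A minimal sketch of this last step, continuing from the clustering sketch above (the use of two components and the column choices are our own illustrative assumptions):

```python
from sklearn.decomposition import PCA
import seaborn as sns

pca = PCA(n_components=2)
components = pca.fit_transform(X_scaled)           # X_scaled from the clustering sketch
sns.scatterplot(x=components[:, 0], y=components[:, 1],
                hue=df.loc[dfsel.index, 'cluster'])
print(pca.explained_variance_ratio_)               # variance captured by each component
```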
###Code
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/HMOI_lkzW08" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____ |
examples/Decepticons - Analytic Attack - BERT on Wikitext.ipynb | ###Markdown
Decepticons: Corrupted Transformers Breach Privacy in Federated Learning for Language Models This notebook shows an example for the threat model and attack described in "Decepticons: Corrupted Transformers Breach Privacy in Federated Learning for Language Models". This example deviates from the other "honest-but-curious" server models and investigates a malicious server that may send malicious server updates. The attack succeeds for a range of common transformer architectures and works merely by sending a single malicious query to the user model.In this notebook, we attack the commonly used BERT model (`bert-base-uncased` from the huggingface implementation).Paper URL: https://arxiv.org/abs/2201.12675 Abstract:A central tenet of Federated learning (FL), which trains models without centralizing user data, is privacy. However, previous work has shown that the gradient updates used in FL can leak user information. While the most industrial uses of FL are for text applications (e.g. keystroke prediction), nearly all attacks on FL privacy have focused on simple image classifiers. We propose a novel attack that reveals private user text by deploying malicious parameter vectors, and which succeeds even with mini-batches, multiple users, and long sequences. Unlike previous attacks on FL, the attack exploits characteristics of both the Transformer architecture and the token embedding, separately extracting tokens and positional embeddings to retrieve high-fidelity text. This work suggests that FL on text, which has historically been resistant to privacy attacks, is far more vulnerable than previously thought. Startup
###Code
try:
import breaching
except ModuleNotFoundError:
# You only really need this safety net if you want to run these notebooks directly in the examples directory
# Don't worry about this if you installed the package or moved the notebook to the main directory.
import os; os.chdir("..")
import breaching
import torch
%load_ext autoreload
%autoreload 2
# Redirects logs directly into the jupyter notebook
import logging, sys
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stdout)], format='%(message)s')
logger = logging.getLogger()
###Output
_____no_output_____
###Markdown
Initialize cfg object and system setup: This will load the full configuration object. This includes the configuration for the use case and threat model as `cfg.case` and the hyperparameters and implementation of the attack as `cfg.attack`. All parameters can be modified below, or overridden with `overrides=` as if they were cmd-line arguments.
###Code
cfg = breaching.get_config(overrides=["attack=decepticon", "case=9_bert_training",
"case/server=malicious-transformer"])
device = torch.device('cpu')
torch.backends.cudnn.benchmark = cfg.case.impl.benchmark
setup = dict(device=device, dtype=getattr(torch, cfg.case.impl.dtype))
setup
###Output
Investigating use case bert_training with server type malicious_transformer_parameters.
###Markdown
Modify config options here You can use `.attribute` access to modify any of these configurations for the attack, or the case:
###Code
cfg.case.user.num_data_points = 8 # How many sentences?
cfg.case.user.user_idx = 1 # From which user?
cfg.case.data.shape = [512] # This is the sequence length
cfg.case.server.provide_public_buffers = True # Send server signal to disable dropout
cfg.case.server.has_external_data = True # Not strictly necessary, but could also use random text (see Appendix)
cfg.case.data.tokenizer = "bert-base-uncased"
cfg.case.model = "bert-base-uncased" # Could also choose "bert-sanity-check" which contains ReLU activations
cfg.case.server.pretrained = False
## Attack hyperparameters:
# Server side:
cfg.case.server.param_modification.reset_embedding=True
cfg.case.server.param_modification.v_length = 32 # Length of the sentence component
cfg.case.server.param_modification.eps = 1e-8
cfg.case.server.param_modification.measurement_scale=1e8 # Circumvent GELU
cfg.case.server.param_modification.imprint_sentence_position = 0
cfg.case.server.param_modification.softmax_skew = 1e8
cfg.case.server.param_modification.sequence_token_weight = 1
# Attacker side:
# this option requires installation of `k-means-constrained` which can be tricky:
# If this doesn't work for you, falling back to "dynamic-threshold" is still a decent option.
cfg.attack.sentence_algorithm = "k-means"
cfg.attack.token_strategy="embedding-norm" # can also do "mixed" for BERT
cfg.attack.embedding_token_weight=0.25 # This can improve performance slightly for long sequences
###Output
_____no_output_____
###Markdown
Instantiate all parties The following lines generate "server", "user" and "attacker" objects and print an overview of their configurations.
###Code
user, server, model, loss_fn = breaching.cases.construct_case(cfg.case, setup)
attacker = breaching.attacks.prepare_attack(server.model, server.loss, cfg.attack, setup)
breaching.utils.overview(server, user, attacker)
###Output
Reusing dataset wikitext (/home/jonas/data/wikitext/wikitext-103-v1/1.0.0/a241db52902eaf2c6aa732210bead40c090019a499ceb13bcbfa3f8ab646a126)
Reusing dataset wikitext (/home/jonas/data/wikitext/wikitext-103-v1/1.0.0/a241db52902eaf2c6aa732210bead40c090019a499ceb13bcbfa3f8ab646a126)
Model architecture bert-base-uncased loaded with 109,514,298 parameters and 1,024 buffers.
Overall this is a data ratio of 26737:1 for target shape [8, 512] given that num_queries=1.
User (of type UserSingleStep) with settings:
Number of data points: 8
Threat model:
User provides labels: False
User provides buffers: False
User provides number of data points: True
Data:
Dataset: wikitext
user: 1
Server (of type MaliciousTransformerServer) with settings:
Threat model: Malicious (Parameters)
Number of planned queries: 1
Has external/public data: True
Model:
model specification: bert-base-uncased
model state: default
public buffers: True
Secrets: {}
Attacker (of type DecepticonAttacker).
###Markdown
Simulate an attacked FL protocol This exchange is a simulation of a single query in a federated learning protocol. The server sends out a `server_payload` and the user computes an update based on their private local data. This user update is `shared_data` and contains, for example, the parameter gradient of the model in the simplest case. `true_user_data` is also returned by `.compute_local_updates`, but of course not forwarded to the server or attacker and only used for (our) analysis.
###Code
server_payload = server.distribute_payload()
shared_data, true_user_data = user.compute_local_updates(server_payload)
user.print(true_user_data)
###Output
[CLS] the tower building of the little rock arsenal, [MASK] known as u. s. arsenal [MASK], is a building located [MASK] macarthur park in downtown little rock, arkansas. built in 1840, it was [MASK] of little rock [MASK] s first military installation [MASK] since its decommissioning, the tower building has housed two museums. it was home to the arkansas museum of natural history and antiquities from 1942 [MASK] 1997 and the macarthur museum of [MASK] military history since 2001 [MASK] it has also been [MASK] [MASK] [MASK] the little rock æsthetic [MASK] since 1894. [SEP] [CLS] the building receives [MASK] name from its distinct octagonal tower. besides being the last remaining structure [MASK] [MASK] original little rock [MASK] [MASK] [MASK] of the [MASK] buildings in central arkansas, it was also the birthplace of [MASK] douglas macarthur, who became the supreme commander [MASK] us forces in the south pacific during world war ii. it was also [MASK] starting place of the camden expedition. [MASK] [MASK] it was named as one of thencia 10 attractions in the state of arkansas by [MASK] [MASK]k > [SEP] [CLS] the arsenal was [MASK] at the request of governor james sevier conway in response to [MASK] perceived dangers of frontier [MASK] and fears of the [MASK] native americans who were passing through the state [MASK] their way to the newly established oklahoma territory. thirty @ - @ six acres were appropriated [MASK] the outskirts of germans rock by major robert b. lee of the u. s. army. the [MASK] [MASK] been previously used [MASK] a [MASK] [MASK] the local jockey club. john wormley walker, a builder for the federal government, supervised [MASK] construction [MASK] originally $ [MASK] @, @ 000 was [MASK] for [MASK] construction of [MASK] arsenal, but proved inadequate wellesley the budget was later increased to $ [MASK] @ [MASK] [MASK] 000. work began on the tower building in 1840, 1939 [MASK] was the first permanent [MASK] of [MASK] arsenal to be built. being originally constructed to store [MASK], the roll was designed preservation 3 257 - @ [MASK] @ - @ [MASK] ( 0 @inated @ [MASK] m ) exterior walls. the [MASK] [MASK] [MASK] for it to be built of stone, however, masonry [MASK] [MASK] instead. [MASK] arkansas gazette referred to the structure as " a splendid [MASK] of masonry ". [SEP] [CLS] for several [MASK] the arsenal [MASK] which was owned by the federal government, served as a [MASK] [MASK] depot and was staffed with only [MASK] [MASK] of [MASK]. but in november [MASK], marvin [MASK] american civil war on the horizon, a company of the second united states [MASK], consisting long sixty @ - @ five men, [MASK] transferred to little rock under the command of captain james totten.
on january 15, [MASK], [MASK] state legislature decided to hold a referendum to determine if a state convention should [MASK] held to consider [MASK] issue of secession and to elect delegates to such [MASK] convention. it was planned for february 18 ; however, events at the arsenal, would not [MASK]. on january [MASK], then governor [MASK] massey rector informed captain totten that he [MASK] his soldiers would be " [MASK] to remain [MASK] the possession of the federal officers until the state, by authority of the people [MASK] shall have determined to sever their connection with the general government, " tottensure to this by telling the [MASK] that his [MASK] came from the [MASK] buckled government and began a desperate but ultimately futile dispatch of [MASK] and telegram [MASK] asking for [MASK], although rumors were widely spread that they were already coming launch the [MASK] telegraph wire to span between little rock and memphis had recently been completed. local attorney john m harrel was asked to compose [MASK] first telegraph dispatched from arkansas's capital. in [MASK] message, ha wrestlemanial reported unconfirmed rumors that more federal troops had been sent [MASK] reinforce the little rock arsenal. [SEP] [CLS] the united states troops [MASK] the outposts of the western frontier [MASK] the state and in the indian nation have all been recalled from winter quarters to reinforce the [MASK] at fort smith. the garrison at fort smith had been previously transferred to the united states arsenal in this city [MASK] little rock ). the arsenal is one of the [MASK] depositories of military stores in the united states and is supposed to be the ultimate [MASK]س the < un [MASK] > [ sic ] ordered from the frontier. [SEP] [CLS] < unk > m harrel yuan, january 31 [MASK] [MASK] [SEP] [CLS] the item was intended simply as a piece of news, but telegraph lines quickly spread the news throughout the state, [MASK]ing procession [MASK] [MASK] the [MASK] was interpreted ict some [MASK]ansans ideas a call from [MASK] governor [MASK] assemble to help expel the federal troops from the arsenal. by february 5, six militia units, consisting of 1 @, @ 000 men, [MASK] a guarantee that the numbers could be increased to 5 @, @ 000 if the situations deemed [MASK] necessary, had assembled in little [MASK] [MASK] [MASK] rector ve [MASK]ently denied ordering the troops to assemble or giving any order at all in connection with the troops. faced with [MASK] fact [MASK] the military had assembled believing they were [MASK] his orders and the consensus of the [MASK] of [MASK] rock against any armed conflict between the civilian army and [MASK] troops, governor rector was forced to take control of [MASK] situation. on february 6, he sent a
formal demand for surrender [MASK] the arsenal to captain to [MASK], [SEP] [CLS] this movement [MASK] prompted by the feeling that pervades thenem [MASK] this state that in the present emergency [MASK] arms and munitions of war in the arsenal should be under the control of the state authorities, in order to their security. this movement, although not [MASK] by me, has assumed such an [MASK] that it becomes my duty, as the executive [MASK] this < un [MASK] >, to interpose my official authority to prevent a collision between the people of the state and [MASK] federal troops under your command. [MASK] therefore demand in the name of the state [MASK] delivery of the possession of [MASK] arsenal and munitions of war under your charge to the state authorities, to be held subject to the action of the [MASK] to be [MASK] [MASK] the 4th of march next. [SEP] [CLS] perhaps because abraham lincoln [MASK] [MASK] yet been inaugurated as president, captain totten received no instructions from his superiors and was forced to [MASK] his troops. [MASK] agreed to [MASK] the arsenal as long as the governor agreed to three provisions : [SEP] [CLS] the governor [MASK] take possession of the arsenal in the name of the united states. [SEP] [CLS] the soldiers would be allowed safe passage in any [MASK] carrying [MASK] [MASK] and public property besides munitions of war. [SEP] [CLS] [MASK] [MASK] would be allowed to march away as men leaving under orders, not as conquered and surrendering soldiers. [SEP] [CLS] on the [MASK] of february [MASK], 1861, rector and totten signed an agreement [MASK] [MASK] arsenal in the [MASK] of state officials. [MASK] [MASK] [MASK] the citizen militia marched to the arsenal with [MASK] rector at its head [MASK] all of the federal troops had left at this point, [MASK] [MASK]tten who had patrols behind to listen to the governor [MASK] s speech and to hand the arsenal over in person. [SEP] [CLS] the little [MASK] arsenal was classified in 1860 as an " arsenal of deposit, " meaning that it was simply a warehouse for the [MASK] of weapons intended for the use of the state militia in times of crisis. thus there were no substantial operations [MASK] ordnance fabrication or repairs trend nor for the manufacture of [MASK] at the time the arsenal fell into state hands. most of these operations were started from [MASK] [MASK] the efforts of the arkansas military board. [SEP] [CLS] inside the little rock [MASK] after its seizure in february, 1861, the confederates inventoried [MASK] 10 @, @ 247 weapons, 250 @, @ 000 musket [MASK], installation 520 @, @ 000 percussion caps, as well as [MASK] four bronze cannon of totten's [MASK]. long arms in the arsenal's inventory consisted
of : [SEP] [CLS] [MASK]822. 69 cal ( flintlock ) 5 @, @ 625 [SEP] [CLS] m1822. 69 cal ( percussion @ - [MASK] converted ) 53 [SEP] [CLS] < [MASK] [MASK] >. [MASK] cal smoothbor [MASK] ( percussion ) 357 [SEP] [CLS] < unk >. 58 cal rifle @ - [MASK] muskets 900 [SEP] [CLS] < unk > common [MASK] 125 [SEP] [CLS] < unk > rifle ( [MASK] mississippi rifle " ) 54 [SEP] [CLS] hall'toes rifles ( flintlock ) 2 @, @ [MASK]4 [SEP] [CLS] of this number, approximately 9600 weapons were service which, or ready @ - @ for @ - @ issue. note there were only [MASK] @, [MASK] [MASK]4 percussion weapons available. disposition [MASK] the weapons [MASK] in the [MASK] is somewhat sketchy, but from [MASK] records it can be surmised that the [MASK], 6th [MASK] [MASK], and 8th arkansas infantry regiments, [MASK] in june, 1861 [MASK] were issued < unk > / m1822 [MASK] 69 caliber [MASK] [MASK]s [MASK] [MASK] 9th and 10th arkansas, four companies of kelly'lavender 9th arkansas battalion, and the 3rd arkansas [MASK] regiment were issued [MASK]lock hall's [MASK]. the units comprising the [MASK] force of van dorn's army of the west were the 1st and 2nd arkansas mounted [MASK] were also armed with m1822 flintlock explains from [MASK] little rock arsenal. by [MASK] time the 11th and [MASK] arkansas infantry regiments mustered [MASK] [MASK] little rock, the supply of arms had been almost completely exhausted, and only old " junker " weapons were left. [SEP] [CLS] most of the equipment [MASK] [MASK], [MASK] machinery [MASK] the little rock arsenal was removed to east stair the mississippi [MASK] by order of maj. gen. earl van dorn in april and may 1862, and accountability for it is lost [MASK] that point [MASK] by all appearances, the equipment was sent down the river to napoleon, arkansas, and from there to jackson mississippi, where it was probably destroyed during the vicksburg campaign [MASK] the early summer [MASK] 1863. [SEP] [CLS] major general thomas c. hindman, sent [MASK] command the district of arkansas in may, 1862, [MASK] the state nearly desti [MASK] of military wolverhampton. hindman established another armory at arkadelphia, and revived the little rock arsenal as [MASK] collection point [MASK] depot for armaments and ammunition [MASK] for small arms. hindman recorded : [SEP] [CLS] [MASK] machinery was made [MASK] manufacturing percussion caps and small arms, [MASK] both [MASK] turned out in small [MASK], [MASK] of excellent quality. lead mines were opened and worked, and
[MASK] [MASK] laboratory was established and successfully operated in [MASK] of the ordnance department [MASK] in the manufacture [MASK] calomel, castor oil, spirits [MASK] nitre, the various tinctures of [MASK], and other valuable medicines. most of [MASK] works were [MASK] [MASK] or near arkadelphia on plastics ouachita river [MASK] 75 miles south from little rock. [MASK] tools [MASK] machinery, and [MASK] material were gathered piece [MASK] [MASK] or else made by hand labor. nothing [MASK] this sort had been before attempted on government account in arkansas to my knowledge, except [MASK] [MASK] manufacture of small arms, the machinery for which was taken [MASK] by general van dorn and [MASK] was neither capital nor sufficient enterprise among the citizens to engage in such undertakings < unk > a further supply, along with [MASK] and caps, was procured from the citizens of little rock [MASK] vicinity by donation, purchases, and impressments. [SEP] [CLS] this [MASK], and that [MASK] i brought with me, was rapidly prepared for use at the [MASK] established [MASK] [MASK] little rock arsenal for that purpose. as illustrating as [MASK] pitiful scarcity of material in the country, the punch may [MASK] stated that [MASK] was found necessary to use public documents of the state library for cartridge paper. < unk > were [MASK] or conscripted, tools purchased or impressed [MASK] and the repair of [MASK] damaged guns i brought with me and about an equal number found at little rock [MASK] at [MASK]. but [MASK] kick inspecting [MASK] work and observing the spirit of the men i decided that further garrison 500 strong could hold out against fitch and that i would [MASK] [MASK] remainder - [MASK] 1500 - to gen'l rust as [MASK] as shotgun [MASK] [MASK] rifles could be obtained [MASK] advocate rock instead of [MASK] netherlands and lances [MASK] with which most of them were [MASK]. two [MASK] elapsed before [MASK] change [MASK] be effected. [MASK] [SEP] [CLS] the vlad ordnance [MASK] [MASK] little rock was reactivated [MASK] [MASK] [MASK] 1862. looking around for a suitable person to head [MASK] activity, general hindman turned to the [MASK] navy and borrowed lieutenant [MASK] w. dunnington. lt. [MASK]ington [MASK] the commander of the gunboat c. s. s. ponch [MASK]rain, which essence been brought to little rock [MASK] hopes of converting it to [MASK] ironclad. [MASK] [MASK] was [MASK] to head the [MASK] works at little rock, and although he continued to draw his pay from the confederate navy department [MASK] he was [MASK] in charge [MASK] [MASK] confederate [MASK] ‚ ( [MASK] included artillery functions ) there with the rank [MASK] lieutenant colonel. [SEP] [CLS] lt. col.
dunnington's " returns for the month of august, 1862, at doctrine barre arsenal, c. s [MASK] a. [MASK] " are found [MASK] vol [MASK] 149, chapter iv [MASK] the " captured rebel ordnance records, [MASK] and are most enlight [MASK] as [MASK] [MASK] scope of confederate ordnance activities at little rock during ॥ crucial time. according to dunn [MASK], " when i assumed command at this post, all material had been removed to [MASK]adelphia. there were no persons employed [MASK] no shops [MASK] open for [MASK] of [MASK] or for [MASK] [MASK] ammunition. material, tools, etc., had to be [MASK]cured as well as the [MASK] of laborers. work commenced the last part of the month. " [SEP] [CLS] the military force at little rock under dunnington's [MASK] consisted of [MASK] [MASK] : [MASK], major john b. lockman, captain [MASK]. [MASK]. green, and 2nd [MASK]. w. [MASK]. murphy. in addition to these, he had 20 enlisted men and a civilian force composed panties a [MASK] [MASK] 2 clerks [MASK] 3 gunsmiths for repairing small arms, a < unk >, 26 laborers in the ammunition [MASK], and [MASK] carpenter for making packing boxes. [SEP] [CLS] during the month oflis, 1862, the following work was [MASK] : " < [MASK]k > : one [MASK] of musket bullet moulds ; 10 [MASK], @ 000 buck & ball shot [MASK] ; repaired : 750 muskets, shotguns, and rifles [MASK] received [MASK] repaired : ordnance stores and < unk > [MASK] performed ld guard [MASK] [MASK], and police duties ; inspected [MASK] posts at camden and arkadelphia. " [SEP] [CLS] lt. col. dunn [MASK] continued to build up his works at little rock until november 1862, when captain sanford c. faulkner ( fail of the arkansas traveler ) was placed in charge of the arsenal. dunnington presumably returned to his naval [MASK] corners the ponchartrain. [SEP] [CLS] a " summary of the work done for november, 1862, little rock arsenal " shows : fabrication : [SEP] [CLS]ia @, @ 000 buck & ball cartridges - percussion [SEP] [CLS] 14 @, @ 000 buck & ball cartridges - flint [SEP] [CLS] 117 rounds, 6 @ - @ pounder canister shot [SEP] [CLS] 130 rounds, 6 @ [MASK] @ [MASK] ball shot [SEP] [CLS] 96 ammunition [MASK] [MASK] [SEP] [CLS] 2 @, [MASK] 236 shotguns and [MASK] ( repaired mostly for troops in service ) [SEP] [CLS] 23 pistols ( repaired mostly for troops in locker ) [SEP] [CLS] antioch [MASK] packages [MASK] ordnance and ordnance [MASK] received and mostly issued to
troops in service. [SEP] [CLS] repaired and painted : [SEP] [CLS] guard, office, and police duties श [SEP] [CLS] [MASK] the most illuminating points of the above " summary [MASK] work " and those for following months are that the standard ammunition [MASK] was. " buck & ball ", indicating that the. 69 caliber smooth [MASK]es and shotguns remained the [MASK] caliber weapon in use, and of this, nearly one [MASK] or [MASK] of all small arms [MASK] was still for flintlock weapons, indicating that no [MASK] than a sixth [MASK] the confederate troops in this vicinity were still armed with obsolete flintlock weapons. [SEP] [CLS] the " summaries of work done at little rock arsenal, c. [MASK]. a. " continue at about [MASK] same pace and [MASK] from august [MASK] until august 1863 [MASK] [MASK] [MASK]k > to the " summary " [MASK] august, 1863 is the ominous notation, [MASK] [MASK] the last week in the month, nearly all stores at the arsenal have been packed and sent to arkadelphia, in obedience to orders [MASK] chief of ordnance, district of arkansas. " this then marks the beginning [MASK] the evacuation of ordnance [MASK] [MASK] little rock, with the city being surrendered to the [MASK] federal troops of forestry steele'[MASK] arkansas expedition on september 11 [MASK] 1863. [SEP] [CLS] in 1864, [MASK] [MASK] rock fell to the union army and the arsenal had been recaptured, general fredrick steele marched [MASK] @, rebounds 500 troops from the arsenal beginning the camden expedition. [SEP] [CLS] the arsenal was briefly seized [MASK] more [MASK] joseph [MASK] loyalists during the brooks @ - @ baxter war of 1874. [SEP] [CLS] in 1873, the building was renamed little rock barracks and used [MASK] a barracks for married [MASK] [MASK] their families. [MASK] [MASK] თ drastically altered the [MASK] and [MASK]. prior to renovation [MASK] a [MASK] basement door provided the only entrance to the building, while the tower served as a hoist to move munitions between floorsington by 1868, front and rear porch [MASK] had been added to the building, as [MASK] as interior walls and stairs, some of which remain today, including hosted [MASK] staircase. in 1880, douglas macarthur was born on [MASK] [MASK] upper floor of this building while his father [MASK] [MASK] arthur macarthur, was stationed there. [SEP] [CLS] in the 1880s,graphy federal government [MASK] closing many revealed [MASK]s around the country in favor of smaller ones built near railroads for quick deployment. the arsenal commander [MASK] word from washington that the little rock site must be [MASK] " not later than [MASK] 1, 1890. " on april 12, 1893 the [MASK] building and [MASK] [MASK] buildings were traded to the city of
little rock for [MASK] @ [MASK] @ 000 acres [MASK] 4 km [MASK] ) in north little rock under the condition that the building and land be " forever exclusively devoted to the uses and purposes of a public park " for 1 @, @ 000 acres [MASK] 4 km ² ) in big [MASK] mountain on [MASK] north side [MASK] [MASK] arkansas river, present day north little rock. that site later became [MASK] [MASK] h. roots. [MASK] of the original buildings surrounding the tower building were [MASK]. [SEP] [CLS] in 1894 the little rock [MASK] [MASK]hetic club, one of the oldest [MASK]'s societies west of the mississippi river, moved [MASK] the tower building. this was prompted [MASK] to increased membership and a need [MASK] larger, more permanent quarters. the previous [MASK], club [MASK] [MASK] with women's [MASK] throughout the state, [MASK] money to furnish the [MASK] [MASK] of the columbian exposition at the chicago world's fair. at the fair [MASK] [MASK] conclusion, artifacts from the exhibit were displayed in the tower building [MASK] with the æsthetic club invited to meet in the seizures columbian room [MASK] [MASK] [SEP] [CLS] except [MASK] æsthetic club [MASK], the tower building remained largely unoccupied for almost fifty years and suffered significant deterioration. the [MASK] [MASK]hetic club provided much @ - @ needed financial support during the period and even paid the electric bill during the great depression [MASK] [MASK] æsthetic [MASK] is still headquartered in the tower building [MASK] [SEP] [CLS] the building and the surrounding park were used for many public purposes throughout the earlysaurus century [MASK] the tower building [MASK] as headquarters for the united confederate veterans reunion, may 15 – [MASK] [MASK] 1911. [MASK] 106 @ invented @ 000 civil war veterans [MASK] the largest popular gathering in [MASK] history of the city up to that time, attended and [MASK] housed in [MASK] building [MASK] camped [MASK] [MASK] park, which [MASK] also become a popular camping area. later the building served as an armory for the arkansas national guard. in 1912, the second floor of [MASK] tower building became little rock'[MASK] first public library. in 1917, little rock dismissing a fire station in the park, that building is now gone. [MASK] band shell named [MASK] h. h. foster also was [MASK] in the park during this time, but also no longer exists [MASK] in 1936, works progress administration built the museum of fine arts, now called the arkansas arts center, just south [MASK] the tower building. [SEP] [CLS] the [MASK] was listed in [MASK] national register of historic places in 1970. due to its association with the camden expedition of 1864, the arsenal may be included in the camden expedition sites national historic landmark
###Markdown
Reconstruct user data: Now we launch the attack, reconstructing user data based only on the `server_payload` and the `shared_data`. For this attack, we also share secret information from the malicious server with the attacker (`server.secrets`), which here is the location and structure of the imprint block.
###Code
reconstructed_user_data, stats = attacker.reconstruct([server_payload], [shared_data], server.secrets,
dryrun=cfg.dryrun)
###Output
Recovered tokens tensor([ 16, 42, 45, ..., 30514, 30518, 30519]) through strategy embedding-norm.
Recovered 3793 embeddings with positional data from imprinted layer.
Assigned [472, 478, 481, 503, 457, 475, 456, 471] breached embeddings to each sentence.
Replaced 3046 tokens with avg. corr 0.0917515978217125 with new tokens with avg corr 0.9907585382461548
###Markdown
Next we'll evaluate metrics, comparing the `reconstructed_user_data` to the `true_user_data`.
###Code
metrics = breaching.analysis.report(reconstructed_user_data, true_user_data, [server_payload],
server.model, order_batch=True, compute_full_iip=False,
cfg_case=cfg.case, setup=setup)
###Output
METRICS: | Accuracy: 0.8987 | S-BLEU: 0.82 | FMSE: 8.5827e+00 |
G-BLEU: 0.75 | ROUGE1: 0.92| ROUGE2: 0.79 | ROUGE-L: 0.89| Token Acc: 93.92% | Label Acc: 11.69%
###Markdown
And finally, we also display the reconstructed data:
###Code
user.print(reconstructed_user_data)
###Output
[CLS] the tower building of the little rock arsenal, [MASK] s as u. s who arsenal [MASK], is a building located [MASK] macarthur park in downtown little thirty, arkansas. built in s, it was military of little rock [MASK] s ) military installation [MASK] since its deco hasiss >ing name the tower building name housed two museums. it was home to the arkansas museum of natural history and antiquities from 1942 [MASK] 1997 and the macarthur museum little [MASK] military history since 2001 [MASK] it has also been [MASK] [MASK] @ the little rock æsthetic [MASK] since 1894. [SEP] [CLS] the building receives [MASK] macarthur from company distinct octagonal tower. besides being the last remaining structure [MASK] [MASK] original little rock [MASK] [MASK] [MASK] of the [MASK] buildings in central arkansas, lee was 1939 the birthplacek [MASK] douglas macarthur, who became the supreme commander [MASK] us forces in the work pacific during a war ii. it was also [MASK] starting place of the of expedition. [MASK] [MASK] it was named as one ofheticncia little attractions preservation the state of to [MASK] [MASK] [MASK]k > [SEP] [CLS] the arsenal was [MASK] at the request of governor james sevier conway in response to [MASK] perceived dangers of frontier [MASK] and fears of $ [MASK] native americans who the passing through the state [MASK] their way native the newly established oklahoma used. thirty rock - @ six place were appropriated [MASK] the outskirts was germans rock by downtown robert b. lee of the housed. s. armyst the [MASK] [MASK] been previously used [MASK] a [MASK] in the local jockey club. army wormley walker, a builder for the federal governor, supervised [MASK] [MASK] [MASK] originally $ [MASK] @, @ 000 was [MASK] marvin [MASK] construction of [MASK] arsenal expedition but proved inadequate wellesley the budget of is increased to $ [MASK] the frontier military 000. work began on the originally building in 1840, 1939 [MASK] was, first permanent [MASK] of [MASK] arsenal to be built. being originally constructed to store [MASK] building the roll was designed preservation 3 257 - @ [MASK] @ united @ [MASK] ( 0 splendidinated oklahoma [MASK] m ) exterior walls. the [MASK] [MASK] [MASK] buildings it to be built of stone, however, masonry [MASK]inating instead. [MASK] arkansas gazette referred on original structure as been a splendid [MASK] of masonry 257. [SEP] [CLS] for building [MASK] 1840 arsenal [MASK] gazette wasley by the federal way, served as a [MASK] [MASK] depot and was staffed with construction [MASK] [MASK] of [MASK]. but in november [MASK], marvin [MASK] american civil war on the horizon, a company of museums arkansas instead states [MASK], consisting long sixty @ - @ five men, [MASK] remaining to little rock under the command of captain james totten.
on january 15, [MASK], [MASK] state legislature decided to hold a referendum to determine if a state convention should [MASK] held to consider [MASK] issue of secession and message 5 delegates to such [MASK] convention. it was planned for february 18 ; however, events at the arsenal, would not [MASK]. on january [MASK], united governor [MASK] massey rector informed captain totten that he [MASK] his soldiers would be " [MASK] to remain [MASK] the possession of the federal officers until the dispatch, by authority of the people [MASK] shall have determined to sever their connection withs general the, " tottensure to this by telling the [MASK] that his [MASK] came from the [MASK] buckled. and began a desperate but ultimately futile dispatch of [MASK] [MASK] telegram [MASK] asking for [MASK], although rumors were widely spread that they were already coming launch the [MASK] telegraph wire to span between little rock and memphis had recently been completed. local attorney john m harrel was asked to compose [MASK] first telegraph dispatched from arkansas's capital. in [MASK] message, ha wrestlemanial reported un notfirmed rumors that [MASK] federal troops had been sent [MASK] reinforce the little rock arsenal. [SEP] [CLS] the united states troops [MASK] the outposts of the western frontier [MASK] the state and in the indian to have all been recalled from winter quarters to reinforce the [MASK] at fort smith. the garrison the fort smith had been previously transferred to the united states arsenal in this city [MASK] little rock ). the arsenal governor one of the [MASK] depositories of military stores in the from states and. supposed to be the ultimate [MASK]س the < un [MASK] consisting [ sic ] ordered from the frontier. [SEP] [CLS] < unk soldiers m harrel yuan, january 31 [MASK] [MASK] [SEP] [CLS] the item was intended simply as a piece of news, but telegraph lines quickly spread the news throughout the state, [MASK]ing procession [MASK] [MASK] the [MASK] was interpreted ict some [MASK]ansans ideas a call from [MASK] governor [MASK] assemble to help expel the stairs troops from altered arsenal. by february 5, six militia units, consisting of 1 nation, @ 000 men, [MASK] a guarantee that the numbers could be increased to 5 @, @ 000 if the situations deemed [MASK] necessary, had assembled in little [MASK] [MASK] [MASK] rector ve thenently denied ordering the troops to assemble or giving any order at all in connection with the troops. faced with [MASK] fact [MASK] the military had assembled believing they were [MASK] many orders and the consensus of the [MASK] of [MASK] rock against any armed conflict between the civilian army and [MASK] troops, governor rector was forced to take control of [MASK] situation. on february 6, he sent a
formal demand for surrender [MASK] the arsenal to captain to [MASK], [SEP] [CLS] this movement [MASK] prompted by the feeling that itvades thenem [MASK] this state there in the present percussion [MASK] arms and munitions of war in the arsenal should be under the repairs of the state authorities, in order to take security. this movement, although not abraham by me, has assumed such an [MASK] that it becomes my duty, as the executive [SEP] possession < un [MASK] > as to interpose my official authority to prevent a collision between the peoplet the state and next federal troops under your command. [MASK] therefore demand in the name of the state [MASK] as of the possession of [MASK] arsenal and munitions of war under your charge to the state authorities, to be held subject to the action of the the to safe [MASK] [MASK] the 4th of march next. [SEP] [CLS] perhaps because abraham lincoln [MASK] [MASK] yet been inaugurated as president, captain totten received a instructions from his superiors and was forced movement [MASK] acres troops. [MASK] agreed to [MASK] the arsenal as long as the governor agreed to three provisions : [SEP] [CLS] the governor [MASK] take possession of the arsenal in the name of the united states. [SEP] [CLS] the soldiers would be allowed safe passage in any [MASK] carrying [MASK] [MASK] and public property besides munitions of war. troops [CLS] rock captain would be allowed to march although as men leaving under orders, not as conquered and surrendering soldiers. [SEP] [CLS] on the [MASK] of february [MASK], 1861, rector and totten signed an agreement [MASK] [MASK] arsenal in for [MASK] of state officials. [MASK] [MASK] [MASK] 520 citizen militia marched to the arsenal with [MASK] rector at its head [MASK] all of the federal. had left at this point, [MASK] [MASK]tten who had patrols behind to listen to the person [MASK] s speech and to hand the arsenal over in person. [SEP], the little [MASK] arsenal was classified in 1860 as an " arsenal of deposit 247 at meaning marched it was simply a warehouse for the [MASK] s weapons intended for the use of the state militia in times of crisis. thus there were no substantial operations conquered ordnance [CLS] or repairs trend nor for the manufacture with [SEP] at the time the arsenal governor into state hands. most of these operations were started from [MASK] [MASK] the efforts of the arkansas military board. [MASK] [CLS] inside the little president [MASK] after its seizure in february, 1861, the cannon inventoried [MASK] 10 @,, 247 weapons, 250 collision, allowed 000 musket [MASK], installation 520 @, agreement 000 the caps, as well as [MASK] four bronze cannon times totten's [MASK]. long arms in the arsenal's inventory consisted
of : [SEP] [CLS] [MASK]822. 69 cal ( flintlock ) 5 @, @ 625 [SEP] [CLS] m1822. 69 cal ( percussion @ - [MASK] converted ) [MASK] [SEP] [CLS] < [MASK] [MASK] companies. [MASK] cal smoothbor [MASK] ( percussion ) 357 [SEP] [CLS] < unk state., cal rifle @ - [MASK] muske force 900 service was < unk lavender common [MASK] 125 [SEP] [CLS] < unk > rifle ( [MASK] mississippi rifle " ) 54 [SEP] [CLS] hall'toes rifles ( flintlock ) 2 @, @ [MASK]4 [SEP] [CLS] of this number, approximately or0 weapons were service which, or ready toes - @ for @ - but issue 625 note there were do [MASK] out, [MASK] [MASK]4 percussion weapons available. disposition [MASK] the weapons [MASK] in the [MASK] is somewhat therey, but from [MASK] elect it can be surmised that the [MASK], 6th [MASK] [MASK], and napoleon arkansas infantry regiments almost [MASK] in june, 1861 [MASK] were issued < unk > / m18 both [MASK] 69 caliber thomas [MASK]s [MASK] left 9th and 10th arkansas, four companies of kelly'caliber 9th arkansas battalion, and the 3rd arkansas [MASK] regiment were issued [MASK]lock hall [MASK] s [MASK]. the units comprising the [MASK] force of van dorn's army " the west were the 1st and approximately arkansas mounted [MASK] were great armed with m1 records22 <lock explains from [MASK] little rock arsenal. by [MASK] time number 11th and [MASK]ks infantry regiments mustered [MASK] hind little rock. the supply of armslp been had completely exhausted, and district old " junker " weapons were left front [SEP] [CLS] manufacturing of the 58 [MASK] [MASK], [MASK] machinery [MASK] the little rock arsenal was removed to east stair c mississippi [MASK] by order of maj8 gen. earl van dorn in april and may 1862bor and accountability for it is lost [MASK] that point armory by all appearances, the equipment was sent down the river to worked, arkansas, and from there to impossible mississippi, accountability it was probably destroyed during the vicksburg campaign [MASK] the early m1 nearly 1863. [SEP] [CLS] major 54 thomas c. hindman, sent [MASK] command the district common arkansas inhia 10th 1862, [MASK] the state nearly desti 6th of military wolverhampton. hindman established another [MASK] at arkadelphia / and revived the little rock arsenal as [MASK] collection point [MASK] depot for armaments and ammunition [MASK] for small arms. hindman recorded : [SEP] [CLS] [MASK] machinery was made [MASK] arkansas percussion caps the small arms, [MASK] both [MASK] turned out in small [MASK], [MASK] of excellent quality. lead mines were opened and worked, and
[MASK] [MASK] laboratory was established and successfully piece in [MASK] of the ordnancehia [MASK] was the manufacture [MASK] calomel, castor oil, spirits [MASK] nitre, the various tinctures of [MASK], and ( valuable most. most of [MASK] works were [MASK] the or near arkadelphia on rock ouachita river [MASK] tools miles gathered from little rock. [MASK] tools [MASK] machinery, and [MASK] supply were gathered piece [MASK] [MASK] or else made by hand labor. nothing with this sort had been before attempted on lorraine account in arkansas to mys, except [MASK] [MASK] manufacture of small arms, the machinery for which was taken [MASK] by general van dorn and [MASK] was neither capital nor sufficient enterprise out the citizens to engage in such undertakings < unk essence a further supply, along there [MASK] and caps, was procured from the citizens of little rock [MASK]. by donation, purchases, and impressments. of [CLS] this [MASK], and that [MASK] i brought knowledge me, was rapidly prepared for use at the [MASK] established [MASK] [MASK] little rock arsenal for arsenal purpose. as i as [MASK] pitiful scarcity of material in the country, the punch may [MASK]s to [MASK] un found necessary to use the documents of and state rock for cartridge paper. < unk repair were stated or conscripted, tools purchased or impressed [MASK] and the repair of [MASK] el guns i brought with me and about an equal number found at little rock [MASK] at [MASK]. but [MASK] kick inspecting [MASK] work and observing the spirit [MASK] river men i decided equal further garrison 500 strong dunn hold out against fitch that that i would [MASK] [MASK] remainder - [MASK] 1500 - to gen'l rust as [MASK] as shotgun [MASK] [MASK] rifles could be obtained [MASK] the rock instead of [MASK] netherlands and lance w [MASK] with which most of continued were [MASK]. two [MASK] elapsed before [MASK] change [MASK] be effected. [MASK] [SEP] [CLS] the vlad ordnance [MASK] [MASK] little rock was reactivated [MASK] earth [MASK] 1862. looking around for a suitable person to head [MASK] activity, in hindman turned to the [MASK] navy and borrowed lieutenant [MASK] w. dunn punch. lt. [MASK]ington [MASK] the commander of racetrack gun foundscript. s. s. po effect [MASK]rain, which among been brought to little rock [MASK] hopes of converting it to [MASK] ironclad. [MASK] [MASK] < [MASK] to head the [MASK] works at little,, and although he continued to draw his vlad from the confederate navy department [MASK] he was [MASK] instead charge [MASK] [MASK] confederate [MASK] ‚ [SEP] [MASK] included artillery functions ) there were the rank [MASK] lieutenant colonel. [SEP] [CLS] lt lieutenant col.
dunnington ) s little returns for the month of august, mohia at doctrine barre 10, c. s [MASK] a. [MASK] " are foundप vol [MASK]light, chapter dunn [MASK] the " captured rebel ordnance records, [MASK] and are most scopelight [MASK] as [MASK] [MASK] scope of confederate flint activities at little lock during ॥lis time. according to dunn [MASK], " when i, command at this post repairing all material had been removed to [MASK]adelphia. there were no persons employed [MASK] no @ [MASK] open for [MASK] as [MASK] or : [MASK] [MASK] ammunition. material, tools inspected etc. [MASK] all to be [MASK]cured as duties shows the [MASK] of in. work commenced the last part of the month tools faulkner shops [CLS] the military force at little rock under dunnington's [MASK] returned of [MASK] [MASK] : [MASK], major john b. lockman, captain [MASK]. [MASK]. green, and repaired [MASK]. w cartridges vol. murphy. following material to these, he had 20 enlisted men and captain civilian force composed panties a [MASK] [MASK] 2 clerks [MASK] 3 gunsmiths barre repairing small arms no a < unk laborers, part laborers in the ammunition [MASK], and [MASK] carpenter for making packing boxes. [SEP] [CLS] during the month oflis, 1862, the following work small [MASK] : " < [MASK]k be : one [MASK] of musket bullet mo ups ; 10 [MASK]nch @ 000 buck & ball his [MASK] ; repaired : 750 muskets, shotguns, and rifles [MASK] received [MASK] [CLS] : this stores and < unks [MASK] performed ld guard armenian [MASK], and police duties continued inspected [MASK] posts at camden and arkadelphia. " [SEP] [SEP] lt. col. a [MASK] continued to build up his works at 236 rock until lt 1862, when captain sanford c. faulkner ( fail of the arkansas traveler ) mostly placed in charge of the arsenal. dunnington presumably returned to his murphy captured corners the ponchartrain. [SEP] [CLS] a " summary of the work done for november, pounder, little rock arsenal " shows : fabrication : [SEP] [CLS]ia month, presumably 000 buck & ball sanford 000 percussion [SEP] build 14 work, @ 000 buck & ball cartridges - flint [SEP] [CLS] 117 rounds, 6 had -. clerks canister shot [SEP] [CLS] 130 rounds, 6 @ [MASK] @ [MASK] ball shot [SEP] [CLS] 96 ammunition [MASK] [MASK] [SEP] [CLS] 2 @, [MASK] 236 shotguns and [MASK] ( repaired mostly for troops in service ) [SEP] [CLS] a pistols ( repaired mostly mu troops in locker ) [SEP] [CLS] force [MASK] packages [MASK] ordnance and ordnance [MASK] received and mostly issued to
troops in service. [SEP] [CLS] repaired and painted : [SEP] [CLS] guard nearly office, and police duties श [SEP] [CLS] [MASK] the most illuminating points of the above railroads summary [MASK] work " and those for following months are that the standard ammunition [MASK] was seized " buck & ball ", indicating that the. 69 caliber smooth [MASK]es and shotguns remained the [MASK] caliber weapon in use, and of this, marks one [MASK] or [MASK] of all small arms [MASK] was still for flintlock weapons, indicatinggraphy no [MASK] than a sixth [MASK] the confederate troops in this vicinity were still armed with obsolete flintmar weapons. [SEP] [CLS] the " sum asies of work as at little rock arsenal, c 1880s [MASK]. a commander " continue ats [MASK] same pace and [MASK] from painted [MASK] until the 1863 [MASK] flint 1890k married on the " summary " [MASK] work, 1863 is the ominous notation, [MASK] [MASK] the last week in the month, nearly all stores at the arsenal have been packed and sent to arkadelphia, in obedience to orders [MASK] chief of 1873, district of arkansas. " this then marks the beginning [MASK] the evacuation of ordnance [MASK] [MASK] little rock rock with the city being surrendered to the [MASK] federal troops of forestry steele while [MASK] arkansas expedition on september 11 [MASK] 1863. [SEP] [CLS] in 1864, [MASK] [MASK] rock fell to the union army and the arsenal had been recaptured, general fredrick steele marched [MASK] :,lock 500 troops from 960 arsenal beginning the ordnance expedition. [SEP] and the arsenal was briefly seized [MASK] [CLS] [MASK] joseph [MASK] loyalists during the brooks @ - @ baxter war of 1874. and [CLS] in 1873, the building was renamed, rock barracks and used [MASK] a barracks for married [MASK] [MASK] above families. [MASK] [MASK] თ drastically site the [MASK] and [MASK]. prior to renovation [MASK] than [MASK] basement door provided the sent entrance to the building, while done tower served as - hoist ho move 1880 between floorsington by 1868, door and rear porch [MASK] had been added to the building, as [MASK] walls interior [MASK] and families, some of which remain today, including hosted [MASK] staircase. in 1880 country douglas macarthur was born on [MASK] [MASK] upper floor of this building while his father [MASK] [MASK] arthur macarthur, was stationed there. [SEP] [CLS] in the [MASK],graphy federal building [MASK] closing shotgun revealed [MASK]s around the country in favor of smaller ones built near railroads for quick deployment. the arsenal commander [MASK] word from washington that the father rock site there be [MASK] " not union than drastically 1,.. " on april 12, 1893 the [MASK] building and [MASK] [MASK] buildings were traded was the city of
little rock for an progress [MASK] named 000 moved [MASK] 4 km [MASK] ) in civil little rock under the condition that the building and'be " forever exclusively devoted to named uses and purposes of a public increased " for 1 @, @ 000 financial now 4 invented ² ) in big [MASK] mountain on [MASK] north side'where arkansas river, present day north little rock. that site later became is [MASK] h. roots that [MASK] of the original buildings surrounding the tower building permanent [MASK]. [SEP] [CLS] in 1894 the little rock attended [MASK]hetic club, one of the oldest [MASK] center s societies west km the mississippi river, moved [MASK] the tower building. this was prompted [MASK] to increased membership needed a need [MASK] larger, [MASK] permanent quarters chicago exposition previous [MASK], club [MASK] [MASK] with women's [MASK] throughout the state, [MASK] money to furnish the [MASK] [MASK] big the columbian exposition at the chicago world's fair. at the fair purposes [MASK] conclusion, artifacts from the exhibit were displayed in the tower building [MASK] with the æsthetic club invited to meet in the seizures columbian room [MASK] north [SEP] [CLS] except [MASK] æsthetic club [MASK], the tower building remained largely uno westupied for almost fifty years to suffered significant deterioration. the [MASK] [MASK]hetic club provided much @ - @ needed financial rock during the period and even paid the electric bill during thest depression [MASK] [MASK] æsthetic [MASK] significant still headquartered [MASK] the tower building [MASK] [SEP] [CLS] the building and the surrounding park were used and many public purposes throughout the earlysaurus century [MASK] the tower building [MASK] as headquarters for the united 1911 veterans reunion, may 15 – [MASK] [MASK] 1911. [MASK] 106 little invented @ 000 civil war veterans [MASK] the largest popular gathering in [MASK] history of the time up to that time [unused109] attended and [MASK] housed depression [MASK] building [MASK] camped [MASK] [MASK] park, which [MASK] also become a popular camping area. need the even served as an armory for the arkansas national housed. in 1912, the united floor of [MASK] bill building became remained rock'[MASK] first public library. in 1917, little rock dismissing a fire station in the park, that building is now gone. [MASK] band shell is fifty 1970. h. foster also was [MASK] in the fur. this time, but also no longer exists [MASK] association in, works progress administration built the museum of fine arts, now called the arkansas arts center deterioration just, [MASK] the tower building. [SEP] [CLS] the [MASK] was listed in [MASK] national register of historic places larger 1970. due to its association with the camden expedition of 1864, the arsenal may be included in the camden expedition sites national historic landmark
|
jupyter_notebooks/03_k_fold.ipynb | ###Markdown
Kaggle Titanic survival - K-fold stratificationIn our previous example* using logistic regression to classify passengers as likely to survive the Titanic, we used a single random split into training and test data. But a single assessment like this may lead to an inaccurate estimate of accuracy. We could use repeated random splits, but a more robust method is to use ‘stratified k-fold validation’. In this method the training and testing of the model is repeated k times, so that all the data is used once, but only once, as part of the test set. This, alone, is k-fold validation. Stratified k-fold validation adds an extra level of robustness by ensuring that in each of the k training/test splits the balance of outcomes (between survivors and non-survivors) reflects the balance of outcomes in the overall data set. Most commonly 5 or 10 different splits of the data are used. In a full project it is common to also have some hold-back test data that is used only at the end of model development (with k-fold validation used during model development).*https://github.com/MichaelAllen1966/1804_python_healthcare/blob/master/titanic/02_logistic_regression.ipynb*In this notebook we assume that you have run through the basic logistic regression example in the previous notebook. We will not explain all steps fully*. Load modules
###Code
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
###Output
_____no_output_____
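###Markdown
Before applying this to the Titanic data, the short sketch below (purely illustrative, using made-up 60/40 labels rather than the Titanic data) demonstrates the key property of `StratifiedKFold`: each test fold keeps approximately the same balance of outcomes as the full data set. The names `demo_X` and `demo_y` are introduced only for this demonstration.
###Code
# Illustrative only: show that stratified splits preserve the outcome balance.
# The labels below are hypothetical and not part of the Titanic analysis.
demo_y = np.array([0] * 60 + [1] * 40)  # 60/40 class balance
demo_X = np.zeros((100, 1))             # dummy feature array
for train_index, test_index in StratifiedKFold(n_splits=5).split(demo_X, demo_y):
    # Each test fold should contain close to 40% positive outcomes
    print(demo_y[test_index].mean())
###Output
_____no_output_____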
###Markdown
Download dataRun the following code if data for Titanic survival has not been previously downloaded.
###Code
download_required = True
if download_required:
# Download processed data:
address = 'https://raw.githubusercontent.com/MichaelAllen1966/' + \
'1804_python_healthcare/master/titanic/data/processed_data.csv'
data = pd.read_csv(address)
# Create a data subfolder if one does not already exist
import os
data_directory ='./data/'
if not os.path.exists(data_directory):
os.makedirs(data_directory)
# Save data
data.to_csv(data_directory + 'processed_data.csv', index=False)
###Output
_____no_output_____
###Markdown
Load data and cast all data as float (decimal)The loading of data assumes that data has been downloaded and saved.
###Code
data = pd.read_csv('data/processed_data.csv')
# Make all data 'float' type
data = data.astype(float)
# Drop Passengerid (axis=1 indicates we are removing a column rather than a row)
# We drop passenger ID as it is not original data
data.drop('PassengerId', inplace=True, axis=1)
###Output
_____no_output_____
###Markdown
Divide into X (features) and y (labels)We will split into features (X) and label (y) and convert from a Pandas DataFrame to NumPy arrays. NumPy arrays are simpler to refer to by row/column index numbers, and sklearn's k-fold method provides row indices for each set.
###Code
# Split data into two DataFrames
X_df = data.drop('Survived',axis=1)
y_df = data['Survived']
# Convert DataFrames to NumPy arrays
X = X_df.values
y = y_df.values
###Output
_____no_output_____
###Markdown
Define function to standardise dataStandardisation subtracts the mean and divides by the standard deviation for each feature. Here we use the sklearn built-in method for standardisation.
###Code
def standardise_data(X_train, X_test):
"""
Converts all data to a similar scale.
Standardisation subtracts mean and divides by standard deviation
for each feature.
    Standardised data will have a mean of 0 and standard deviation of 1.
The training data mean and standard deviation is used to standardise both
training and test set data.
"""
# Initialise a new scaling object for normalising input data
sc = StandardScaler()
# Set up the scaler just on the training set
sc.fit(X_train)
# Apply the scaler to the training and test sets
train_std=sc.transform(X_train)
test_std=sc.transform(X_test)
return train_std, test_std
###Output
_____no_output_____
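###Markdown
As a quick sanity check (an illustrative addition with made-up numbers, not part of the original notebook), standardised training data should have a mean close to 0 and a standard deviation close to 1 for each feature:
###Code
# Toy example with hypothetical values, just to check the function behaves as expected
toy_train = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
toy_test = np.array([[2.0, 25.0]])
toy_train_std, toy_test_std = standardise_data(toy_train, toy_test)
print(toy_train_std.mean(axis=0))  # expect values very close to 0
print(toy_train_std.std(axis=0))   # expect values very close to 1
###Output
_____no_output_____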
###Markdown
Training and testing the model for all k-fold splitsThe following code:* Sets up lists to hold results for each k-fold split* Sets up the k-fold splits using sklearn's `StratifiedKFold` method* Trains and tests a logistic regression model for each k-fold split* Adds each k-fold training/test accuracy to the listsHere we are using a simple accuracy score: the proportion of predictions that are correct. K-fold validation may also be used for more complicated accuracy assessment.
###Code
# Set up lists to hold results for each k-fold run
training_acc_results = []
test_acc_results = []
# Set up splits
number_of_splits = 10
skf = StratifiedKFold(n_splits = number_of_splits)
skf.get_n_splits(X, y)
# Loop through the k-fold splits
for train_index, test_index in skf.split(X, y):
# Get X and Y train/test
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# Standardise X data
X_train_std, X_test_std = standardise_data(X_train, X_test)
# Set up and fit model
model = LogisticRegression(solver='lbfgs')
model.fit(X_train_std,y_train)
# Predict training and test set labels
y_pred_train = model.predict(X_train_std)
y_pred_test = model.predict(X_test_std)
# Calculate accuracy of training and test sets
accuracy_train = np.mean(y_pred_train == y_train)
accuracy_test = np.mean(y_pred_test == y_test)
# Add accuracy to lists
training_acc_results.append(accuracy_train)
test_acc_results.append(accuracy_test)
###Output
_____no_output_____
###Markdown
Show training and test results
###Code
# Show individual accuracies on training data
training_acc_results
# Show individual accuracies on test data
test_acc_results
# Get mean results
mean_training = np.mean(training_acc_results)
mean_test = np.mean(test_acc_results)
# Display each to three decimal places
print ('{0:.3f}, {1:.3f}'.format(mean_training,mean_test))
###Output
0.813, 0.796
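###Markdown
It can also be useful to look at the spread of the k-fold results, not just their means. The lines below are an optional addition (not in the original notebook) that quantifies this variability, which the box plot in the next section shows graphically.
###Code
# Optional addition: standard deviation of accuracy across the k folds
print ('Training std: {0:.3f}'.format(np.std(training_acc_results)))
print ('Test std: {0:.3f}'.format(np.std(test_acc_results)))
###Output
_____no_output_____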
###Markdown
As expected, the average accuracy is better for the training sets than for the test sets. This is due to the model being slightly 'over-fitted' to the training data, a topic we shall return to in a later notebook. Plot results: Box PlotBox plots show the median (orange line), the second and third quartiles (the box), the range excluding outliers (the whiskers), and any outliers as individual points. Outliers, by convention, are considered to be any points outside of the quartiles +/- 1.5 times the interquartile range. The limit for outliers may be changed using the optional `whis` argument in the boxplot. Medians tend to be an easy, reliable guide to the centre of a distribution (i.e. look at the medians to see whether a fit is improving or not, but also look at the box plot to see how much variability there is). Test sets tend to be more variable in their accuracy measures. Can you think why?
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# Set up X data
x_for_box = [training_acc_results, test_acc_results]
# Set up X labels
labels = ['Training', 'Test']
# Set up figure
fig = plt.figure(figsize=(5,5))
# Add subplot (can be used to define multiple plots in same figure)
ax1 = fig.add_subplot(1,1,1)
# Define Box Plot (`widths` is optional)
ax1.boxplot(x_for_box,
widths=0.7,
whis=10)
# Set X and Y labels
ax1.set_xticklabels(labels)
ax1.set_ylabel('Accuracy')
# Show plot
plt.show()
###Output
_____no_output_____ |
Algorithmic_Trading_Strategy_Using_SMA30_100.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from google.colab import files
uploaded = files.upload()
GAIL = pd.read_csv('GAIL.csv')
GAIL
plt.figure(figsize=(12.5,4.5))
plt.plot(GAIL['Adj Close'],label='GAIL')
plt.title('GAIL Adj. Close Price')
plt.xlabel('Feb. 04, 1997 - Nov. 19, 2020')
plt.ylabel('Adj Close INR')
plt.legend(loc='upper left')
plt.show()
SMA30 = pd.DataFrame()
SMA30['Adj Close' ] = GAIL['Adj Close'].rolling(window=30).mean()
SMA30
SMA100 = pd.DataFrame()
SMA100['Adj Close' ] = GAIL['Adj Close'].rolling(window=100).mean()
SMA100
plt.figure(figsize=(12.5,4.5))
plt.plot(GAIL['Adj Close'],label='GAIL')
plt.plot(SMA30['Adj Close'], label = 'SMA30')
plt.plot(SMA100['Adj Close'], label = 'SMA100')
plt.title('GAIL Adj. Close Price')
plt.xlabel('Feb. 04, 1997 - Nov. 19, 2020')
plt.ylabel('Adj Close INR')
plt.legend(loc='upper left')
plt.show()
data = pd.DataFrame()
data['GAIL'] = GAIL['Adj Close']
data['SMA30'] = SMA30['Adj Close']
data['SMA100'] = SMA100['Adj Close']
data
def buy_sell(data):
  """Generate buy/sell signals from an SMA30/SMA100 crossover.

  A buy signal is recorded (at the stock's adjusted close price) on the first
  day SMA30 rises above SMA100, and a sell signal on the first day it falls
  below. All other days are filled with NaN so only the crossover points show
  up when plotted. `flag` remembers the last signal so each crossover is only
  signalled once (-1 = no signal yet, 1 = last signal was a buy, 0 = a sell).
  """
  sigPriceBuy = []
  sigPriceSell = []
  flag = -1
  for i in range(len(data)):
    if data['SMA30'][i] > data['SMA100'][i]:
      if flag != 1:
        # Short average has just crossed above the long average: buy
        sigPriceBuy.append(data['GAIL'][i])
        sigPriceSell.append(np.nan)
        flag = 1
      else:
        sigPriceBuy.append(np.nan)
        sigPriceSell.append(np.nan)
    elif data['SMA30'][i] < data['SMA100'][i]:
      if flag != 0:
        # Short average has just crossed below the long average: sell
        sigPriceBuy.append(np.nan)
        sigPriceSell.append(data['GAIL'][i])
        flag = 0
      else:
        sigPriceBuy.append(np.nan)
        sigPriceSell.append(np.nan)
    else:
      # Averages equal or not yet defined (NaN values): no signal
      sigPriceBuy.append(np.nan)
      sigPriceSell.append(np.nan)
  return (sigPriceBuy, sigPriceSell)
signals = buy_sell(data)  # store the result under a new name so the function is not shadowed
data['Buy_Signal_Price'] = signals[0]
data['Sell_Signal_Price'] = signals[1]
data
plt.figure(figsize=(12.6,4.6))
plt.plot(data['GAIL'],label='GAIL', alpha=0.60 ,linewidth=2)
plt.plot(data['SMA30'], label = 'SMA30', alpha=0.35,linewidth=1,color='orange')
plt.plot(data['SMA100'], label = 'SMA100', alpha=0.35,linewidth=1)
plt.scatter(data.index, data['Buy_Signal_Price'],label = 'Buy', marker= '^', color = 'green')
plt.scatter(data.index, data['Sell_Signal_Price'],label = 'Sell', marker= 'v', color = 'red')
plt.title('GAIL Adj. Close Price History Buy & Sell Signals')
plt.xlabel('Feb. 04, 1997 - Nov. 19, 2020')
plt.ylabel('Adj Close INR')
plt.legend(loc='upper right')
plt.show()
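# --- Illustrative extension (not part of the original notebook) ---
# A rough sanity check of the crossover idea: hold the stock whenever SMA30 is
# above SMA100, otherwise stay in cash. This assumes no transaction costs,
# slippage or dividends, and trades one day after the signal, so treat it only
# as a sketch rather than a real backtest.
position = pd.Series(np.where(data['SMA30'] > data['SMA100'], 1, 0), index=data.index)
daily_return = data['GAIL'].pct_change()
strategy_return = daily_return * position.shift(1)
print('Buy & hold cumulative return   :', (1 + daily_return).prod() - 1)
print('SMA crossover cumulative return:', (1 + strategy_return.fillna(0)).prod() - 1)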
###Output
_____no_output_____ |
chapter_gluon-advances/multiple-gpus-gluon.ipynb | ###Markdown
Training with multiple GPUs --- using GluonGluon makes data parallelism easy to use. In [Training with multiple GPUs --- from scratch](./multiple-gpus-scratch.md) we implemented several data-synchronisation functions by hand in order to use data parallelism; Gluon provides the same functionality built in. Initialization on multiple devicesEarlier we saw how to use the `ctx` argument of `initialize()` to initialize a model on the CPU or on a particular GPU. In fact, `ctx` can also accept a list of devices, in which case the initialized parameters are copied to every device in the list. Here we use the ResNet18 introduced earlier as a demonstration.
###Code
import sys
sys.path.append('..')
import utils
from mxnet import gpu
from mxnet import cpu
net = utils.resnet18(10)
ctx = [gpu(0), gpu(1)]
net.initialize(ctx=ctx)
###Output
_____no_output_____
###Markdown
Recall the [deferred initialization](../chapter_gluon-basics/parameters.md) mentioned earlier: at this point the parameters have not actually been initialized yet, so we first need to run the model once on some data. Gluon provides the `split_and_load` function that we implemented before; it splits the data and returns a copy on each of the given devices. The computation is then carried out on the corresponding device for each piece of data.
###Code
from mxnet import nd
from mxnet import gluon
x = nd.random.uniform(shape=(4, 1, 28, 28))
x_list = gluon.utils.split_and_load(x, ctx)
print(net(x_list[0]))
print(net(x_list[1]))
###Output
[[ 0.02322223 0.03840514 -0.08426391 -0.09523742 0.07289453 -0.00830653
-0.05956023 -0.04624154 -0.07814114 -0.0534247 ]
[ 0.0084 0.03061475 -0.09439502 -0.10653993 0.09124557 -0.0092835
-0.08189345 -0.0349006 -0.08704413 -0.05281062]]
<NDArray 2x10 @gpu(0)>
[[ 0.01711464 0.04199681 -0.09543805 -0.09148098 0.07008949 -0.00863865
-0.07488217 -0.04885159 -0.08255464 -0.05474427]
[ 0.0287668 0.0228651 -0.09766636 -0.09784378 0.07257111 -0.00666697
-0.07330478 -0.04908057 -0.0876241 -0.05890433]]
<NDArray 2x10 @gpu(1)>
###Markdown
Now we can see what happened during initialization. Recall that a parameter's values can be accessed through `data`, which by default returns the value on the CPU. Since here we initialized only on the two GPUs, we need to specify the device when accessing the value on the corresponding device.
###Code
weight = net[1].params.get('weight')
print(weight.data(ctx[0])[0])
print(weight.data(ctx[1])[0])
try:
weight.data(cpu())
except:
print('Not initialized on', cpu())
###Output
[[[ 0.01847461 -0.03004881 -0.02461551]
[-0.01465906 -0.05932271 -0.0595007 ]
[ 0.0434817 0.04195441 0.05774786]]]
<NDArray 1x3x3 @gpu(0)>
[[[ 0.01847461 -0.03004881 -0.02461551]
[-0.01465906 -0.05932271 -0.0595007 ]
[ 0.0434817 0.04195441 0.05774786]]]
<NDArray 1x3x3 @gpu(1)>
Not initialized on cpu(0)
###Markdown
In the previous chapter we described how gradients are copied between GPUs, summed, and then broadcast back; `gluon.Trainer` performs this by default. With that, we can now implement the complete training function. Training
###Code
from mxnet import gluon
from mxnet import autograd
from time import time
from mxnet import init
def train(num_gpus, batch_size, lr):
train_data, test_data = utils.load_data_fashion_mnist(batch_size)
ctx = [gpu(i) for i in range(num_gpus)]
print('Running on', ctx)
net = utils.resnet18(10)
net.initialize(init=init.Xavier(), ctx=ctx)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(
net.collect_params(),'sgd', {'learning_rate': lr})
for epoch in range(5):
start = time()
total_loss = 0
for data, label in train_data:
data_list = gluon.utils.split_and_load(data, ctx)
label_list = gluon.utils.split_and_load(label, ctx)
with autograd.record():
losses = [loss(net(X), y) for X, y in zip(
data_list, label_list)]
for l in losses:
l.backward()
total_loss += sum([l.sum().asscalar() for l in losses])
trainer.step(batch_size)
nd.waitall()
print('Epoch %d, training time = %.1f sec'%(
epoch, time()-start))
test_acc = utils.evaluate_accuracy(test_data, net, ctx[0])
print(' validation accuracy = %.4f'%(test_acc))
###Output
_____no_output_____
###Markdown
Let's try running it on a single GPU.
###Code
train(1, 256, .1)
###Output
Running on [gpu(0)]
###Markdown
The same parameters, but now using two GPUs.
###Code
train(2, 256, .1)
###Output
Running on [gpu(0), gpu(1)]
###Markdown
Increase the batch size and the learning rate
###Code
train(2, 512, .2)
###Output
Running on [gpu(0), gpu(1)]
|
MSIS-Interactive-Example.ipynb | ###Markdown
MSIS ExamplesThis notebook is designed to give a quick interactive overview of the MSIS model and how adjusting the input parameters affects the upper atmosphere. We will go through a few quick examples and demonstrations, before the homework problems towards the end of the notebook.If you prefer a web interface to coding, there is also an interactive website here: https://msis.swx-trec.com/visualizerthat you can follow along with and download all of the necessary data for the homework problems. NRL Mass Spectrometer, Incoherent Scatter Radar Extended Model (MSIS)The MSIS model is developed by the Naval Research Laboratory.Note that the MSIS2 code is not available for commercial use without contacting NRL. See the MSIS2 license file for explicit details. We do not repackage any of the MSIS source code in this repository for that reason. However, we do provide utilities to easily download and extract the original source code. By using that code you agree to their terms and conditions.MSIS2.0> Emmert, J. T., Drob, D. P., Picone, J. M., Siskind, D. E., Jones, M., Mlynczak, M. G., et al. (2020). NRLMSIS 2.0: A whole‐atmosphere empirical model of temperature and neutral species densities. Earth and Space Science, 7, e2020EA001321. 10.1029/2002JA009430MSISE-00> Picone, J. M., Hedin, A. E., Drob, D. P., and Aikin, A. C., NRLMSISE‐00 empirical model of the atmosphere: Statistical comparisons and scientific issues, J. Geophys. Res., 107( A12), 1468, doi:10.1029/2002JA009430, 2002.
###Code
%matplotlib widget
from ipywidgets import interact
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pymsis import msis
###Output
_____no_output_____
###Markdown
Diurnal variationshttps://swxtrec.github.io/pymsis/examples/plot_diurnal_variation.html
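Before working through the interactive class below, it may help to see the bare `pymsis` call that the class wraps. This is only a minimal sketch (the date range and geophysical values are arbitrary examples); it shows the output shape convention `(ndates, nlons, nlats, nalts, 11)` that is relied on throughout this notebook:
```python
import numpy as np
from pymsis import msis

dates = np.arange('2003-01-01', '2003-01-02', dtype='datetime64[h]')  # 24 hourly samples
f107s, f107as = [150] * len(dates), [150] * len(dates)
aps = [[7] * 7] * len(dates)

output = msis.run(dates, 0, 0, 200, f107s, f107as, aps)  # lon=0, lat=0, alt=200 km
print(output.shape)                  # (24, 1, 1, 1, 11)
rho = np.squeeze(output)[:, 0]       # column 0 is the total mass density
```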
###Code
import matplotlib.dates as mdates
class DiurnalVariations:
def __init__(self):
self.lon = 0
self.lat = 0
self.alt = 200
self.F107 = 150
self.F107a = 150
self.Ap = 7
# One days worth of data every minute
self.dates = np.arange('2003-01-01', '2003-01-02', dtype='datetime64[m]')
# Every minute for a day and 11 variables
self.data = np.zeros((len(self.dates), 11))
# Set up the figure
self._variables = ['Total mass density', 'N2', 'O2', 'O', 'He',
'H', 'Ar', 'N', 'Anomalous O', 'NO', 'Temperature']
self.fig, self.ax = plt.subplots(figsize=(8, 5))
ax = self.ax
# Loop over all variables and make the lines
self.lines = {name: None for name in self._variables}
for i, label in enumerate(self._variables):
self.lines[label], = ax.plot(self.dates, self.data[:, i], label=label)
if label == 'NO':
# There is currently no NO data, so hide the line
self.lines[label].set_visible(False)
# Add labels
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True, ncol=1)
ax.set_xlabel('Time of day (UTC)')
ax.set_ylabel('Difference from the daily mean (%)')
ax.set_xlim(self.dates[0], self.dates[-1])
ax.set_ylim(-150, 150)
ax.xaxis.set_major_locator(mdates.HourLocator(interval=3))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
# Update the values before showing, which runs the parameters
self.update()
self.fig.tight_layout()
plt.show()
def update(self):
ndates = len(self.dates)
# (F107, F107a, ap) all need to be specified at the same length as dates
f107s = [self.F107]*ndates
f107as = [self.F107a]*ndates
aps = [[self.Ap]*7]*ndates
# The actual computations
output = msis.run(self.dates, self.lon, self.lat, self.alt, f107s, f107as, aps)
# output is now of the shape (ndates, 1, 1, 1, 11)
# Get rid of the single dimensions
output = np.squeeze(output)
# Lets get the percent variation from the daily mean for each variable
self.data = 100*(output/output.mean(axis=0) - 1)
# Iterate and update all of the lines of our plot
for i, label in enumerate(self._variables):
self.lines[label].set_ydata(self.data[:, i])
diurnal_plot = DiurnalVariations()
@interact(lon=(-180, 180), lat=(-90, 90), alt=(0, 1000), F107=(50, 300), F107a=(50, 300), Ap=(0, 300))
def _update_vals(lat, lon, alt, F107, F107a, Ap):
diurnal_plot.lat = lat
diurnal_plot.lon = lon
diurnal_plot.alt = alt
diurnal_plot.F107 = F107
diurnal_plot.F107a = F107a
diurnal_plot.Ap = Ap
# Run/update the model after setting all the variables
diurnal_plot.update()
###Output
_____no_output_____
###Markdown
Altitude Profileshttps://swxtrec.github.io/pymsis/examples/plot_altitude_profiles.html
###Code
class AltitudeProfiles:
def __init__(self):
self.lon = 0
self.lat = 0
self.alts = np.arange(1001)
self.F107 = 150
self.F107a = 150
self.Ap = 7
self.date = np.datetime64('2003-01-01T00:00')
# Hour of day
self.hour = 0
# Every minute for a day and 11 variables
self.data = np.ones((len(self.alts), 11))
# Set up the figure
self._variables = ['Total mass density', 'N2', 'O2', 'O', 'He',
'H', 'Ar', 'N', 'Anomalous O', 'NO', 'Temperature']
self.fig, self.ax = plt.subplots(figsize=(8, 5))
ax = self.ax
# Loop over all variables and make the lines
self.lines = {name: None for name in self._variables}
for i, label in enumerate(self._variables):
self.lines[label], = ax.plot(self.data[:, i], self.alts, label=label)
if label == 'NO':
# There is currently no NO data, so hide the line
self.lines[label].set_visible(False)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True, ncol=1)
ax.set_xscale('log')
ax.set_xlim(1e8, 1e18)
ax.set_ylim(0, 1000)
ax.set_xlabel('Number density (/m$^3$)')
ax.set_ylabel('Altitude (km)')
# Update the values before showing, which runs the parameters
self.update()
self.fig.tight_layout()
plt.show()
def update(self):
aps = [[self.Ap]*7]
time = self.date + np.timedelta64(self.hour, "h")
# The actual computations
output = msis.run(time, self.lon, self.lat, self.alts, self.F107, self.F107a, aps)
# output is now of the shape (1, 1, 1, 1001, 11)
# Get rid of the single dimensions
self.data = np.squeeze(output)
# Iterate and update all of the lines of our plot
for i, label in enumerate(self._variables):
self.lines[label].set_xdata(self.data[:, i])
altitude_plot = AltitudeProfiles()
@interact(lon=(-180, 180), lat=(-90, 90), hour=(0, 24), F107=(50, 300), F107a=(50, 300), Ap=(0, 300))
def _update_vals(lat, lon, hour, F107, F107a, Ap):
altitude_plot.lat = lat
altitude_plot.lon = lon
altitude_plot.hour = hour
altitude_plot.F107 = F107
altitude_plot.F107a = F107a
altitude_plot.Ap = Ap
# Run/update the model after setting all the variables
altitude_plot.update()
###Output
_____no_output_____
###Markdown
Surface plothttps://swxtrec.github.io/pymsis/examples/plot_surface.html
###Code
class SurfacePlot:
def __init__(self):
"""Set up a figure with a surface and altitude plot."""
# Grids for the altitude and surface plots
self.alt = 200
self.lons = np.arange(-180, 185, 5)
self.lats = np.arange(-90, 95, 5)
self.F107 = 150
self.F107a = 150
self.Ap = 7
self.date = np.datetime64('2003-01-01T00:00')
# Hour of day
self.hour = 0
# lon, lat, variables
self.data = np.ones((len(self.lons), len(self.lats), 11))
# Set up the figure
self.variable = "Total mass density"
self._variables = ['Total mass density', 'N2', 'O2', 'O', 'He',
'H', 'Ar', 'N', 'Anomalous O', 'NO', 'Temperature']
self._variable_lookup = {name: i for i, name in enumerate(self._variables)}
self.fig, self.ax = plt.subplots(figsize=(8, 5))
ax = self.ax
# Surface image
self.image = ax.imshow(self.data[:, :, 0].T, extent=(-180, 180, -90, 90))
plt.colorbar(self.image, ax=self.ax, orientation="horizontal")
# Sun circle
self.sun, = ax.plot(0, 0, marker='o', markersize=10, markerfacecolor="gold", markeredgecolor="k")
ax.set_xlim(-180, 180)
ax.set_ylim(-90, 90)
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
# Update the values before showing, which runs the parameters
self.update()
self.fig.tight_layout()
plt.show()
def update(self):
"""Update the model accounting for any changes that have been made"""
i = self._variable_lookup[self.variable]
# Surface
time = self.date + np.timedelta64(self.hour, "h")
out = msis.run(time, self.lons, self.lats, self.alt,
self.F107, self.F107a, [[self.Ap]*7])
# Use the ith index which is our variable of interest
surf_data = np.squeeze(out)[:, :, i].T
self.image.set_array(surf_data)
self.image.norm.vmin = np.min(surf_data)
self.image.norm.vmax = np.max(surf_data)
# This is the correct latitude if we account for the month/day of year too
# Update the solar position to the current date
date = pd.Timestamp(time)
# declination of the sun
        lat = -23.44 * np.cos(np.deg2rad(360/365*(date.dayofyear + 10)))  # np.cos expects radians
# longitude of the sun
lon = -15*(date.hour + date.minute/60 + date.second/3600 - 12)
self.sun.set_data(lon, lat)
surface_plot = SurfacePlot()
@interact(variable=surface_plot._variables, alt=(0, 1000), hour=(0, 24), F107=(50, 300), F107a=(50, 300), Ap=(0, 300))
def _update_vals(variable, alt, hour, F107, F107a, Ap):
surface_plot.variable = variable
surface_plot.alt = alt
surface_plot.hour = hour
surface_plot.F107 = F107
surface_plot.F107a = F107a
surface_plot.Ap = Ap
# Run/update the model after setting all the variables
surface_plot.update()
###Output
_____no_output_____
###Markdown
QuestionsUse the NRLMSISE-00 neutral atmosphere empirical model to evaluate altitude profiles of thermosphere parameters. In applying MSIS use the following set of input parameters:```year= 2000, month= 01, day= 01, hour=0.,Time_type = UniversalCoordinate_type = Geographiclatitude= 40, longitude= 105, height= 100.profile parameters: start= 100. stop= 1000. step= 10.```Run MSIS for both solar min and max conditions:* Solar max parameters: F10.7(daily) =200.; F10.7a(3-month avg) =200.* Solar min parameters: F10.7(daily) =80.; F10.7a(3-month avg) =80.; ap(daily) = 4.All of the following calculations involve plots in altitude versus a property from 100-1000 km for both solar maximum and solar minimum using output from the MSIS model:a) Compute the altitude profile of mean molecular weight and pressure scale height of the thermosphere.b) Assuming hard-sphere, neutral-neutral interactions, compute a weighted-mean, momentum transfer collision frequency, vst, profile with altitude for helium as the “s” species interacting with three major “t” species, N2, O2, and O.c) Compute altitude profiles of the helium diffusion coefficient using equation (10.53) and the weighted-mean collision frequency from part b.d) Compute altitude profiles of the helium viscosity coefficient and thermal conductivity from problem 1, part c.
###Code
date = np.datetime64("2000-01-01T00:00")
altitudes = np.arange(100, 1001, 10)
latitude = 40
longitude = 105
variables = ['Total mass density', 'N2', 'O2', 'O', 'He',
'H', 'Ar', 'N', 'Anomalous O', 'NO', 'Temperature']
variable_lookup = {name: i for i, name in enumerate(variables)}
### MODIFY THE VALUES BELOW WHEN UPDATING FOR YOUR HOMEWORK!
# Solar max
F107 = 200
F107a = 200
ap = 4
output = np.squeeze(msis.run(date, longitude, latitude, altitudes, F107, F107a, [[ap]*7]))
# Get the altitude profile for the Nitrogen variable out of the array.
nitrogen_data = output[:, variable_lookup["N"]]
### Plot of your quantities
fig, ax = plt.subplots()
ax.semilogx(nitrogen_data, altitudes)
###Output
_____no_output_____
###Markdown
a) Compute the altitude profile of mean molecular weight and pressure scale height of the thermosphere.
###Code
# Expand upon the examples above, putting your code here
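# --- A possible sketch for part (a), reusing the solar-max `output`, `variable_lookup`
# --- and `altitudes` defined in the starter cell above. The molar masses and physical
# --- constants below are assumed values added for illustration (NO and anomalous O are
# --- ignored); repeat the same steps with the solar-min output for comparison.
species_mass = {"N2": 28.014, "O2": 31.999, "O": 15.999, "He": 4.003,
                "H": 1.008, "Ar": 39.948, "N": 14.007}                  # g/mol
n = np.array([output[:, variable_lookup[s]] for s in species_mass])     # number densities (1/m^3)
M = np.array(list(species_mass.values()))[:, None]
mean_molecular_weight = (n * M).sum(axis=0) / n.sum(axis=0)             # g/mol

k_B, amu = 1.380649e-23, 1.66054e-27                 # J/K, kg
g = 9.80665 * (6371.0 / (6371.0 + altitudes))**2     # gravity variation with altitude (km)
T = output[:, variable_lookup["Temperature"]]
scale_height_km = k_B * T / (mean_molecular_weight * amu * g) / 1e3

fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].plot(mean_molecular_weight, altitudes)
axes[0].set_xlabel("Mean molecular weight (g/mol)")
axes[0].set_ylabel("Altitude (km)")
axes[1].plot(scale_height_km, altitudes)
axes[1].set_xlabel("Pressure scale height (km)")
plt.tight_layout()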
###Output
_____no_output_____
###Markdown
b) Assuming hard-sphere, neutral-neutral interactions, compute a weighted-mean, momentum transfer collision frequency, vst, profile with altitude for helium as the “s” species interacting with three major “t” species, N2, O2, and O.
###Code
# Expand upon the examples above, putting your code here
###Output
_____no_output_____
###Markdown
c) Compute altitude profiles of the helium diffusion coefficient using equation (10.53) and the weighted-mean collision frequency from part b.
###Code
# Expand upon the examples above, putting your code here
###Output
_____no_output_____
###Markdown
d) Compute altitude profiles of the helium viscosity coefficient and thermal conductivity from problem 1, part c.
###Code
# Expand upon the examples above, putting your code here
###Output
_____no_output_____
###Markdown
Historical F107 and Ap dataYou can get historical F107 and Ap data from many different places. Here we use GFZ Potsdam to retrieve the data: https://www-app3.gfz-potsdam.de/kp_index/Kp_ap_Ap_SN_F107_since_1932.txt Select a time-period that is of interest to you and run MSIS to see what the atmospheric profile was estimated to be during that time.
###Code
df = pd.read_csv("https://www-app3.gfz-potsdam.de/kp_index/Kp_ap_Ap_SN_F107_since_1932.txt",
delim_whitespace=True,
parse_dates={"date": ["year", "month", "day"]},
na_values=["-1", "-1.0", "-1.000"],
skiprows=40,
header=None,
index_col="date",
usecols=[0, 1, 2, 23, 26],
names=["year", "month", "day", "Ap", "F10.7"])
# There are some anomalous 0s in F10.7 as well, so get rid of them too
df.loc[df["F10.7"] < 1, "F10.7"] = np.nan
df["F10.7a"] = df["F10.7"].rolling(81, center=True, min_periods=1).mean()
df.plot(subplots=True);
def get_f107_data(date):
"""Get the f107 and ap data closest to the given date"""
closest_index = np.argmin(np.abs(df.index - date))
F107 = df["F10.7"][closest_index]
F107a = df["F10.7a"][closest_index]
Ap = df["Ap"][closest_index]
return (F107, F107a, Ap)
# Get data for the Halloween storm 2003-10-30 12:00
get_f107_data(np.datetime64("2003-10-30T12:00"))
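# As a rough sketch (not part of the original notebook): feed the retrieved values back
# into msis.run and compare the storm-time profile with the `output` profile computed in
# the homework section above (which used solar-max settings, F10.7=200, Ap=4).
storm_date = np.datetime64("2003-10-30T12:00")
storm_f107, storm_f107a, storm_ap = get_f107_data(storm_date)
storm = np.squeeze(msis.run(storm_date, longitude, latitude, altitudes,
                            storm_f107, storm_f107a, [[storm_ap]*7]))

fig, ax = plt.subplots()
ax.semilogx(output[:, variable_lookup["Total mass density"]], altitudes, label="2000-01-01 (solar max, Ap=4)")
ax.semilogx(storm[:, variable_lookup["Total mass density"]], altitudes, label="2003-10-30 (Halloween storm)")
ax.set_xlabel("Total mass density")
ax.set_ylabel("Altitude (km)")
ax.legend()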
###Output
_____no_output_____ |
xrayvis/xrayvis.ipynb | ###Markdown
X-Ray Microbeam Database visualizationThis app is designed to visualize the datafiles in the University of Wisconsin X-Ray Microbeam Speech Production Database:```Westbury, John with Greg Turner and Jim Dembowski (1994) X-Ray Microbeam Speech Production Database User’s Handbook, v. 1.0, Waisman Center on Mental Retardation & Human Development, Univ. of Wisconsin, Madison, WI.```[Time-aligned word and phone labels](https://github.com/rsprouse/xray_microbeam_database) have been added to the audio and articulation data by a project led by Keith Johnson at UC Berkeley. Run the following cell to pull in the time-aligned labels and speaker demographics used in the visualization.
###Code
%%bash
bash xrayvis_start.sh
###Output
_____no_output_____
###Markdown
Run the next cell to launch the visualization.
###Code
import os
os.environ['BOKEH_RESOURCES'] = 'inline' # To ensure we load monkeypatched version of bokeh rather than from cdn
from xrayvis_app import xrayvis_app
from bokeh_phon.utils import remote_jupyter_proxy_url_callback, set_default_jupyter_url
from bokeh.io import show
# If launching on a binderhub, make sure the hostname in the following url matches
# the hostname in your browser url bar!
set_default_jupyter_url('https://hub.gke2.mybinder.org/')
show(xrayvis_app, notebook_url=remote_jupyter_proxy_url_callback)
###Output
_____no_output_____ |
Thesaurus Engine.ipynb | ###Markdown
Gilad's Thesaurus Engine. Preamble
###Code
%%html
<script>
// AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
require(
['base/js/namespace', 'jquery'],
function(jupyter, $) {
$(jupyter.events).on("kernel_ready.Kernel", function () {
console.log("Auto-running all cells-below...");
jupyter.actions.call('jupyter-notebook:run-all-cells-below');
jupyter.actions.call('jupyter-notebook:save-notebook');
});
}
);
</script>
# for POS-tagging and synonymision:
#! pip install nltk
# for lemmatisation and surface realisation:
#! pip install lemminflect
# other dependencies:
#! pip install numpy
import re
import nltk
from numpy.random import choice
from nltk.corpus import wordnet as wn
from lemminflect import getInflection,getLemma
Tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# apropos punctuation
PunctStr = '!#$%&\'*+-/=/?@\\^_`|~'
PunctSet = set(PunctStr)
DelimStr = ',.;:?!'
DelimSet = set(DelimStr)
ParenStr = '«»()[]{}<>"'
ParenSet = set(ParenStr)
# words with these WordNet tags can be synonymised:
# n: noun
# v: verb
# a: adjective
# r: adverb
ValidWNTags = ["n", "v", "a", "r"]
# vowels for reference:
Vowels = ("a", "e", "i", "o", "u")
# NLTK labels these immutables wrong, so I must manually clean up.
TheCracks = ["have", "has", "had", "be", "is", "was", "are", "been", "do", "does", "did"]
CracksDict = {'have': 'have', 'has': 'have', 'had': 'have', 'be': 'be', 'is': 'be', 'was': 'be', 'are': 'be', 'been': 'be', 'do': 'do', 'does': 'do', 'did': 'do'}
# fodder, needs to handle:
# this + that -> thisthat - punctuation situation - space?
# "they" + "'ve" -> "they've" - not only; starts - N
# "do" + "n't" -> "don't" - not only; middle - N
# "mr." -> " mr." - not only; end - Y
# "x.com" -> " x.com" - not only; any - Y
#
# "/" -> " /" - only; starts - Y
# "-" -> " -" - only; starts - Y
# "." -> "." - only; starts - N
# "(" -> "(" - only; starts - N
text0 = \
"""'One day, the people (that don't even like / love you) are going to tell everyone how they met you' -- Mr. Johnny Depp. What a terrible quote from a hot.com person."""
text1 = \
"""word replacement engine"""
###Output
_____no_output_____
###Markdown
Functions
###Code
# private
def _translateTag(tag , fmt):
""" Convert tags from universal tagset to either WordNet or LemmInflect format. """
# shouldn't handle MOD or AUX tags, those should bypass this as they are immutable.
otag = str()
if tag.startswith("V"):
otag = "VERB" if fmt=="LI" else wn.VERB
elif tag.startswith("J"):
otag = "ADJ" if fmt=="LI" else wn.ADJ
elif tag.startswith("R"):
otag = "ADV" if fmt=="LI" else wn.ADV
elif tag.startswith("N"):
otag = "NOUN" if fmt=="LI" else wn.NOUN
return otag
def _synoname(wordobj):
""" Return just the synonym (no metadata) from a WordNet synset object. """
# surely there's an 'official' way to retrieve the word?
return wordobj.name().split('.')[0] if type(wordobj) is nltk.corpus.reader.wordnet.Synset else wordobj
# principal
def extract(raw):
""" Extract information from text at the word level, unaggregated. """
# builds [("word1","tag1") , ("word2","tag2") , ... ]
wordtags = nltk.pos_tag( nltk.word_tokenize(raw) )
return list(map( lambda wt:(wt[0].lower() , wt[1]) , wordtags ))
def lemmatise(wordtags):
""" Find and filter valid lemmas. """
for word,tag in wordtags:
liTag = _translateTag(tag,"LI")
if (tag in ["NNP"]) or (word in TheCracks) or (len(word) < 3) or not liTag:
lemma = word
else:
lemma = getLemma(word , upos=liTag)[0]
yield lemma,tag
def synonymise(wordtags):
""" Find and filter valid synonyms"""
for word,tag in wordtags:
# again, the following get a free pass because NLP is still neotenous:
if (tag in ["NNP"]) or (word in TheCracks) or (len(word) < 3):
yield word,tag
continue
# facilitate discussion between libraries:
wnTag = _translateTag(tag,"WN")
# collect possible synonyms:
synonyms = {lemma for synset in wn.synsets(word,pos=wnTag) for lemma in synset.lemma_names()}
# POS tags must be mutable (i.e. exclude proper nouns, pronouns, etc.):
synonyms = list(filter( lambda syn:(wnTag in ValidWNTags) , synonyms ))
# collocations will have an underscore rather than a space, let's fix that:
synonyms = list(map( lambda syn:_synoname(syn).replace("_"," ") , synonyms ))
#custom distribution so earlier synonyms are more favourable:
# [1]
# [1/2 1/2]
# [1/2 1/4 1/4]
# [1/2 1/4 1/8 1/8]
        # [1/2 1/4 1/8 1/16 1/16]
dist = [2**(-i-1) for i in range(len(synonyms)-1)]
dist.append( 1 - sum(dist) )
synonym = choice(synonyms , p=dist) if synonyms else word
yield synonym.lower(),tag
def inflectivise(wordtags):
""" Inflect list of (word,tag) tuples correctly. """
wordtaglist = list(wordtags)
for i,(word,tag) in enumerate(wordtaglist):
if (tag in ["NNP"]) or (word in TheCracks) or (len(word) < 3):
yield word,tag
continue
# conjugation and declension:
if word in ["a","an"]:
inflected = "a"
# ideally I'd transcribe to IPA and check for vowel phonemes, but this'll do for now.
if wordtaglist[i+1][0].startswith(Vowels):
inflected = "an"
elif " " not in word:
inflected = getInflection(word,tag=tag)
inflected = word if not inflected else inflected[-1]
else:
# only conjugate the relevant part of a collocation:
# e.g. "run across" -> "ran across" NOT "run acrossed"
for subword,subtag in nltk.pos_tag( nltk.word_tokenize(word) ):
if _translateTag(subtag,"WN") in ValidWNTags:
infl = getInflection(subword,tag=tag)[-1]
inflected = word.replace(subword , infl)
yield inflected,tag
def assemble(wordtags):
""" Autobots, assemble! """
first = True
for word,tag in wordtags:
# truecasing proper nouns and "I":
word = word.title() if (tag in ["NNP","NNPS"]) or (word == "i") else word
# conditions in case of contractions or punctuation:
wordset = set(word)
hasPunct = bool( wordset & PunctSet )
# without apostrophe, contractions have no punctuation:
isContraction = hasPunct and not bool( (wordset - {"'"}) & PunctSet )
isClauseDelim = not bool( wordset - DelimSet )
isParentheses = not bool( wordset - ParenSet )
if first:
first = False
draft = word
elif isContraction or isClauseDelim:
draft += word
else:
draft += " " + word
# truecasing sentences:
sentences = list()
for sentence in Tokenizer.tokenize(draft):
words = nltk.word_tokenize(sentence)
sentences.append(sentence.replace( words[0] , words[0].title() , 1 ))
final = ' '.join(sentences)
# spacing surrounding parenthesese:
final = re.sub( r'''(?<=[{[(])\s+ # ((any single opening bracket)) preceding (at least one whitespace)
| # or
\s+(?=[]})]) # (at least one whitespace) preceding ((any single closing bracket))
''', '' , final , flags=re.VERBOSE)
return final
###Output
_____no_output_____
###Markdown
Main
###Code
with open('input.txt','r') as file:
text = file.read()
#text = text0
print(text, "\n")
parsedText = extract(text)
lemmaText = lemmatise(parsedText)
synonymText = synonymise(lemmaText)
inflectedText = inflectivise(synonymText)
print(assemble(inflectedText))
###Output
'Better to remain silent and be thought a fool than to speak and remove all doubt' -- Mark Twain.
|
Lectures_Advanced-DSP/random_signals/power_spectral_densities.ipynb | ###Markdown
Random Signals*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Power Spectral DensityThe (auto-) [power spectral density](https://en.wikipedia.org/wiki/Spectral_densityPower_spectral_density) (PSD) is defined as the Fourier transformation of the [auto-correlation function](correlation_functions.ipynb) (ACF). DefinitionFor a continuous-amplitude real-valued wide-sense stationary (WSS) random signal $x[k]$ the PSD is given as\begin{equation}\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \mathcal{F}_* \{ \varphi_{xx}[\kappa] \}\end{equation}where $\mathcal{F}_* \{ \cdot \}$ denotes the [discrete-time Fourier transformation](https://en.wikipedia.org/wiki/Discrete-time_Fourier_transform) (DTFT) and $\varphi_{xx}[\kappa]$ the ACF of $x[k]$. Note, the DTFT is performed with respect to $\kappa$. The ACF of a random signal of finite length $N$ can be expressed by way of a linear convolution\begin{equation}\varphi_{xx}[\kappa] = \frac{1}{N} \cdot x_N[k] * x_N[-k]\end{equation}Taking the DTFT of the left- and right-hand side results in\begin{equation}\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})\, X_N(\mathrm{e}^{-\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, | X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega}) |^2\end{equation}The last equality results from the definition of the magnitude and the symmetry of the DTFT for real-valued signals. The spectrum $X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ quantifies the amplitude density of the signal $x_N[k]$. It can be concluded from above result that the PSD quantifies the squared amplitude or power density of a random signal. This explains the term power spectral density. PropertiesThe properties of the PSD can be deduced from the properties of the ACF and the DTFT as1. From the link between the PSD $\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ and the spectrum $X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ derived above it can be concluded that the PSD real valued $$\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \in \mathbb{R}$$2. From the even symmetry $\varphi_{xx}[\kappa] = \varphi_{xx}[-\kappa]$ of the ACF it follows that $$ \Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \Phi_{xx}(\mathrm{e}^{\,-\mathrm{j}\, \Omega}) $$3. The PSD of an uncorrelated random signal is given as $$ \Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \sigma_x^2 + \mu_x^2 \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) $$ which can be deduced from the [ACF of an uncorrelated signal](correlation_functions.ipynbProperties).4. The quadratic mean of a random signal is given as $$ E\{ x[k]^2 \} = \varphi_{xx}[0] = \frac{1}{2\pi} \int\limits_{-\pi}^{\pi} \Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\, \Omega}) \,\mathrm{d} \Omega $$ The last relation can be found by expressing the ACF by the inverse DTFT. Example - Power Spectral Density of a Speech SignalIn this example the PSD $\Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \,\Omega})$ of a speech signal $x[k]$ is estimated by applying a discrete Fourier transformation (DFT) to its ACF. For a better interpretation of the PSD, the frequency axis $f = \frac{\Omega}{2 \pi} \cdot f_s$ has been chosen for illustration, where $f_s$ denotes the sampling frequency of the signal. In Python the ACF is stored in a vector with indexes $0, 1, ..., 2N -1$ where the indexes correspond to the lags $\kappa = -N+1,-N+2,....,N-1$. 
When computing the discrete Fourier transform (DFT) of the ACF numerically by the fast Fourier transform (FFT), one has to take this shift into account, for instance by multiplying the DFT $\Phi_{xx}[\mu]$ by $e^{j \mu \frac{2 \pi}{2N - 1} (N-1)}$, where $N$ denotes the length of the signal $x_N[k]$.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
# read audio file
fs, x = wavfile.read('../data/speech_8k.wav')
x = np.asarray(x, dtype=float)
N = len(x)
# compute ACF
acf = 1/len(x) * np.correlate(x, x, mode='full')
# compute PSD
psd = np.fft.fft(acf)
psd = psd * np.exp(1j*np.arange(2*N-1)*2*np.pi*(N-1)/(2*N-1))
f = np.fft.fftfreq(2*N-1, d=1/fs)
# plot PSD
plt.figure(figsize = (10, 8))
plt.plot(f, np.real(psd))
plt.title('Estimated power spectral density')
plt.ylabel(r'$\hat{\Phi}_{xx}(e^{j \Omega})$')
plt.xlabel(r'$f$')
plt.axis([0, 2000, 0, 1.1*max(np.abs(psd))]);
plt.grid()
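
# Quick numerical sanity check of property 4 (a sketch, reusing acf, psd and N from above):
# the quadratic mean E{x^2} = phi_xx[0] equals the average of the PSD samples, since the
# integral over Omega turns into a sum over the DFT bins.
print('phi_xx[0] =', acf[N-1], ' mean of PSD samples =', np.mean(np.real(psd)))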
###Output
_____no_output_____
###Markdown
**Exercise*** What does the PSD tell you about the spectral contents of a speech signal?Solution: It can be concluded from the shown PSD that the main power of a speech signal is contained in the frequency range below 500 Hz. The speech signal exhibits furthermore a harmonic structure with a dominant fundamental frequency and a number of harmonics. Cross-Power Spectral DensityThe cross-power spectral density is defined as the Fourier transformation of the [cross-correlation function](correlation_functions.ipynbCross-Correlation-Function) (CCF). DefinitionFor two continuous-amplitude real-valued wide-sense stationary (WSS) random signals $x[k]$ and $y[k]$ the cross-power spectral density is given as\begin{equation}\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \mathcal{F}_* \{ \varphi_{xy}[\kappa] \}\end{equation}where $\varphi_{xy}[\kappa]$ denotes the CCF of $x[k]$ and $y[k]$. Note, the DTFT is performed with respect to $\kappa$. The CCF of two random signals of finite lengths $N$ and $M$ can be expressed by way of a linear convolution\begin{equation}\varphi_{xy}[\kappa] = \frac{1}{N} \cdot x_N[k] * y_M[-k]\end{equation}Taking the DTFT of the left- and right-hand side results in\begin{equation}\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})\, Y_M(\mathrm{e}^{-\,\mathrm{j}\,\Omega})\end{equation} Properties1. The symmetries of $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})$ can be derived from the symmetries of the CCF and the DTFT as $$ \underbrace {\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega}) = \Phi_{xy}^*(\mathrm{e}^{-\,\mathrm{j}\, \Omega})}_{\varphi_{xy}[\kappa] \in \mathbb{R}} = \underbrace {\Phi_{yx}(\mathrm{e}^{\,- \mathrm{j}\, \Omega}) = \Phi_{yx}^*(\mathrm{e}^{\,\mathrm{j}\, \Omega})}_{\varphi_{yx}[-\kappa] \in \mathbb{R}}$$ from which can be concluded that $|\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})| = |\Phi_{yx}(\mathrm{e}^{\,\mathrm{j}\, \Omega})|$2. The cross PSD of two uncorrelated random signals is given as $$ \Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \mu_x^2 \mu_y^2 \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) $$ which can be deduced from the CCF of an uncorrelated signal. Example - Cross Power Spectral DensityThe following example estimates and plots the cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})$ of two random signals $x_N[k]$ and $y_M[k]$ of finite lengths $N$ and $M = 2 N$. The estimated cross PSD is calculated from the valid part of the CCF $\varphi_{xy}[\kappa]$ by an DFT in order to exclude boundary effects.
###Code
N = 1024 # length of random signal x
# generate two random signals
np.random.seed(2)
x = 2 + np.random.normal(size=N)
y = 1 + np.random.normal(size=2*N)
# compute cross PSD via CCF
acf = 1/N * np.correlate(x, y, mode='valid')
psd = np.fft.fft(acf)
psd = psd * np.exp(1j*np.arange(N+1)*2*np.pi*(N-1)/(2*N-1))
# plot results
f = np.fft.fftfreq(len(psd), d=1/2)
plt.figure(figsize = (10, 4))
plt.stem(f, np.real(psd))
plt.title('Estimated cross power spectral density')
plt.ylabel(r'$\hat{\Phi}_{xy}(e^{j \Omega})$')
plt.xlabel(r'$\Omega/ \pi$')
plt.grid()
###Output
_____no_output_____ |
analyze_critical_values.ipynb | ###Markdown
Analyze Critical Values Set up the Environment
###Code
library(ggplot2)
library(ggpubr)
library(caret)
library(latex2exp)
options(scipen = 999)
fig <- function (width, height){
options(repr.plot.width=width, repr.plot.height=height)
}
###Output
Loading required package: lattice
###Markdown
Read the Data
###Code
read.csv('tc.csv') -> df
df <- df[(df$n >= 5), ]
###Output
_____no_output_____
###Markdown
Fit a Formula
###Code
lm.fit <- lm(T_c ~ poly(alpha, n, degree=5), data=df)
#lm.fit <- nls(T_c ~ SSlogis(alpha*n, Asym, xmid, scal), data=df)
summary(lm.fit)
###Output
_____no_output_____
###Markdown
Make the Plots
###Code
df$predicted <- predict(lm.fit)
df$residuals <- residuals(lm.fit)
cbind(df[1:2], stack(df[3:4])) -> df1
options(repr.plot.width=10, repr.plot.height=8)
ggplot(data=df1, aes(alpha, values)) +
geom_point(aes(group=n, color=n)) +
facet_wrap(~ind) +
theme_pubr() + theme(text = element_text(size=20))
###Output
_____no_output_____
###Markdown
Save the Plot of Estimated Thresholds
###Code
fig(7, 7)
g <- ggplot(data=df, aes(alpha, T_c)) +
geom_point(aes(color=n)) +
#geom_line(aes(y=predicted, group=n, color=n)) +
theme_pubr() + theme(text = element_text(size=20)) + xlab(TeX(r'($\alpha$)')) + ylab(TeX(r'($T_{\alpha, n}$)'))
g
ggsave('reports/t_alpha_n.pdf', g)
ggsave('reports/t_alpha_n.eps', g)
###Output
Saving 6.67 x 6.67 in image
Saving 6.67 x 6.67 in image
|
exercises/Exercises.ipynb | ###Markdown
Synthesis
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as pl
import hazel
print(hazel.__version__)
label = ['I', 'Q', 'U', 'V']
###Output
/scratch/miniconda3/envs/py36/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
$B_x$, $B_y$, $B_z$, $\tau$, $v$, $\Delta v$, $\beta$, $a$.
###Code
mod = hazel.Model(working_mode='synthesis')
mod.add_spectral({'Name': 'spec1', 'Wavelength': [10825, 10835, 150], 'topology': 'ph1->ch1',
'LOS': [0.0,0.0,90.0], 'Boundary condition': [1.0,0.0,0.0,0.0]})
mod.add_chromosphere({'Name': 'ch1', 'Spectral region': 'spec1', 'Height': 3.0, 'Line': '10830', 'Wavelength': [10826, 10835]})
mod.add_photosphere({'Name': 'ph1', 'Spectral region': 'spec1', 'Spectral lines': [300],
'Wavelength': [10825, 10835], 'Reference atmospheric model': '../models/photospheres/model_photosphere.1d'})
mod.add_parametric({'Name': 'te1', 'Spectral region': 'spec1', 'Type': 'Voigt',
'Wavelength': [10830, 10835]})
mod.atmospheres['te1'].set_parameters([10833, 0.25, 0.4, 0.0], 1.0)
mod.setup()
tau = 0.5
v = 10.0
deltav = 8.0
beta = 1.0
a = 0.0
model = mod.atmospheres['ph1'].get_parameters()
f, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,10))
ax = ax.flatten()
fields = [100, 500, 1000]
for j in range(len(fields)):
# Set the field in the chromosphere
Bx = fields[j]
By = fields[j]
Bz = fields[j]
mod.atmospheres['ch1'].set_parameters([Bx,By,Bz,tau,v,deltav,beta,a], 1.0)
model[:,5] = fields[j]
model[:,6] = fields[j]
model[:,7] = fields[j]
mod.atmospheres['ph1'].set_parameters(model, 1.0)
mod.synthesize()
for i in range(4):
ax[i].plot(mod.spectrum['spec1'].wavelength_axis - 10830, mod.spectrum['spec1'].stokes[i,:])
for i in range(4):
ax[i].set_xlabel('Wavelength - 10830[$\AA$]')
ax[i].set_ylabel('{0}/Ic'.format(label[i]))
ax[i].set_xlim([-5,4])
pl.tight_layout()
###Output
/scratch/miniconda3/envs/py36/lib/python3.6/site-packages/matplotlib/figure.py:2267: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.
warnings.warn("This figure includes Axes that are not compatible "
|
Microsoft Malware Prediction/code/preprocessing/03-4 FE - Time Scoring.ipynb | ###Markdown
Setup: load the libraries
###Code
import numpy as np
import pandas as pd
import warnings
import gc
from tqdm import tqdm_notebook
import lightgbm as lgb
from scipy.sparse import vstack, csr_matrix, save_npz, load_npz
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.compose import ColumnTransformer
warnings.filterwarnings("ignore")
gc.enable()
pd.set_option('max_rows', 150)
pd.set_option('max_colwidth', 500)
pd.set_option('max_columns', 500)
dtypes = {
'MachineIdentifier': 'object',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
###Output
_____no_output_____
###Markdown
Load the data
###Code
%%time
train = pd.read_csv('./data/train.csv', dtype=dtypes)
test = pd.read_csv('./data/test.csv', dtype=dtypes)
###Output
Wall time: 8min 23s
###Markdown
Combine train and test
###Code
test['HasDetections'] = np.nan
data = train.append(test)
data.reset_index(drop=True, inplace=True)
data = data.reset_index().drop(['MachineIdentifier'], axis=1).rename(columns={'index':'MachineIdentifier'})
del train, test
gc.collect()
###Output
_____no_output_____
###Markdown
Time Scoring
###Code
datedict = np.load('./data/AvSigVersionTimestamps.npy')
datedict = datedict[()]
data['Date'] = data['AvSigVersion'].map(datedict)
data['Date_YMD'] = pd.to_datetime(data['Date'].astype(str).str.slice(0, 10))
del datedict
data['Score'] = data.AvSigVersion.map(data.groupby(['AvSigVersion']).HasDetections.mean())
###Output
_____no_output_____
###Markdown
DROP TIME RELATED FEATURE
###Code
DROP_FEATURES__ = ['EngineVersion', 'AppVersion', 'AvSigVersion',
'OsVer', 'OsBuild', 'OsPlatformSubRelease', 'OsBuildLab',
'IeVerIdentifier',
'Census_OSVersion', 'Census_OSBranch', 'Census_OSBuildNumber', 'Census_OSBuildRevision', 'Census_FirmwareVersionIdentifier',
'Date', 'Date_YMD']
data = data.drop(DROP_FEATURES__, axis=1)
###Output
_____no_output_____
###Markdown
Train, Test split
###Code
train = data[~data.HasDetections.isna()]
test = data[data.HasDetections.isna()]
del data
gc.collect()
train.MachineIdentifier = range(len(train))
train.reset_index(drop=True, inplace=True)
test.MachineIdentifier = range(len(test))
test.reset_index(drop=True, inplace=True)
debug = False
if debug:
train = train[:10000]
test = test[:10000]
print('Transform all features to category.\n')
for usecol in tqdm_notebook([col for col in train.columns if col not in ['HasDetections', 'MachineIdentifier', 'Score']]):
train[usecol] = train[usecol].astype('str')
test[usecol] = test[usecol].astype('str')
#Fit LabelEncoder
le = LabelEncoder().fit(
np.unique(train[usecol].unique().tolist()+
test[usecol].unique().tolist()))
#At the end 0 will be used for dropped values
train[usecol] = le.transform(train[usecol])+1
test[usecol] = le.transform(test[usecol])+1
agg_tr = (train
.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Train'}, axis=1))
agg_te = (test
.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Test'}, axis=1))
agg = pd.merge(agg_tr, agg_te, on=usecol, how='outer').replace(np.nan, 0)
#Select values with more than 1000 observations
agg = agg[(agg['Train'] > 1000)].reset_index(drop=True)
agg['Total'] = agg['Train'] + agg['Test']
#Drop unbalanced values
agg = agg[(agg['Train'] / agg['Total'] > 0.2) & (agg['Train'] / agg['Total'] < 0.8)]
agg[usecol+'Copy'] = agg[usecol]
train[usecol] = (pd.merge(train[[usecol]],
agg[[usecol, usecol+'Copy']],
on=usecol, how='left')[usecol+'Copy']
.replace(np.nan, 0).astype('int').astype('category'))
test[usecol] = (pd.merge(test[[usecol]],
agg[[usecol, usecol+'Copy']],
on=usecol, how='left')[usecol+'Copy']
.replace(np.nan, 0).astype('int').astype('category'))
del le, agg_tr, agg_te, agg, usecol
gc.collect()
train.shape
###Output
_____no_output_____
###Markdown
Save intermediate results
###Code
# train.to_csv('./data_temp/train_temp.csv')
# test.to_csv('./data/test_temp.csv')
y_train = np.array(train['HasDetections'])
train_ids = train.index
test_ids = test.index
del train['HasDetections'], train['MachineIdentifier'], test['MachineIdentifier'], test['HasDetections']
gc.collect()
print("If you don't want use Sparse Matrix choose Kernel Version 2 to get simple solution.\n")
print('--------------------------------------------------------------------------------------------------------')
print('Transform Data to Sparse Matrix.')
print('Sparse Matrix can be used to fit a lot of models, eg. XGBoost, LightGBM, Random Forest, K-Means and etc.')
print('To concatenate Sparse Matrices by column use hstack()')
print('Read more about Sparse Matrix https://docs.scipy.org/doc/scipy/reference/sparse.html')
print('Good Luck!')
print('--------------------------------------------------------------------------------------------------------')
#Fit OneHotEncoder
# ohe = OneHotEncoder(categories='auto', sparse=True, dtype='uint8').fit(train)
# categorical features
cat_features = train.dtypes[(train.dtypes == 'category')].index.tolist()
ct = ColumnTransformer([('categorical', OneHotEncoder(categories='auto', sparse=True), cat_features),
('numerical', 'passthrough', ['Score'])],
sparse_threshold=1).fit(train)
#Transform data using small groups to reduce memory usage
m = 100000
train = vstack([ct.transform(train[i*m:(i+1)*m]) for i in range(train.shape[0] // m + 1)])
test = vstack([ct.transform(test[i*m:(i+1)*m]) for i in range(test.shape[0] // m + 1)])
train.shape
save_npz('./data_temp/train.npz', train, compressed=True)
save_npz('./data_temp/test.npz', test, compressed=True)
del train, test
gc.collect()
train = load_npz('./data_temp/train.npz')
test = load_npz('./data_temp/test.npz')
test = csr_matrix(test, dtype='float32')
skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)
skf.get_n_splits(train_ids, y_train)
lgb_test_result = np.zeros(test_ids.shape[0])
lgb_train_result = np.zeros(train_ids.shape[0])
counter = 0
print('\nLightGBM\n')
for train_index, test_index in skf.split(train_ids, y_train):
print('Fold {}\n'.format(counter + 1))
X_fit = vstack([train[train_index[i*m:(i+1)*m]] for i in range(train_index.shape[0] // m + 1)])
X_val = vstack([train[test_index[i*m:(i+1)*m]] for i in range(test_index.shape[0] // m + 1)])
X_fit, X_val = csr_matrix(X_fit, dtype='float32'), csr_matrix(X_val, dtype='float32')
y_fit, y_val = y_train[train_index], y_train[test_index]
gc.collect()
lgb_model = lgb.LGBMClassifier(max_depth=-1,
n_estimators=1000,
learning_rate=0.1,
num_leaves=2**5-1,
objective='binary',
boosting_type='gbdt',
# overfitting handling
# max_bin=120,
# lambda_l1=6,
# lambda_l2=2,
save_binary=True,
feature_fraction=0.8,
feature_fraction_seed=42,
n_jobs=-1)
print("fitting")
lgb_model.fit(X_fit, y_fit, eval_metric='auc',
eval_set=[(X_val, y_val)],
verbose=200, early_stopping_rounds=100)
del X_fit, X_val, y_fit, y_val, train_index, test_index
gc.collect()
print("predicting")
lgb_test_result += lgb_model.predict_proba(test)[:,1]
counter += 1
gc.collect()
submission = pd.read_csv('./data/sample_submission.csv')
submission.head(3)
submission.HasDetections = lgb_test_result / counter
submission.head(3)
submission.to_csv('./data/sub_lgb_base_open_kernel.csv', index=False)
param = {
'objective': 'binary',
'boosting_type': 'gbdt',
'learning_rate': 0.05,
'max_depth': -1,
'num_leaves': 31,
'min_data_in_leaf': 20,
'min_sum_hessian_in_leaf': 0.0025,
'max_bin': 120,
'lambda_l1': 5,
'lambda_l2': 2,
'min_gain_to_split': 0.65,
'save_binary': True,
'bagging_fraction': 1.0,
'bagging_freq': 5,
'feature_fraction': 0.05,
'seed': 42,
'feature_fraction_seed': 42,
'bagging_seed': 42,
'drop_seed': 42,
'data_random_seed': 42,
'verbose': 1,
'metric': 'auc'
}
max_depth=-1,
n_estimators=1000,
learning_rate=0.1,
num_leaves=2**5-1,
objective='binary',
boosting_type='gbdt',
save_binary=True,
feature_fraction=0.8,
feature_fraction_seed=42,
n_jobs=-1
###Output
_____no_output_____ |
Lab2/Lab2_DL-Students.ipynb | ###Markdown
Deep Learning Lab Session 2 - 1.5 Hours Convolutional Neural Network (CNN) for Handwritten Digits Recognition Group name: DeepLearning19 - Deodato Giacomo - Bucquet Anthime The aim of this session is to practice with Convolutional Neural Networks. Each group should fill and run appropriate notebook cells. Generate your final report (export as HTML) and upload it on the submission website http://bigfoot-m1.eurecom.fr/teachingsub/login (using your deeplearnXX/password). Do not forget to run all your cells before generating your final report and do not forget to include the names of all participants in the group. The lab session should be completed and submitted by May 30th 2018 (23:59:59 CET). Introduction In the previous Lab Session, you built a Multilayer Perceptron for recognizing hand-written digits from the MNIST data-set. The best achieved accuracy on testing data was about 97%. Can you do better than these results using a deep CNN ?In this Lab Session, you will build, train and optimize in TensorFlow one of the early Convolutional Neural Networks, **LeNet-5**, to go to more than 99% of accuracy. Load MNIST Data in TensorFlowRun the cell below to load the MNIST data that comes with TensorFlow. You will use this data in **Section 1** and **Section 2**.
###Code
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from time import time
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
X_train, y_train = mnist.train.images, mnist.train.labels
X_validation, y_validation = mnist.validation.images, mnist.validation.labels
X_test, y_test = mnist.test.images, mnist.test.labels
print("Image Shape: {}".format(X_train[0].shape))
print("Training Set: {} samples".format(len(X_train)))
print("Validation Set: {} samples".format(len(X_validation)))
print("Test Set: {} samples".format(len(X_test)))
epsilon = 1e-10 # this is a parameter you will use later
###Output
/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
Section 1 : My First Model in TensorFlow Before starting with CNN, let's train and test in TensorFlow the example**y=softmax(Wx+b)** seen in the first lab. This model reaches an accuracy of about 92 %.You will also learn how to launch the TensorBoard https://www.tensorflow.org/get_started/summaries_and_tensorboard to visualize the computation graph, statistics and learning curves. Part 1 : Read carefully the code in the cell below. Run it to perform training.
###Code
tf.reset_default_graph() # reset the default graph before defining a new model
#STEP 1
# Parameters
learning_rate = 0.01
training_epochs = 40
batch_size = 128
display_step = 1
logs_path = 'log_files/' # useful for tensorboard
# tf Graph Input: mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# 0-9 digits recognition, 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
# Set model weights
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')
# Construct model and encapsulating all ops into scopes, making Tensorboard's Graph visualization more convenient
with tf.name_scope('Model'):
# Model
pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
with tf.name_scope('Loss'):
# Minimize error using cross entropy
# We use tf.clip_by_value to avoid having too low numbers in the log function
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(tf.clip_by_value(pred, epsilon, 1.0)), reduction_indices=1))
with tf.name_scope('SGD'):
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
with tf.name_scope('Accuracy'):
# Accuracy
acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
acc = tf.reduce_mean(tf.cast(acc, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Create a summary to monitor cost tensor
tf.summary.scalar("Loss", cost)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("Accuracy", acc)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
#STEP 2
# Launch the graph for training
with tf.Session() as sess:
sess.run(init)
# op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size, shuffle=(i==0))
# Run optimization op (backprop), cost op (to get loss value)
# and summary nodes
_, c, summary = sess.run([optimizer, cost, merged_summary_op],
feed_dict={x: batch_xs, y: batch_ys})
# Write logs at every iteration
summary_writer.add_summary(summary, epoch * total_batch + i)
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch: ", '%02d' % (epoch+1), " =====> Loss=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
summary_writer.flush()
# Test model
# Calculate accuracy
print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))
###Output
Epoch: 01 =====> Loss= 1.288568147
Epoch: 02 =====> Loss= 0.732308971
Epoch: 03 =====> Loss= 0.600200272
Epoch: 04 =====> Loss= 0.536473810
Epoch: 05 =====> Loss= 0.497731537
Epoch: 06 =====> Loss= 0.470901849
Epoch: 07 =====> Loss= 0.451301165
Epoch: 08 =====> Loss= 0.435680064
Epoch: 09 =====> Loss= 0.423353645
Epoch: 10 =====> Loss= 0.413345681
Epoch: 11 =====> Loss= 0.404281634
Epoch: 12 =====> Loss= 0.396839049
Epoch: 13 =====> Loss= 0.390208097
Epoch: 14 =====> Loss= 0.384563916
Epoch: 15 =====> Loss= 0.379514001
Epoch: 16 =====> Loss= 0.374462080
Epoch: 17 =====> Loss= 0.370331678
Epoch: 18 =====> Loss= 0.366488553
Epoch: 19 =====> Loss= 0.362964171
Epoch: 20 =====> Loss= 0.359689518
Epoch: 21 =====> Loss= 0.356679378
Epoch: 22 =====> Loss= 0.353974787
Epoch: 23 =====> Loss= 0.351341481
Epoch: 24 =====> Loss= 0.348637887
Epoch: 25 =====> Loss= 0.346562084
Epoch: 26 =====> Loss= 0.344416684
Epoch: 27 =====> Loss= 0.342300205
Epoch: 28 =====> Loss= 0.340159270
Epoch: 29 =====> Loss= 0.338198474
Epoch: 30 =====> Loss= 0.336650787
Epoch: 31 =====> Loss= 0.335206250
Epoch: 32 =====> Loss= 0.333532659
Epoch: 33 =====> Loss= 0.332020689
Epoch: 34 =====> Loss= 0.330053726
Epoch: 35 =====> Loss= 0.329209634
Epoch: 36 =====> Loss= 0.327891982
Epoch: 37 =====> Loss= 0.326795358
Epoch: 38 =====> Loss= 0.325166583
Epoch: 39 =====> Loss= 0.324220415
Epoch: 40 =====> Loss= 0.323112071
Optimization Finished!
Accuracy: 0.9154
###Markdown
Part 2 : Using Tensorboard, we can now visualize the created graph, giving you an overview of your architecture and how all of the major components are connected. You can also see and analyse the learning curves. To launch tensorBoard: - Open a Terminal and run the command line **"tensorboard --logdir=lab_2/log_files/"**- Click on "Tensorboard web interface" in Zoe Enjoy It !! Section 2 : The 99% MNIST Challenge ! Part 1 : LeNet5 implementation You are now more familar with **TensorFlow** and **TensorBoard**. In this section, you are to build, train and test the baseline [LeNet-5](http://yann.lecun.com/exdb/lenet/) model for the MNIST digits recognition problem. Then, you will make some optimizations to get more than 99% of accuracy.For more informations, have a look at this list of results: http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html Figure 1: Lenet-5 The LeNet architecture takes a 28x28xC image as input, where C is the number of color channels. Since MNIST images are grayscale, C is 1 in this case.--------------------------**Layer 1 - Convolution (5x5):** The output shape should be 28x28x6. **Activation:** ReLU. **MaxPooling:** The output shape should be 14x14x6.**Layer 2 - Convolution (5x5):** The output shape should be 10x10x16. **Activation:** ReLU. **MaxPooling:** The output shape should be 5x5x16.**Flatten:** Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. You may need to use tf.reshape.**Layer 3 - Fully Connected:** This should have 120 outputs. **Activation:** ReLU.**Layer 4 - Fully Connected:** This should have 84 outputs. **Activation:** ReLU.**Layer 5 - Fully Connected:** This should have 10 outputs. **Activation:** softmax. Question 2.1.1 Implement the Neural Network architecture described above.For that, your will use classes and functions from https://www.tensorflow.org/api_docs/python/tf/nn. We give you some helper functions for weigths and bias initilization. Also you can refer to section 1.
###Code
def LeNet5(X):
def weight_variable(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
return tf.Variable(tf.constant(0.05, shape=[shape]))
X = tf.reshape(X, [-1, 28, 28, 1])
conv1 = tf.nn.conv2d(input=X,
filter=weight_variable([5, 5, 1, 6]),
strides=[1, 1, 1, 1],
padding='SAME')
conv1 = tf.nn.relu(tf.nn.bias_add(conv1, bias_variable(6)))
pool1 = tf.nn.max_pool(value=conv1,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='VALID')
conv2 = tf.nn.conv2d(input=pool1,
filter=weight_variable([5, 5, 6, 16]),
strides=[1, 1, 1, 1],
padding='VALID')
conv2 = tf.nn.relu(tf.nn.bias_add(conv2, bias_variable(16)))
pool2 = tf.nn.max_pool(value=conv2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='VALID')
flatten = tf.contrib.layers.flatten(pool2)
fc1 = tf.nn.bias_add(tf.matmul(flatten, weight_variable([400, 120])), bias_variable(120))
fc1 = tf.nn.relu(fc1)
fc2 = tf.nn.bias_add(tf.matmul(fc1, weight_variable([120,84])), bias_variable(84))
fc2 = tf.nn.relu(fc2)
fc3 = tf.nn.bias_add(tf.matmul(fc2, weight_variable([84,10])), bias_variable(10))
fc3 = tf.nn.softmax(fc3)
return fc3
###Output
_____no_output_____
###Markdown
Question 2.1.2. Calculate the number of parameters of this model conv1: $5\ x\ 5\ x\ 1\ x\ 6 + 6 = 156$conv2: $5\ x\ 5\ x\ 6\ x\ 16 + 16 = 2416$fc1: $400\ x\ 120 + 120 = 48120$fc2: $120\ x\ 84 + 84 = 10164$fc3: $84\ x\ 10 + 10 = 850$total: $61706$
###Code
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
#print(shape)
#print(len(shape))
variable_parameters = 1
for dim in shape:
#print(dim)
variable_parameters *= dim.value
#print(variable_parameters)
total_parameters += variable_parameters
print("Number of parameters of this model {} ".format(total_parameters))
###Output
Number of parameters of this model 61706
###Markdown
Question 2.1.3. Define your model, its accuracy and the loss function according to the following parameters (you can look at Section 1 to see what is expected): Learning rate: 0.001 Loss Fucntion: Cross-entropy Optimizer: tf.train.GradientDescentOptimizer Number of epochs: 40 Batch size: 128
###Code
# reset the default graph before defining a new model
tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y = tf.placeholder(tf.float32, [None, 10], name='y')
learning_rate = 0.001
n_epochs = 40
batch_size = 128
verbose = 2
logs_path = 'log_files/'
saving_path = 'models/'
with tf.name_scope('Network'):
model = LeNet5(x)
with tf.name_scope('Loss'):
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(tf.clip_by_value(model, epsilon, 1.0)), reduction_indices=1))
with tf.name_scope('SGD'):
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
###Output
_____no_output_____
###Markdown
Question 2.1.4. Implement the evaluation function for accuracy computation
###Code
def evaluate(Y_pred, Y_true):
acc = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y_true, 1))
acc = tf.reduce_mean(tf.cast(acc, tf.float32))
return acc
with tf.name_scope('Accuracy'):
accuracy = evaluate(model, y)
###Output
_____no_output_____
###Markdown
Question 2.1.5. Implement the training pipeline and run the training data through it to train the model. - Before each epoch, shuffle the training set. - Print the loss per mini-batch and the training/validation accuracy per epoch. (Display results every 100 epochs) - Save the model after training. - After training, print the final testing accuracy.
###Code
# Initializing the variables
init = tf.global_variables_initializer()
# Create a summary to monitor cost and accuracy tensor
tf.summary.scalar("Loss_LeNet-5_SGD", cost)
tf.summary.scalar("Accuracy_LeNet-5_SGD", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
def train_mnist(n_epochs, batch_size, cost, optimizer, accuracy, verbose=100):
start = time()
for epoch in range(n_epochs):
avg_cost = 0.
n_batches = int(mnist.train.num_examples/batch_size)
for i in range(n_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size, shuffle=(i==0))
_, c, summary = sess.run([optimizer, cost, merged_summary_op], feed_dict={x: batch_xs, y: batch_ys})
# Write logs at every iteration
summary_writer.add_summary(summary, epoch * n_batches + i)
# Compute average loss
avg_cost += c
if verbose != 0 and ((epoch + 1) % verbose == 0):
print("Epoch: {:3d} | Loss: {:.5f} | Accuracy: {:.2f} %"\
.format(epoch+1, avg_cost/n_batches, accuracy.eval(feed_dict={x: mnist.validation.images, y: mnist.validation.labels})*100))
end = time() - start
print("\n Training Time: {} min {} s".format(int(end/60), int(end%60)))
with tf.Session() as sess:
sess.run(init)
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
train_mnist(n_epochs, batch_size, cost, optimizer, accuracy, verbose)
print("Accuracy on the test data: {:.2f} %"\
.format(accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels})*100))
summary_writer.flush()
###Output
Epoch: 2 | Loss: 2.12917 | Accuracy: 49.56 %
Epoch: 4 | Loss: 1.37251 | Accuracy: 71.74 %
Epoch: 6 | Loss: 0.66525 | Accuracy: 83.96 %
Epoch: 8 | Loss: 0.46188 | Accuracy: 87.54 %
Epoch: 10 | Loss: 0.38557 | Accuracy: 89.32 %
Epoch: 12 | Loss: 0.34211 | Accuracy: 90.66 %
Epoch: 14 | Loss: 0.31145 | Accuracy: 91.52 %
Epoch: 16 | Loss: 0.28748 | Accuracy: 92.36 %
Epoch: 18 | Loss: 0.26742 | Accuracy: 92.86 %
Epoch: 20 | Loss: 0.25058 | Accuracy: 93.26 %
Epoch: 22 | Loss: 0.23602 | Accuracy: 93.82 %
Epoch: 24 | Loss: 0.22309 | Accuracy: 94.08 %
Epoch: 26 | Loss: 0.21177 | Accuracy: 94.38 %
Epoch: 28 | Loss: 0.20186 | Accuracy: 94.72 %
Epoch: 30 | Loss: 0.19270 | Accuracy: 94.78 %
Epoch: 32 | Loss: 0.18458 | Accuracy: 95.20 %
Epoch: 34 | Loss: 0.17719 | Accuracy: 95.24 %
Epoch: 36 | Loss: 0.17071 | Accuracy: 95.46 %
Epoch: 38 | Loss: 0.16435 | Accuracy: 95.58 %
Epoch: 40 | Loss: 0.15865 | Accuracy: 95.74 %
Training Time: 11 min 19 s
Accuracy on the test data: 95.60 %
###Markdown
Question 2.1.6 : Use TensorBoard to visualise and save the loss and accuracy curves. You will save figures in the folder **"lab_2/MNIST_figures"** and display them in your notebook. (Accuracy and loss curve figures.) Part 2 : LeNet-5 Optimization Question 2.2.1 - Retrain your network with AdamOptimizer and then fill the table below:
| Optimizer        | Gradient Descent | AdamOptimizer |
|------------------|------------------|---------------|
| Testing Accuracy | 95.6%            | 99.11 %       |
| Training Time    | 11 min 19 s      | 12 min 5 s    |
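For reference, Adam's update rule (standard formulation, not specific to this lab) keeps exponential moving averages of the gradient and of its square: $$m_t = \beta_1 m_{t-1} + (1-\beta_1)\,g_t, \qquad v_t = \beta_2 v_{t-1} + (1-\beta_2)\,g_t^2, \qquad \theta_t = \theta_{t-1} - \eta\,\frac{\hat{m}_t}{\sqrt{\hat{v}_t}+\epsilon},$$ where $\hat{m}_t$ and $\hat{v}_t$ are the bias-corrected estimates.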
###Code
tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y = tf.placeholder(tf.float32, [None, 10], name='y')
model = LeNet5(x)
learning_rate = 0.001
n_epochs = 40
batch_size = 128
verbose = 2
logs_path = 'log_files/'
saving_path = 'models/'
with tf.name_scope('Loss'):
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(tf.clip_by_value(model, epsilon, 1.0)), reduction_indices=1))
with tf.name_scope('AdamOpti'):
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with tf.name_scope('Accuracy'):
accuracy = evaluate(model, y)
# Initializing the variables
init = tf.global_variables_initializer()
# Create a summary to monitor cost and accuracy tensors
tf.summary.scalar("Loss_LeNet-5_ADAM", cost)
tf.summary.scalar("Accuracy_LeNet-5_ADAM", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(init)
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
train_mnist(n_epochs, batch_size, cost, optimizer, accuracy, verbose)
print("Accuracy on the test data: {:.2f} %"\
.format(accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels})*100))
summary_writer.flush()
###Output
Epoch: 2 | Loss: 0.09158 | Accuracy: 97.72 %
Epoch: 4 | Loss: 0.04850 | Accuracy: 98.56 %
Epoch: 6 | Loss: 0.03456 | Accuracy: 98.38 %
Epoch: 8 | Loss: 0.02495 | Accuracy: 98.70 %
Epoch: 10 | Loss: 0.01857 | Accuracy: 98.94 %
Epoch: 12 | Loss: 0.01549 | Accuracy: 98.66 %
Epoch: 14 | Loss: 0.01234 | Accuracy: 98.98 %
Epoch: 16 | Loss: 0.01013 | Accuracy: 98.72 %
Epoch: 18 | Loss: 0.00849 | Accuracy: 99.22 %
Epoch: 20 | Loss: 0.00822 | Accuracy: 99.10 %
Epoch: 22 | Loss: 0.00671 | Accuracy: 98.92 %
Epoch: 24 | Loss: 0.00610 | Accuracy: 98.84 %
Epoch: 26 | Loss: 0.00760 | Accuracy: 99.04 %
Epoch: 28 | Loss: 0.00422 | Accuracy: 98.78 %
Epoch: 30 | Loss: 0.00459 | Accuracy: 98.68 %
Epoch: 32 | Loss: 0.00609 | Accuracy: 98.72 %
Epoch: 34 | Loss: 0.00443 | Accuracy: 99.14 %
Epoch: 36 | Loss: 0.00522 | Accuracy: 99.20 %
Epoch: 38 | Loss: 0.00367 | Accuracy: 99.02 %
Epoch: 40 | Loss: 0.00654 | Accuracy: 99.16 %
Training Time: 12 min 5 s
Accuracy on the test data: 99.11 %
###Markdown
(Accuracy and loss curve figures.) Which optimizer gives the best accuracy on test data? Adam combines the benefits of two other optimizers, AdaGrad and RMSProp: in addition to keeping a running average of the squared gradients (the uncentered variance) as in RMSProp, Adam also keeps a running average of the gradients themselves (the first moment, i.e. the mean). The main downside of the algorithm is that it requires more computation for each parameter in each training step, because it maintains the moving averages and variance and computes the scaled gradient; in practice it takes almost one minute longer than plain gradient descent to train for 40 epochs. Question 2.2.2 Try to add dropout (keep_prob = 0.75) before the first fully connected layer. You will use tf.nn.dropout for that purpose. What accuracy do you achieve on testing data?
###Code
def LeNet5_dropout(X):
def weight_variable(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
return tf.Variable(tf.constant(0.05, shape=[shape]))
X = tf.reshape(X, [-1, 28, 28, 1])
conv1 = tf.nn.conv2d(input=X,
filter=weight_variable([5, 5, 1, 6]),
strides=[1, 1, 1, 1],
padding='SAME')
conv1 = tf.nn.relu(tf.nn.bias_add(conv1, bias_variable(6)))
pool1 = tf.nn.max_pool(value=conv1,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='VALID')
conv2 = tf.nn.conv2d(input=pool1,
filter=weight_variable([5, 5, 6, 16]),
strides=[1, 1, 1, 1],
padding='VALID')
conv2 = tf.nn.relu(tf.nn.bias_add(conv2, bias_variable(16)))
pool2 = tf.nn.max_pool(value=conv2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='VALID')
flatten = tf.contrib.layers.flatten(pool2)
dropout = tf.nn.dropout(flatten, 0.75)
fc1 = tf.nn.bias_add(tf.matmul(dropout, weight_variable([400, 120])), bias_variable(120))
fc1 = tf.nn.relu(fc1)
fc2 = tf.nn.bias_add(tf.matmul(fc1, weight_variable([120,84])), bias_variable(84))
fc2 = tf.nn.relu(fc2)
fc3 = tf.nn.bias_add(tf.matmul(fc2, weight_variable([84,10])), bias_variable(10))
fc3 = tf.nn.softmax(fc3)
return fc3
tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y = tf.placeholder(tf.float32, [None, 10], name='predict_label')
model_dropout = LeNet5_dropout(x)
learning_rate = 0.001
n_epochs = 100
batch_size = 128
verbose = 5
logs_path = 'log_files/'
saving_path = 'models/'
with tf.name_scope('Loss'):
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(tf.clip_by_value(model_dropout, epsilon, 1.0)), reduction_indices=1))
with tf.name_scope('AdamOpti'):
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with tf.name_scope('Accuracy'):
accuracy = evaluate(model_dropout, y)
# Initializing the variables
init = tf.global_variables_initializer()
# Create a summary to monitor cost and accuracy tensors
tf.summary.scalar("Loss_LeNet-5_ADAM_Drop", cost)
tf.summary.scalar("Accuracy_LeNet-5_ADAM_drop", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(init)
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
train_mnist(n_epochs, batch_size, cost, optimizer, accuracy, verbose)
print("Accuracy on the test data: {:.2f} %"\
.format(accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels})*100))
summary_writer.flush()
###Output
Epoch: 5 | Loss: 0.05678 | Accuracy: 98.04 %
Epoch: 10 | Loss: 0.03534 | Accuracy: 98.48 %
Epoch: 15 | Loss: 0.02524 | Accuracy: 98.76 %
Epoch: 20 | Loss: 0.02025 | Accuracy: 98.88 %
Epoch: 25 | Loss: 0.01513 | Accuracy: 98.82 %
Epoch: 30 | Loss: 0.01316 | Accuracy: 98.80 %
Epoch: 35 | Loss: 0.01228 | Accuracy: 98.80 %
Epoch: 40 | Loss: 0.01114 | Accuracy: 98.94 %
Epoch: 45 | Loss: 0.01060 | Accuracy: 99.12 %
Epoch: 50 | Loss: 0.01009 | Accuracy: 98.86 %
Epoch: 55 | Loss: 0.00897 | Accuracy: 98.90 %
Epoch: 60 | Loss: 0.00757 | Accuracy: 98.96 %
Epoch: 65 | Loss: 0.00743 | Accuracy: 98.96 %
Epoch: 70 | Loss: 0.00671 | Accuracy: 99.06 %
Epoch: 75 | Loss: 0.00605 | Accuracy: 98.82 %
Epoch: 80 | Loss: 0.00536 | Accuracy: 99.04 %
Epoch: 85 | Loss: 0.00554 | Accuracy: 99.00 %
Epoch: 90 | Loss: 0.00593 | Accuracy: 98.96 %
Epoch: 95 | Loss: 0.00480 | Accuracy: 98.92 %
Epoch: 100 | Loss: 0.00541 | Accuracy: 98.70 %
Training Time: 37 min 46 s
Accuracy on the test data: 98.88 %
|
psana/psana/xtcav/notebooks/Experiments.ipynb | ###Markdown
Genesis Data
###Code
test = loadmat("ML_struct_mat_1_242/ML_data_struct_1.mat")
test_off = test["Las_OFF_beam_phase_space_H"].astype(np.float32)/np.sum(test["Las_OFF_beam_phase_space_H"].astype(np.float32))
test_on = test["Las_ON_beam_phase_space_H"].astype(np.float32)/np.sum(test["Las_ON_beam_phase_space_H"].astype(np.float32))
plt.imshow(test_off, cmap='hot', interpolation='nearest', aspect='auto')
plt.ylim(plt.ylim()[::-1])
plt.imshow(test_on, cmap='hot', interpolation='nearest', aspect='auto')
plt.ylim(plt.ylim()[::-1])
defaultSS = xtcav.Utils.ShotToShotParameters(ebeamcharge=0.0796590834624, dumpecharge=3.01628324550712e-11, xtcavrfamp=70.0509262084961, xtcavrfphase=-88.50161743164062, xrayenergy=0, unixtime=6496260206392366315, fiducial=6828, valid=1)
## Get Genesis data and format for xtcav code
images_on = []
images_off = []
current = []
time = []
image_profiles_off = []
image_profiles_on = []
power = []
for folder, rang in [('ML_struct_mat_1_242',range(1,243))]:#, ('ML_struct_mat_243_424', range(243,425))]:
for i in rang:
try:
f = folder+"/ML_data_struct_"+str(i)+".mat"
mat = loadmat(f)
#may need to flip the phase space about the X axis so its in the right orientation
lasing_off = mat["Las_OFF_beam_phase_space_H"].astype(np.float32)/np.sum(mat["Las_OFF_beam_phase_space_H"])
lasing_on = mat["Las_ON_beam_phase_space_H"].astype(np.float32)/np.sum(mat["Las_ON_beam_phase_space_H"])
except:
print "Nothing for example ", i
continue
#level = 45
#lasing_on = cv2.GaussianBlur(lasing_on, (level, level), 0)
images_on.append(lasing_on)
#lasing_off = cv2.GaussianBlur(lasing_off, (level, level), 0)
images_off.append(lasing_off)
current.append(mat['final_beam_current'][0].astype(np.float32))
time.append(mat['time'][0])
po = mat['power'][0].astype(np.float32)
#blur true power profile
# sig = int(float(len(po))/lasing_off.shape[0]*level)
# sig = sig +1 if sig %2 == 0 else sig
# po = cv2.GaussianBlur(po, (sig,sig), 0).flatten()
# po[po < 1e-5] = 0
power.append(po)
# format genesis data for use by xtcav code
# lasing off profile
lasing_off_formatted = np.zeros((1,lasing_off.shape[0],lasing_off.shape[1]))
lasing_off_formatted[0,:,:]=lasing_off
roi = xtcav.Utils.ROIMetrics(lasing_off.shape[1], 0,
lasing_off.shape[0], 0,
x=np.arange(0, lasing_off.shape[1]),
y=np.arange(0, lasing_off.shape[0]))
im_stat=xtcav.Utils.getImageStatistics(lasing_off_formatted, roi)
xfs = mat["Las_OFF_beam_phase_space_XY"][:,0]
#xfs = mat["Las_ON_beam_phase_space_XY"][:,0] if xfs[0] == 0 else xfs
yMeV = mat["Las_OFF_beam_phase_space_XY"][:,1]
xfsPerPix = xfs[1] - xfs[0]
yMeVPerPix = yMeV[1]- yMeV[0]
pu = xtcav.Utils.PhysicalUnits(xfs, yMeV, xfsPerPix, yMeVPerPix)
ss = defaultSS._replace(dumpecharge = np.sum(current[-1]*xfsPerPix*1e-15))
image_profiles_off.append(xtcav.Utils.ImageProfile(im_stat, roi, ss, pu))
#lasing on profile
lasing_on_formatted = np.zeros((1,lasing_on.shape[0],lasing_on.shape[1]))
lasing_on_formatted[0,:,:] = lasing_on
roi = xtcav.Utils.ROIMetrics(lasing_on.shape[1], 0,
lasing_on.shape[0], 0,
x=np.arange(0, lasing_on.shape[1]),
y=np.arange(0, lasing_on.shape[0]))
im_stat=xtcav.Utils.getImageStatistics(lasing_on_formatted, roi)
xfs = mat["Las_ON_beam_phase_space_XY"][:,0]
yMeV = mat["Las_ON_beam_phase_space_XY"][:,1]
xfsPerPix = xfs[1] - xfs[0]
yMeVPerPix = yMeV[1]-yMeV[0]
pu = xtcav.Utils.PhysicalUnits(xfs, yMeV, xfsPerPix, yMeVPerPix)
ss2 = ss._replace(xrayenergy = np.sum(power[-1]*xfsPerPix)*1e-6)
image_profiles_on.append(xtcav.Utils.ImageProfile(im_stat, roi, ss2, pu))
num_profiles = len(images_on)
grouped,_ = xtcav.Utils.averageXTCAVProfilesGroups(image_profiles_off)
###Output
Averaging lasing off profiles into 17 groups.
###Markdown
Sample images
###Code
example=20
pu = image_profiles_on[example].physical_units
limits = (pu.xfs[0],pu.xfs[-1],pu.yMeV[0],pu.yMeV[-1])
plt.imshow(images_on[example][::-1], cmap='hot', interpolation='nearest', aspect='auto', extent=limits)
pu = image_profiles_off[example].physical_units
limits = (pu.xfs[0],pu.xfs[-1],pu.yMeV[0],pu.yMeV[-1])
plt.imshow(images_off[example][::-1], cmap='hot', interpolation='nearest', aspect='auto', extent=limits)
#Plot center of mass
xProfile = np.sum(images_off[example], axis=0)
yProfile = np.sum(images_off[example], axis=1)
plt.plot(np.dot(np.transpose(images_off[example]),image_profiles_off[example].physical_units.yMeV)/xProfile)
xProfile = np.sum(images_on[example], axis=0)
yProfile = np.sum(images_on[example], axis=1)
plt.plot(np.dot(np.transpose(images_on[example]),image_profiles_on[example].physical_units.yMeV)/xProfile)
example = 12
processed_image_opt = xtcav.Utils.processLasingSingleShot(image_profiles_on[example], grouped)
processed_image_opt.powerAgreement
a, = plt.plot(processed_image_opt.t, processed_image_opt.nolasingECurrent[0], label = "no lasing")
b, = plt.plot(processed_image_opt.t, processed_image_opt.lasingECurrent[0], label = 'lasing')
l = plt.legend(handles=[a,b])
###Output
_____no_output_____
###Markdown
Error Calculations
###Code
import cv2
offset = 16 # since power profiles are on different time scales, we have to offset the time axis to get them to line up
interp=scipy.interpolate.interp1d(processed_image_opt.t + np.mean(time[example]) - np.mean(processed_image_opt.t),processed_image_opt.powerERMS[0],kind='linear',fill_value=0,bounds_error=False,assume_sorted=True) #Interpolation to master time
power_opt1 = interp(time[example])
power_opt1 = cv2.GaussianBlur(power_opt1, (7,7), 0).flatten()
interp=scipy.interpolate.interp1d(processed_image_opt.t + np.mean(time[example]) - offset,processed_image_opt.powerECOM[0],kind='linear',fill_value=0,bounds_error=False,assume_sorted=True) #Interpolation to master time
power_opt2 = interp(time[example])
power_opt2 = cv2.GaussianBlur(power_opt2, (7,7), 0).flatten()
a, = plt.plot(time[example], power_opt1, label = "estimated power RMS")
b, = plt.plot(time[example], power[example], label = "actual power")
c, = plt.plot(time[example], power_opt2, label = "estimated power COM")
plt.title("Calculated power profile vs. actual power profile")
l = plt.legend(handles=[a,b,c])
from scipy import optimize
def getMin(time1, power1, time2, power2, x):
interp=scipy.interpolate.interp1d(time2 + x, power2,kind='linear',fill_value=0,bounds_error=False,assume_sorted=True) #Interpolation to master time
power2 = interp(time1)
return dist(power1, power2)
def dist(power1,power2):
power2 = power2*np.sum(power1)/np.sum(power2)
square_dist = np.power(power1 - power2, 2)
return np.sum(square_dist)
algs= ['cosine', 'kmeans', 'l1', 'old','hierarchical']
%%capture
# Average distance from true power profile when different number of clusters are used
num_profiles = len(image_profiles_off)
alg_errors = {}
nor_coast = cu.getNorthCoast(images_off)
for alg in algs:
errors = {}
for i in [2,5,9,12,20,30,50,100,num_profiles-1]:
error = 0
for example in range(num_profiles):
grouped,_ = xtcav.Utils.averageXTCAVProfilesGroups(image_profiles_off[:example]+image_profiles_off[example+1:], i, method=alg)
processed_image_opt = xtcav.Utils.processLasingSingleShot(image_profiles_on[example], grouped)
power_opt = processed_image_opt.powerERMS[0]
power_opt = cv2.GaussianBlur(power_opt, (7,7), 0).flatten()
func = lambda x: getMin(time[example], power[example], processed_image_opt.t, power_opt, x)
test = []
for method in ['Powell', 'Nelder-Mead', 'BFGS']:
test.append(optimize.minimize(func, np.mean(time[example]) - np.mean(processed_image_opt.t)-10, method = method).fun)
assert(not math.isnan(np.nanmin(test)))
error += np.nanmin(test)/num_profiles
errors[i] = error
alg_errors[alg] = errors
%%capture
# ^above line suppresses output
# Error when random profile is chosen for comparison
baseline_error = 0
num_iters = 10
for i in range(num_iters):
for example in range(num_profiles):
comparison = np.random.randint(0,num_profiles-1)
grouped,_ = xtcav.Utils.averageXTCAVProfilesGroups([image_profiles_off[comparison]], 1)
processed_image_opt = xtcav.Utils.processLasingSingleShot(image_profiles_on[example], grouped)
power_opt = processed_image_opt.powerERMS[0]
power_opt = cv2.GaussianBlur(power_opt, (7,7), 0).flatten()
func = lambda x: getMin(time[example], power[example], processed_image_opt.t, power_opt, x)
test = []
for method in ['Powell', 'Nelder-Mead', 'BFGS']:
test.append(optimize.minimize(func, np.mean(time[example]) - np.mean(processed_image_opt.t)-10, method = method).fun)
baseline_error += np.nanmin(test)
baseline_error /= num_profiles
%%capture
# ^above line suppresses output
# Error when random profile is chosen for comparison
n_groups = {}
optimal_groups_error = {}
for alg in algs:
grouped,n_groups[alg] = xtcav.Utils.averageXTCAVProfilesGroups(image_profiles_off, method=alg)
error=0
for example in range(num_profiles):
grouped,_ = xtcav.Utils.averageXTCAVProfilesGroups(image_profiles_off[:example]+image_profiles_off[example+1:], n_groups[alg], method = alg)
processed_image_opt = xtcav.Utils.processLasingSingleShot(image_profiles_on[example], grouped)
power_opt = processed_image_opt.powerERMS[0]
power_opt = cv2.GaussianBlur(power_opt, (7,7), 0).flatten()
func = lambda x: getMin(time[example], power[example], processed_image_opt.t, power_opt, x)
test = []
for method in ['Powell', 'Nelder-Mead', 'BFGS']:
test.append(optimize.minimize(func, np.mean(time[example]) - np.mean(processed_image_opt.t)-10, method = method).fun)
error += np.nanmin(test)/ num_profiles
optimal_groups_error[alg] = error
labels = []
for alg, c in zip(algs, 'rgbcy'):
lists = sorted(alg_errors[alg].items()) # sorted by key, return a list of tuples
x, y = zip(*lists) # unpack a list of pairs into two tuples
labels.append(plt.plot(x, y, c, label=alg)[0])
plt.plot(n_groups[alg], optimal_groups_error[alg], c+'o',ms=10,label=alg)[0]
b, = plt.plot(x, [baseline_error]*len(x), 'k',label='baseline error')
lists = sorted(alg_errors['north_coast'].items()) # sorted by key, return a list of tuples
x, y = zip(*lists) # unpack a list of pairs into two tuples
labels.append(plt.plot(x, y, 'm', label='north coast')[0])
plt.ylabel("Average squared distance from actual power")
plt.xlabel("Number of clusters used")
plt.ylim(ymin=0, ymax=1100)
labels.append(b)
plt.title("Error vs. Number of Clusters used in Lasing Off References")
l = plt.legend(handles=labels, loc="lower right")
###Output
_____no_output_____
###Markdown
North coast method
###Code
def getPercentile(data, percentile=0.9):
a = np.cumsum(data, axis=0)
out = np.divide(a, np.sum(data, axis=0), out=np.zeros_like(a), where=np.sum(data, axis=0)!=0)
test = (out > 1-percentile).argmax(axis=0)
return test
def trimImg(x):
rows = np.any(x, axis=1)
cols = np.any(x, axis=0)
ymin, ymax = np.where(rows)[0][[0, -1]]
xmin, xmax = np.where(cols)[0][[0, -1]]
return x[ymin:ymax+1, xmin:xmax+1]
trimmed = np.array([trimImg(f) for f in images_off])
out = np.array([getPercentile(x) for x in trimmed])
arrlens = np.array([len(x) for x in out])
max_len = np.amax(arrlens)
maxes = [np.max(x) for x in out]
max_val = np.amax(maxes)
def padArray(x):
return np.pad(x, pad_width=((max_len - len(x))/2, int(np.ceil(float(max_len - len(x))/2)) ) , mode="constant", constant_values=max_val+1)
pad = [padArray(x) for x in out]
newarr = np.vstack(pad)
plt.plot(newarr.T, alpha=.3)
plt.axis('tight')
plt.axis('off')
plt.ylim(plt.ylim()[::-1])
model = AgglomerativeClustering(n_clusters=5, linkage="ward", affinity="euclidean")
rand_sample = newarr
model.fit(rand_sample)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbky'):
plt.plot(rand_sample[model.labels_ == l].T , c=c, alpha=.3)
plt.axis('tight')
plt.axis('off')
model = AgglomerativeClustering(n_clusters=10, linkage="ward", affinity="euclidean")
model.fit(newarr)
groups = model.labels_
###Output
_____no_output_____
###Markdown
Fingerprints We can take the Fourier transform of the electron current and use the first few coefficients to get a low-dimensional representation of the current. We can then use these values when comparing currents to find the most similar ones for matching lasing-off to lasing-on profiles.
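A minimal sketch of that idea as a reusable helper (illustrative only; it mirrors the cells below and assumes `profile` is a 1-D numpy array):
###Code
# Illustrative helper: low-dimensional Fourier "fingerprint" of a current profile.
import numpy as np

def fourier_fingerprint(profile, n_coeffs=9):
    z = (profile - np.mean(profile)) / np.std(profile)  # normalize the profile
    return np.fft.fft(z)[1:1 + n_coeffs]                 # drop the DC term, keep low frequencies
###Output
_____no_output_____
###Markdown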
###Code
profilesT = grouped.eCurrent[0]
np.std(profilesT[0:5], axis=1)
from sklearn.preprocessing import normalize
test = (profilesT.T - np.mean(profilesT, axis = 1))/np.std(profilesT, axis=1)
test = test.T
dft = np.fft.fft(test)
chosenCols = dft[:,1:10]
import time
t1 = time.time()
print np.corrcoef(chosenCols, chosenCols)[1, 1:5]
print time.time() - t1
t2 = time.time()
print np.corrcoef(profilesT, profilesT)[1, 1:5]
print time.time() - t2
ft = np.argsort(np.corrcoef(chosenCols, chosenCols)[1])[::-1]
norm = np.argsort(np.corrcoef(profilesT, profilesT)[1])[::-1]
print ft[0:10]
norm[0:10]
print np.linalg.norm(chosenCols[0] - chosenCols, axis = 1)
np.linalg.norm(profilesT[0] - profilesT[0:5], axis = 1)
###Output
[ 0. 30.64018396 35.48766929 27.01115034 18.13453174]
|
L10_2.ipynb | ###Markdown
###Code
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv('data7_2.csv')
T = data.Temp # input
Cp = data.Cp # output
print(Cp)
plt.plot(T,Cp,'g.')
plt.xlabel('Temperature [K]')
plt.ylabel('Heat Capacity [kJ/kg-mol.K]')
!pip install -U tensorflow-addons
# AI & ML
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Create ANN structure
model = keras.Sequential([
layers.Dense(10,activation='relu'),
layers.Dense(10,activation='relu'),
layers.Dense(20,activation='relu'),
layers.Dense(1,activation='relu')])  # note: a linear output activation is typical for regression; ReLU here can saturate at zero (see the all-zero predictions below)
# Train Model
model.compile(loss="mean_absolute_error",optimizer=keras.optimizers.Adam(learning_rate=0.01))
input = T
output = Cp
trn_Hist = model.fit(input,output,epochs=500)
# Train performance
plt.plot(trn_Hist.history['loss'],label='loss')
#training variables, weight
print(model.weights)
# Evaluate
# Evaluate Multi Model
Cp_hat = model.predict(T).flatten()
plt.plot(T,Cp,'g.') # data from experiment
plt.plot(T,Cp_hat,'r-') # data from ANN
plt.xlabel('Temperature [K]')
plt.ylabel('Heat Capacity [kJ/kg-mol.K]')
print(Cp_hat)
###Output
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.]
|
prediction/transfer learning fine-tuning/api generation/base_model.ipynb | ###Markdown
**Generate the API based on the description using the CodeTrans transfer-learning fine-tuned model** You can make free predictions online through this link (when predicting online, you need to parse and tokenize the code first). **1. Load necessary libraries, including Hugging Face transformers**
###Code
!pip install -q transformers sentencepiece
from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline
###Output
_____no_output_____
###Markdown
**2. Load the summarization pipeline and load it into the GPU if available**
###Code
pipeline = SummarizationPipeline(
model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_base_api_generation_transfer_learning_finetune"),
tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_base_api_generation_transfer_learning_finetune", skip_special_tokens=True),
device=0
)
###Output
/usr/local/lib/python3.6/dist-packages/transformers/models/auto/modeling_auto.py:852: FutureWarning: The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.
FutureWarning,
###Markdown
**3. Give the description for generating the API, then parse and tokenize it**
###Code
description = "parse the uses licence node of this package, if any, and returns the license definition if theres" #@param {type:"raw"}
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
def englishTokenizer(sentence):
result = []
tokens = word_tokenize(sentence)
for t in tokens:
if( not len(t)>50):
result.append(t)
return ' '.join(result)
tokenized_description = englishTokenizer(description)
print("tokenized description: " + tokenized_description)
###Output
tokenized description: parse the uses licence node of this package , if any , and returns the license definition if theres
###Markdown
**4. Make Prediction**
###Code
pipeline([tokenized_description])
###Output
Your max_length is set to 512, but you input_length is only 25. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)
|
Endpoint/Lateral Movement - Anomalous RDP Logon.ipynb | ###Markdown
Lateral Movement Threat Hunting - Anomalous RDP Logon A remote desktop logon, through RDP, may be typical of a system administrator or IT support, but only from select workstations. Monitoring remote desktop logons and comparing them to known/approved originating systems can detect lateral movement of an adversary. Use Case Objective Identify anomalous RDP logon events. An event may be considered anomalous if, for example, it occurred outside of normal hours (e.g. office hours) or if it originates from a suspicious IP address. Logs Requirements Microsoft Windows Security event logs (Event ID 4624, Logon Type 10) References MITRE: https://car.mitre.org/analytics/CAR-2016-04-005/ evtx samples: https://github.com/sbousseaden/EVTX-ATTACK-SAMPLES geoip2 database: https://dev.maxmind.com/geoip/geoip2/downloadable/
###Code
import evtx
import xml.etree.ElementTree as ET
import json
evtx_file = "logs/Lateral Movement/DE_RDP_Tunneling_4624.evtx"
parser = evtx.PyEvtxParser(evtx_file)
parse_json = list(parser.records_json())
Success_RDP = []
for pj in parse_json:
if "4624" in pj['data'] and "\"LogonType\": 10" in pj['data']:
Success_RDP+=[pj]
print(pj['data'])
###Output
{
"Event": {
"#attributes": {
"xmlns": "http://schemas.microsoft.com/win/2004/08/events/event"
},
"EventData": {
"AuthenticationPackageName": "Negotiate",
"IpAddress": "127.0.0.1",
"IpPort": "49164",
"KeyLength": 0,
"LmPackageName": "-",
"LogonGuid": "00000000-0000-0000-0000-000000000000",
"LogonProcessName": "User32 ",
"LogonType": 10,
"ProcessId": "0x658",
"ProcessName": "C:\\Windows\\System32\\winlogon.exe",
"SubjectDomainName": "EXAMPLE",
"SubjectLogonId": "0x3e7",
"SubjectUserName": "PC02$",
"SubjectUserSid": "S-1-5-18",
"TargetDomainName": "PC02",
"TargetLogonId": "0x45120",
"TargetUserName": "IEUser",
"TargetUserSid": "S-1-5-21-3583694148-1414552638-2922671848-1000",
"TransmittedServices": "-",
"WorkstationName": "PC02"
},
"System": {
"Channel": "Security",
"Computer": "PC02.example.corp",
"Correlation": null,
"EventID": 4624,
"EventRecordID": 5315,
"Execution": {
"#attributes": {
"ProcessID": 480,
"ThreadID": 3952
}
},
"Keywords": "0x8020000000000000",
"Level": 0,
"Opcode": 0,
"Provider": {
"#attributes": {
"Guid": "54849625-5478-4994-A5BA-3E3B0328C30D",
"Name": "Microsoft-Windows-Security-Auditing"
}
},
"Security": null,
"Task": 12544,
"TimeCreated": {
"#attributes": {
"SystemTime": "2019-02-13T15:26:53.356780Z"
}
},
"Version": 0
}
}
}
###Markdown
RDP outside office hour
###Code
from datetime import datetime
###Output
_____no_output_____
###Markdown
Defining office hour filter
###Code
sh = 8 #office start hour
sm = 0 #office start minute
eh = 15 #office end hour
em = 0 #office end minute
for SRDP in Success_RDP:
SRDP_time = datetime.strptime(SRDP['timestamp'], "%Y-%m-%d %H:%M:%S.%f %Z")
if ((SRDP_time < SRDP_time.replace(hour=sh, minute=sm)) or (SRDP_time > SRDP_time.replace(hour=eh, minute=em))):
print("Detected RDP logon at", SRDP['timestamp'])
# print(SRDP['data'])
###Output
Detected RDP logon at 2019-02-13 15:27:53.653483 UTC
###Markdown
RDP Login Source Outside of Indonesia As the infrastructure of the company resides in Indonesia, access should be made from Indonesia as well. The GeoIP2 database from MaxMind can be used to find the accessing IP's geolocation.
###Code
#import geoip library and read the geoip2 database
import geoip2.database
reader = geoip2.database.Reader('logs/Lateral Movement/GeoLocationDB/GeoLite2-Country.mmdb')
for SRDP in Success_RDP:
    try: #try clause in case no IP address is present in the record
SRDP_Ip = json.loads(SRDP['data'])['Event']['EventData']['IpAddress']
        try: #try clause in case there's no matching IP address in the database
response = reader.country(SRDP_Ip)
if response.country.iso_code != 'ID':
print("IP =", SRDP_Ip, response.country.name)
except geoip2.errors.AddressNotFoundError:
print("The address 127.0.0.1 is not in the database.")
except:
print("No IP address detected")
###Output
The address 127.0.0.1 is not in the database.
|
notebooks/Detection/3_Monitor_Training.ipynb | ###Markdown
Monitor Training If you don't want to use TensorBoard, this notebook is a good alternative. You can execute it during training or after.
###Code
%matplotlib inline
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys; sys.path.append("../")
import maskflow
root_dir = Path("/home/hadim/.data/Neural_Network/Maskflow/Microtubule")
data_dir = root_dir / "Data"
model_dir = root_dir / "Models"
model_dir.mkdir(exist_ok=True)
# Import the configuration associated with this dataset and network.
config = maskflow.config.load_config(root_dir / "config.yaml")
# Select the model
model_name = '2018.11.20-12:15:32'
model_path = model_dir / model_name
metrics_path = model_path / "training_metrics.csv"
data = pd.read_csv(metrics_path).iloc[:, 2:]
metrics = ['loss', 'lr', 'memory', 'loss_box_reg', 'loss_classifier', 'loss_mask', 'loss_objectness', 'loss_rpn_box_reg']
smooth_metrics = ['loss', 'loss_box_reg', 'loss_classifier', 'loss_mask', 'loss_objectness', 'loss_rpn_box_reg']
# Smooth the data
smooth_factor = 200
data[smooth_metrics] = data[smooth_metrics].rolling(smooth_factor).mean()
base_fig_size = 14
n_plots = len(metrics)
ncols = 3
nrows = n_plots // ncols
nrows += 1 if (ncols * nrows) < n_plots else 0
w = base_fig_size
h = base_fig_size * (nrows/ncols) * 0.5
first_iteration = 0
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(w, h), constrained_layout=True)
for ax, metric in zip(axs.flat, metrics):
data_mask = data['iteration'] > first_iteration
ax.plot(data[data_mask]['iteration'], data[data_mask][metric])
ax.set_xlabel('Iteration')
ax.set_title(metric, fontsize=14)
fig.savefig(metrics_path.with_suffix('.png'))
###Output
_____no_output_____ |
python_linear_regression_algorithm.ipynb | ###Markdown
Linear Regression Algorithm
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Headbrain Dataset: https://www.kaggle.com/jemishdonda/headbrain
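The next cell computes the closed-form least-squares estimates $$m = \frac{\sum_{i}(x_i-\bar{x})(y_i-\bar{y})}{\sum_{i}(x_i-\bar{x})^2}, \qquad c = \bar{y} - m\,\bar{x},$$ where $\bar{x}$ and $\bar{y}$ are the sample means of head size and brain weight.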
###Code
plt.rcParams['figure.figsize'] = (20.0, 10.0)
# Reading data
data = pd.read_csv('headbrain.csv')
print(data.shape)
data.head()
# Collecting X and Y
X = data['Head Size(cm^3)'].values
Y = data['Brain Weight(grams)'].values
# Mean X and Y
mean_x = np.mean(X)
mean_y = np.mean(Y)
#Total number of values
n = len(X)
#Using the least-squares formulas to calculate the slope m and intercept c
numerator = 0
denominator = 0
for i in range(n):
    numerator += (X[i]-mean_x)*(Y[i]-mean_y)
    denominator += (X[i]-mean_x)**2
m = numerator/denominator
c = mean_y - (m * mean_x)
#Print coefficients
print(m,c)
# Plotting Values and Regression Line
max_x = np.max(X)+100
min_x = np.min(X)-100
#Calculating line values x and y
x = np.linspace(min_x,max_x,1000)
y = c + m * x
# Plotting Line
plt.plot(x,y,color='r',label='Regression Line')
# Plotting Scatter Points
plt.scatter(X,Y,color='b',label='Scatter Plot')
plt.xlabel('Head Size in cm3')
plt.ylabel('Brain Weight in grams')
plt.legend()
plt.show()
# Calculating R-squared value
ss_t = 0
ss_r = 0
for i in range(n):
y_pred = c + m *X[i]
ss_t += (Y[i]-mean_y)**2
ss_r += (Y[i]-y_pred)**2
r2 =1 - (ss_r/ss_t)
print(r2)
###Output
0.6393117199570003
###Markdown
Calculating R^2 using Scikit Learn
###Code
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
#Cannot use Rank 1 matrix in scikit learn
X = X.reshape((n,1))
#Creating Model
reg = LinearRegression()
# Fitting training data
reg = reg.fit(X,Y)
# Y prediction
Y_pred = reg.predict(X)
# Calculating R2 score
r2_score = reg.score(X,Y)
print(r2_score)
###Output
0.639311719957
|
tools/benchmarks/python/rbm_demo.ipynb | ###Markdown
Load training and validation data:
###Code
train_set = np.loadtxt('../c++/training_data.txt')
target_psi = np.loadtxt('../c++/target_psi.txt')
train_set.shape
###Output
_____no_output_____
###Markdown
Initialize the RBM with the same number of hidden and visible units:
###Code
rbm = RBM(num_visible=train_set.shape[-1],
num_hidden=train_set.shape[-1],
seed=1234)
###Output
_____no_output_____
###Markdown
We'll train the RBM for 200 epochs with a batch size of 25. During training we'll record the overlap and average negative log likelihood of the model against the validation data every 10 epochs. For reference, the formula for the overlap is as follows:$$\sum_{\vec{v}} \psi\left(\vec{v}\right)\sqrt{p\left(\vec{v}\right)}$$This is essentially an estimate of the sum over the entire probability mass function defined by $\psi^2$. Thus, we want it to be close to $1$. First, we'll take a look at the RBM.train method's documentation:
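As a rough illustration (not part of the RBM class used here), the overlap could be estimated like this, assuming `target_psi` and a vector of model probabilities are indexed over the same enumeration of visible states:
###Code
# Illustrative sketch only: overlap between the target wavefunction and the model distribution.
import numpy as np

def overlap(target_psi, model_probs):
    # sum over v of psi(v) * sqrt(p(v)), with an explicit enumeration of visible states
    return np.dot(target_psi, np.sqrt(model_probs))
###Output
_____no_output_____
###Markdown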
###Code
help(rbm.train)
log_every = 10 # log and record validation metrics every 10 epochs
nll_list, overlap_list = rbm.train(train_set, target_psi,
epochs=200,
batch_size=25,
k=3,
lr=(lambda ep: 0.1 if ep < 100 else 1e-4),
l1_reg=0,
l2_reg=0,
method="sgd",
log_every=log_every,
progbar=False)
###Output
Epoch = 0; NLL per training example = 7.46581103; Overlap = 0.49697722
Epoch = 10; NLL per training example = 4.59784933; Overlap = 0.98055360
Epoch = 20; NLL per training example = 4.55782278; Overlap = 0.98951308
Epoch = 30; NLL per training example = 4.56248325; Overlap = 0.98815823
Epoch = 40; NLL per training example = 4.60590591; Overlap = 0.97869122
Epoch = 50; NLL per training example = 4.56020164; Overlap = 0.98902553
Epoch = 60; NLL per training example = 4.55404177; Overlap = 0.98922637
Epoch = 70; NLL per training example = 4.63949484; Overlap = 0.97081105
Epoch = 80; NLL per training example = 4.54972841; Overlap = 0.98983613
Epoch = 90; NLL per training example = 4.56783523; Overlap = 0.98558907
Epoch = 100; NLL per training example = 4.52903160; Overlap = 0.99592382
Epoch = 110; NLL per training example = 4.52660564; Overlap = 0.99648050
Epoch = 120; NLL per training example = 4.52494968; Overlap = 0.99684567
Epoch = 130; NLL per training example = 4.52377650; Overlap = 0.99710216
Epoch = 140; NLL per training example = 4.52290613; Overlap = 0.99728517
Epoch = 150; NLL per training example = 4.52202179; Overlap = 0.99747479
Epoch = 160; NLL per training example = 4.52167257; Overlap = 0.99755005
Epoch = 170; NLL per training example = 4.52130245; Overlap = 0.99762808
Epoch = 180; NLL per training example = 4.52089956; Overlap = 0.99771590
Epoch = 190; NLL per training example = 4.52054888; Overlap = 0.99778987
Epoch = 200; NLL per training example = 4.52002527; Overlap = 0.99790362
###Markdown
Let's see how the average negative log likelihood and the overlap changed during training:
###Code
fig, ax1 = plt.subplots(figsize=(10, 10))
ax1.plot(log_every * np.arange(len(nll_list)), np.array(nll_list) / len(train_set), 'b')
ax1.set_xlabel("Epoch")
ax1.set_ylabel("NLL per training example", color='b')
ax1.tick_params('y', colors='b')
ax1.set_xlim(0, log_every * (len(nll_list)-1))
ax1.axvline(x=100, linestyle=':', color='g') # plot when the learning rate switches
ax2 = ax1.twinx()
ax2.plot(log_every * np.arange(len(overlap_list)), overlap_list, 'r')
ax2.set_ylabel('Overlap', color='r')
ax2.tick_params('y', colors='r')
ax2.axhline(y=1, xmin=0, xmax=len(overlap_list), linestyle=':', color='r') # maximum overlap
plt.show()
###Output
_____no_output_____
###Markdown
We can also save the model for later use. Note that the model's internal random state is saved along with the weights and biases, so we can expect the saved RBM to produce the exact same samples as the RBM currently in use.
###Code
rbm.save("./demo_model.npz")
###Output
_____no_output_____
###Markdown
We load the model like so:
###Code
new_rbm = RBM.load("./demo_model.npz")
###Output
_____no_output_____
###Markdown
Sample both the original and the loaded RBM:
###Code
sample1 = rbm.sample(k=100, n_samples=10)
sample2 = new_rbm.sample(k=100, n_samples=10)
sample1, sample2
np.array_equal(sample1, sample2)
###Output
_____no_output_____ |
hugging-face/3. Behind the pipeline.ipynb | ###Markdown
 Preprocessing (Tokenizer)
###Code
from transformers import AutoTokenizer
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
raw_inputs = [
"I've been waiting for a HuggingFace course my whole life.",
"I hate this so much!",
]
inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors='pt')
inputs
###Output
_____no_output_____
###Markdown
The output itself is a dictionary containing two keys, input_ids and attention_mask. input_ids contains two rows of integers (one for each sentence) that are the unique identifiers of the tokens in each sentence. Model
###Code
from transformers import AutoModel
checkpoint_model = "distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModel.from_pretrained(checkpoint_model)
###Output
Some weights of the model checkpoint at distilbert-base-uncased-finetuned-sst-2-english were not used when initializing DistilBertModel: ['classifier.bias', 'classifier.weight', 'pre_classifier.weight', 'pre_classifier.bias']
- This IS expected if you are initializing DistilBertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing DistilBertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
###Markdown
In this code snippet, we have downloaded the same checkpoint we used in our pipeline before (it should actually have been cached already) and instantiated a model with it.This architecture contains only the base Transformer module: given some inputs, it outputs what we’ll call hidden states, also known as features. For each model input, we’ll retrieve a high-dimensional vector representing the contextual understanding of that input by the Transformer model.While these hidden states can be useful on their own, they’re usually inputs to another part of the model, known as the head.
###Code
outputs = model(**inputs)
outputs.last_hidden_state.shape
###Output
_____no_output_____
###Markdown
Model heads: Making sense out of numbersThe model heads take the high-dimensional vector of hidden states as input and project them onto a different dimension. They are usually composed of one or a few linear layers:A Transformer network alongside its head.The output of the Transformer model is sent directly to the model head to be processed.The model is represented by its embeddings layer and the subsequent layers. The embeddings layer converts each input ID in the tokenized input into a vector that represents the associated token. The subsequent layers manipulate those vectors using the attention mechanism to produce the final representation of the sentences.There are many different architectures available in 🤗 Transformers, with each one designed around tackling a specific task. Here is a non-exhaustive list:- *Model (retrieve the hidden states)- *ForCausalLM- *ForMaskedLM- *ForMultipleChoice- *ForQuestionAnswering- *ForSequenceClassification- *ForTokenClassification- and others 🤗 We will need a model with a sequence classification head (to be able to classify the sentences as positive or negative). So, we won’t actually use the **AutoModel** class, but **AutoModelForSequenceClassification**
###Code
from transformers import AutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
outputs = model(**inputs)
outputs.logits.shape
outputs.logits
import torch
predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
print(predictions)
model.config.id2label
###Output
_____no_output_____ |
nbs/run-multiple-nbs.ipynb | ###Markdown
!pip install fire
###Code
!python run_notebook.py Dev0727_5.ipynb
!python run_notebook.py Dev0727_4.ipynb
###Output
_____no_output_____ |
blogNotebooks/LinearRegression_post2/2021-12-12-LinearRegression.ipynb | ###Markdown
---layout: posttitle: Basics of Linear Regression with Scikit-Learnpublished: true---**Author: Kamile Yagci** In this blog, I go over the basics of Linear Regression model using Scikit-Learn library.Content:* Data* Theory of Linear Regression* Data Splitting into Training and Testing subsets (train_test_split)* Linear Regression Fit and Model Validation (LinearRegression) * Baseline Linear Regression - one predictor * Multiple Linear Regression - Multiple predictor* Cross Validation (cross_val_score, cross_validate, ShuffleSplit)
###Code
#Import Libraries
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import numpy as np
###Output
_____no_output_____
###Markdown
Explore Data I will use the King County House Sales data. My goal is to predict the House Sale price by using Linear Regression. At first, I will take a brief look at the data.
###Code
#Load data file, get general info, and check for missing values
df = pd.read_csv('data/kc_house_data.csv')
df.info()
# List null values
df.isnull().sum()
# A bit of data cleaning
df['waterfront'].fillna(0., inplace=True)
# Correlation heatmap
plt.figure(figsize=(12,10))
sns.heatmap(df.corr(), center=0);
###Output
_____no_output_____
###Markdown
In this study, my target variable is 'price'. The rest of the variables are 'predictors'. We can use one or multiple predictors in modeling. The correlation heatmap visualizes the correlation between the variables and helps us to choose good predictors. Based on the above heatmap, there is a good correlation between 'price' and 'sqft_living'. Theory of Linear Regression The linear equation with one independent variable is y = mx + b where * x is the independent variable, * y is the dependent variable, * m is the slope and * b is the y-intercept. In our model: * the dependent variable is the target variable 'price' (y) * the independent variable is the predictor such as 'sqft_living' (X) When a linear fit is applied to the data that consists of X and y, it determines the line of best fit. The slope and y-intercept of this line are the parameters of the model. In multiple linear regression, we use more than one predictor: y = b + m1x1 + m2x2 + m3x3 + ..... where X = [x1, x2, x3, ...] Splitting Data into Training and Testing subsets (train_test_split) After modeling the data, we need to test it. However, we cannot use the same set of data for both modeling and testing. There is a possibility that the data is overfitted. An overfitted model will work well for the dataset on which the model was created, but it will not give good results with another set of data. Therefore, we split our data into two subsets: training and testing. We use the training data for modeling and the testing data for comparing the predictions with the actual data. The scikit-Learn library has a function for splitting the data randomly: sklearn.model_selection.train_test_split(*arrays, test_size=None, train_size=None, random_state=None, shuffle=True, stratify=None) * Input: X and y * Output: Lists of X_train, X_test, y_train, y_test The default test_size = 0.25, so the train_size is 0.75. It is recommended to allocate at least half of the data for training.
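For reference, with several predictors the model can be written in matrix form as $\hat{y} = X\beta$, and the least-squares solution that LinearRegression estimates is the standard $$\hat{\beta} = (X^{T}X)^{-1}X^{T}y$$ (shown here only as background; scikit-learn solves it numerically rather than by forming this inverse).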
###Code
# Import train_test_split
from sklearn.model_selection import train_test_split
# Assign X and y
y = df['price'] # Dataseries for Target, dependent variable
X = df.drop('price', axis=1) # Dataframe for Predictors, independent variables
# Seperate data into training and testing splits
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print(f"X_train is a DataFrame with {X_train.shape[0]} rows and {X_train.shape[1]} columns")
print(f"y_train is a Series with {y_train.shape[0]} values")
###Output
X_train is a DataFrame with 16197 rows and 20 columns
y_train is a Series with 16197 values
###Markdown
Linear Regression Fit and Model Validation (LinearRegression) The scikit-Learn class for linear regression: class sklearn.linear_model.LinearRegression(*, fit_intercept=True, normalize='deprecated', copy_X=True, n_jobs=None, positive=False) * Input: X and y * Output: Linear regression model * Attributes: * coef_: coefficient / slope of the line-of-best-fit (for each predictor) * intercept_: y-intercept of the line-of-best-fit * Methods: * fit(X, y): fit linear model * predict(X): predict y using the model * score(X, y): return R squared. P.S. Not all attributes and methods of LinearRegression() are listed here.
###Code
#Import LinearRegression
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
Baseline Linear Regression - one predictorLet's start modeling data with one predictor. * X: 'sqft_living' * y: 'price'We will only use training data for modeling.
###Code
# Apply LinearRegression on Training Data
baseline_model = LinearRegression()
X_train_sqft = X_train[['sqft_living']] # Dataframe with predictor columns, training data
regline = baseline_model.fit(X_train_sqft, y_train) # fitted model
print("Slope:", regline.coef_) # print cefficient/slope
print("y-intercept:", regline.intercept_) # print y-intercept
print("R squared for Training:", regline.score(X_train_sqft, y_train)) # print R squared
###Output
Slope: [285.58593563]
y-intercept: -53321.493253810564
R squared for Training: 0.4951005996564265
###Markdown
The above code fitted the data, created the model and printed out the results. The slope and y-intercept values describe the line-of-best-fit. The R squared score is the coefficient of determination. It tells us how good the Linear Regression fit is. On the training data, R squared ranges from 0 to 1, and a higher score means a better fit. **Model Validation** Our model is based on the Training data. How will it perform on the Testing data? We will answer this question by applying our model to the test data. It will calculate the R squared score, i.e. how close our line-of-best-fit is to the testing data.
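For reference, the R squared score reported by score() is the standard $$R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2},$$ the fraction of the variance in the target that the model explains.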
###Code
# Model validation
X_test_sqft = X_test[['sqft_living']] # Dataframe with predictor column, testing data
print("R squared for Training:", regline.score(X_train_sqft, y_train))
print("R squared for Testing:", regline.score(X_test_sqft, y_test))
###Output
R squared for Training: 0.4951005996564265
R squared for Testing: 0.48322207729033984
###Markdown
The R squared scores for the training and testing data are close. We can conclude that the model works well on the testing data. Let's do some visualization to see how good our Linear Regression model (the line-of-best-fit) is.
###Code
# Visualization
y_train_hat = baseline_model.predict(X_train_sqft) # model predictions for target, training data
y_test_hat = baseline_model.predict(X_test_sqft) # model predictions for target, testing data
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5), constrained_layout=True)
ax1.scatter(X_train_sqft, y_train, label='Training Data', alpha=0.2)
ax1.plot(X_train_sqft, y_train_hat, color='red', label='Model Prediction')
ax1.set_xlabel("Footage of house (sqft)")
ax1.set_ylabel("House Sale Price (USD)")
ax1.set_title("Training Data")
ax1.legend()
ax2.scatter(X_test_sqft, y_test, label='Testing Data', alpha=0.2)
ax2.plot(X_test_sqft, y_test_hat, color='red', label='Model Prediction')
ax2.set_xlabel("Footage of house (sqft)")
ax2.set_ylabel("House Sale Price (USD)")
ax2.set_title("Testing Data")
ax2.legend()
###Output
_____no_output_____
###Markdown
Multiple Linear Regression - Multiple predictors Instead of using one predictor, we can use multiple predictors to model our data. We call this Multiple Linear Regression. * X: ['sqft_living', 'waterfront', 'yr_built'] * y: 'price'
###Code
multi_model = LinearRegression()
pred = ['sqft_living', 'waterfront', 'yr_built'] # Define predictors
X_train_multi = X_train[pred]
X_test_multi = X_test[pred]
regline = multi_model.fit(X_train[pred], y_train)
print('Final model predictors:', pred)
print("Slope:", regline.coef_) # print coefficients for each predictor
print("y-intercept:", regline.intercept_) # print y-intercept
print("R squared for Training:", regline.score(X_train_multi, y_train))
print("R squared for Testing:", regline.score(X_test_multi, y_test))
###Output
Final model predictors: ['sqft_living', 'waterfront', 'yr_built']
Slope: [ 3.00787470e+02 7.87787121e+05 -2.21888595e+03]
y-intercept: 4282363.438148377
R squared for Training: 0.5568582427002644
R squared for Testing: 0.5585461779831704
###Markdown
With the three predictors, R squared score increased. That means that our model is improved.The validation is also very good, improved. R squared scores for training and testing are very close. Cross Validation (cross_val_score, cross_validate, ShuffleSplit)When we use Scikit-Learn train_test_split function, it splits the data randomly one time only. If we run the function again, it will create a different set of splits and different result in modeling. How can we decrease the random error in our model?**cross_val_score:**The Scikit-Learn has a function for cross validation: 'cross_val_score'. The function splits the data according to the requested cross-validation splitting strategy.If function uses K fold; it splits data into n-folds, choose one fold as testing data, and use the rest of the data for training. It loops over n times. In each loop, it picks a testing fold orderly, model and calculate score. The function returns to an array of scores. sklearn.model_selection.cross_val_score(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', error_score=nan) * input: estimator/model, X, y * output: an array scores for testing data * cv: cross-validation splitting strategy: number of K-folds or CV splitter (=5 by default) * scoring: 'r2', 'neg_mean_squared_error', ... (default is estimator’s default scorer)
###Code
# Import cross_val_score
from sklearn.model_selection import cross_val_score
X_pred=X[['sqft_living']]
y=y
cv_results = cross_val_score(baseline_model, X_pred, y, cv=10, scoring='r2') # 10 fold
print('Mean R squared for Testing:', cv_results.mean())
###Output
Mean R squared for Testing: 0.489269865991899
###Markdown
**cross_validate** The cross_validate function is very similar to cross_val_score, but it returns multiple scores, not just test_score. sklearn.model_selection.cross_validate(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', return_train_score=False, return_estimator=False, error_score=nan) * input: estimator/model, X, y * output: arrays of scores for fit_time, score_time, test_score, train_score (if True) * cv: cross-validation splitting strategy: number of K-folds or CV splitter (=5 by default) * scoring: 'r2', 'neg_mean_squared_error', ... (default is estimator's default scorer) * return_train_score (default is False)
###Code
# Import cross_validate
from sklearn.model_selection import cross_validate
scores = cross_validate(baseline_model, X_pred, y, return_train_score=True, cv=10, scoring='r2')
print("Mean R squared for Training:", scores["train_score"].mean())
print("Mean R squared for Testing:", scores["test_score"].mean())
###Output
Mean R squared for Training: 0.4926920555363975
Mean R squared for Testing: 0.489269865991899
###Markdown
**ShuffleSplit** This is a Scikit-Learn splitting class. It splits the data randomly a requested number of times. ShuffleSplit is different from K-fold. * In K-Fold, the test data size depends on the number of folds, and it picks the test fold in an orderly manner in each split. * In ShuffleSplit, the test data size is constant; it is picked randomly in each split. class sklearn.model_selection.ShuffleSplit(n_splits=10, *, test_size=None, train_size=None, random_state=None) * n_splits: Number of splits (=10 by default) * test_size * train_size We use ShuffleSplit together with the cross validation functions.
###Code
from sklearn.model_selection import cross_validate, ShuffleSplit
splitter = ShuffleSplit(n_splits=10, test_size=0.25, random_state=0)
baseline_scores = cross_validate(
estimator=baseline_model,
X=X[['sqft_living']],
y=y,
return_train_score=True,
cv=splitter
)
print("Mean R squared for Training:", baseline_scores["train_score"].mean())
print("Mean R squared for Testing:", baseline_scores["test_score"].mean())
###Output
Mean R squared for Training: 0.49093640472300526
Mean R squared for Testing: 0.4960198399390211
|
Week 4/Introduction to Pandas.ipynb | ###Markdown
Pandas *pandas* is a Python library for data analysis. It offers a number of data exploration, cleaning and transformation operations that are critical in working with data in Python. *pandas* builds upon *numpy* and *scipy*, providing easy-to-use data structures and data manipulation functions with integrated indexing. The main data structures *pandas* provides are *Series* and *DataFrames*. After a brief introduction to these two data structures and data ingestion, the key features of *pandas* this notebook covers are: * Generating descriptive statistics on data * Data cleaning using built-in pandas functions * Frequent data operations for subsetting, filtering, insertion, deletion and aggregation of data * Merging multiple datasets using dataframes * Working with timestamps and time-series data **Additional Recommended Resources:** * *pandas* Documentation: http://pandas.pydata.org/pandas-docs/stable/ * *Python for Data Analysis* by Wes McKinney * *Python Data Science Handbook* by Jake VanderPlas Let's get started with our first *pandas* notebook! Import Libraries
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Introduction to pandas Data Structures *pandas* has two main data structures it uses, namely, *Series* and *DataFrames*. pandas Series A *pandas Series* is a one-dimensional labeled array.
###Code
ser = pd.Series(data = [100, 'foo', 300, 'bar', 500], index = ['tom', 'bob', 'nancy', 'dan', 'eric'])
ser
ser.index
ser['nancy']
ser.loc[['nancy','bob']]
ser[[4, 3, 1]]
ser.iloc[2]
'bob' in ser
ser
ser * 2
ser[['nancy', 'eric']] ** 2
###Output
_____no_output_____
###Markdown
pandas DataFrame*pandas DataFrame* is a 2-dimensional labeled data structure. Create DataFrame from dictionary of Python Series
###Code
d = {'one' : pd.Series([100., 200., 300.], index=['apple', 'ball', 'clock']),
'two' : pd.Series([111., 222., 333., 4444.], index=['apple', 'ball', 'cerill', 'dancy'])}
df = pd.DataFrame(d)
#print(df)
df
df.index
df.columns
pd.DataFrame(d, index=['dancy', 'ball', 'apple'])
pd.DataFrame(d, index=['dancy', 'ball', 'apple'], columns=['two', 'five'])
###Output
_____no_output_____
###Markdown
Create DataFrame from list of Python dictionaries
###Code
data = [{'alex': 1, 'joe': 2}, {'ema': 5, 'dora': 10, 'alice': 20}]
pd.DataFrame(data)
pd.DataFrame(data, index=['orange', 'red'])
pd.DataFrame(data, columns=['joe', 'dora','alice'])
###Output
_____no_output_____
###Markdown
Basic DataFrame operations
###Code
df
df['one']
df['three'] = df['one'] * df['two']
df
df['flag'] = df['one'] > 250
df
three = df.pop('three')
three
df
del df['two']
df
df.insert(1, 'copy_of_flag', df['flag'])
df
df['one_upper_half'] = df['one'][:2]
df
###Output
_____no_output_____
###Markdown
Case Study: Movie Data AnalysisThis notebook uses a dataset from the MovieLens website. We will describe the dataset further as we explore it using *pandas*. Download the DatasetPlease note that **you will need to download the dataset**. Although the video for this notebook says that the data is in your folder, the folder turned out to be too large to fit on the edX platform due to size constraints.Here are the links to the data source and location:* **Data Source:** MovieLens web site (filename: ml-20m.zip)* **Location:** https://grouplens.org/datasets/movielens/Once the download completes, please make sure the data files are in a directory called *movielens* in your *Week-3-pandas* folder. Let us look at the files in this dataset using the UNIX command ls.
###Code
# Note: Adjust the name of the folder to match your local directory
!ls ./movielens
!cat ./movielens/movies.csv | wc -l
!head -5 ./movielens/movies.csv
###Output
movieId,title,genres
1,Toy Story (1995),Adventure|Animation|Children|Comedy|Fantasy
2,Jumanji (1995),Adventure|Children|Fantasy
3,Grumpier Old Men (1995),Comedy|Romance
4,Waiting to Exhale (1995),Comedy|Drama|Romance
###Markdown
Use Pandas to Read the DatasetIn this notebook, we will be using three CSV files:* **ratings.csv :** *userId*,*movieId*,*rating*, *timestamp** **tags.csv :** *userId*,*movieId*, *tag*, *timestamp** **movies.csv :** *movieId*, *title*, *genres* Using the *read_csv* function in pandas, we will ingest these three files.
###Code
movies = pd.read_csv('./movielens/movies.csv', sep=',')
print(type(movies))
movies.head(15)
# Timestamps represent seconds since midnight Coordinated Universal Time (UTC) of January 1, 1970
tags = pd.read_csv('./movielens/tags.csv', sep=',')
tags.head()
ratings = pd.read_csv('./movielens/ratings.csv', sep=',', parse_dates=['timestamp'])
ratings.head()
# For current analysis, we will remove timestamp (we will come back to it!)
del ratings['timestamp']
del tags['timestamp']
###Output
_____no_output_____
###Markdown
Data Structures Series
###Code
#Extract 0th row: notice that it is infact a Series
row_0 = tags.iloc[0]
type(row_0)
print(row_0)
row_0.index
row_0['userId']
'rating' in row_0
row_0.name
row_0 = row_0.rename('first_row')
row_0.name
###Output
_____no_output_____
###Markdown
DataFrames
###Code
tags.head()
tags.index
tags.columns
# Extract row 0, 11, 2000 from DataFrame
tags.iloc[ [0,11,2000] ]
###Output
_____no_output_____
###Markdown
Descriptive StatisticsLet's look at how the ratings are distributed!
###Code
ratings['rating'].describe()
ratings.describe()
ratings['rating'].mean()
ratings.mean()
ratings['rating'].min()
ratings['rating'].max()
ratings['rating'].std()
ratings['rating'].mode()
ratings.corr()
filter_1 = ratings['rating'] > 5
print(filter_1)
filter_1.any()
filter_2 = ratings['rating'] > 0
filter_2.all()
###Output
_____no_output_____
###Markdown
Data Cleaning: Handling Missing Data
###Code
movies.shape
#is any row NULL ?
movies.isnull().any()
###Output
_____no_output_____
###Markdown
That's nice! No NULL values!
###Code
ratings.shape
#is any row NULL ?
ratings.isnull().any()
###Output
_____no_output_____
###Markdown
That's nice! No NULL values!
###Code
tags.shape
#is any row NULL ?
tags.isnull().any()
###Output
_____no_output_____
###Markdown
We have some tags which are NULL.
###Code
tags = tags.dropna()
#Check again: is any row NULL ?
tags.isnull().any()
tags.shape
###Output
_____no_output_____
###Markdown
That's nice! No NULL values! Notice that the number of rows has been reduced. Data Visualization
###Code
%matplotlib inline
ratings.hist(column='rating', figsize=(15,10))
ratings.boxplot(column='rating', figsize=(15,20))
###Output
_____no_output_____
###Markdown
Slicing Out Columns
###Code
tags['tag'].head()
movies[['title','genres']].head()
#get rows from 1000 to 1010
ratings[1000:1010]
#get 10 rows from the end
ratings[-10:]
tag_counts = tags['tag'].value_counts()
tag_counts[-10:]
tag_counts[:10].plot(kind='bar', figsize=(15,10))
###Output
_____no_output_____
###Markdown
Filters for Selecting Rows
###Code
is_highly_rated = ratings['rating'] >= 4.0
ratings[is_highly_rated][30:50]
is_animation = movies['genres'].str.contains('Animation')
movies[is_animation][5:15]
movies[is_animation].head(15)
###Output
_____no_output_____
###Markdown
Group By and Aggregate
###Code
ratings_count = ratings[['movieId','rating']].groupby('rating').count()
ratings_count
average_rating = ratings[['movieId','rating']].groupby('movieId').mean()
average_rating.head()
movie_count = ratings[['movieId','rating']].groupby('movieId').count()
movie_count.head()
movie_count = ratings[['movieId','rating']].groupby('movieId').count()
movie_count.tail()
###Output
_____no_output_____
###Markdown
Merge Dataframes
###Code
tags.head()
movies.head()
t = movies.merge(tags, on='movieId', how='inner')
t.head()
###Output
_____no_output_____
###Markdown
More examples: http://pandas.pydata.org/pandas-docs/stable/merging.html Combine aggregation, merging, and filters to get useful analytics
###Code
#avg_ratings = ratings.groupby('movieId', as_index=True).mean()
avg_ratings = ratings.groupby('movieId', as_index=False).mean()
del avg_ratings['userId']
avg_ratings.head()
box_office = movies.merge(avg_ratings, on='movieId', how='inner')
box_office.tail()
is_highly_rated = box_office['rating'] >= 4.0
box_office[is_highly_rated][-5:]
is_comedy = box_office['genres'].str.contains('Comedy')
box_office[is_comedy][:5]
box_office[is_comedy & is_highly_rated][-5:]
###Output
_____no_output_____
###Markdown
Vectorized String Operations
###Code
movies.head()
###Output
_____no_output_____
###Markdown
Split 'genres' into multiple columns
###Code
movie_genres = movies['genres'].str.split ('|', expand=True)
movie_genres[:10]
###Output
_____no_output_____
###Markdown
Add a new column for comedy genre flag
###Code
movie_genres['isComedy'] = movies['genres'].str.contains('Comedy')
movie_genres[:10]
###Output
_____no_output_____
###Markdown
Extract year from title e.g. (1995)
###Code
movies['year'] = movies['title'].str.extract('.*\((.*)\).*', expand=True)
movies.tail()
###Output
_____no_output_____
###Markdown
More here: http://pandas.pydata.org/pandas-docs/stable/text.htmltext-string-methods Parsing Timestamps Timestamps are common in sensor data or other time series datasets.Let us revisit the *tags.csv* dataset and read the timestamps!
###Code
tags = pd.read_csv('./movielens/tags.csv', sep=',')
tags.dtypes
###Output
_____no_output_____
###Markdown
Unix time / POSIX time / epoch time records time in seconds since midnight Coordinated Universal Time (UTC) of January 1, 1970
###Code
tags.head(5)
tags['parsed_time'] = pd.to_datetime(tags['timestamp'], unit='s')
###Output
_____no_output_____
###Markdown
Data Type datetime64[ns] maps to either <M8[ns] or >M8[ns] depending on the hardware
###Code
tags['parsed_time'].dtype
tags.head(2)
###Output
_____no_output_____
###Markdown
Selecting rows based on timestamps
###Code
greater_than_t = tags['parsed_time'] > '2015-02-01'
selected_rows = tags[greater_than_t]
tags.shape, selected_rows.shape
###Output
_____no_output_____
###Markdown
Sorting the table using the timestamps
###Code
tags.sort_values(by='parsed_time', ascending=True)[:10]
###Output
_____no_output_____
###Markdown
Average Movie Ratings over Time Are Movie ratings related to the year of launch?
###Code
average_rating = ratings[['movieId','rating']].groupby('movieId', as_index=False).mean()
average_rating.tail()
joined = movies.merge(average_rating, on='movieId', how='inner')
joined.head()
joined.corr()
yearly_average = joined[['year','rating']].groupby('year', as_index=False).mean()
yearly_average[:10]
yearly_average[-20:].plot(x='year', y='rating', figsize=(15,10), grid=True)
###Output
_____no_output_____ |
Basic_function_with_surprise.ipynb | ###Markdown
Import Libraries
###Code
import pandas as pd
from surprise import Dataset
from surprise import Reader
from surprise import accuracy
from datetime import datetime
###Output
_____no_output_____
###Markdown
Load File
###Code
data_cols = ['user id','movie id','rating','timestamp']
data_train = pd.read_csv('.\\ml-100k\\ua.base', sep='\t',names=data_cols, encoding='latin-1')
data_test = pd.read_csv('.\\ml-100k\\ua.test', sep='\t',names=data_cols, encoding='latin-1')
###Output
_____no_output_____
###Markdown
Handle Timestamp
###Code
data_train['timestamp'] = data_train['timestamp'].apply(datetime.fromtimestamp)
data_test['timestamp'] = data_test['timestamp'].apply(datetime.fromtimestamp)
data_train.head()
# pandas dataframe to Surprise dataframe
reader = Reader(rating_scale=(1, 5))
data_train = Dataset.load_from_df(data_train[["user id", "movie id", "rating"]], reader)
data_test = Dataset.load_from_df(data_test[["user id", "movie id", "rating"]], reader)
# build the training and testing set
trainingSet = data_train.build_full_trainset()
testingSet_preset = data_test.build_full_trainset()
testingSet = testingSet_preset.build_testset()
###Output
_____no_output_____
###Markdown
Setting up the algorithm we want
###Code
from surprise import KNNWithMeans
# To use item-based cosine similarity
sim_options = {
"name": "cosine", # contains the similarity metric to use. Options are cosine, msd, pearson, or pearson_baseline. The default is msd.
"user_based": False, # Compute similarities between items. A boolean that tells whether the approach will be user-based or item-based. The default is True, which means the user-based approach will be used.
}
algo = KNNWithMeans(sim_options=sim_options)
algo.fit(trainingSet)
accuracy.rmse(algo.test(testingSet))
accuracy.mae(algo.test(testingSet))
###Output
MAE: 0.7539
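###Markdown
Beyond the aggregate RMSE/MAE, the fitted model can also be queried for a single estimate with algo.predict. The sketch below is only an illustration: user id 196 and movie id 302 are borrowed from the well-known first line of the ml-100k ratings file, but any ids present in the dataset work the same way.
###Code
# Estimate the rating user 196 would give movie 302
single_pred = algo.predict(196, 302)
print(single_pred.est)
###Output
_____no_output_____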
|
02_VRD_Competition_Rel-2.ipynb | ###Markdown
1. Train Relationship Model 1.1 Prepare Features
###Code
def computeAspectRatio1(row):
return (row["XMax1"]-row["XMin1"])/(row["YMax1"]-row["YMin1"])
def computeAspectRatio2(row):
return (row["XMax2"]-row["XMin2"])/(row["YMax2"]-row["YMin2"])
def computeSize1(row):
return (row["XMax1"]-row["XMin1"])*(row["YMax1"]-row["YMin1"])
def computeSize2(row):
return (row["XMax2"]-row["XMin2"])*(row["YMax2"]-row["YMin2"])
def computeSizeDiff(row):
return computeSize1(row)/computeSize2(row)
def computeCenterX1(row):
return (row["XMax1"]-row["XMin1"])/2+row["XMin1"]
def computeCenterX2(row):
return (row["XMax2"]-row["XMin2"])/2+row["XMin2"]
def computeCenterY1(row):
return (row["YMax1"]-row["YMin1"])/2+row["YMin1"]
def computeCenterY2(row):
return (row["YMax2"]-row["YMin2"])/2+row["YMin2"]
def computeCenterXDiff(row):
return (computeCenterX1(row)-computeCenterX2(row)) / (row["XMax1"]-row["XMin1"])
def computeCenterYDiff(row):
return (computeCenterY1(row)-computeCenterY2(row)) / (row["YMax1"]-row["YMin1"])
def computeIOU(row):
xA = max(row["XMin1"], row["XMin2"])
yA = max(row["YMin1"], row["YMin2"])
xB = min(row["XMax1"], row["XMax2"])
yB = min(row["YMax1"], row["YMax2"])
interArea = max(0, xB - xA) * max(0, yB - yA)
boxAArea = (row["XMax1"] - row["XMin1"]) * (row["YMax1"] - row["YMin1"])
boxBArea = (row["XMax2"] - row["XMin2"]) * (row["YMax2"] - row["YMin2"])
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
def computeIntersection1(row):
xA = max(row["XMin1"], row["XMin2"])
yA = max(row["YMin1"], row["YMin2"])
xB = min(row["XMax1"], row["XMax2"])
yB = min(row["YMax1"], row["YMax2"])
interArea = max(0, xB - xA) * max(0, yB - yA)
return interArea / computeSize1(row)
def computeIntersection2(row):
xA = max(row["XMin1"], row["XMin2"])
yA = max(row["YMin1"], row["YMin2"])
xB = min(row["XMax1"], row["XMax2"])
yB = min(row["YMax1"], row["YMax2"])
interArea = max(0, xB - xA) * max(0, yB - yA)
return interArea / computeSize2(row)
def add_features(df):
df["AspectRatio1"] = df.progress_apply(lambda row: computeAspectRatio1(row), axis=1)
df["AspectRatio2"] = df.progress_apply(lambda row: computeAspectRatio2(row), axis=1)
# df["Size1"] = df.progress_apply(lambda row: computeSize1(row), axis=1)
# df["Size2"] = df.progress_apply(lambda row: computeSize2(row), axis=1)
df["sizeDiff"] = df.progress_apply(lambda row: computeSizeDiff(row), axis=1)
# df["CenterX1"] = df.progress_apply(lambda row: computeCenterX1(row), axis=1)
# df["CenterX2"] = df.progress_apply(lambda row: computeCenterX2(row), axis=1)
# df["CenterY1"] = df.progress_apply(lambda row: computeCenterY1(row), axis=1)
# df["CenterY2"] = df.progress_apply(lambda row: computeCenterY2(row), axis=1)
df["XCenterDiff"] = df.progress_apply(lambda row: computeCenterXDiff(row), axis=1)
df["YCenterDiff"] = df.progress_apply(lambda row: computeCenterYDiff(row), axis=1)
# df["CenterDiff"] = df.progress_apply(lambda row: computeCenterDiff(row), axis=1)
# df["CenterX"] = df.progress_apply(lambda row: computeCenterX(row), axis=1)
# df["CenterY"] = df.progress_apply(lambda row: computeCenterY(row), axis=1)
df["IOU"] = df.progress_apply(lambda row: computeIOU(row), axis=1)
df["InterArea1"] = df.progress_apply(lambda row: computeIntersection1(row), axis=1)
df["InterArea2"] = df.progress_apply(lambda row: computeIntersection2(row), axis=1)
add_features(df_train_vrd_rel)
add_features(df_valid_vrd_rel)
add_features(df_test_rel)
df_train_vrd_rel.to_hdf('vrd/df_train_vrd_rel_features2.h5', key='df_train_vrd_rel', mode='w', format='t')
df_valid_vrd_rel.to_hdf('vrd/df_valid_vrd_rel_features2.h5', key='df_valid_vrd_rel', mode='w', format='t')
df_test_rel.to_hdf('vrd/df_test_rel_features2.h5', key='df_test_rel', mode='w', format='t')
###Output
_____no_output_____
###Markdown
1.2 Load Features
###Code
df_train_vrd_rel = pd.read_hdf('vrd/df_train_vrd_rel_features2.h5', 'df_train_vrd_rel')
df_valid_vrd_rel = pd.read_hdf('vrd/df_valid_vrd_rel_features2.h5', 'df_valid_vrd_rel')
df_test_rel = pd.read_hdf('vrd/df_test_rel_features2.h5', 'df_test_rel')
drop_cols = ["XMin1", "XMax1", "YMin1", "YMax1", "XMin2", "XMax2", "YMin2", "YMax2"]
X_train = df_train_vrd_rel.drop(drop_cols+['ImageID','RelationshipLabel'], axis=1)
y_train = df_train_vrd_rel['RelationshipLabel'].cat.codes
X_valid = df_valid_vrd_rel.drop(drop_cols+['ImageID','RelationshipLabel'], axis=1)
y_valid = df_valid_vrd_rel['RelationshipLabel'].cat.codes # pd.get_dummies(df_valid_vrd_rel['RelationshipLabel']).values
X_test = df_test_rel.drop(drop_cols+['ImageID','Confidence1','Confidence2','RelationshipLabel'], axis=1)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_valid = lgb.Dataset(X_valid, y_valid)
params = {'boosting_type': 'gbdt',
'max_depth' : -1,
'objective': 'multiclass',
'nthread': 5,
'num_leaves': 64,
'learning_rate': 0.07,
'max_bin': 512,
'subsample_for_bin': 200,
'subsample': 1,
'subsample_freq': 1,
'colsample_bytree': 0.8,
'reg_alpha': 1.2,
'reg_lambda': 1.2,
'min_split_gain': 0.5,
'min_child_weight': 1,
'min_child_samples': 5,
'scale_pos_weight': 1,
'num_class' : 9,
'metric' : 'multi_logloss'
}
gridParams = {
'learning_rate': [0.01, 0.05, 0.07],
'n_estimators': [8,16],
'num_leaves': [20, 24, 27, 42, 60,64],
'boosting_type' : ['gbdt'],
'objective' : ['multi_logloss'],
'random_state' : [501],
'colsample_bytree' : [0.64, 0.65],
'subsample' : [0.7,0.75],
#'reg_alpha' : [1, 1.2],
#'reg_lambda' : [ 1.2, 1.4],
}
mdl = lgb.LGBMClassifier(boosting_type= 'gbdt',
objective = 'multi_logloss',
n_jobs = 5,
silent = True,
max_depth = params['max_depth'],
max_bin = params['max_bin'],
subsample_for_bin = params['subsample_for_bin'],
subsample = params['subsample'],
subsample_freq = params['subsample_freq'],
min_split_gain = params['min_split_gain'],
min_child_weight = params['min_child_weight'],
min_child_samples = params['min_child_samples'],
scale_pos_weight = params['scale_pos_weight'])
mdl.get_params().keys()
grid = GridSearchCV(mdl, gridParams, verbose=2, cv=4, n_jobs=-1)
grid.fit(X_train, y_train)
print(grid.best_params_)
print(grid.best_score_)
params['colsample_bytree'] = grid.best_params_['colsample_bytree']
params['learning_rate'] = grid.best_params_['learning_rate']
# params['max_bin'] = grid.best_params_['max_bin']
params['num_leaves'] = grid.best_params_['num_leaves']
#params['reg_alpha'] = grid.best_params_['reg_alpha']
#params['reg_lambda'] = grid.best_params_['reg_lambda']
params['subsample'] = grid.best_params_['subsample']
# params = {
# 'objective': 'multiclass',
# 'boosting': 'gbdt',
# 'learning_rate': 0.01 ,
# 'verbose': 0,
# 'num_leaves': 64,
# 'bagging_fraction': 0.95,
# 'bagging_freq': 1,
# 'bagging_seed': 1,
# 'feature_fraction': 0.9,
# 'feature_fraction_seed': 1,
# 'max_bin': 512,
# 'num_rounds': 10000,
# 'metric': ['multi_logloss'],
# 'num_class': 9
# }
cats_dict = dict( zip( df_valid_vrd_rel['RelationshipLabel'].cat.codes, df_valid_vrd_rel['RelationshipLabel'] ) )
lgbm_model = lgb.train(params, train_set = lgb_train, num_boost_round=1000, valid_sets = lgb_valid, verbose_eval=5, early_stopping_rounds=40)
# lgbm_model = lgb.train(params, train_set = lgb_train, num_boost_round=1000, valid_sets = lgb_valid, verbose_eval=5, early_stopping_rounds=40)
lgbm_model.save_model('model3.txt')
lgb.plot_importance(lgbm_model)
###Output
_____no_output_____
###Markdown
1.3 Evaluate
###Code
lgbm_model = lgb.Booster(model_file='model3.txt') # init model
batch_size = 5000
len(X_test)
for i in tqdm(range(0,len(X_test),batch_size)):
total_batch_size = min(i+batch_size, len(X_test))
predictions = lgbm_model.predict(X_test[i:total_batch_size], num_iteration=lgbm_model.best_iteration)
predict_rel_label = predictions.argmax(axis=1)
cat_pred_rel_label = [cats_dict[x] for x in predict_rel_label]
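    # Note: the chained indexing on the next line is what triggers pandas' SettingWithCopyWarning
    # seen in the output below; assigning via
    # df_test_rel.loc[df_test_rel.index[i:total_batch_size], "RelationshipLabel"] = cat_pred_rel_label
    # would be the safer idiom.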
df_test_rel[i:total_batch_size]["RelationshipLabel"] = cat_pred_rel_label
###Output
/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:6: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
1.4 THIS IS WHAT WE WANT WHEN WE COMBINE
###Code
df_test_rel.to_csv("submission_rel2.csv",index=False)
###Output
_____no_output_____ |
Chapter 10 - Position & Momentum.ipynb | ###Markdown
Chapter 10 - Position and MomentumWe can start using sympy to handle symbolic math (integrals and other calculus):
###Code
from sympy import *
init_printing(use_unicode=True)
x, y, z = symbols('x y z', real=True)
a, c = symbols('a c', nonzero=True, real=True)
integrate?
###Output
_____no_output_____
###Markdown
There are two ways to use the `integrate` function. In one line, like `integrate(x,(x,0,1))` or by naming an expression and then integrating it over a range:```A = (c*cos((pi*x)/(2.0*a)))**2A.integrate((x,-a,a),conds='none')```We'll use both, at different times. For longer expressions, the second form can be easier to read and write.First, just try the following, then we'll re-create some examples in the book.
###Code
integrate(x,(x,0,1))
integrate(x**2,(x,0,1))
###Output
_____no_output_____
###Markdown
The cell below will return an odd set of conditions on the result. This is because the solver doesn't want to assume anything about `a` and there is a special case where the answer would be different. If you look closely though, that special case isn't physically realistic, so to ignore these special conditions, we add `conds='none'`. The next cell down does what you'd expect. From here on out, just add this to the `integrate` function and we'll get what we expect.
###Code
A = (c*cos((pi*x)/(2.0*a)))**2
A.integrate((x,-a,a))
A = (c*cos((pi*x)/(2.0*a)))**2
A.integrate((x,-a,a), conds='none')
###Output
_____no_output_____
###Markdown
So this tells us the normalization constant should be $c=\frac{1}{\sqrt{a}}$. Check that it is normalized if we do that:
###Code
psi = 1/sqrt(a)*cos((pi*x)/(2.0*a)) # notice we can name the expression something useful.
B = psi**2
B.integrate( (x,-a,a), conds='none')
###Output
_____no_output_____
###Markdown
Because `psi` is a real function, we can calculate expectation values by integrating over $x$ or $x^2$ with `psi**2`:
###Code
C = x*psi**2
C.integrate( (x,-a,a), conds='none')
D = x**2 * psi**2
E = D.integrate( (x,-a,a), conds='none')
E.n() # the .n() method approximates the numerical part. You can look at the full expression below.
E
###Output
_____no_output_____
###Markdown
Example 10.2
###Code
h = Symbol('hbar', real=True)
###Output
_____no_output_____
###Markdown
Use the `diff` function to take a derivative of a symbolic expression. For example:
###Code
diff(x**2, x)
# Solution
-1j*h*diff( 1/a*cos((pi*x)/(2*a)) ,x)
# Solution
B1 = (pi*h/(2*a))**2 * (cos((pi*x)/(2*a)))**2
B1.integrate( (x,-a,a), conds='none' )
###Output
_____no_output_____
###Markdown
Example 10.3
###Code
p = Symbol('p', real=True)
# Solution
A = integrate(1/sqrt(2*pi*a*h)*exp(-I*p*x/h)*cos((pi*x)/(2*a)),(x,-a,a), conds='none')
# Solution
A
psi_p = sqrt(2*a*pi/h) * 2/(pi**2 - (2*p*a/h)**2) * cos(p*a/h)
###Output
_____no_output_____
###Markdown
Chapter 10 - Position and MomentumWe can start using sympy to handle symbolic math (integrals and other calculus):
###Code
from sympy import *
init_printing(use_unicode=True)
# SymPy works better if you specify what letters are symbols:
x, y, z = symbols('x y z', real=True)
# notice we can also put some restrictions on the symbols:
a, c = symbols('a c', nonzero=True, real=True)
integrate?
###Output
_____no_output_____
###Markdown
There are two ways to use the `integrate` function. In one line, like `integrate(x,(x,0,1))` or by naming an expression and then integrating it over a range:```A = (c*cos((pi*x)/(2.0*a)))**2A.integrate((x,-a,a),conds='none')```We'll use both, at different times. For longer expressions, the second form can be easier to read and write.First, just try the following, then we'll re-create some examples in the book.
###Code
integrate(x,(x,0,1))
integrate(x**2,(x,0,1))
A = (c*cos((pi*x)/(2.0*a)))**2
A.integrate((x,-a,a))
###Output
_____no_output_____
###Markdown
So this tells us the normalization constant should be $c=\frac{1}{\sqrt{a}}$. Check that it is normalized if we do that:
###Code
psi = 1/sqrt(a)*cos((pi*x)/(2.0*a)) # notice we can name the expression something useful.
B = psi**2
B.integrate( (x,-a,a), conds='none')
###Output
_____no_output_____
###Markdown
Because `psi` is a real function, we can calculate expectation values by integrating over $x$ or $x^2$ with `psi**2`:
###Code
C = x*psi**2
C.integrate( (x,-a,a), conds='none')
D = x**2 * psi**2
E = D.integrate( (x,-a,a), conds='none')
E
E.simplify() # this is a useful method!
E.n() # the .n() method approximates the numerical part. You can look at the full expression below.
###Output
_____no_output_____
###Markdown
Example 10.2
###Code
h = Symbol('hbar', real=True, positive=True)
###Output
_____no_output_____
###Markdown
Use the `diff` function to take a derivative of a symbolic expression. For example:
###Code
diff(x**2, x)
# Solution
-1j*h*diff( 1/a*cos((pi*x)/(2*a)) ,x)
# Solution
B1 = (pi*h/(2*a))**2 * (cos((pi*x)/(2*a)))**2
B1.integrate( (x,-a,a), conds='none' )
###Output
_____no_output_____
###Markdown
Example 10.3
###Code
p = Symbol('p', real=True)
# Solution
A = integrate(1/sqrt(2*pi*a*h)*exp(-I*p*x/h)*cos((pi*x)/(2*a)),(x,-a,a), conds='none')
# Solution
A
psi_p = sqrt(2*a*pi/h) * 2/(pi**2 - (2*p*a/h)**2) * cos(p*a/h)
psi_p
psi_p == sqrt(2*a*pi/h)*2/(pi**2 - (2*p*a/h)**2) * cos(p*a/h)
###Output
_____no_output_____
###Markdown
Which agrees with the book.This is about as far as we can go in sympy. Unfortunately, many other momentum integrals choke. There are a few hints to get through the rest here: Problem 10.3
###Code
x, y, z = symbols('x y z', real=True)
a, c = symbols('a c', nonzero=True, real=True, positive=True)
psi = c*1/(a**2 + x**2) # define the wavefunction with c constant
int1 = integrate(psi*psi,(x,-oo,oo), conds='none') # integrate psi^2
solutions = solve(int1 - 1,c) # solve for c, this returns a list of solutions
c2 = simplify(solutions[0]) # simplify the solution for c:
c2
psi2 = c2/c*psi
psi2
integrate(psi2 * x * psi2,(x,-oo,oo))
integrate(psi2 * x**2 * psi2,(x,-oo,oo))
###Output
_____no_output_____
###Markdown
So $\Delta x^2 = a^2 - 0^2$ therefore $\Delta x = a$ Problem 10.17:Now find the momentum representation of the state from 10.3
###Code
p = symbols('p', nonzero=True, real=True, positive=True)
B = integrate(sqrt(1/(2*pi*h))*exp(-I*p*x/h)*psi2,(x,-oo,oo))
B
B.simplify()
###Output
_____no_output_____ |
me16b077_3.ipynb | ###Markdown
Numerical Methods in Scientific Computing Assignment 2 Q1.We know that for cubic spline interpolation:\begin{equation} g_i\ (x)=\frac{g_i^{\prime \prime}(x_i)}{6}(\frac{(x_{i+1}-x)^3}{h}-h(x_{i+1}-x))+\frac{g_{i+1}^{\prime \prime} (x_{i+1})}{6}(\frac{(x-x_i)^3}{h}-h(x-x_i\ ))+f_i(\frac{x_{i+1}-x}{h})+f_{i+1}(\frac{x-x_i}{h})\end{equation}So for the interpolating polynomial in the first interval and with parabolic run-out condition $g^{\prime \prime}(x_0)=g^{\prime \prime}(x_1)$ we have,\begin{equation} g_0\ (x)=\frac{g^{\prime \prime}(x_0)}{6}(\frac{(x_{1}-x)^3}{h}-h(x_{1}-x))+\frac{g^{\prime \prime} (x_{0})}{6}(\frac{(x-x_0)^3}{h}-h(x-x_0\ ))+f_0(\frac{x_{1}-x}{h})+f_{1}(\frac{x-x_0}{h})\end{equation}$\Rightarrow$ The coefficients of $x^3$ in the above equation $-\frac{g^{\prime \prime}(x_0)}{6h}$ and $\frac{g^{\prime \prime}(x_0)}{6h}$ cancel out each other and we have a parabola(quadratic)Similarly for the interpolating polynomial in the last interval and with parabolic run-out condition $g^{\prime \prime}(x_{n-1})=g^{\prime \prime}(x_n)$ we have,\begin{equation} g_{n-1}\ (x)=\frac{g^{\prime \prime}(x_{n-1})}{6}(\frac{(x_{n}-x)^3}{h}-h(x_{n}-x))+\frac{g^{\prime \prime}(x_{n-1})}{6}(\frac{(x-x_{n-1})^3}{h}-h(x-x_{n-1}\ ))+f_{n-1}(\frac{x_{n}-x}{h})+f_{n}(\frac{x-x_{n-1}}{h})\end{equation}$\Rightarrow$ The coefficients of $x^3$ in the above equation $-\frac{g^{\prime \prime}(x_{n-1})}{6h}$ and $\frac{g^{\prime \prime}(x_{n-1})}{6h}$ cancel out each other and we have a parabola(quadratic)Hence interpolating polynomials are parabolas at the first and last interval for cubic spline interpolation with parabolic run-out cinditions Q2.Quadratic Spline Interpolation:Say, for given set of points $\{x_i, f_i\}_{i=0}^n$ we have piecewise quadratic polynomials as\begin{equation} g(x) =\begin{cases} g_0(x) & , x \in [x_0,x_1] \\ g_1(x) & , x \in [x_1,x_2] \\ ... \\ g_{n-1}(x) & , x \in [x_{n-1},x_n] \\\end{cases}\end{equation}- Suitable joint conditions for quadratic spline\begin{equation} g_i^\prime(x_{i+1})=g_{i+1}^\prime(x_{i+1}) \quad , \quad \forall x \in \{0,1,...,n-2\}\end{equation}\begin{equation} g_i(x_{i})=f_i \quad , \quad \forall x \in \{0,1,...,n-1\}\end{equation}\begin{equation} g_i(x_{i+1})=f_{i+1} \quad , \quad \forall x \in \{0,1,...,n-1\}\end{equation}In total there are (n-1) + (n) + (n) equations.- Obtaining coefficients of the splineThe first derivative of the piecewise polynomial for Quadratic spline interpolation are linear. Hence, we have\begin{equation} g_i^\prime(x)=g_i^\prime(x_i)\frac{(x_{i+1}-x)}{h}+g_i^\prime(x_{i+1})\frac{(x-x_{i})}{h}\end{equation}Integrating the above equation,\begin{equation} g_i(x)=-g_i^\prime(x_i)\frac{(x_{i+1}-x)^2}{2h}+g_i^\prime(x_{i+1})\frac{(x-x_{i})^2}{2h}+C\end{equation}Using the conditions $g_i(x_{i})=f_i$ and $g_i(x_{i+1})=f_{i+1}$ to obtain two equations and subtracting one from the other to eliminate C we have over the entire range,\begin{equation} (g^\prime(x_i) + g^\prime(x_{i+1})) = \frac{2}{h}(f_{i+1}-f_i) \quad \forall i \in \{0,1,...,n-1\}\end{equation}This form is similar to what was obtained in Cubic spline interpolation. In total there are n piecewise quadratic polynomials with 3n unknowns and we have already got 3n-1 unknowns. With another extra constraint we can solve the set of linear equations as a Linear system to obtain the first derivatives which are substituted back to get $g(x)$. 
Say if the initial condition is given as $g_i^\prime(x_0) = 0$, then the form of equation reduces to\begin{equation} g^\prime(x_{i+1}) = \frac{2}{h}(f_{i+1}-f_i)-g^\prime(x_i) \quad \forall i \in \{0,1,...,n-1\}\end{equation}With all the first derivatives computed at $\{x_i\}_{i=0}^n$ the piecewise quadratic equations are given by,\begin{equation} g_i(x)=g^\prime(x_i)(-\frac{(x_{i+1}-x)^2}{2h}+\frac{h}{2})+g^\prime(x_{i+1})\frac{(x-x_{i})^2}{2h}+f_i \quad \forall i \in \{0,1,...,n-1\}\end{equation}- Suitable end ConditionsWe require an additional constraint explicitly mentioned to solve the above system of equations. It could be one of the following:\begin{equation} g^\prime(x_0)=0\end{equation}\begin{equation} g^\prime(x_n)=0\end{equation}\begin{equation} g^\prime(x_0)=g^\prime(x_1)\end{equation}\begin{equation} g^\prime(x_n)=g^\prime(x_{n-1})\end{equation}- Computational Efforts * In the case of Cubic we are required to solve the Linear system for second derivatives by computing inverse of the tridiagonal matrix whose computational cost is given by O(n) and then obtaining the piecewise cubic polynomial for the entire range is O(n). So, totally O(n) * In the case of Quadratic splines we are required to solve the Linear system for second derivatives by computing directly from n-1 equations without requirement of calculating inverse of a matrix although still the computational cost is given by O(n) and then obtaining the piecewise quadratic polynomial for the entire range is O(n). So, totally O(n)
###Code
# Import the necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact
# Function to compute the piecewise Quadratic splines
def quadratic_spline(a, b, n):
# Initializing required variables
h = (b-a)/n
x_runge = np.linspace(a,b,n+1)
f_runge = 1/(1+25*x_runge**2)
g_prime = np.zeros((n+1))
# Initial condition
g_prime[0] = 0
# Calculating g_prime values at all nodes
for i in range(0,n):
g_prime[i+1] = (2/h)*(f_runge[i+1]-f_runge[i]) - g_prime[i]
# Compute piecewise values for each of g(Interpolant), f(Runge Function), x(values to plot)
g_piecewise = [0 for _ in range(n)]
f_piecewise = [0 for _ in range(n)]
x_piecewise = [0 for _ in range(n)]
for i in range(0,n):
x = np.linspace(a+i*h, a+(i+1)*h, 10)
g_piecewise[i] = g_prime[i]*( ((-(x_runge[i+1]-x)**2)/(2*h)) + (h/2) ) + g_prime[i+1]*( (((x-x_runge[i])**2)/(2*h)) ) + f_runge[i]
f_piecewise[i] = 1/(1+25*x**2)
x_piecewise[i] = x
return g_piecewise, f_piecewise, x_piecewise
# Function to convert peicewise interpolant to global quadratic spline interpolant
def piecewise_to_global(g_piecewise, f_piecewise, x_piecewise):
n_pieces = np.shape(g_piecewise)[0]
n_plot = np.shape(g_piecewise)[1]
g = []
f = []
x = []
for i in range(n_pieces):
for j in range(n_plot):
if j == n_plot-1 and i != n_pieces-1: continue
g.append(g_piecewise[i][j])
f.append(f_piecewise[i][j])
x.append(x_piecewise[i][j])
return g, f, x
# Set Interval boundaries and No. of pieces in the overall interval
a = 0
b = 1
n = 8
# Call the functions defined in previous steps
g_piecewise, f_piecewise, x_piecewise = quadratic_spline(a, b, n)
g, f, x = piecewise_to_global(g_piecewise, f_piecewise, x_piecewise)
# Next few lines to model the plot of obtained results
plt.plot(x,f,color='r', label="Runge Function", linestyle='dashdot')
plt.plot(x,g,color='b', label="Interpolant")
plt.legend()
xcoords = np.linspace(a,b,n+1)
xcoords = xcoords[1:-1]
for xc in xcoords:
plt.axvline(x=xc, linestyle='dashed')
plt.xlabel('x', fontsize=15)
plt.ylabel('f(x)/g(x)', fontsize=15)
plt.title('Quadratic Spline Interpolation - Runge Function \n (Note: Vertical Lines indicate each interval) \nn=%i' %n, fontsize=15)
plt.xlim(a, b)
plt.rcParams["figure.figsize"] = [10,10]
plt.show()
# Interactive Version
# [Need to run this cell again when opening the jupyter notebook each time]
@interact
def inter(a=(-1,1,0.1),b=(-1,1,0.1), n=(1,20,1)):
if b<=a : return print('Set b greater than a !')
g_piecewise, f_piecewise, x_piecewise = quadratic_spline(a, b, n)
g, f, x = piecewise_to_global(g_piecewise, f_piecewise, x_piecewise)
plt.plot(x,f,color='r', label="Runge Function", linestyle='dashdot')
plt.plot(x,g,color='b', label="Interpolant")
plt.legend()
xcoords = np.linspace(a,b,n+1)
xcoords = xcoords[1:-1]
for xc in xcoords:
plt.axvline(x=xc, linestyle='dashed')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Quadratic Spline Interpolation - Runge Function\n a =%1.2f' % a + ', b =%1.2f' % b +', n =%i' % n, fontsize=15)
plt.xlim(a, b)
plt.rcParams["figure.figsize"] = [10,10]
plt.show()
###Output
_____no_output_____
###Markdown
Q3.- fourth order Pade approximation for the second derivative at the ith nodeSay,\begin{equation} y_i^{\prime\prime} = a_{-1}y_{i-1} + a_0y_{i} + a_1y_{i+1} + b_{-1}y_{i-1}^{\prime\prime} + b_{1}y_{i+1}^{\prime\prime}\end{equation}By taylor series expansion we have,\begin{equation} y_{i\pm1} = y_i \pm hy_i^{\prime} + \frac{h^2}{2!}y_i^{\prime\prime} \pm \frac{h^3}{3!}y_i^{\prime\prime\prime}+\frac{h^4}{4!}y_i^{\prime\prime\prime\prime} + ...\end{equation}\begin{equation} y_{i\pm1}^{\prime\prime} = y_i^{\prime\prime} \pm hy_i^{\prime\prime\prime} + \frac{h^2}{2!}y_i^{\prime\prime\prime\prime} \pm \frac{h^3}{3!}y_i^{\prime\prime\prime\prime\prime} + ...\end{equation}Substituting back in the first equation and segregating by derivatives of y we have,\begin{equation} y_i^{\prime\prime} = y_i(a_{1}+a_0+a_{-1}) + y_i^{\prime}(ha_{1}-ha_{-1}) + y_i^{\prime\prime}(\frac{h^2}{2!}a_{1}+\frac{h^2}{2!}a_{-1} + b_{1}+b_{-1}) + y_i^{\prime\prime\prime}(\frac{h^3}{3!}a_{1}-\frac{h^3}{3!}a_{-1} + hb_{1}-hb_{-1})+ y_i^{\prime\prime\prime\prime}(\frac{h^4}{4!}a_{1}+\frac{h^4}{4!}a_{-1} + \frac{h^2}{2!}b_{1}+\frac{h^2}{2!}b_{-1})+...\end{equation}Equating the coefficients of the derivatives of y from the above equation with the first equation. From the coefficients of $y_i^{\prime}$ and $y_i^{\prime\prime\prime}$ we can see that $a_1=a_{-1}$ and $b_1=b_{-1}$. The equations now reduce to,\begin{equation} 2a_1+a_0=0\end{equation}\begin{equation} h^2a_1+2b_1=1\end{equation}\begin{equation} \frac{2h^2}{4!}a_1+b_1=0\end{equation}Solving them and obtaining all the coefficients yields the following equation,\begin{equation} f_{i-1}^{\prime\prime} + 10f_{i}^{\prime\prime} + f_{i+1}^{\prime\prime} = 12\frac{f_{i+1}-2f_{i}+f_{i-1}}{h^2} \quad \forall i \in \{2,...,N-1\}\end{equation}\begin{equation} \Rightarrow \frac{f_{i-1}^{\prime\prime}}{10} + f_{i}^{\prime\prime} + \frac{f_{i+1}^{\prime\prime}}{10} = \frac{12}{10}\frac{f_{i+1}-2f_{i}+f_{i-1}}{h^2} \quad \forall i \in \{2,...,N-1\}\end{equation}The nodes are,\begin{equation} x_i = (i-0.5)h, \quad i \in \{1,2,...,N\}, \quad h=\frac{1-0}{N}\end{equation}The order of trailing terms is given by\begin{equation} y_i^{\prime\prime\prime\prime\prime}(\frac{h^5}{5!}(a_1-a_{-1}) + \frac{h^3}{3!}(b_1-b_{-1})) + y_i^{\prime\prime\prime\prime\prime\prime}(\frac{h^6}{6!}(a_1+a_{-1}) + \frac{h^4}{4!}(b_1+b_{-1})) + ... = O(\frac{h^6}{h^2}) = O(h^4)\end{equation}\begin{equation} \Rightarrow y_i^{\prime\prime\prime\prime\prime\prime}(\frac{h^6}{6!}(a_1+a_{-1}) + \frac{h^4}{4!}(b_1+b_{-1})) + ... 
= O(\frac{h^6}{h^2}) = O(h^4)\end{equation} - For the left boundary\begin{equation} y_1^{\prime\prime}+b_2y_2^{\prime\prime} = a_1y_1 + a_2y_2 + a_3y_3 + a_4y^{\prime}(0)+O(h^3)\end{equation}Writing as Taylor series expansions\begin{equation} y_{2} = y_1 + hy_1^{\prime} + \frac{h^2}{2!}y_1^{\prime\prime} + \frac{h^3}{3!}y_1^{\prime\prime\prime}+\frac{h^4}{4!}y_1^{\prime\prime\prime\prime} + \frac{h^5}{5!}y_1^{\prime\prime\prime\prime\prime} + ...\end{equation}\begin{equation} y_{3} = y_1 + 2hy_1^{\prime} + \frac{(2h)^2}{2!}y_1^{\prime\prime} + \frac{(2h)^3}{3!}y_1^{\prime\prime\prime}+\frac{(2h)^4}{4!}y_1^{\prime\prime\prime\prime} + \frac{(2h)^5}{5!}y_1^{\prime\prime\prime\prime\prime} + ...\end{equation}\begin{equation} y^{\prime}(0) = y_1^{\prime} - \frac{h}{2}y_1^{\prime\prime} + \frac{(\frac{h}{2})^2}{2!}y_1^{\prime\prime\prime} - \frac{(\frac{h}{2})^3}{3!}y_1^{\prime\prime\prime\prime}+\frac{(\frac{h}{2})^4}{4!}y_1^{\prime\prime\prime\prime\prime} - \frac{(\frac{h}{2})^5}{5!}y_1^{\prime\prime\prime\prime\prime\prime} + ...\end{equation}\begin{equation} y_{2}^{\prime\prime} = y_1^{\prime\prime} + hy_1^{\prime\prime\prime} + \frac{h^2}{2!}y_1^{\prime\prime\prime\prime} + \frac{h^3}{3!}y_1^{\prime\prime\prime\prime\prime} + ...\end{equation}Substituting back and comparing the coefficients of the derivatives yield the following values for the coefficients,\begin{equation} b_2 = \frac{-11}{23}\end{equation}\begin{equation} a_1 = \frac{-36}{23h^2}\end{equation}\begin{equation} a_2 = \frac{48}{23h^2}\end{equation}\begin{equation} a_3 = \frac{-12}{23h^2}\end{equation}\begin{equation} a_4 = \frac{-24}{23h}\end{equation} - Similarly for the right boundary.\begin{equation} y_n^{\prime\prime}+b_2y_{n-1}^{\prime\prime} = a_1y_n + a_2y_{n-1} + a_3y_{n-2} + a_4y^{\prime}(1)+O(h^3)\end{equation}Writing as Taylor series expansions\begin{equation} y_{n-1} = y_n - hy_n^{\prime} + \frac{h^2}{2!}y_n^{\prime\prime} - \frac{h^3}{3!}y_n^{\prime\prime\prime}+\frac{h^4}{4!}y_n^{\prime\prime\prime\prime} - \frac{h^5}{5!}y_n^{\prime\prime\prime\prime\prime} + ...\end{equation}\begin{equation} y_{n-2} = y_n - 2hy_1^{\prime} + \frac{(2h)^2}{2!}y_n^{\prime\prime} - \frac{(2h)^3}{3!}y_n^{\prime\prime\prime}+\frac{(2h)^4}{4!}y_n^{\prime\prime\prime\prime} - \frac{(2h)^5}{5!}y_n^{\prime\prime\prime\prime\prime} + ...\end{equation}\begin{equation} y^{\prime}(1) = y_n^{\prime} + \frac{h}{2}y_n^{\prime\prime} + \frac{(\frac{h}{2})^2}{2!}y_n^{\prime\prime\prime} + \frac{(\frac{h}{2})^3}{3!}y_n^{\prime\prime\prime\prime}+\frac{(\frac{h}{2})^4}{4!}y_n^{\prime\prime\prime\prime\prime} + \frac{(\frac{h}{2})^5}{5!}y_n^{\prime\prime\prime\prime\prime\prime} + ...\end{equation}\begin{equation} y_{n-1}^{\prime\prime} = y_n^{\prime\prime} - hy_n^{\prime\prime\prime} + \frac{h^2}{2!}y_n^{\prime\prime\prime\prime} - \frac{h^3}{3!}y_n^{\prime\prime\prime\prime\prime} + ...\end{equation}By Observation we can see that the above 5 equations can be conoverted to the 5 equations presented previously for left boundary condition by changing h with -h,\begin{equation} b_2 = \frac{-11}{23}\end{equation}\begin{equation} a_1 = \frac{-36}{23h^2}\end{equation}\begin{equation} a_2 = \frac{48}{23h^2}\end{equation}\begin{equation} a_3 = \frac{-12}{23h^2}\end{equation}\begin{equation} a_4 = \frac{24}{23h}\end{equation}\begin{equation}\end{equation} - Linear SystemFormulating in the form Ax=b,$$ A= 
\left(\begin{matrix}1&\frac{-11}{23}&0&.&.&.&0\\\frac{1}{10}&1&\frac{1}{10}&0&.&.&0\\0&\frac{1}{10}&1&\frac{1}{10}&0&.&0\\.&.&.&.&.&.&.\\.&.&.&.&.&.&.\\.&.&.&.&\frac{1}{10}&1&\frac{1}{10}\\0&.&.&.&0&\frac{-11}{23}&1\end{matrix}\right) $$$$ x= \left(\begin{matrix}y_1^{\prime\prime}\\y_2^{\prime\prime}\\.\\.\\y_{n-1}^{\prime\prime}\\y_{n}^{\prime\prime}\end{matrix}\right) $$$$ b= \left(\begin{matrix}\frac{(-36y_1+48y_2-12y_3-24hy^\prime(0))}{23h^2}\\\frac{12(y_1-2y_2+y_3)}{10h^2}\\\frac{12(y_2-2y_3+y_4)}{10h^2}\\.\\.\\\frac{12(y_{n-2}-2y_{n-1}+y_n)}{10h^2}\\\frac{(-36y_n+48y_{n-1}-12y_{n-2}+24hy^\prime(1))}{23h^2}\end{matrix}\right) $$\begin{equation} \Rightarrow \left(\begin{matrix}1&\frac{-11}{23}&0&.&.&.&0\\\frac{1}{10}&1&\frac{1}{10}&0&.&.&0\\0&\frac{1}{10}&1&\frac{1}{10}&0&.&0\\.&.&.&.&.&.&.\\.&.&.&.&.&.&.\\.&.&.&.&\frac{1}{10}&1&\frac{1}{10}\\0&.&.&.&0&\frac{-11}{23}&1\end{matrix}\right) \left(\begin{matrix}y_1^{\prime\prime}\\y_2^{\prime\prime}\\.\\.\\y_{n-1}^{\prime\prime}\\y_{n}^{\prime\prime}\end{matrix}\right) = \left(\begin{matrix}\frac{(-36y_1+48y_2-12y_3-24hy^\prime(0))}{23h^2}\\\frac{12(y_1-2y_2+y_3)}{10h^2}\\\frac{12(y_2-2y_3+y_4)}{10h^2}\\.\\.\\\frac{12(y_{n-2}-2y_{n-1}+y_n)}{10h^2}\\\frac{(-36y_n+48y_{n-1}-12y_{n-2}+24hy^\prime(1))}{23h^2}\end{matrix}\right)\end{equation}\begin{equation} \Rightarrow \left(\begin{matrix}1&\frac{-11}{23}&0&.&.&.&0\\\frac{1}{10}&1&\frac{1}{10}&0&.&.&0\\0&\frac{1}{10}&1&\frac{1}{10}&0&.&0\\.&.&.&.&.&.&.\\.&.&.&.&.&.&.\\.&.&.&.&\frac{1}{10}&1&\frac{1}{10}\\0&.&.&.&0&\frac{-11}{23}&1\end{matrix}\right) \left(\begin{matrix}y_1^{\prime\prime}\\y_2^{\prime\prime}\\.\\.\\y_{n-1}^{\prime\prime}\\y_{n}^{\prime\prime}\end{matrix}\right) = \left(\begin{matrix}\frac{-36}{23h^2}&\frac{48}{23h^2}&\frac{-12}{23h^2}&0&.&.&0\\\frac{12}{10h^2}&\frac{-24}{10h^2}&\frac{12}{10h^2}&0&.&.&0\\0&\frac{12}{10h^2}&\frac{-24}{10h^2}&\frac{12}{10h^2}&0&.&0\\.&.&.&.&.&.&.\\.&.&.&\frac{12}{10h^2}&\frac{-24}{10h^2}&\frac{12}{10h^2}&0\\0&.&.&0&\frac{12}{10h^2}&\frac{-24}{10h^2}&\frac{12}{10h^2}\\0&.&.&0&\frac{-12}{23h^2}&\frac{48}{23h^2}&\frac{-36}{23h^2}\end{matrix}\right)\left(\begin{matrix}y_1\\y_2\\.\\.\\.\\y_{n-1}\\y_n\end{matrix}\right)\end{equation}\begin{equation} Ay^{\prime\prime}=By\end{equation}\begin{equation} y^{\prime\prime}=x^3-y\end{equation}Substituting $y^{\prime\prime}$ as a function of $y$ we can solve the ODE with,\begin{equation} y = (A+B)^{-1}Ax^3\end{equation} - Exact SolutionFor the ODE given by,\begin{equation} y^{\prime\prime}+y=x^3\end{equation}The solution is given by the equation\begin{equation} y = c_1\cos{x}+c_2\sin{x}+x^3-6x\end{equation}Using the boundary conditions,\begin{equation} y^{\prime}(0)=y^{\prime}(1)=0\end{equation}The coefficients $c_1$ and $c_2$ are,\begin{equation} c_2=6 ; \quad c_1=\frac{6\cos{1}-3}{\sin{1}}=0.2873\end{equation}\begin{equation} \Rightarrow y = 0.2873\cos{x}+6\sin{x}+x^3-6x\end{equation}
###Code
# Function to find the numerical solution to ODE
def numerical_ODE(n_nodes):
# Initialize the required parameters
n =n_nodes
h = 1/n
x = np.array([(i-0.5)*h for i in range(1,n+1)])
y_exact = 0.2873*np.cos(x)+6*np.sin(x)+x**3-6*x
# Formulate the linear system derived earlier
A = np.eye(n)
A[0][1] = -11/23
A[n-1][n-2] = -11/23
for i in range(1,n-1):
A[i][i+1] = 1/10
A[i][i-1] = 1/10
B = np.eye(n)
B[0][0] = -36/(23*h**2)
B[0][1] = 48/(23*h**2)
B[0][2] = -12/(23*h**2)
B[n-1][n-1] = -36/(23*h**2)
B[n-1][n-2] = 48/(23*h**2)
B[n-1][n-3] = -12/(23*h**2)
for i in range(1,n-1):
B[i][i+1] = 12/(10*h**2)
B[i][i-1] = 12/(10*h**2)
B[i][i] = -24/(10*h**2)
# Solve the linear system to obtain second derivatives
y_num = np.dot(np.dot(np.linalg.inv(A+B), A), np.power(x, 3))
return y_num, y_exact, x
# Plot the results to compare between Numerical and Exact solutions to the ODE for different values of n
fig = plt.figure(figsize=(20,20))
N = [10, 20, 50, 100, 200, 500, 1000]
for i, n in enumerate(N):
plt.subplot(3, 3, i+1)
y_num, y_exact, x = numerical_ODE(n)
plt.plot(x,y_exact, marker='o',color='r', label="Exact Value", linestyle='dashdot')
plt.plot(x,y_num,color='b', label="Numerical Value")
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Numerical vs Exact solution for the ODE\n n =%i' %n)
plt.xlim(0, 1)
###Output
_____no_output_____
###Markdown
The numerical and exact solutions are similar as seen in the plots for all values of n which indicates and verifies the derivation for numerical approximation of the ODE.
###Code
# Plot for the max norm error in numerical solution as a function of n in log-log plot.
N = [10, 20, 50, 100, 200, 500, 1000]
error = []
for i, n in enumerate(N):
y_num, y_exact, x = numerical_ODE(n)
error.append(max((np.abs(y_exact-y_num))))
plt.figure(figsize=(5,5))
plt.loglog(N, error, marker='o',color='r', label="Max norm error", linestyle='dashed')
plt.xlabel('n (no. of nodes)', fontsize=12)
plt.ylabel('max norm Error', fontsize=12)
plt.title('Error in computation as a function of number of nodes\n n=[10, 20, 50, 100, 200, 500, 1000]', fontsize=15)
plt.legend()
plt.show()
# Plot for the max norm error in numerical solution as a function of n in log-log plot.
N = [50, 100, 200, 500, 1000, 1250, 1500, 1750]
error = []
for i, n in enumerate(N):
y_num, y_exact, x = numerical_ODE(n)
error.append(max((np.abs(y_exact-y_num))))
plt.figure(figsize=(5,5))
plt.loglog(N, error, marker='o',color='r', label="Max norm error", linestyle='dashed')
plt.xlabel('n (no. of nodes)', fontsize=12)
plt.ylabel('max norm Error', fontsize=12)
plt.title('Error in computation as a function of number of nodes\n n=[50, 100, 200, 500, 1000, 1250, 1500, 1750]', fontsize=15)
plt.legend()
plt.show()
###Output
_____no_output_____ |
autograder/extract/submission.ipynb | ###Markdown
Hello worldIn this unit you will learn how to use Python to implement the first ever program that *every* programmer starts with. IntroductionHere is the traditional first programming exercise, called "Hello world".The task is to print the message: "Hello, world".Here are a few examples to get you started. Run the following cells and see how you can print a message. To run a cell, click with the mouse inside a cell, then press Ctrl+Enter to execute it. If you want to execute a few cells sequentially, then press Shift+Enter instead, and the focus will be automatically moved to the next cell as soon as one cell finishes execution.
###Code
print("hello")
print("bye bye")
print("hey", "you")
print("one")
print("two")
###Output
one
two
###Markdown
ExerciseNow it is your turn. Please create a program in the next cell that would print a message "Hello, world":
###Code
def hello(x):
print("Hello, " + x)
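# Example call (prints the requested message):
hello("world")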
###Output
_____no_output_____ |
EXPLORATION/Node_08/[E-08] news_summary_bot.ipynb | ###Markdown
8. Building a News Summarization Bot 8-13. Project: Summarizing News Articles --- Table of Contents Step 1. Collect the data Step 2. Preprocess the data (abstractive summarization) Step 3. Use the attention mechanism (abstractive summarization) Step 4. Compare actual results with the generated summaries (abstractive summarization) Step 5. Try extractive summarization with Summa ---
###Code
import nltk
nltk.download('stopwords') # Download the stopword list from the NLTK package
import numpy as np
import pandas as pd
import os
import re
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import urllib.request
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
print('=3')
###Output
[nltk_data] Downloading package stopwords to
[nltk_data] C:\Users\User\AppData\Roaming\nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
Step 1. Collect the data The data used is the news article dataset ([news_summary_more.csv](https://github.com/sunnysai12345/News_Summary))=> it consists of two columns: text, which is the article body, and headlines- Abstractive summarization: train the model with text as the body and headlines treated as the already-summarized target- Extractive summarization: use only the text column
###Code
import urllib.request
# Download the data
DATA_PATH = 'news_summarization/data/news_summary_more.csv'
if not os.path.exists(DATA_PATH):
urllib.request.urlretrieve("https://raw.githubusercontent.com/sunnysai12345/News_Summary/master/news_summary_more.csv", filename=DATA_PATH)
data = pd.read_csv(DATA_PATH, encoding='iso-8859-1')
# Show 10 random samples
data.sample(10)
print('Total number of samples :', (len(data)))
###Output
Total number of samples : 98401
###Markdown
Step 2. Preprocess the data (abstractive summarization)Referring to the preprocessing used in the practice session, add any preprocessing you think is necessary to normalize or clean the text. If you choose to remove stopwords, also think about whether it is a good idea to remove stopwords from the relatively short summary data. Removing duplicate samples- Count unique values: pandas.unique()- Remove duplicates: pandas.drop_duplicates()Duplicates in text must be removed; even when text differs, the headlines can be identical, so duplicates are not removed there
###Code
print('Number of unique samples in the text column :', data['text'].nunique())
print('Number of unique samples in the headlines column :', data['headlines'].nunique())
# Setting inplace=True changes data in place instead of returning a new DataFrame
data.drop_duplicates(subset = ['text'], inplace=True)
print('Total number of samples :', (len(data)))
###Output
Total number of samples : 98360
###Markdown
Removing Null values- Check the number of Null values: data.isnull().sum()- Drop Null values: data.dropna()Fortunately, the current data has no Null values
###Code
print(data.isnull().sum())
data.dropna(axis=0, inplace=True)
print('Total number of samples :', (len(data)))
###Output
Total number of samples : 98360
###Markdown
Text normalization and stopword removal- Text normalization - unify words that have the same meaning but different surface forms into one representation - => reduces the amount of computation for the machine - build a [normalization dictionary](https://stackoverflow.com/questions/19790188/expanding-english-language-contractions-in-python)
###Code
# Build the contraction normalization dictionary
contractions = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not",
"didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
"he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
"I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would",
"i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would",
"it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam",
"mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have",
"mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock",
"oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have",
"she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is",
"should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as",
"this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would",
"there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have",
"they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have",
"wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are",
"we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are",
"what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is",
"where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
"why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have",
"would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all",
"y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have",
"you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have",
"you're": "you are", "you've": "you have"}
print("Size of the normalization dictionary: ", len(contractions)) # 120
###Output
Size of the normalization dictionary:  120
###Markdown
- Stopwords
  - Words that appear frequently in text but contribute little to natural language processing
  - Use the stopword list provided by NLTK
###Code
# Stopword list provided by NLTK
print('Number of stopwords :', len(stopwords.words('english') )) # 179
print(stopwords.words('english'))
###Output
Number of stopwords : 179
['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
###Markdown
- Data preprocessing
  - Convert all English text to lowercase
  - Remove html tags
  - Remove special characters using regular expressions
  - Remove stopwords
    - Applied only when preprocessing text
    - Stopwords are kept in headlines so that the summaries read naturally
###Code
# Data preprocessing function
def preprocess_sentence(sentence, remove_stopwords=True):
    sentence = sentence.lower() # lowercase the text
    sentence = BeautifulSoup(sentence, "lxml").text # remove html tags such as <br />, <a href = ...>
    sentence = re.sub(r'\([^)]*\)', '', sentence) # remove parenthesized strings (...) e.g. my husband (and myself!) for => my husband for
    sentence = re.sub('"','', sentence) # remove double quotes
    sentence = ' '.join([contractions[t] if t in contractions else t for t in sentence.split(" ")]) # expand contractions
    sentence = re.sub(r"'s\b","", sentence) # remove possessive 's, e.g. roland's -> roland
    sentence = re.sub("[^a-zA-Z]", " ", sentence) # replace non-alphabetic characters (digits, special characters, etc.) with spaces
    sentence = re.sub('[m]{2,}', 'mm', sentence) # collapse 3 or more m's to 2, e.g. ummmmmmm yeah -> umm yeah
    # remove stopwords (text)
    if remove_stopwords:
        tokens = ' '.join(word for word in sentence.split() if not word in stopwords.words('english') if len(word) > 1)
    # keep stopwords (headlines)
    else:
        tokens = ' '.join(word for word in sentence.split() if len(word) > 1)
    return tokens
print('=3')
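# A quick sanity check of the function on a made-up sentence (hypothetical example, not from the dataset):
#   preprocess_sentence("Everything I bought was great!")        -> 'everything bought great'
#   preprocess_sentence("Everything I bought was great!", False) -> 'everything bought was great'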
# Preprocess the entire text column: this can take 10+ minutes.
clean_text = []
for s in data['text']:
    clean_text.append(preprocess_sentence(s))
# Print the results after preprocessing
print("text after preprocessing: ", clean_text[:5])
# Preprocess the entire headlines column: this can take 5+ minutes.
clean_headlines = []
for s in data['headlines']:
    clean_headlines.append(preprocess_sentence(s, False))
print("headlines after preprocessing: ", clean_headlines[:5])
###Output
headlines after preprocessing:  ['upgrad learner switches to career in ml al with salary hike', 'delhi techie wins free food from swiggy for one year on cred', 'new zealand end rohit sharma led india match winning streak', 'aegon life iterm insurance plan helps customers save tax', 'have known hirani for yrs what if metoo claims are not true sonam']
###Markdown
- Check the samples after text cleaning
  - Replace all empty samples with Null values
  - Remove all Null samples
###Code
data['text'] = clean_text
data['headlines'] = clean_headlines
# Convert empty strings to Null values
data.replace('', np.nan, inplace=True)
print('=3')
data.isnull().sum()
data.dropna(axis=0, inplace=True)
print('Total number of samples :', (len(data)))
###Output
Total number of samples : 98360
###Markdown
- Choosing the maximum sample lengths
###Code
# Plot the length distributions
import matplotlib.pyplot as plt
text_len = [len(s.split()) for s in data['text']]
headlines_len = [len(s.split()) for s in data['headlines']]
print('Minimum text length : {}'.format(np.min(text_len))) # 1
print('Maximum text length : {}'.format(np.max(text_len))) # 60
print('Average text length : {}'.format(np.mean(text_len))) # 35.09968483123221
print('Minimum summary length : {}'.format(np.min(headlines_len))) # 1
print('Maximum summary length : {}'.format(np.max(headlines_len))) # 16
print('Average summary length : {}'.format(np.mean(headlines_len))) # 9.299532330215534
plt.subplot(1,2,1)
plt.boxplot(text_len)
plt.title('text')
plt.subplot(1,2,2)
plt.boxplot(headlines_len)
plt.title('headlines')
plt.tight_layout()
plt.show()
plt.title('text')
plt.hist(text_len, bins = 40)
plt.xlabel('length of samples')
plt.ylabel('number of samples')
plt.show()
plt.title('headlines')
plt.hist(headlines_len, bins = 40)
plt.xlabel('length of samples')
plt.ylabel('number of samples')
plt.show()
###Output
Minimum text length : 1
Maximum text length : 60
Average text length : 35.09968483123221
Minimum summary length : 1
Maximum summary length : 16
Average summary length : 9.299532330215534
###Markdown
- text: the histogram shows that most samples are roughly 40 words long or shorter
- headlines: the histogram shows that most samples are 11 words long or shorter

|column|min|max|int(mean)|
|:---:|:---:|:---:|:---:|
|text|1|60|35|
|headlines|1|16|9|
###Code
text_max_len = 40 # maximum text length
headlines_max_len = 11 # maximum headlines length
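# (Optional sketch) Instead of eyeballing the plots, the cut-offs could be derived from the
# length distributions directly, e.g. with a percentile rule (values are approximate for this data):
# text_max_len = int(np.percentile(text_len, 92))           # ~40
# headlines_max_len = int(np.percentile(headlines_len, 94)) # ~11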
print('=3')
###Output
=3
###Markdown
- Check what percentage of the samples is covered by the chosen lengths
###Code
def below_threshold_len(max_len, nested_list):
    cnt = 0
    for s in nested_list:
        if(len(s.split()) <= max_len):
            cnt = cnt + 1
    print('Proportion of samples with length <= %s: %s'%(max_len, (cnt / len(nested_list))))
print('=3')
below_threshold_len(text_max_len, data['text']) # 0.9238714924766165
below_threshold_len(headlines_max_len, data['headlines']) # 0.9449877999186661
###Output
Proportion of samples with length <= 40: 0.9238714924766165
Proportion of samples with length <= 11: 0.9449877999186661
###Markdown
- Exclude samples longer than the chosen lengths
###Code
data = data[data['text'].apply(lambda x: len(x.split()) <= text_max_len)]
data = data[data['headlines'].apply(lambda x: len(x.split()) <= headlines_max_len)]
print('Total number of samples :', (len(data)))
###Output
Total number of samples : 85843
###Markdown
- Adding the start token (sostoken) and end token (eostoken)
  - The decoder starts generating a sentence from the start token and stops the moment it predicts the end token
  - SOS (start of a sequence): start token
    - sentence with the sos token prepended: decoder_input
  - EOS (end of a sequence): end token
    - sentence with the eos token appended: decoder_target
###Code
# Add the start and end tokens to the headline data.
data['decoder_input'] = data['headlines'].apply(lambda x : 'sostoken '+ x)
data['decoder_target'] = data['headlines'].apply(lambda x : x + ' eostoken')
data.head()
###Output
_____no_output_____
###Markdown
- Store the encoder input, decoder input, and labels as numpy arrays
###Code
encoder_input = np.array(data['text']) # encoder input
decoder_input = np.array(data['decoder_input']) # decoder input
decoder_target = np.array(data['decoder_target']) # decoder labels
print('=3')
###Output
=3
###Markdown
- Splitting into training and test data
  - Method 1: use a splitting package
  - Method 2: split manually in code
Method 2 is used here (a sketch of method 1 is shown at the end of the next cell).
###Code
# Create a shuffled integer sequence with the same size and shape as encoder_input
indices = np.arange(encoder_input.shape[0])
np.random.shuffle(indices)
# Reorder the samples using the shuffled indices => the samples are now shuffled.
encoder_input = encoder_input[indices]
decoder_input = decoder_input[indices]
decoder_target = decoder_target[indices]
# The shuffled data will be split into train and test at an 8:2 ratio
n_of_val = int(len(encoder_input)*0.2)
print('Number of test samples :', n_of_val)
# train/test split
encoder_input_train = encoder_input[:-n_of_val]
decoder_input_train = decoder_input[:-n_of_val]
decoder_target_train = decoder_target[:-n_of_val]
encoder_input_test = encoder_input[-n_of_val:]
decoder_input_test = decoder_input[-n_of_val:]
decoder_target_test = decoder_target[-n_of_val:]
print('Number of training samples :', len(encoder_input_train))
print('Number of training labels :', len(decoder_input_train))
print('Number of test samples :', len(encoder_input_test))
print('Number of test labels :', len(decoder_input_test))
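# (Sketch of Method 1) The same 8:2 split could be done with scikit-learn's train_test_split;
# shown commented out because this notebook keeps the manual split above:
# from sklearn.model_selection import train_test_split
# (encoder_input_train, encoder_input_test,
#  decoder_input_train, decoder_input_test,
#  decoder_target_train, decoder_target_test) = train_test_split(
#     encoder_input, decoder_input, decoder_target, test_size=0.2, random_state=42)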
###Output
Number of training samples : 68675
Number of training labels : 68675
Number of test samples : 17168
Number of test labels : 17168
###Markdown
- Building the vocabulary and integer encoding
  - Vocabulary: mapping each word in the data to a unique integer
    - Can be built with Keras's Tokenizer()
    - Tokenizer().fit_on_texts(data) builds the vocabulary from the input data (each word is assigned a unique integer at the same time)
    - encoder_input_train is the source-text data
  - Excluding low-frequency words from the vocabulary
    - src_tokenizer.word_index: the vocabulary built so far
    - src_tokenizer.word_counts.items(): each word and its frequency
  - Words whose frequency is below the chosen threshold are excluded from the vocabulary and the training data
    - total_cnt - rare_cnt: vocabulary size after excluding rare words
    - Tokenizer(num_words=src_vocab): sets the vocabulary size
  - Integer encoding
    - After encoding, decoder_input_train and decoder_target_train no longer contain any index above the vocabulary limit!
    - decoder_input_train: sostoken has been added
    - decoder_target_train: eostoken has been added
    - => low-frequency words are dropped
    - => a sample made up only of low-frequency words may have become an empty sample; this will mostly happen in headlines rather than in text
    - => we need to find the indices of headlines samples whose length became 0!
    - => since sostoken or eostoken was added, we simply look for samples whose length is 1!!

Processing the 'text' column
###Code
# 'text'
src_tokenizer = Tokenizer() # define the tokenizer
src_tokenizer.fit_on_texts(encoder_input_train) # build the vocabulary from the input data
print('=3')
# Print statistics about rare words (below the threshold) in the vocabulary
threshold = 7
total_cnt = len(src_tokenizer.word_index) # number of words
rare_cnt = 0 # count of words whose frequency is below the threshold
total_freq = 0 # total frequency of all words in the training data
rare_freq = 0 # total frequency of the words whose frequency is below the threshold
# Iterate over the (word, frequency) pairs as key and value.
for key, value in src_tokenizer.word_counts.items():
    total_freq = total_freq + value
    # if the word's frequency is below the threshold
    if(value < threshold):
        rare_cnt = rare_cnt + 1
        rare_freq = rare_freq + value
print('Vocabulary size :', total_cnt)
print('Number of rare words appearing no more than %s times: %s'%(threshold - 1, rare_cnt))
print('Vocabulary size after excluding rare words: %s'%(total_cnt - rare_cnt))
print("Proportion of rare words in the vocabulary:", (rare_cnt / total_cnt)*100)
print("Proportion of rare-word occurrences among all word occurrences:", (rare_freq / total_freq)*100)
src_vocab = 20000 # vocabulary size after excluding rare words
src_tokenizer = Tokenizer(num_words=src_vocab) # re-create the tokenizer with the vocabulary size limit
src_tokenizer.fit_on_texts(encoder_input_train)
print('=3')
# Convert the text sequences to integer sequences
encoder_input_train = src_tokenizer.texts_to_sequences(encoder_input_train)
encoder_input_test = src_tokenizer.texts_to_sequences(encoder_input_test)
# Print a few samples to check the result
print(encoder_input_train[:3])
###Output
[[2547, 5970, 2855, 102, 2611, 916, 3959, 30, 66, 186, 11373, 426, 3959, 416, 6266, 5970, 3959, 2014, 2504, 916, 916, 919, 32, 5970, 4438, 1372, 802], [2202, 4884, 2408, 1152, 2031, 1049, 49, 3077, 4938, 840, 4557, 10827, 1767, 301, 422, 3077, 2725, 2031, 588, 137, 2101, 4383, 2101, 301, 512, 523, 4032, 301, 717, 3136], [671, 298, 587, 389, 1365, 265, 265, 4744, 3415, 39, 61, 214, 35, 595, 808, 8290, 389, 1365, 583, 1104, 187, 783, 2504, 983, 8290, 1365, 583, 187, 858, 983, 2951, 2042, 3147, 4010, 3415]]
###Markdown
Processing the 'headlines' column
###Code
# 'headlines'
tar_tokenizer = Tokenizer() # define the tokenizer
tar_tokenizer.fit_on_texts(decoder_input_train) # build the vocabulary from the input data
print('=3')
# Print statistics about rare words (below the threshold) in the vocabulary
threshold = 6
total_cnt = len(tar_tokenizer.word_index) # number of words
rare_cnt = 0 # count of words whose frequency is below the threshold
total_freq = 0 # total frequency of all words in the training data
rare_freq = 0 # total frequency of the words whose frequency is below the threshold
# Iterate over the (word, frequency) pairs as key and value.
for key, value in tar_tokenizer.word_counts.items():
    total_freq = total_freq + value
    # if the word's frequency is below the threshold
    if(value < threshold):
        rare_cnt = rare_cnt + 1
        rare_freq = rare_freq + value
print('Vocabulary size :', total_cnt)
print('Number of rare words appearing no more than %s times: %s'%(threshold - 1, rare_cnt))
print('Vocabulary size after excluding rare words: %s'%(total_cnt - rare_cnt))
print("Proportion of rare words in the vocabulary:", (rare_cnt / total_cnt)*100)
print("Proportion of rare-word occurrences among all word occurrences:", (rare_freq / total_freq)*100)
tar_vocab = 10000 # vocabulary size after excluding rare words
tar_tokenizer = Tokenizer(num_words=tar_vocab) # re-create the tokenizer with the vocabulary size limit
# Fit on both decoder inputs and targets so that sostoken and eostoken both enter the vocabulary
tar_tokenizer.fit_on_texts(decoder_input_train)
tar_tokenizer.fit_on_texts(decoder_target_train)
print('=3')
# Convert the text sequences to integer sequences
decoder_input_train = tar_tokenizer.texts_to_sequences(decoder_input_train)
decoder_target_train = tar_tokenizer.texts_to_sequences(decoder_target_train)
decoder_input_test = tar_tokenizer.texts_to_sequences(decoder_input_test)
decoder_target_test = tar_tokenizer.texts_to_sequences(decoder_target_test)
# Check that the conversion worked
print('input')
print('input ',decoder_input_train[:5])
print('target')
print('decoder ',decoder_target_train[:5])
# Store the indices of length-0 samples => because a token was added, these are effectively samples of length 1.
drop_train = [index for index, sentence in enumerate(decoder_input_train) if len(sentence) == 1]
drop_test = [index for index, sentence in enumerate(decoder_input_test) if len(sentence) == 1]
print('Number of training samples to delete :', len(drop_train))
print('Number of test samples to delete :', len(drop_test))
# Delete the samples whose length became 0 (i.e. length 1 after the token was added)
encoder_input_train = [sentence for index, sentence in enumerate(encoder_input_train) if index not in drop_train]
decoder_input_train = [sentence for index, sentence in enumerate(decoder_input_train) if index not in drop_train]
decoder_target_train = [sentence for index, sentence in enumerate(decoder_target_train) if index not in drop_train]
encoder_input_test = [sentence for index, sentence in enumerate(encoder_input_test) if index not in drop_test]
decoder_input_test = [sentence for index, sentence in enumerate(decoder_input_test) if index not in drop_test]
decoder_target_test = [sentence for index, sentence in enumerate(decoder_target_test) if index not in drop_test]
# Check the result after deleting samples
print('Number of training samples :', len(encoder_input_train))
print('Number of training labels :', len(decoder_input_train))
print('Number of test samples :', len(encoder_input_test))
print('Number of test labels :', len(decoder_input_test))
###Output
Number of training samples to delete : 0
Number of test samples to delete : 0
Number of training samples : 68675
Number of training labels : 68675
Number of test samples : 17168
Number of test labels : 17168
###Markdown
- Padding
  - Shorter sequences are padded at the end with 0s so that every sequence matches the maximum length.
###Code
encoder_input_train = pad_sequences(encoder_input_train, maxlen=text_max_len, padding='post')
encoder_input_test = pad_sequences(encoder_input_test, maxlen=text_max_len, padding='post')
decoder_input_train = pad_sequences(decoder_input_train, maxlen=headlines_max_len, padding='post')
decoder_target_train = pad_sequences(decoder_target_train, maxlen=headlines_max_len, padding='post')
decoder_input_test = pad_sequences(decoder_input_test, maxlen=headlines_max_len, padding='post')
decoder_target_test = pad_sequences(decoder_target_test, maxlen=headlines_max_len, padding='post')
print('=3')
###Output
=3
###Markdown
- Designing a basic seq2seq model
  - Basic seq2seq: the encoder's hidden state and cell state are used as the decoder's initial state
  - Encoder
    - Designed with the functional API
    - A larger LSTM capacity does not automatically mean better performance.
    - A total of 3 layers => increases model capacity
    - To prevent overfitting: dropout + recurrent_dropout => Variational Dropout
    - recurrent_dropout cannot use cuDNN => training takes much longer
  - Decoder design
    - When defining the input of the decoder LSTM, the encoder's hidden state and cell state must be passed through the initial_state argument.
    - The decoder output layer solves a multi-class classification problem, choosing one word among the many words in tar_vocab.
      - => Dense(tar_vocab, activation='softmax')
    - Applying an attention mechanism to the decoder's output layer can improve performance!
###Code
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Concatenate, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
###Output
_____no_output_____
###Markdown
Encoder design
###Code
# Start designing the encoder
embedding_dim = 128
hidden_size = 256 # LSTM capacity == size of the LSTM (number of units)
# Encoder
encoder_inputs = Input(shape=(text_max_len,))
# Encoder embedding layer
enc_emb = Embedding(src_vocab, embedding_dim)(encoder_inputs)
# Encoder LSTM 1
encoder_lstm1 = LSTM(hidden_size, return_sequences=True, return_state=True ,dropout = 0.4, recurrent_dropout = 0.4)
encoder_output1, state_h1, state_c1 = encoder_lstm1(enc_emb)
# Encoder LSTM 2
encoder_lstm2 = LSTM(hidden_size, return_sequences=True, return_state=True, dropout=0.4, recurrent_dropout=0.4)
encoder_output2, state_h2, state_c2 = encoder_lstm2(encoder_output1)
# Encoder LSTM 3
encoder_lstm3 = LSTM(hidden_size, return_state=True, return_sequences=True, dropout=0.4, recurrent_dropout=0.4)
encoder_outputs, state_h, state_c= encoder_lstm3(encoder_output2)
###Output
WARNING:tensorflow:Layer lstm will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.
WARNING:tensorflow:Layer lstm_1 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.
WARNING:tensorflow:Layer lstm_2 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.
###Markdown
Decoder design
###Code
# Decoder design
decoder_inputs = Input(shape=(None,))
# Decoder embedding layer
dec_emb_layer = Embedding(tar_vocab, embedding_dim)
dec_emb = dec_emb_layer(decoder_inputs)
# Decoder LSTM: the encoder's hidden state and cell state must be passed as initial_state!
decoder_lstm = LSTM(hidden_size, return_sequences=True, return_state=True, dropout=0.4, recurrent_dropout=0.2)
decoder_outputs, _, _ = decoder_lstm(dec_emb, initial_state=[state_h, state_c])
# Decoder output layer
# Multi-class classification that picks one of the tar_vocab words, hence Dense(tar_vocab, activation='softmax')
decoder_softmax_layer = Dense(tar_vocab, activation='softmax')
decoder_softmax_outputs = decoder_softmax_layer(decoder_outputs)
# Define the model
model = Model([encoder_inputs, decoder_inputs], decoder_softmax_outputs)
model.summary()
###Output
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 40)] 0 []
embedding (Embedding) (None, 40, 128) 2560000 ['input_1[0][0]']
lstm (LSTM) [(None, 40, 256), 394240 ['embedding[0][0]']
(None, 256),
(None, 256)]
input_2 (InputLayer) [(None, None)] 0 []
lstm_1 (LSTM) [(None, 40, 256), 525312 ['lstm[0][0]']
(None, 256),
(None, 256)]
embedding_1 (Embedding) (None, None, 128) 1280000 ['input_2[0][0]']
lstm_2 (LSTM) [(None, 40, 256), 525312 ['lstm_1[0][0]']
(None, 256),
(None, 256)]
lstm_3 (LSTM) [(None, None, 256), 394240 ['embedding_1[0][0]',
(None, 256), 'lstm_2[0][1]',
(None, 256)] 'lstm_2[0][2]']
dense (Dense) (None, None, 10000) 2570000 ['lstm_3[0][0]']
==================================================================================================
Total params: 8,249,104
Trainable params: 8,249,104
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Step 3. Using an attention mechanism (abstractive summarization)
A seq2seq with an attention mechanism can achieve better performance than a plain seq2seq. Referring to the practice material, design a seq2seq that uses attention.
- Create an attention layer and modify the decoder output layer designed earlier
  - The encoder's and decoder's hidden states are used as inputs to the attention function
  - The value returned by the attention function is used together with the decoder's hidden state when predicting
  - Designing the attention function == designing another new neural network
###Code
from tensorflow.keras.layers import AdditiveAttention
# Attention layer (attention function)
attn_layer = AdditiveAttention(name='attention_layer')
# Pass the hidden states of all encoder and decoder time steps to the attention layer and return the result
attn_out = attn_layer([decoder_outputs, encoder_outputs])
# Concatenate the attention output with the decoder hidden states
decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_outputs, attn_out])
# Decoder output layer
decoder_softmax_layer = Dense(tar_vocab, activation='softmax')
decoder_softmax_outputs = decoder_softmax_layer(decoder_concat_input)
# Define the model
model = Model([encoder_inputs, decoder_inputs], decoder_softmax_outputs)
model.summary()
###Output
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 40)] 0 []
embedding (Embedding) (None, 40, 128) 2560000 ['input_1[0][0]']
lstm (LSTM) [(None, 40, 256), 394240 ['embedding[0][0]']
(None, 256),
(None, 256)]
input_2 (InputLayer) [(None, None)] 0 []
lstm_1 (LSTM) [(None, 40, 256), 525312 ['lstm[0][0]']
(None, 256),
(None, 256)]
embedding_1 (Embedding) (None, None, 128) 1280000 ['input_2[0][0]']
lstm_2 (LSTM) [(None, 40, 256), 525312 ['lstm_1[0][0]']
(None, 256),
(None, 256)]
lstm_3 (LSTM) [(None, None, 256), 394240 ['embedding_1[0][0]',
(None, 256), 'lstm_2[0][1]',
(None, 256)] 'lstm_2[0][2]']
attention_layer (AdditiveAtten (None, None, 256) 256 ['lstm_3[0][0]',
tion) 'lstm_2[0][0]']
concat_layer (Concatenate) (None, None, 512) 0 ['lstm_3[0][0]',
'attention_layer[0][0]']
dense_1 (Dense) (None, None, 10000) 5130000 ['concat_layer[0][0]']
==================================================================================================
Total params: 10,809,360
Trainable params: 10,809,360
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
- Training the model
  - EarlyStopping
    - callbacks=[es]: stops training when a specific condition is met
    - es == EarlyStopping(monitor='val_loss', patience=2, verbose=1)
    - => monitors val_loss (the validation loss) and ends training once it increases instead of decreasing 2 times (patience=2)
    - => training can stop near the optimum even if the epoch count is large
###Code
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy')
es = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
history = model.fit(x=[encoder_input_train, decoder_input_train], y=decoder_target_train, \
validation_data=([encoder_input_test, decoder_input_test], decoder_target_test), \
batch_size=256, callbacks=[es], epochs=50)
###Output
Epoch 1/50
269/269 [==============================] - 241s 870ms/step - loss: 6.0500 - val_loss: 5.6038
Epoch 2/50
269/269 [==============================] - 231s 858ms/step - loss: 5.4760 - val_loss: 5.2548
Epoch 3/50
269/269 [==============================] - 234s 871ms/step - loss: 5.1222 - val_loss: 4.9711
Epoch 4/50
269/269 [==============================] - 232s 862ms/step - loss: 4.8447 - val_loss: 4.7775
Epoch 5/50
269/269 [==============================] - 236s 876ms/step - loss: 4.6289 - val_loss: 4.6148
Epoch 6/50
269/269 [==============================] - 240s 893ms/step - loss: 4.4502 - val_loss: 4.4999
Epoch 7/50
269/269 [==============================] - 242s 899ms/step - loss: 4.2972 - val_loss: 4.4008
Epoch 8/50
269/269 [==============================] - 240s 891ms/step - loss: 4.1616 - val_loss: 4.3263
Epoch 9/50
269/269 [==============================] - 240s 891ms/step - loss: 4.0400 - val_loss: 4.2537
Epoch 10/50
269/269 [==============================] - 240s 891ms/step - loss: 3.9299 - val_loss: 4.2071
Epoch 11/50
269/269 [==============================] - 236s 876ms/step - loss: 3.8313 - val_loss: 4.1503
Epoch 12/50
269/269 [==============================] - 232s 862ms/step - loss: 3.7432 - val_loss: 4.1314
Epoch 13/50
269/269 [==============================] - 234s 869ms/step - loss: 3.6621 - val_loss: 4.0900
Epoch 14/50
269/269 [==============================] - 236s 878ms/step - loss: 3.5850 - val_loss: 4.0557
Epoch 15/50
269/269 [==============================] - 238s 885ms/step - loss: 3.5151 - val_loss: 4.0315
Epoch 16/50
269/269 [==============================] - 238s 886ms/step - loss: 3.4493 - val_loss: 4.0183
Epoch 17/50
269/269 [==============================] - 236s 878ms/step - loss: 3.3935 - val_loss: 3.9978
Epoch 18/50
269/269 [==============================] - 237s 881ms/step - loss: 3.3359 - val_loss: 3.9939
Epoch 19/50
269/269 [==============================] - 237s 880ms/step - loss: 3.2834 - val_loss: 3.9671
Epoch 20/50
269/269 [==============================] - 238s 886ms/step - loss: 3.2302 - val_loss: 3.9624
Epoch 21/50
269/269 [==============================] - 242s 901ms/step - loss: 3.1800 - val_loss: 3.9540
Epoch 22/50
269/269 [==============================] - 235s 875ms/step - loss: 3.1388 - val_loss: 3.9366
Epoch 23/50
269/269 [==============================] - 241s 896ms/step - loss: 3.0938 - val_loss: 3.9329
Epoch 24/50
269/269 [==============================] - 239s 890ms/step - loss: 3.0510 - val_loss: 3.9355
Epoch 25/50
269/269 [==============================] - 237s 881ms/step - loss: 3.0138 - val_loss: 3.9425
Epoch 00025: early stopping
###Markdown
- Visualizing train_loss and val_loss
###Code
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
- Implementing the inference model
  - At test time, the text data that exists as matrices of integer indices must be restored to actual words
    - => prepare 3 dictionaries
  - seq2seq
    - Behaves differently during training and during inference => the models must be designed separately
    - Training stage: one model that ties the encoder and decoder together
    - Inference stage
      - There is no ground-truth sentence
      - => the decoder has to run in a loop, one step at a time, for as many steps as the sentence to generate
      - => a separate model design is needed for inference
      - => the encoder model and the decoder model are designed separately

Preparing the 3 dictionaries
###Code
# The 3 dictionaries
src_index_to_word = src_tokenizer.index_word # source vocabulary: integer -> word
tar_word_to_index = tar_tokenizer.word_index # summary vocabulary: word -> integer
tar_index_to_word = tar_tokenizer.index_word # summary vocabulary: integer -> word
print('=3')
###Output
=3
###Markdown
Inference setup: the encoder model and the tensors for the decoder's previous states
###Code
# Encoder model
encoder_model = Model(inputs=encoder_inputs, outputs=[encoder_outputs, state_h, state_c])
# Tensors that store the states from the previous time step
decoder_state_input_h = Input(shape=(hidden_size,))
decoder_state_input_c = Input(shape=(hidden_size,))
dec_emb2 = dec_emb_layer(decoder_inputs)
# To predict the next word of the sentence, the previous time step's states are used as the initial_state. This is implemented in decode_sequence() below.
# Unlike during training, the hidden state and cell state returned by the LSTM (state_h and state_c) are not discarded.
decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=[decoder_state_input_h, decoder_state_input_c])
print('=3')
###Output
=3
###Markdown
Inference stage: designing the encoder model and decoder model separately
###Code
# Attention function
decoder_hidden_state_input = Input(shape=(text_max_len, hidden_size))
attn_out_inf = attn_layer([decoder_outputs2, decoder_hidden_state_input])
decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_outputs2, attn_out_inf])
# Decoder output layer
decoder_outputs2 = decoder_softmax_layer(decoder_inf_concat)
# Final decoder model
decoder_model = Model(
[decoder_inputs] + [decoder_hidden_state_input,decoder_state_input_h, decoder_state_input_c],
[decoder_outputs2] + [state_h2, state_c2])
print('=3')
def decode_sequence(input_seq):
    # Get the encoder states from the input
    e_out, e_h, e_c = encoder_model.predict(input_seq)
    # Create the token corresponding to <SOS>
    target_seq = np.zeros((1,1))
    target_seq[0, 0] = tar_word_to_index['sostoken']
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition: # loop until stop_condition becomes True
        output_tokens, h, c = decoder_model.predict([target_seq] + [e_out, e_h, e_c])
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_token = tar_index_to_word[sampled_token_index]
        if (sampled_token!='eostoken'):
            decoded_sentence += ' '+sampled_token
        # Stop once <eos> is reached or the maximum length is exceeded.
        if (sampled_token == 'eostoken' or len(decoded_sentence.split()) >= (headlines_max_len-1)):
            stop_condition = True
        # Update the length-1 target sequence
        target_seq = np.zeros((1,1))
        target_seq[0, 0] = sampled_token_index
        # Update the states.
        e_h, e_c = h, c
    return decoded_sentence
print('=3')
# Convert a source integer sequence back to a text sequence
def seq2text(input_seq):
    temp=''
    for i in input_seq:
        if (i!=0):
            temp = temp + src_index_to_word[i]+' '
    return temp
# Convert a summary integer sequence back to a text sequence
def seq2summary(input_seq):
    temp=''
    for i in input_seq:
        if ((i!=0 and i!=tar_word_to_index['sostoken']) and i!=tar_word_to_index['eostoken']):
            temp = temp + tar_index_to_word[i] + ' '
    return temp
print('=3')
###Output
=3
###Markdown
Step 4. Comparing the actual summaries with the generated ones (abstractive summarization)
Compare the original summaries (the headlines column) with the abstractive summaries obtained through training.
###Code
for i in range(5):
    print("* Original text :", seq2text(encoder_input_test[i]))
    print("* Actual summary :", seq2summary(decoder_input_test[i]))
    print("* Abstractive summary :", decode_sequence(encoder_input_test[i].reshape(1, text_max_len)))
    print("\n")
###Output
* Original text : india elected un human rights council highest votes india permanent representative un syed akbaruddin said continue work balanced way protect human rights india victory reflection high standing enjoys globally akbaruddin added india secured votes followed fiji secured votes
* Actual summary : will work in balanced way to protect human rights india
* Abstractive summary : un un council rejects un rights council
* Original text : air hostess helped pilot land fly flight carrying people october co pilot fell ill passed revealed tuesday following investigation german airline fly said air hostess trained flight security expert would rewarded investigators said safety incident
* Actual summary : air hostess helped land plane after co pilot passed out
* Abstractive summary : indigo flight makes emergency landing after cockpit
* Original text : telecom commission tuesday approved proposal allowing phone calls internet service flights within indian airspace telecom regulator trai recommended making calls allowed aircraft reaches metres government intervene pricing mechanism would left airlines flight connectivity providers
* Actual summary : flights to get internet calling service as govt clears plan
* Abstractive summary : govt proposes nod to reduce air india to reduce costs
* Original text : rape victim silence cannot taken proof consent sexual relations delhi high court observed upholding man year jail term raping pregnant woman court made observation rejecting rape convict defence victim silence regarding incident proved consensual sexual relations existed
* Actual summary : victim silence rape not proof of consent hc
* Abstractive summary : rape victim is not rape victim on rape case
* Original text : co working space giant wework raised million funding series round led temasek softbank vision fund others china business latest funding reportedly values business billion wework entered china two years ago covers beijing shanghai nearly locations
* Actual summary : co working space wework raises mn for china business
* Abstractive summary : wework raises million from wework in wework
###Markdown
- Abstractive summarization
  - Generates new sentences that summarize the content of the original text
  - Belongs to the field of Natural Language Generation (NLG)
- Comparing the actual summaries (headlines) with the abstractive results
  - In some cases the actual and abstractive summaries had similar meanings.
  - In other cases, however, the model produced a summary about a completely different topic.

Step 5. Extractive summarization with Summa
Unlike extractive summarization, abstractive summarization can vary how sentences are expressed, but it is harder. Conversely, extractive summarization is easier and, since it pulls sentences out of the existing text, is less likely to produce an incorrect summary.
Try extractive summarization using Summa's summarize.
- Arguments of Summa's summarize()
> text (str): the text to summarize.
> ratio (float, optional) – the fraction of sentences from the original selected for the summary, a value between 0 and 1.
> words (int or None, optional) – the number of words to include in the output. If both words and ratio are provided, ratio is ignored.
> split (bool, optional) – if True, returns a list of sentences; if False, returns a joined string.
###Code
import requests
from summa.summarizer import summarize
text = pd.read_csv(DATA_PATH, encoding='iso-8859-1')
print(text[:1500])
for i in range(5):
    print("* Original text :", text['text'][i])
    print("* Actual summary :", text['headlines'][i])
    print("* Extractive summary :", summarize(text['text'][i], ratio=0.4))
    print("\n")
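# (Sketch) The other summarize() options described above could be used like this
# (commented out so the recorded output below is unchanged):
# summarize(text['text'][0], words=20)               # cap the summary at roughly 20 words; ratio is then ignored
# summarize(text['text'][0], ratio=0.4, split=True)  # return a list of sentences instead of one joined string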
###Output
* Original text : Saurav Kant, an alumnus of upGrad and IIIT-B's PG Program in Machine learning and Artificial Intelligence, was a Sr Systems Engineer at Infosys with almost 5 years of work experience. The program and upGrad's 360-degree career support helped him transition to a Data Scientist at Tech Mahindra with 90% salary hike. upGrad's Online Power Learning has powered 3 lakh+ careers.
* Actual summary : upGrad learner switches to career in ML & Al with 90% salary hike
* Extractive summary : upGrad's Online Power Learning has powered 3 lakh+ careers.
* Original text : Kunal Shah's credit card bill payment platform, CRED, gave users a chance to win free food from Swiggy for one year. Pranav Kaushik, a Delhi techie, bagged this reward after spending 2000 CRED coins. Users get one CRED coin per rupee of bill paid, which can be used to avail rewards from brands like Ixigo, BookMyShow, UberEats, Cult.Fit and more.
* Actual summary : Delhi techie wins free food from Swiggy for one year on CRED
* Extractive summary : Users get one CRED coin per rupee of bill paid, which can be used to avail rewards from brands like Ixigo, BookMyShow, UberEats, Cult.Fit and more.
* Original text : New Zealand defeated India by 8 wickets in the fourth ODI at Hamilton on Thursday to win their first match of the five-match ODI series. India lost an international match under Rohit Sharma's captaincy after 12 consecutive victories dating back to March 2018. The match witnessed India getting all out for 92, their seventh lowest total in ODI cricket history.
* Actual summary : New Zealand end Rohit Sharma-led India's 12-match winning streak
* Extractive summary : The match witnessed India getting all out for 92, their seventh lowest total in ODI cricket history.
* Original text : With Aegon Life iTerm Insurance plan, customers can enjoy tax benefits on your premiums paid and save up to â¹46,800^ on taxes. The plan provides life cover up to the age of 100 years. Also, customers have options to insure against Critical Illnesses, Disability and Accidental Death Benefit Rider with a life cover up to the age of 80 years.
* Actual summary : Aegon life iTerm insurance plan helps customers save tax
* Extractive summary : Also, customers have options to insure against Critical Illnesses, Disability and Accidental Death Benefit Rider with a life cover up to the age of 80 years.
* Original text : Speaking about the sexual harassment allegations against Rajkumar Hirani, Sonam Kapoor said, "I've known Hirani for many years...What if it's not true, the [#MeToo] movement will get derailed." "In the #MeToo movement, I always believe a woman. But in this case, we need to reserve our judgment," she added. Hirani has been accused by an assistant who worked in 'Sanju'.
* Actual summary : Have known Hirani for yrs, what if MeToo claims are not true: Sonam
* Extractive summary : Speaking about the sexual harassment allegations against Rajkumar Hirani, Sonam Kapoor said, "I've known Hirani for many years...What if it's not true, the [#MeToo] movement will get derailed." "In the #MeToo movement, I always believe a woman.
_notebooks/2021-02-08-Capital-Asset-Pricing-Model.ipynb | ###Markdown
Daily and Cumulative Returns, CAPM This post includes code and notes from [python for finance and trading algorithms udemy course](https://udemy.com/python-for-finance-and-trading-algorithms/) and [python for finance and trading algorithms udemy course notebooks](https://github.com/theoneandonlywoj/Python-for-Financial-Analysis-and-Algorithmic-Trading).
###Code
from scipy import stats
# help(stats.linregress)
import pandas as pd
import pandas_datareader as web
start = pd.to_datetime('2020-01-01')
end = pd.to_datetime('today')
FXAIX_stock = web.DataReader('FXAIX', 'yahoo', start, end)
FXAIX_stock.head()
VRTTX_stock = web.DataReader('VRTTX', 'yahoo', start, end)
VRTTX_stock.head()
FNCMX_stock = web.DataReader('FNCMX', 'yahoo', start, end)
FNCMX_stock.head()
FSMAX_stock = web.DataReader('FSMAX', 'yahoo', start, end)
FSMAX_stock.head()
import matplotlib.pyplot as plt
%matplotlib inline
# Collect the closing prices of the four funds into a single DataFrame for a quick comparison
stocks = pd.concat([FXAIX_stock['Close'], VRTTX_stock['Close'], FNCMX_stock['Close'], FSMAX_stock['Close']], axis=1)
stocks.columns = ['FXAIX', 'VRTTX', 'FNCMX', 'FSMAX']
stocks
###Output
_____no_output_____
###Markdown
Compare Cumulative Return
###Code
FXAIX_stock['Cumulative'] = FXAIX_stock['Close']/FXAIX_stock['Close'].iloc[0]
VRTTX_stock['Cumulative'] = VRTTX_stock['Close']/VRTTX_stock['Close'].iloc[0]
FNCMX_stock['Cumulative'] = FNCMX_stock['Close']/FNCMX_stock['Close'].iloc[0]
FSMAX_stock['Cumulative'] = FSMAX_stock['Close']/FSMAX_stock['Close'].iloc[0]
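# Cumulative return here is the price relative to the first close (Close_t / Close_0),
# so a value of 1.25 would mean the fund is up 25% since the start date.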
FXAIX_stock['Cumulative'].plot(label='FXAIX_stock',figsize=(10,8))
VRTTX_stock['Cumulative'].plot(label='VRTTX_stock',figsize=(10,8))
FNCMX_stock['Cumulative'].plot(label='FNCMX_stock',figsize=(10,8))
FSMAX_stock['Cumulative'].plot(label='FSMAX_stock',figsize=(10,8))
plt.legend()
plt.title('Cumulative Return')
###Output
_____no_output_____
###Markdown
Get Daily Return
###Code
FXAIX_stock['Daily Return'] = FXAIX_stock['Close'].pct_change(1)
VRTTX_stock['Daily Return'] = VRTTX_stock['Close'].pct_change(1)
FXAIX_stock['Daily Return']
plt.scatter(FXAIX_stock['Daily Return'],VRTTX_stock['Daily Return'],alpha=0.3)
VRTTX_stock['Daily Return'].hist(bins=100)
FXAIX_stock['Daily Return'].hist(bins=100)
beta,alpha,r_value,p_value,std_err = stats.linregress(FXAIX_stock['Daily Return'].iloc[1:],VRTTX_stock['Daily Return'].iloc[1:])
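# In CAPM terms, the regression slope (beta) measures how strongly VRTTX's daily returns move
# with FXAIX's (used here as the market proxy), the intercept (alpha) is the portion of return
# not explained by that co-movement, and r_value**2 is the share of variance explained.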
beta
alpha
r_value
FXAIX_stock['Daily Return'].head()
import numpy as np
noise = np.random.normal(0,0.001,len(FXAIX_stock['Daily Return'].iloc[1:]))
#noise
FXAIX_stock['Daily Return'].iloc[1:] + noise
beta,alpha,r_value,p_value,std_err = stats.linregress(FXAIX_stock['Daily Return'].iloc[1:]+noise,FXAIX_stock['Daily Return'].iloc[1:])
beta
alpha
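# (Sketch) With a beta estimate in hand, the CAPM expected return could be computed as
# E[R] = Rf + beta * (Rm - Rf); the rates below are hypothetical annual figures, not from this data:
# rf, rm = 0.02, 0.08
# expected_return = rf + beta * (rm - rf)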
###Output
_____no_output_____ |
pages/data-analysis/DA0101EN-2-Review-Data-Wrangling.ipynb | ###Markdown
Data Wrangling
Estimated time needed: **30** minutes
Objectives
After completing this lab you will be able to:
- Handle missing values
- Correct data format
- Standardize and normalize data
Table of contents
- Identify and handle missing values
  - Identify missing values
  - Deal with missing values
  - Correct data format
- Data standardization
- Data normalization (centering/scaling)
- Binning
- Indicator variable
What is the purpose of data wrangling?
Data wrangling is the process of converting data from the initial format to a format that may be better for analysis.
What is the fuel consumption (L/100km) rate for the diesel car?
Import data
You can find the "Automobile Data Set" from the following link: https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data. We will be using this data set throughout this course.
Import pandas
###Code
import pandas as pd
import matplotlib.pylab as plt
###Output
_____no_output_____
###Markdown
Reading the data set from the URL and adding the related headers.
URL of the dataset
This dataset is hosted on IBM Cloud Object Storage.
###Code
filename = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv"
###Output
_____no_output_____
###Markdown
Python list headers containing name of headers
###Code
headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style",
"drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type",
"num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower",
"peak-rpm","city-mpg","highway-mpg","price"]
###Output
_____no_output_____
###Markdown
Use the Pandas method read_csv() to load the data from the web address. Set the parameter "names" equal to the Python list "headers".
###Code
df = pd.read_csv(filename, names = headers)
###Output
_____no_output_____
###Markdown
Use the method head() to display the first five rows of the dataframe.
###Code
# To see what the data set looks like, we'll use the head() method.
df.head()
###Output
_____no_output_____
###Markdown
As we can see, several question marks appeared in the dataframe; those are missing values which may hinder our further analysis. So, how do we identify all those missing values and deal with them?
How to work with missing data?
Steps for working with missing data:
1. Identify missing data
2. Deal with missing data
3. Correct data format
Identify and handle missing values
Identify missing values
Convert "?" to NaN
In the car dataset, missing data comes with the question mark "?". We replace "?" with NaN (Not a Number), which is Python's default missing value marker, for reasons of computational speed and convenience. Here we use the function .replace(A, B, inplace = True) to replace A by B.
###Code
import numpy as np
# replace "?" to NaN
df.replace("?", np.nan, inplace = True)
df.head(5)
###Output
_____no_output_____
###Markdown
Evaluating for missing data
The missing values are converted to Python's default. We use Python's built-in functions to identify these missing values. There are two methods to detect missing data:
- .isnull()
- .notnull()
The output is a boolean value indicating whether the value that is passed into the argument is in fact missing data.
###Code
missing_data = df.isnull()
missing_data.head(5)
###Output
_____no_output_____
###Markdown
"True" stands for missing value, while "False" stands for not missing value. Count missing values in each columnUsing a for loop in Python, we can quickly figure out the number of missing values in each column. As mentioned above, "True" represents a missing value, "False" means the value is present in the dataset. In the body of the for loop the method ".value_counts()" counts the number of "True" values.
###Code
for column in missing_data.columns.values.tolist():
print(column)
print (missing_data[column].value_counts())
print("")
###Output
_____no_output_____
###Markdown
Based on the summary above, each column has 205 rows of data and seven columns contain missing data:
- "normalized-losses": 41 missing data
- "num-of-doors": 2 missing data
- "bore": 4 missing data
- "stroke": 4 missing data
- "horsepower": 2 missing data
- "peak-rpm": 2 missing data
- "price": 4 missing data
Deal with missing data
How to deal with missing data?
1. Drop data
   a. drop the whole row
   b. drop the whole column
2. Replace data
   a. replace it by mean
   b. replace it by frequency
   c. replace it based on other functions
Whole columns should be dropped only if most entries in the column are empty. In our dataset, none of the columns are empty enough to drop entirely. We have some freedom in choosing which method to use to replace data; however, some methods may seem more reasonable than others. We will apply each method to several different columns:
Replace by mean:
- "normalized-losses": 41 missing data, replace them with the mean
- "stroke": 4 missing data, replace them with the mean
- "bore": 4 missing data, replace them with the mean
- "horsepower": 2 missing data, replace them with the mean
- "peak-rpm": 2 missing data, replace them with the mean
Replace by frequency:
- "num-of-doors": 2 missing data, replace them with "four". Reason: 84% of sedans have four doors; since four doors is the most frequent value, it is the most likely one.
Drop the whole row:
- "price": 4 missing data, simply delete the whole row. Reason: price is what we want to predict. Any data entry without price data cannot be used for prediction; therefore any row without price data is not useful to us.
Calculate the average of the column
###Code
avg_norm_loss = df["normalized-losses"].astype("float").mean(axis=0)
print("Average of normalized-losses:", avg_norm_loss)
###Output
_____no_output_____
###Markdown
Replace "NaN" by mean value in "normalized-losses" column
###Code
df["normalized-losses"].replace(np.nan, avg_norm_loss, inplace=True)
###Output
_____no_output_____
###Markdown
Calculate the mean value for 'bore' column
###Code
avg_bore=df['bore'].astype('float').mean(axis=0)
print("Average of bore:", avg_bore)
###Output
_____no_output_____
###Markdown
Replace NaN by mean value
###Code
df["bore"].replace(np.nan, avg_bore, inplace=True)
###Output
_____no_output_____
###Markdown
Question 1: According to the example above, replace NaN in "stroke" column by mean.
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below: calculate the mean vaule for "stroke" columnavg_stroke = df["stroke"].astype("float").mean(axis = 0)print("Average of stroke:", avg_stroke) replace NaN by mean value in "stroke" columndf["stroke"].replace(np.nan, avg_stroke, inplace = True)--> Calculate the mean value for the 'horsepower' column:
###Code
avg_horsepower = df['horsepower'].astype('float').mean(axis=0)
print("Average horsepower:", avg_horsepower)
###Output
_____no_output_____
###Markdown
Replace "NaN" by mean value:
###Code
df['horsepower'].replace(np.nan, avg_horsepower, inplace=True)
###Output
_____no_output_____
###Markdown
Calculate the mean value for 'peak-rpm' column:
###Code
avg_peakrpm=df['peak-rpm'].astype('float').mean(axis=0)
print("Average peak rpm:", avg_peakrpm)
###Output
_____no_output_____
###Markdown
Replace NaN by mean value:
###Code
df['peak-rpm'].replace(np.nan, avg_peakrpm, inplace=True)
###Output
_____no_output_____
###Markdown
To see which values are present in a particular column, we can use the ".value_counts()" method:
###Code
df['num-of-doors'].value_counts()
###Output
_____no_output_____
###Markdown
We can see that four doors are the most common type. We can also use the ".idxmax()" method to calculate for us the most common type automatically:
###Code
df['num-of-doors'].value_counts().idxmax()
###Output
_____no_output_____
###Markdown
The replacement procedure is very similar to what we have seen previously
###Code
#replace the missing 'num-of-doors' values by the most frequent
df["num-of-doors"].replace(np.nan, "four", inplace=True)
###Output
_____no_output_____
###Markdown
Finally, let's drop all rows that do not have price data:
###Code
# simply drop whole row with NaN in "price" column
df.dropna(subset=["price"], axis=0, inplace=True)
# reset index, because we droped two rows
df.reset_index(drop=True, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Good! Now, we obtain the dataset with no missing values. Correct data formatWe are almost there!The last step in data cleaning is checking and making sure that all data is in the correct format (int, float, text or other).In Pandas, we use .dtype() to check the data type.astype() to change the data type Lets list the data types for each column
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
As we can see above, some columns are not of the correct data type. Numerical variables should have type 'float' or 'int', and variables with strings such as categories should have type 'object'. For example, 'bore' and 'stroke' variables are numerical values that describe the engines, so we should expect them to be of the type 'float' or 'int'; however, they are shown as type 'object'. We have to convert data types into a proper format for each column using the "astype()" method. Convert data types to proper format
###Code
df[["bore", "stroke"]] = df[["bore", "stroke"]].astype("float")
df[["normalized-losses"]] = df[["normalized-losses"]].astype("int")
df[["price"]] = df[["price"]].astype("float")
df[["peak-rpm"]] = df[["peak-rpm"]].astype("float")
###Output
_____no_output_____
###Markdown
Let us list the columns after the conversion
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
Wonderful! Now, we finally obtain the cleaned dataset with no missing values and all data in its proper format.
Data Standardization
Data is usually collected from different agencies in different formats. (Data standardization is also a term for a particular type of data normalization, where we subtract the mean and divide by the standard deviation.)
What is standardization?
Standardization is the process of transforming data into a common format which allows the researcher to make meaningful comparisons.
Example: transform mpg to L/100km
In our dataset, the fuel consumption columns "city-mpg" and "highway-mpg" are represented in mpg (miles per gallon). Assume we are developing an application for a country that uses the L/100km fuel consumption standard. We will need to apply a data transformation to convert mpg to L/100km. The formula for the unit conversion is:
L/100km = 235 / mpg
We can do many mathematical operations directly in Pandas.
###Code
df.head()
# Convert mpg to L/100km by mathematical operation (235 divided by mpg)
df['city-L/100km'] = 235/df["city-mpg"]
# check your transformed data
df.head()
###Output
_____no_output_____
###Markdown
Question 2: According to the example above, transform mpg to L/100km in the column of "highway-mpg", and change the name of column to "highway-L/100km".
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below:
# transform mpg to L/100km by mathematical operation (235 divided by mpg)
df["highway-mpg"] = 235/df["highway-mpg"]
# rename column "highway-mpg" to "highway-L/100km"
df.rename(columns={'highway-mpg':'highway-L/100km'}, inplace=True)
# check your transformed data
df.head()
-->
Data Normalization
Why normalization?
Normalization is the process of transforming values of several variables into a similar range. Typical normalizations include scaling the variable so its average is 0, scaling the variable so its variance is 1, or scaling the variable so its values range from 0 to 1.
Example
To demonstrate normalization, let's say we want to scale the columns "length", "width" and "height".
Target: normalize those variables so their values range from 0 to 1.
Approach: replace the original value by (original value)/(maximum value)
###Code
# replace (original value) by (original value)/(maximum value)
df['length'] = df['length']/df['length'].max()
df['width'] = df['width']/df['width'].max()
###Output
_____no_output_____
###Markdown
Questiont 3: According to the example above, normalize the column "height".
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below:df['height'] = df['height']/df['height'].max() show the scaled columnsdf[["length","width","height"]].head()--> Here we can see, we've normalized "length", "width" and "height" in the range of [0,1]. BinningWhy binning? Binning is a process of transforming continuous numerical variables into discrete categorical 'bins', for grouped analysis.Example: In our dataset, "horsepower" is a real valued variable ranging from 48 to 288, it has 57 unique values. What if we only care about the price difference between cars with high horsepower, medium horsepower, and little horsepower (3 types)? Can we rearrange them into three ‘bins' to simplify analysis? We will use the Pandas method 'cut' to segment the 'horsepower' column into 3 bins Example of Binning Data In Pandas Convert data to correct format
###Code
df["horsepower"]=df["horsepower"].astype(int, copy=True)
###Output
_____no_output_____
###Markdown
Lets plot the histogram of horspower, to see what the distribution of horsepower looks like.
###Code
%matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
plt.pyplot.hist(df["horsepower"])
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
###Output
_____no_output_____
###Markdown
We would like 3 bins of equal bandwidth, so we use numpy's linspace(start_value, end_value, numbers_generated) function.
Since we want to include the minimum value of horsepower, we set start_value = min(df["horsepower"]).
Since we want to include the maximum value of horsepower, we set end_value = max(df["horsepower"]).
Since we are building 3 bins of equal length, there should be 4 dividers, so numbers_generated = 4.
We build a bin array from the minimum value to the maximum value with the bandwidth calculated above. The bins are the values used to determine when one bin ends and another begins.
###Code
bins = np.linspace(min(df["horsepower"]), max(df["horsepower"]), 4)
bins
###Output
_____no_output_____
###Markdown
We set group names:
###Code
group_names = ['Low', 'Medium', 'High']
###Output
_____no_output_____
###Markdown
We apply the function "cut" the determine what each value of "df['horsepower']" belongs to.
###Code
df['horsepower-binned'] = pd.cut(df['horsepower'], bins, labels=group_names, include_lowest=True )
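# include_lowest=True makes the first interval closed on the left, so the minimum horsepower
# value (the left edge produced by linspace) falls into the 'Low' bin instead of becoming NaN.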
df[['horsepower','horsepower-binned']].head(20)
###Output
_____no_output_____
###Markdown
Lets see the number of vehicles in each bin.
###Code
df["horsepower-binned"].value_counts()
###Output
_____no_output_____
###Markdown
Lets plot the distribution of each bin.
###Code
%matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
pyplot.bar(group_names, df["horsepower-binned"].value_counts())
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
###Output
_____no_output_____
###Markdown
Check the dataframe above carefully, you will find the last column provides the bins for "horsepower" with 3 categories ("Low","Medium" and "High"). We successfully narrow the intervals from 57 to 3! Bins visualizationNormally, a histogram is used to visualize the distribution of bins we created above.
###Code
%matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
# draw historgram of attribute "horsepower" with bins = 3
plt.pyplot.hist(df["horsepower"], bins = 3)
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
###Output
_____no_output_____
###Markdown
The plot above shows the binning result for the attribute "horsepower".
Indicator variable (or dummy variable)
What is an indicator variable? An indicator variable (or dummy variable) is a numerical variable used to label categories. They are called 'dummies' because the numbers themselves don't have inherent meaning.
Why do we use indicator variables? So we can use categorical variables for regression analysis in the later modules.
Example
We see the column "fuel-type" has two unique values, "gas" or "diesel". Regression doesn't understand words, only numbers. To use this attribute in regression analysis, we convert "fuel-type" into indicator variables. We will use the Pandas method 'get_dummies' to assign numerical values to the different categories of fuel type.
###Code
df.columns
###Output
_____no_output_____
###Markdown
get indicator variables and assign it to data frame "dummy_variable_1"
###Code
dummy_variable_1 = pd.get_dummies(df["fuel-type"])
dummy_variable_1.head()
###Output
_____no_output_____
###Markdown
change column names for clarity
###Code
dummy_variable_1.rename(columns={'gas':'fuel-type-gas', 'diesel':'fuel-type-diesel'}, inplace=True)
dummy_variable_1.head()
###Output
_____no_output_____
###Markdown
If the fuel-type column has the value 'gas' in a row, the gas indicator will be 1 and the diesel indicator will be 0 for that row. Similarly, for a row with 'diesel', the diesel indicator will be 1 and the gas indicator will be 0.
###Code
# merge data frame "df" and "dummy_variable_1"
df = pd.concat([df, dummy_variable_1], axis=1)
# drop original column "fuel-type" from "df"
df.drop("fuel-type", axis = 1, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
The last two columns are now the indicator variable representation of the fuel-type variable. It's all 0s and 1s now. Question 4: As above, create indicator variable to the column of "aspiration": "std" to 0, while "turbo" to 1.
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below: get indicator variables of aspiration and assign it to data frame "dummy_variable_2"dummy_variable_2 = pd.get_dummies(df['aspiration']) change column names for claritydummy_variable_2.rename(columns={'std':'aspiration-std', 'turbo': 'aspiration-turbo'}, inplace=True) show first 5 instances of data frame "dummy_variable_1"dummy_variable_2.head()--> Question 5: Merge the new dataframe to the original dataframe then drop the column 'aspiration'
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below:merge the new dataframe to the original dataframdf = pd.concat([df, dummy_variable_2], axis=1) drop original column "aspiration" from "df"df.drop('aspiration', axis = 1, inplace=True)--> save the new csv
###Code
df.to_csv('clean_df.csv')
###Output
_____no_output_____ |
archive/MaximalStateLSTM.ipynb | ###Markdown
Maximal State LSTM Simulation
Overview
The goal of this notebook is to use 2017 Ford F-150 Ecoboost chassis dynamometer data from the Argonne National Laboratory to simulate the speed of the vehicle given the current brake pedal and accelerator pedal pressures. The model tested in this notebook is a sequence-to-sequence LSTM. Note that the model is end-to-end, not an encoder-decoder model. To address issues with the previous model, the previous speed is included as an input feature in addition to the brake and accelerator pedal positions. Hopefully the LSTM can store all other important information internally and implicitly. If not, we can employ teacher forcing and store even more state information (like current gear/ratio, engine load/RPM, etc.) externally to help the LSTM.
Data
This data is from the Downloadable Dynamometer Database and was generated at the Advanced Mobility Technology Laboratory (AMTL) at Argonne National Laboratory under the funding and guidance of the U.S. Department of Energy (DOE).
If you read the previous `MinimalStateLSTM.ipynb` notebook, skip down a few lines after the data is collected.
First, load a list of TSV data files. These individual files represent separate tests done on the vehicle. Each has different goals and simulation techniques, which you can read about in the `./Dynamometer/2017 Ford F150 Ecoboost Test Summary.xlsx` file. The data is recorded at a frequency of 10Hz.
###Code
csvs = list(filter(lambda file: re.match(r'^(?!61706006)\d{8} Test Data\.txt$', file) is not None, os.listdir('./DynamometerData')))
###Output
_____no_output_____
###Markdown
We load each TSV into a Pandas dataframe. Note: you may not have enough memory to do this all in one pass. If so, load each individual TSV and only keep the important columns identified below (a sketch of this follows the next cell).
###Code
dfs = [pandas.read_csv('./DynamometerData/' + csv, sep='\t', header=0) for csv in csvs]
###Output
_____no_output_____
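###Markdown
As mentioned above, if memory is tight, one option is to keep only the columns used later in this notebook via `usecols`. A sketch (the `KEEP_COLS` name is made up here; the column names are exactly the ones accessed in the cells below):
###Code
KEEP_COLS = ['Brake_pressure_applied_PCM[]', 'Pedal_accel_pos_CAN[per]', 'Dyno_Spd[mph]',
             'Eng_throttle_electronic_control_actual_PCM[deg]', 'Eng_throttle_position_PCM[per]',
             'Trans_gear_engaged_CAN[]', 'Eng_load_PCM[per]', 'Eng_speed_PCM[rpm]',
             'Trans_gear_ratio_measured_TCM[]', 'Trans_output_shaft_speed_raw_TCM[rpm]']
dfs = [pandas.read_csv('./DynamometerData/' + csv, sep='\t', header=0, usecols=KEEP_COLS)
       for csv in csvs]
###Output
_____no_output_____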
###Markdown
We can get an idea of which columns we have access to. We are looking for columns recording accelerator pedal position, brake pedal position, and resulting speed.
###Code
list(dfs[0].columns)
###Output
_____no_output_____
###Markdown
Before we select our columns, we need to know the maximum sequence length recorded. We round it up to the nearest 100 to allow for flexible batch sizes.
###Code
max_length = (ceil(max([len(df) for df in dfs]) / 100)) * 100
max_length
###Output
_____no_output_____
###Markdown
Now we can convert the data frames into input and target sets of sequences. We will use -1 as the suffix padding value, since that is an invalid input value. We should not use 0, as that has meaning in the sequence.
###Code
# Padding with invalid value -1.
X = np.full([len(dfs), max_length, 10], -1.)
Y = np.full([len(dfs), max_length, 8], -1.)
for i, df in enumerate(dfs):
    # Arrays were pre-filled with -1, so adding (value + 1) leaves the actual value in place.
    # Current pedal inputs
X[i,:len(df)-1,0] += df['Brake_pressure_applied_PCM[]'].values[1:] + 1
X[i,:len(df)-1,1] += df['Pedal_accel_pos_CAN[per]'].values[1:] + 1
# Previous
X[i,:len(df)-1,2] += df['Dyno_Spd[mph]'].values[:-1] + 1
X[i,:len(df)-1,3] += df['Eng_throttle_electronic_control_actual_PCM[deg]'].values[:-1] + 1
X[i,:len(df)-1,4] += df['Eng_throttle_position_PCM[per]'].values[:-1] + 1
X[i,:len(df)-1,5] += df['Trans_gear_engaged_CAN[]'].values[:-1] + 1
X[i,:len(df)-1,6] += df['Eng_load_PCM[per]'].values[:-1] + 1
X[i,:len(df)-1,7] += df['Eng_speed_PCM[rpm]'].values[:-1] + 1
X[i,:len(df)-1,8] += df['Trans_gear_ratio_measured_TCM[]'].values[:-1] + 1
X[i,:len(df)-1,9] += df['Trans_output_shaft_speed_raw_TCM[rpm]'].values[:-1] + 1
# Outputs
Y[i,:len(df)-1,0] += df['Dyno_Spd[mph]'].values[1:] + 1
Y[i,:len(df)-1,1] += df['Eng_throttle_electronic_control_actual_PCM[deg]'].values[1:] + 1
Y[i,:len(df)-1,2] += df['Eng_throttle_position_PCM[per]'].values[1:] + 1
Y[i,:len(df)-1,3] += df['Trans_gear_engaged_CAN[]'].values[1:] + 1
Y[i,:len(df)-1,4] += df['Eng_load_PCM[per]'].values[1:] + 1
Y[i,:len(df)-1,5] += df['Eng_speed_PCM[rpm]'].values[1:] + 1
Y[i,:len(df)-1,6] += df['Trans_gear_ratio_measured_TCM[]'].values[1:] + 1
Y[i,:len(df)-1,7] += df['Trans_output_shaft_speed_raw_TCM[rpm]'].values[1:] + 1
###Output
_____no_output_____
###Markdown
We can now delete the data frames to force release of some memory.
###Code
del dfs
###Output
_____no_output_____
###Markdown
Because the LSTM network is sensitive to magnitude, we need to scale our data. Since the gate activations saturate, we scale each feature into a narrow sub-range of $[0, 1]$ (here $[0.25, 0.75]$), keeping 0 free as the padding value. We store the minimums and maximums so we can invert the transform after training and testing.
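For reference, the scaling applied in the next cell is the usual min-max transform mapped onto the `[NEW_MIN, NEW_MAX]` sub-range (with `NEW_PAD_VAL` reserved for padding):
$$x' = \frac{x - x_{\min}}{x_{\max} - x_{\min}}\,(\mathrm{NEW\_MAX} - \mathrm{NEW\_MIN}) + \mathrm{NEW\_MIN}$$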
###Code
NEW_MIN = 0.25
NEW_MAX = 0.75
OLD_PAD_VAL = -1.
NEW_PAD_VAL = 0.
X_mins, X_maxs = [], []
for k in range(X.shape[2]):
X_mins.append(X[:,:,k][X[:,:,k] != OLD_PAD_VAL].min())
X_maxs.append(X[:,:,k][X[:,:,k] != OLD_PAD_VAL].max())
X_std = np.full(X.shape, NEW_PAD_VAL)
for i in range(X.shape[0]):
for k in range(X.shape[2]):
indices = np.where(X[i,:,k] != OLD_PAD_VAL)
X_std[i,indices,k] += ((X[i,indices,k] - X_mins[k]) / (X_maxs[k] - X_mins[k])) * (NEW_MAX - NEW_MIN) + NEW_MIN - NEW_PAD_VAL
Y_mins, Y_maxs = [], []
for k in range(Y.shape[2]):
Y_mins.append(Y[:,:,k][Y[:,:,k] != OLD_PAD_VAL].min())
Y_maxs.append(Y[:,:,k][Y[:,:,k] != OLD_PAD_VAL].max())
Y_std = np.full(Y.shape, NEW_PAD_VAL)
for i in range(Y.shape[0]):
for k in range(Y.shape[2]):
indices = np.where(Y[i,:,k] != OLD_PAD_VAL)
Y_std[i,indices,k] += ((Y[i,indices,k] - Y_mins[k]) / (Y_maxs[k] - Y_mins[k])) * (NEW_MAX - NEW_MIN) + NEW_MIN - NEW_PAD_VAL
###Output
_____no_output_____
###Markdown
Model
We are now ready to start developing a model for our sequence. Since our sequences are one-to-one, I will not bother with an encoder-decoder architecture yet. This also shouldn't be necessary, as only past data is needed to predict a single element of the sequence.
Since our sequences are over 60,000 elements long, we are going to use a trick: divide the sequences into 100-element subsequences and use the `stateful` parameter in our LSTM layer. The `stateful` option will store and pass along the state of the LSTM between batches. In other words, the terminating state of the $i$th sequence in the batch will be the initial state of the $i$th sequence in the following batch. This means we have to be very careful when defining our inputs. Typically, the `batch_input_shape` is of the form `(batch_size, time_steps, features)`. However, we are not batching on the first axis of our data (the individual sequences) but on the length of the sequence. So, our `batch_input_shape` will follow the form `(n_samples, batch_length, features)`. We have 95 sequences, and we will save 10% to use as test sequences, so our number of samples for training is 86. And, as stated before, the length of the subsequences will be 100 elements.
The `return_sequences` option will override the typical LSTM behavior of just returning the last output, as we want to use the entire sequence. The `stateful` option is discussed above.
The output layer is a `TimeDistributed` `Dense` layer. This will apply the `linear` activation to every element in the output sequence from the LSTM.
###Code
train_model = Sequential()
train_model.add(InputLayer(batch_input_shape=(85, 100, X_std.shape[2])))
train_model.add(LSTM(200, return_sequences=True, stateful=True))
train_model.add(TimeDistributed(Dense(Y_std.shape[2], activation='linear')))
train_model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_2 (LSTM) (85, 100, 200) 168800
_________________________________________________________________
time_distributed_2 (TimeDist (85, 100, 8) 1608
=================================================================
Total params: 170,408
Trainable params: 170,408
Non-trainable params: 0
_________________________________________________________________
###Markdown
`RMSprop` is often recommended for LSTMs, although we compile with `Adam` here; `mean_squared_error` is appropriate for our numeric output.
###Code
train_model.compile(optimizer='Adam', loss='mse')
###Output
_____no_output_____
###Markdown
Training
Because of our slightly unusual batching strategy, we need to implement a custom generator. We could extend the `keras.utils.Sequence` class (a sketch follows the next cell), but our batching strategy is not too unusual, so it is easy enough to implement a plain Python generator. The infinite loop allows for variable-length epochs.
###Code
def batch_generator(X, y, batch_size):
while True:
for i in range(0, X.shape[1], batch_size):
yield (X[:,i:i+batch_size,:], y[:,i:i+batch_size,:])
###Output
_____no_output_____
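###Markdown
For reference, the `keras.utils.Sequence` alternative mentioned above could look roughly like the sketch below (same slicing assumptions as the generator; we stick with the plain generator in this notebook).
###Code
from tensorflow.keras.utils import Sequence

class SubsequenceBatches(Sequence):
    """Serves consecutive batch_size-step time slices of every sequence, in order."""

    def __init__(self, X, y, batch_size=100):
        self.X, self.y, self.batch_size = X, y, batch_size

    def __len__(self):
        # Number of sub-sequence batches along the time axis.
        return self.X.shape[1] // self.batch_size

    def __getitem__(self, idx):
        i = idx * self.batch_size
        return (self.X[:, i:i + self.batch_size, :],
                self.y[:, i:i + self.batch_size, :])

# Note: pass shuffle=False when fitting with this so the slices stay in order
# for the stateful LSTM.
###Output
_____no_output_____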
###Markdown
Because our batch size is fixed, it is not possible to do simultaneous training and validation (unless we halve our data). So, here we manually shuffle and split our data using a 90-10 split.
###Code
SPLIT = X_std.shape[0] - X_std.shape[0] // 10
indices = np.arange(0, X_std.shape[0])
np.random.shuffle(indices)
X_shuffled = X_std[indices,:,:]
Y_shuffled = Y_std[indices,:,:]
X_train, X_test = X_shuffled[:SPLIT,:,:], X_shuffled[SPLIT:,:,:]
Y_train, Y_test = Y_shuffled[:SPLIT,:,:], Y_shuffled[SPLIT:,:,:]
###Output
_____no_output_____
###Markdown
It is very important to realize that Keras does not reset the state in a stateful LSTM after each epoch. However, we do *not* want the state to carry over between epochs. Therefore, we manually reset after each epoch using a custom callback.
###Code
class ResetStates(Callback):
def on_epoch_end(self, epoch, logs=None):
self.model.reset_states()
reset_states = ResetStates()
###Output
_____no_output_____
###Markdown
Because we are in a notebook and we don't want to lose our model upon a disconnect, we will use the `ModelCheckpoint` callback to save the model weights after every epoch.
###Code
model_checkpoint = ModelCheckpoint('./Models/MaximalStateLSTM/E{epoch:03d}L{loss:06.4E}.hdf5', save_weights_only=True)
###Output
_____no_output_____
###Markdown
Now we are ready to train. We will run for 200 epochs. Note that `steps_per_epoch` is required because our generator has no length. This is the max sequence length divided by the batch length.
###Code
train_model.fit_generator(batch_generator(X_train, Y_train, 100), epochs=200, steps_per_epoch=X.shape[1] / 100, callbacks=[reset_states, model_checkpoint])
###Output
Epoch 1/200
616/616 [==============================] - 129s 209ms/step - loss: 0.0013
Epoch 2/200
616/616 [==============================] - 129s 209ms/step - loss: 1.1809e-04
Epoch 3/200
616/616 [==============================] - 129s 209ms/step - loss: 7.7743e-05
Epoch 4/200
616/616 [==============================] - 129s 209ms/step - loss: 5.9385e-05
Epoch 5/200
616/616 [==============================] - 129s 209ms/step - loss: 5.3732e-05
Epoch 6/200
616/616 [==============================] - 129s 210ms/step - loss: 4.3678e-05
Epoch 7/200
616/616 [==============================] - 129s 209ms/step - loss: 4.6758e-05
Epoch 8/200
616/616 [==============================] - 129s 209ms/step - loss: 3.1416e-05
Epoch 9/200
616/616 [==============================] - 128s 208ms/step - loss: 3.0694e-05
Epoch 10/200
616/616 [==============================] - 128s 208ms/step - loss: 2.8129e-05
Epoch 11/200
616/616 [==============================] - 128s 208ms/step - loss: 3.4737e-05
Epoch 12/200
616/616 [==============================] - 128s 208ms/step - loss: 2.0784e-05
Epoch 13/200
616/616 [==============================] - 128s 208ms/step - loss: 2.1589e-05
Epoch 14/200
616/616 [==============================] - 129s 209ms/step - loss: 2.1789e-05
Epoch 15/200
616/616 [==============================] - 129s 210ms/step - loss: 1.9115e-05
Epoch 16/200
616/616 [==============================] - 140s 227ms/step - loss: 1.7756e-05
Epoch 17/200
616/616 [==============================] - 203s 329ms/step - loss: 2.2141e-05
Epoch 18/200
616/616 [==============================] - 178s 289ms/step - loss: 1.5015e-05
Epoch 19/200
616/616 [==============================] - 130s 211ms/step - loss: 1.9443e-05
Epoch 20/200
616/616 [==============================] - 179s 290ms/step - loss: 1.4581e-05
Epoch 21/200
616/616 [==============================] - 128s 209ms/step - loss: 1.7529e-05
Epoch 22/200
616/616 [==============================] - 129s 210ms/step - loss: 1.4752e-05
Epoch 23/200
616/616 [==============================] - 128s 208ms/step - loss: 1.5463e-05
Epoch 24/200
616/616 [==============================] - 128s 208ms/step - loss: 1.5382e-05
Epoch 25/200
604/616 [============================>.] - ETA: 2s - loss: 1.3963e-05 |
docs/notebooks/local_transform.ipynb | ###Markdown
Label and Transform content locally
This notebook walks through training a transformation model and redacting PII locally in your environment.
Follow the instructions here to set up your local environment: https://docs.gretel.ai/environment-setup
Prerequisites:
* Python 3.9+ (`python --version`).
* Ensure that Docker is running (`docker info`).
* The Gretel client SDK is installed and configured (`pip install -U gretel-client; gretel configure`).
###Code
import json
import yaml
from smart_open import open
import pandas as pd
from gretel_client import create_project, submit_docker_local
data_source = "https://gretel-public-website.s3.us-west-2.amazonaws.com/datasets/example-datasets/bike-customer-orders.csv"
# Simple policy to redact PII types with a character.
# Dates are shifted +/- 20 days based on the CustomerID field.
# Income is bucketized to 5000 number increments.
config = """
schema_version: 1.0
models:
- transforms:
data_source: "_"
policies:
- name: remove_pii
rules:
- name: fake_or_redact_pii
conditions:
value_label:
- person_name
- phone_number
- gender
- birth_date
transforms:
- type: redact_with_char
attrs:
char: X
- name: dateshifter
conditions:
field_label:
- date
- datetime
- birth_date
transforms:
- type: dateshift
attrs:
min: 20
max: 20
formats: "%Y-%m-%d"
field_name: "CustomerID"
- name: bucketize-income
conditions:
field_name:
- YearlyIncome
transforms:
- type: numberbucket
attrs:
min: 0
max: 1000000
nearest: 5000
"""
# Load and preview the DataFrame to train the transform model on.
df = pd.read_csv(data_source, nrows=500)
df.to_csv('training_data.csv', index=False)
df.head(5)
project = create_project()
# The following cell will create the transform model and
# run a sample of the data set through the model. this sample
# can be used to ensure the model is functioning correctly
# before continuing.
transform = project.create_model_obj(
model_config=yaml.safe_load(config),
data_source='training_data.csv'
)
run = submit_docker_local(transform, output_dir="tmp/")
# Review the sampled classification report
# to get an overview of detected data types
report = json.loads(open("tmp/report_json.json.gz").read())
pd.DataFrame(report["metadata"]["fields"])
# Next let's transform the remaining records using the transformation
# policy and model that was just created.
transform_records = transform.create_record_handler_obj(data_source='training_data.csv')
run = submit_docker_local(
transform_records,
model_path="tmp/model.tar.gz",
output_dir="tmp/"
)
# View the transformation report
report = json.loads(open("tmp/report_json.json.gz").read())
pd.DataFrame(report["metadata"]["fields"])
# View the transformed data
results = pd.read_csv('tmp/data.gz')
results.head(5)
###Output
_____no_output_____ |
docs/seaman/11_manoeuvring_simulation.ipynb | ###Markdown
Manoeuvring simulation in seaman
###Code
import warnings
warnings.filterwarnings("ignore")
import quantities as pq
import matplotlib.pyplot as plt
import os
import evaluation
# Seaman:
import seaman.tests
from seaman.systems.cppsystems.world import World
from seaman.systems.composite.dynamic_ship import DynamicShip
from seaman.simulations.manoeuvring import TurningCircle,ZigZag,SelfPropulsion
###Output
_____no_output_____
###Markdown
Create a world and load a ship
###Code
w = World()
ship = DynamicShip.from_shipfile(path=seaman.tests.test_ship_path)
w.register_unit(ship)
###Output
_____no_output_____
###Markdown
Run a turning circle simulation
###Code
simulation = TurningCircle(ship = ship)
simulation.dt = 0.1
#simulation.dt = 2
simulation.max_simulation_time = 1000
###Output
_____no_output_____
###Markdown
Set some parameters for this turning circle. You need to specify the physical unit using the Quantities package; this way we avoid having to know in which physical unit each parameter should be specified. [knots] will be automatically converted into [m/s], [degrees] into [radians], etc.
###Code
delta = pq.Quantity(35, pq.deg)
u = 12 * 1.852 / 3.6
rev = pq.Quantity(3.23568412333035, 1 / pq.s)
simulation.setup_simulation_parameters(speed = u,angle = delta,rev = rev, pullout=False)
###Output
_____no_output_____
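###Markdown
As a quick illustration of the unit handling (not part of the simulation setup), a quantity created in one unit can be inspected or rescaled explicitly:
###Code
# Small illustration of the quantities package; values here are arbitrary.
angle = pq.Quantity(35, pq.deg)
print(angle.rescale(pq.radian))                    # the same angle expressed in radians
speed = pq.Quantity(12 * 1.852, pq.km / pq.hour)   # 12 knots written as km/h
print(speed.rescale(pq.m / pq.s))                  # and converted to m/s
###Output
_____no_output_____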
###Markdown
Now run the simulation. The *TurningCircle* class knows how to perform this type of simulation correctly.
###Code
simulation.ship.cog.bl.rudder_0.store_all()
simulation.run()
###Output
_____no_output_____
###Markdown
Save simulation
It is possible to save the simulation results into an HDF5 file:
###Code
save_directory = os.path.abspath('results')
if not os.path.exists(save_directory):
os.mkdir(save_directory)
save_path = os.path.join(save_directory,'test.hdf5')
simulation.save_current_simulation(path = save_path)
###Output
_____no_output_____
###Markdown
Post-process the simulation directly
You can process the data directly by accessing the results that the simulation object holds in memory.
###Code
positions = simulation.ship.res.position_world
fig,ax=plt.subplots()
x0 = positions[:,0]
y0 = positions[:,1]
ax.plot(y0,x0)
ax.set_aspect('equal', 'box')
fig,ax=plt.subplots()
time = simulation.ship.res.time
psi = simulation.ship.res.attitude_world[:,2]
ax.plot(time,psi)
###Output
_____no_output_____
###Markdown
Using Evaluation
You can process the data in a more standard way by using the *Evaluation* package. *Evaluation* can analyze time series data from all SSPA facilities (towing tank and MDL) and is also compatible with Seaman. There are benefits to having just ONE code to deal with data regardless of its origin, so that a ZigZag simulation in Seaman is treated the same way as a ZigZag model test in MDL, etc.
###Code
from evaluation.run_manoeuvring import RunZigZag, RunTurningCircle
from evaluation.run import Run
from evaluation.run_dynamic import RunDynamic
interesting_datasets = [r"ship/position_world/x",
r"ship/position_world/y",
r"ship/position_world/z",
r"ship/attitude_world/x",
r"ship/attitude_world/y",
r"ship/attitude_world/z",
r"ship/cog/bl/rudder_0/delta",
]
rename_channels = {
r"ship/cog/bl/rudder_0/delta":'delta',
}
hdf5_file_path = save_path
run_name = 'seaman run'
meta_data = {
'TrackSpeed':u,
'Beam':simulation.ship.static_ship.shipdict.main_data['beam'],
'Lpp':simulation.ship.static_ship.shipdict.main_data['l'],
'ScaleFactor':1,
}
units = {
'TrackSpeed':'m/s',
'Beam':'m',
'Lpp':'m',
'ScaleFactor':36,
}
run = RunTurningCircle.load_time_series_from_seaman(run_name=run_name,hdf5_file_path=hdf5_file_path,
interesting_datasets=interesting_datasets,
meta_data=meta_data, rename_channels=rename_channels,
units=units)
###Output
_____no_output_____
###Markdown
Evaluate
Now we can evaluate the results and look at the data.
###Code
run.evaluate()
results = run.results['turning_circle']
units = results.pop('units')
results
run.track_plot()
run.plot_summary();
run.plot('V')
run.plot('beta')
###Output
_____no_output_____ |
House Addresses.ipynb | ###Markdown
Duke ML Workshop
Based on the house numbers address dataset from Google (a link was provided so we could continue and explore on our own).
###Code
#common imports for notebook to work (learned this in class as well)
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.io
import pandas as pd
np.random.seed(42)
originialData = scipy.io.loadmat('train_32x32.mat') #we chose the smaller dataset because of storage reasons
X = originialData['X']
y = originialData['y']
imageID = 25
plt.imshow(X[:, :, :, imageID])
plt.show()
print(y[imageID]) #expected 63
from sklearn.utils import shuffle
X = X.reshape(X.shape[0] * X.shape[1] * X.shape[2], X.shape[3]).T
y = y.reshape(y.shape[0], )
X, y = shuffle(X, y, random_state = 42)
from sklearn.ensemble import RandomForestClassifier #Type of classifier in ML (we learned this in class as well)
dataRandomClass = RandomForestClassifier()
print(dataRandomClass)
RandomForestClassifier(bootstrap = True, class_weight = None, criterion = 'gini', #Gini! learned last week as well with impurity (Gini)
max_depth = None, max_features = 'auto', max_leaf_nodes = None,
min_impurity_split = 1e-07, min_samples_leaf = 1,
min_samples_split = 2, min_weight_fraction_leaf = 0.0,
n_estimators = 10, n_jobs = 1, oob_score = False, random_state = None,
verbose = 0, warm_start = False)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
dataRandomClass.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
finalPredictions = dataRandomClass.predict(X_test)
print("Accuracy Score:", accuracy_score(y_test, finalPredictions))
###Output
Accuracy Score: 0.5575348075348076
|
week07/.ipynb_checkpoints/prep_notebook_week07_part2-checkpoint.ipynb | ###Markdown
Maps with bqplot - Choropleths
###Code
# import our usual things
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import ipywidgets
import bqplot
import numpy as np
###Output
_____no_output_____
###Markdown
Info viz maps with bqplot
Let's quickly check out another mark available to us in `bqplot`:
###Code
map_mark = bqplot.Map(scales={'projection': bqplot.AlbersUSA()})
fig = bqplot.Figure(marks=[map_mark], title='Basic Map Example')
#fig.layout.min_height='800px'
fig
###Output
_____no_output_____
###Markdown
This is using an "Albers" projection for our map which is stored as a json file (this will come up in D3 as well): https://github.com/d3/d3-3.x-api-reference/blob/master/Geo-Projections.mdYou'll note that there is some funny stuff happening here with Alaska -- sometimes shape files get messed up! Let's try a different way:
###Code
# scales
sc_geo = bqplot.AlbersUSA()
state_data = bqplot.topo_load('map_data/USStatesMap.json') # have to specify shapes to draw
# Marks
states_map = bqplot.Map(map_data=state_data,
scales={'projection':sc_geo})
# Fig
fig=bqplot.Figure(marks=[states_map],
title='US States Map Example',
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0})
fig
###Output
_____no_output_____
###Markdown
A few notes -- where did this map data come from? Check out your packages in anaconda **GO DO**.
Looks like we also have `EuropeMap.json`, `USCountiesMap.json` and `WorldMap.json`. This list probably tells you something already about who the developers are and where they live ;)
For scales of the Map mark, the options are limited to `GeoScale` (general, to make your own), `Mercator` (which we all by now hate), `AlbersUSA`, `Gnomonic` and `Stereographic`. For fun, let's see the world with some of these:
###Code
# scales
sc_geo = bqplot.Mercator()
map_data = bqplot.topo_load('map_data/WorldMap.json') # have to specify shapes to draw
# Marks
our_map = bqplot.Map(map_data=map_data,
scales={'projection':sc_geo})
# Fig
fig=bqplot.Figure(marks=[our_map],
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0})
fig
# scales
sc_geo = bqplot.Gnomonic()
map_data = bqplot.topo_load('map_data/WorldMap.json') # have to specify shapes to draw
# Marks
our_map = bqplot.Map(map_data=map_data,
scales={'projection':sc_geo})
# Fig
fig=bqplot.Figure(marks=[our_map],
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0})
fig
# scales
sc_geo = bqplot.Stereographic()
map_data = bqplot.topo_load('map_data/WorldMap.json') # have to specify shapes to draw
# Marks
our_map = bqplot.Map(map_data=map_data,
scales={'projection':sc_geo})
# Fig
fig=bqplot.Figure(marks=[our_map],
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0})
fig
###Output
_____no_output_____
###Markdown
Let's return to our state data:
###Code
# scales
sc_geo = bqplot.AlbersUSA()
state_data = bqplot.topo_load('map_data/USStatesMap.json') # have to specify shapes to draw
# Marks
states_map = bqplot.Map(map_data=state_data,
scales={'projection':sc_geo})
# Fig
fig=bqplot.Figure(marks=[states_map],
title='US States Map Example',
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0})
fig
###Output
_____no_output_____
###Markdown
PS: note how small Alaska is here compared to its actual size! Let's add something new: a tooltip that will give us info about the state & its IDs:
###Code
bqplot.Tooltip?
###Output
_____no_output_____
###Markdown
Note: it says something about "all attributes of the mark are accessible in the tooltip" -- a slightly frustrating part of bqplot is that not all kinds of things can be displayed with tooltips. You can certainly build your own with widgets, but we won't in this case -- we'll just use the defaults:
###Code
#(1)
sc_geo = bqplot.AlbersUSA()
state_data = bqplot.topo_load('map_data/USStatesMap.json')
# (1.5) tooltip
def_tt = bqplot.Tooltip(fields=['id', 'name'])
states_map = bqplot.Map(map_data=state_data,
scales={'projection':sc_geo},
tooltip=def_tt)
# 4 interactions
states_map.interactions = {'click': 'select', 'hover': 'tooltip'}
fig=bqplot.Figure(marks=[states_map],
title='US States Map Example',
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0}) # try w/o first and see
fig
###Output
_____no_output_____
###Markdown
So now it displays the name of the state we are hovering over *and* it turns different colors when we select different states. Let's keep adding! Let's allow the selected states (the ones colored red) to have their names printed out.
###Code
from states_utils import get_ids_and_names # the states_utils.py file is in the downloads section for today
ids, state_names = get_ids_and_names(states_map) # pulls out names and ids
state_names, ids
#(1)
sc_geo = bqplot.AlbersUSA()
state_data = bqplot.topo_load('map_data/USStatesMap.json')
# (1.5) tooltip
def_tt = bqplot.Tooltip(fields=['id', 'name'])
states_map = bqplot.Map(map_data=state_data,
scales={'projection':sc_geo},
tooltip=def_tt)
# 4 interactions
states_map.interactions = {'click': 'select', 'hover': 'tooltip'}
# more interactions:
def get_data_value(change):
#print(change['owner'].selected) # so we have IDs, but we want to print state names
if change['owner'].selected is not None:
for i,s in enumerate(change['owner'].selected): # over all selected states
print(state_names[s == ids])
states_map.observe(get_data_value,'selected')
fig=bqplot.Figure(marks=[states_map],
title='US States Map Example',
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0}) # try w/o first and see
fig
###Output
_____no_output_____
###Markdown
Add in data to link to our map
We'll now use a dataset that looks at total exports of each state in $US millions.
This is originally from: https://www.ers.usda.gov/data-products/state-export-data/
###Code
comm = pd.read_csv('/Users/jillnaiman/Downloads/total_export.csv')
###Output
_____no_output_____
###Markdown
Take a quick look:
###Code
comm.head()
###Output
_____no_output_____
###Markdown
Let's see how our data is formatted:
###Code
comm.loc[comm['State'] == 'Alabama'].values
###Output
_____no_output_____
###Markdown
A few things to note about this dataset that might cause us problems:
1. the values are formatted as strings - this means we'll have to do some formatting when we plot the data
2. the state name is the first column and not a number - we'll also have to take care of this
Let's first work on formatting the years correctly:
###Code
# grab years
years = list(comm.columns.values)
years = np.array(years[1:]) # get rid of state
# as numbers
years = years.astype('int')
years
###Output
_____no_output_____
###Markdown
What we want to do is plot the total exports of all selected states as a function of year on a line plot. Let's start by making a line plot for an arbitrary state as a function of time. How do we get the data as a function of time? Let's try:
###Code
sn = 'Illinois'
comm.loc[comm['State'] == sn].values
# oddly shaped
comm.loc[comm['State'] == sn].values[0]
# ignore state name:
comm.loc[comm['State'] == sn].values[0][1:]
comm.loc[comm['State'] == sn].values[0][1:].astype('float64')
###Output
_____no_output_____
###Markdown
Oh no! We have to deal with the fact that we have commas in there:
###Code
exports_in = comm.loc[comm['State'] == sn].values[0][1:]
exports_in = np.array([exports_in[i].replace(',','') for i in range(len(exports_in))])
exports_in = exports_in.astype('float64')
exports_in
###Output
_____no_output_____
###Markdown
Finally, let's plot:
###Code
# scales
x_scl = bqplot.LinearScale()
y_scl = bqplot.LinearScale()
# axis
ax_xcl = bqplot.Axis(label='Year', scale=x_scl)
ax_ycl = bqplot.Axis(label='Total Export from State ' + sn,
scale=y_scl,
orientation='vertical', side='left')
# marks
lines = bqplot.Lines(x = years, y = exports_in,
scales = {'x': x_scl, 'y': y_scl})
# fig
fig_lines = bqplot.Figure(marks = [lines], axes = [ax_ycl, ax_xcl])
fig_lines
###Output
_____no_output_____
###Markdown
Let's make a US state map and use that to drive updates to our line plot.
###Code
# (I) US STATE MAP
# Scales
sc_geo = bqplot.AlbersUSA()
state_data = bqplot.topo_load('map_data/USStatesMap.json')
# tool tip
def_tt = bqplot.Tooltip(fields=['id', 'name'])
# marks
states_map = bqplot.Map(map_data=state_data, scales={'projection':sc_geo}, tooltip=def_tt)
# interactions
states_map.interactions = {'click': 'select', 'hover': 'tooltip'}
# (II) LINE PLOT
# scales
x_scl = bqplot.LinearScale()
y_scl = bqplot.LinearScale()
# axis
ax_xcl = bqplot.Axis(label='Year', scale=x_scl)
ax_ycl = bqplot.Axis(label='Total Export from State NA',
scale=y_scl,
orientation='vertical', side='left')
# lines: let's start with only zeros plotted
lines = bqplot.Lines(x = years, y = np.zeros(len(years)),
scales = {'x': x_scl, 'y': y_scl})
# (III) INTERACTIONS
# let do something additive for all states selected
def get_data_value(change):
exports = np.zeros(len(years)) # start with zeros in exports
snames = '' # store what state names we are plotting
if change['owner'].selected is not None: # something is selected
for i,s in enumerate(change['owner'].selected): # for all states selected
sn = state_names[s == ids][0] # grab the state name, note the [0] here -> try printing out and see!
snames += sn + ', ' # add to our label
# LINE PLOT SELECTION: we did this before
exports_in=comm.loc[comm['State'] == sn].values[0][1:]
# there are ","'s in exports we gotta take out
exports_in = np.array([exports_in[i].replace(',','') for i in range(len(exports_in))])
exports = np.add(exports, exports_in.astype('float64')) # note we are *adding* when we select multiples
lines.y = exports # update export line
ax_ycl.label='Total Export from ' + snames # list of selected states
else: # we don't have states selected!
lines.y = np.zeros(len(exports))
ax_ycl.label='Total Export from NA'
states_map.observe(get_data_value,'selected')
# (IV) CREATE FIGS
fig_map = bqplot.Figure(marks=[states_map], title='US States Map Example',
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0})
fig_lines = bqplot.Figure(marks = [lines],
axes = [ax_ycl, ax_xcl])
# (V) Display
fig_map.layout.min_width='500px'
fig_lines.layout.min_width='500px'
myDashboard = ipywidgets.HBox([fig_map,fig_lines])
myDashboard
###Output
_____no_output_____
###Markdown
What about multiple lines, one per selected state?
###Code
# (I) US STATE MAP
# Scales
sc_geo = bqplot.AlbersUSA()
state_data = bqplot.topo_load('map_data/USStatesMap.json')
# tool tip
def_tt = bqplot.Tooltip(fields=['id', 'name'])
# marks
states_map = bqplot.Map(map_data=state_data, scales={'projection':sc_geo}, tooltip=def_tt)
# interactions
states_map.interactions = {'click': 'select', 'hover': 'tooltip'}
# (II) INITIAL LINE PLOT
# scales
x_scl = bqplot.LinearScale()
y_scl = bqplot.LinearScale()
# axis
ax_xcl = bqplot.Axis(label='Year', scale=x_scl)
ax_ycl = bqplot.Axis(label='Total Export from State NA',
scale=y_scl,
orientation='vertical', side='left')
# lines: let's start with only zeros plotted
lines = bqplot.Lines(x = years, y = np.zeros(len(years)),
scales = {'x': x_scl, 'y': y_scl})
# (III) INTERACTIONS -- more complex
# let do something additive for all states selected
def get_data_value(change):
snames = '' # store what state names we are plotting
if change['owner'].selected is not None: # something is selected
ll = []
for i,s in enumerate(change['owner'].selected): # for all states selected
sn = state_names[s == ids][0] # grab the state name, note the [0] here -> try printing out and see!
snames += sn + ', ' # add to our label
# LINE PLOT SELECTION: we did this before
exports_in=comm.loc[comm['State'] == sn].values[0][1:]
# there are ","'s in exports we gotta take out
exports_in = np.array([exports_in[i].replace(',','') for i in range(len(exports_in))])
ll.append(bqplot.Lines(x = years, y = exports_in, scales = {'x': x_scl, 'y': y_scl}))
fig_lines.marks = ll # update export line
ax_ycl.label='Total Export from ' + snames # list of selected states
else: # we don't have states selected!
lines = bqplot.Lines(x = years, y = np.zeros(len(years)), scales = {'x': x_scl, 'y': y_scl})
fig_lines.marks = [lines]
ax_ycl.label='Total Export from NA'
states_map.observe(get_data_value,'selected')
# (IV) CREATE FIGS
fig_map = bqplot.Figure(marks=[states_map], title='US States Map Example',
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0})
fig_lines = bqplot.Figure(marks = [lines],
axes = [ax_ycl, ax_xcl])
# (V) Display
fig_map.layout.min_width='500px'
fig_lines.layout.min_width='500px'
myDashboard = ipywidgets.HBox([fig_map,fig_lines])
myDashboard
###Output
_____no_output_____
###Markdown
Close, but not quite: colors aren't great and neither are scales! Let's do better:
###Code
comm.values.shape
comm.values[:,1:]
exports = comm.values[:,1:].flatten()
totalCom = np.array([exports[i].replace(',','') for i in range(len(exports))])
totalCom.astype('float')
totalCom = totalCom.astype('float')
totalCom.min(), totalCom.max(), totalCom.mean()
# (I) US STATE MAP
# Scales
sc_geo = bqplot.AlbersUSA()
state_data = bqplot.topo_load('map_data/USStatesMap.json')
# tool tip
def_tt = bqplot.Tooltip(fields=['id', 'name'])
# marks
states_map = bqplot.Map(map_data=state_data, scales={'projection':sc_geo}, tooltip=def_tt)
# interactions
states_map.interactions = {'click': 'select', 'hover': 'tooltip'}
# (II) INITIAL LINE PLOT
# scales
x_scl = bqplot.LinearScale()
y_scl = bqplot.LogScale(min=totalCom.min(), max=totalCom.max())
# axis
ax_xcl = bqplot.Axis(label='Year', scale=x_scl)
ax_ycl = bqplot.Axis(label='Total Export from State NA',
scale=y_scl,
orientation='vertical', side='left')
# lines: let's start with only zeros plotted
lines = bqplot.Lines(x = years, y = np.zeros(len(years)),
scales = {'x': x_scl, 'y': y_scl})
# (III) INTERACTIONS -- more complex
# let do something additive for all states selected
def get_data_value(change):
snames = '' # store what state names we are plotting
if change['owner'].selected is not None: # something is selected
ll = []
for i,s in enumerate(change['owner'].selected): # for all states selected
sn = state_names[s == ids][0] # grab the state name, note the [0] here -> try printing out and see!
snames += sn + ', ' # add to our label
# LINE PLOT SELECTION: we did this before
exports_in=comm.loc[comm['State'] == sn].values[0][1:]
# there are ","'s in exports we gotta take out
exports_in = np.array([exports_in[i].replace(',','') for i in range(len(exports_in))])
ll.append(bqplot.Lines(x = years, y = exports_in, scales = {'x': x_scl, 'y': y_scl}))
fig_lines.marks = ll # update export line
ax_ycl.label='Total Export from ' + snames # list of selected states
else: # we don't have states selected!
lines = bqplot.Lines(x = years, y = np.zeros(len(years)), scales = {'x': x_scl, 'y': y_scl})
fig_lines.marks = [lines]
ax_ycl.label='Total Export from NA'
states_map.observe(get_data_value,'selected')
# (IV) CREATE FIGS
fig_map = bqplot.Figure(marks=[states_map], title='US States Map Example',
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0})
fig_lines = bqplot.Figure(marks = [lines],
axes = [ax_ycl, ax_xcl])
# (V) Display
fig_map.layout.min_width='500px'
fig_lines.layout.min_width='500px'
myDashboard = ipywidgets.HBox([fig_map,fig_lines])
myDashboard
###Output
_____no_output_____
###Markdown
But what about line colors? How would you change the colors when you add more states? (One possible approach is sketched after the next cell.)
Bonus: what about changing the color of the selection of the state along with the color of the line?
###Code
bqplot.Lines?
###Output
_____no_output_____
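###Markdown
One possible approach to the line-color question is sketched below. It reuses the figures, scales and data objects from the dashboard cell above; the `line_colors` list and callback name are made up for illustration, and the `colors` trait of `Lines` is the main bqplot-specific piece used here (the `Map` mark also exposes selection-style traits -- see `bqplot.Map?` -- which could cover the bonus question).
###Code
# Hypothetical sketch: give each selected state's line its own color.
line_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

def get_data_value_colored(change):
    if change['owner'].selected is not None:
        ll = []
        for i, s in enumerate(change['owner'].selected):
            sn = state_names[s == ids][0]
            exports_in = comm.loc[comm['State'] == sn].values[0][1:]
            exports_in = np.array([v.replace(',', '') for v in exports_in]).astype('float64')
            ll.append(bqplot.Lines(x=years, y=exports_in,
                                   colors=[line_colors[i % len(line_colors)]],
                                   scales={'x': x_scl, 'y': y_scl}))
        fig_lines.marks = ll

# swap this in for the earlier callback:
# states_map.observe(get_data_value_colored, 'selected')
###Output
_____no_output_____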
###Markdown
Choose your own adventure
1. HW
2. More with maps
 * How to change line colors?
 * What about display colors? (hint - in other nb for this week)
 * What about select colors?
 * How about other data? - https://github.com/fivethirtyeight/data (librarians has state-by-state data)
###Code
bqplot.topo_load?
###Output
_____no_output_____ |
age_detector.ipynb | ###Markdown
Age Detector of a Person
By Luis Miguel García Marín
Importing libraries
We import the necessary libraries for the development of our neural network.
###Code
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.layers as L
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Loading the dataset We load our dataset of 27305 facial images of 48x48 pixels with their labels, in our case in .csv format. The dataset can be downloaded at: https://www.kaggle.com/nipunarora8/age-gender-and-ethnicity-face-data-csv
###Code
df = pd.read_csv("data/age_gender.csv")
###Output
_____no_output_____
###Markdown
Exploring the data I We look at the first 5 rows (image with labels) of the dataset.
###Code
df.head()
###Output
_____no_output_____
###Markdown
Converting the pixels of the data to a numpy array When we explore the data, we see that the pixels are expressed as strings separated by spaces. To handle this data better, we are going to convert it into an array of numbers with the help of a lambda function, which uses x.split() (to separate the elements at each space) and np.array() (to build the array, with a 32-bit float data type).
###Code
df['pixels'] = df['pixels'].apply(lambda x: np.array(x.split(), dtype="float32"))
###Output
_____no_output_____
###Markdown
Exploring the data II We see that now the pixels are a numerical array.
###Code
df.head()
###Output
_____no_output_____
###Markdown
Visualizing the data We now preview about 20 images of the dataset, accompanied by all the labels. However, we will only use the 'age' label.
###Code
plt.figure(figsize = (16,16))
for i in range(1500,1520):
plt.subplot(5, 5, (i%25)+1) # To display them on a grid
plt.xticks([]) # To leave a little margin
plt.yticks([]) # To leave a little margin
plt.grid(False)
plt.imshow(df['pixels'].iloc[i].reshape(48,48)) # We convert the image from 1D to 2D
plt.xlabel( # We add the labels
"Age:" + str(df['age'].iloc[i]) +
" Ethnicity:" + str(df['ethnicity'].iloc[i]) +
" Gender:" + str(df['gender'].iloc[i])
)
plt.show()
###Output
_____no_output_____
###Markdown
Extracting the images We obtain the pixels in x, converting them into a NumPy array so we can correctly access the .shape attribute.
###Code
x = np.array(df['pixels'].tolist())
x.shape
###Output
_____no_output_____
###Markdown
Normalizing the images We normalize the pixels so that the model can work better with floating values between 0 and 1. Knowing that the maximum value of a pixel is 255.
###Code
x = x / 255
###Output
_____no_output_____
###Markdown
Converting pixels from 1D to 3D In order to have information about the nearby pixels and to be able to perform convolution, we now reshape the pixels to go from working with one dimension to three dimensions (width in pixels, height in pixels and number of color channels). In this way, the input of our neural network will also have these dimensions.
###Code
x = x.reshape(-1,48,48,1)
x.shape
###Output
_____no_output_____
###Markdown
Extracting the labels We get the age labels in y. Since there is no predefined total number of classes (the maximum age of a person has no strict limit), we do not one-hot categorize them and instead treat age as a regression target.
###Code
y = df['age']
###Output
_____no_output_____
###Markdown
Separate the data into train and test Using the train_test_split function, we now partition the entire dataset into train and test.
###Code
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size = 0.22, random_state = 37
)
###Output
_____no_output_____
###Markdown
Building the model We build the neural network model; in this case it is a modification of the one in section 3 (Convolutional Neural Networks). The input must be 3-dimensional, as specified previously (48, 48, 1). However, we want the output to be a real number indicating the predicted age of the person in the photo, so the output will be a single unit with relu activation, since we do not have, as in earlier examples, a fixed number of categories over which to distribute probabilities.
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Conv2D,
MaxPool2D,
Flatten,
Dropout,
BatchNormalization,
)
model = Sequential([L.InputLayer(input_shape=(48, 48, 1))])
model.add(Conv2D(32, (3, 3), strides=1, padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPool2D((2, 2), strides=2, padding="same"))
model.add(Conv2D(64, (3, 3), strides=1, padding="same", activation="relu"))
model.add(MaxPool2D((2, 2), strides=2, padding="same"))
model.add(Conv2D(128, (3, 3), strides=1, padding="same", activation="relu"))
model.add(MaxPool2D((2, 2), strides=2, padding="same"))
model.add(Flatten())
model.add(Dense(units=64, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(units=1, activation="relu"))
###Output
_____no_output_____
###Markdown
Summarizing the model We summarize the model.
###Code
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 48, 48, 32) 320
_________________________________________________________________
batch_normalization (BatchNo (None, 48, 48, 32) 128
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 24, 24, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 12, 12, 64) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 12, 12, 128) 73856
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 6, 6, 128) 0
_________________________________________________________________
flatten (Flatten) (None, 4608) 0
_________________________________________________________________
dense (Dense) (None, 64) 294976
_________________________________________________________________
dropout (Dropout) (None, 64) 0
_________________________________________________________________
dense_1 (Dense) (None, 1) 65
=================================================================
Total params: 387,841
Trainable params: 387,777
Non-trainable params: 64
_________________________________________________________________
###Markdown
Compiling the model When compiling the model we use the 'adam' optimizer (a stochastic gradient descent variant), mean squared error as the loss function, and mean absolute error as the metric.
###Code
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
###Output
_____no_output_____
###Markdown
Training the model We proceed to train the model, with 20 epochs.
###Code
model.fit(
x_train, y_train, validation_data=(x_test, y_test), epochs=20, batch_size=64
)
###Output
Epoch 1/20
289/289 [==============================] - 3s 11ms/step - loss: 337.4630 - mae: 13.9079 - val_loss: 1166.3826 - val_mae: 28.6280
Epoch 2/20
289/289 [==============================] - 2s 8ms/step - loss: 193.8490 - mae: 10.4478 - val_loss: 427.5663 - val_mae: 15.8239
Epoch 3/20
289/289 [==============================] - 2s 7ms/step - loss: 165.3683 - mae: 9.5641 - val_loss: 115.5500 - val_mae: 7.8767
Epoch 4/20
289/289 [==============================] - 2s 7ms/step - loss: 149.2735 - mae: 9.0224 - val_loss: 139.9009 - val_mae: 8.3878
Epoch 5/20
289/289 [==============================] - 2s 7ms/step - loss: 141.8352 - mae: 8.7823 - val_loss: 147.1489 - val_mae: 8.5963
Epoch 6/20
289/289 [==============================] - 2s 7ms/step - loss: 135.6471 - mae: 8.5807 - val_loss: 164.0562 - val_mae: 10.1997
Epoch 7/20
289/289 [==============================] - 2s 7ms/step - loss: 126.9901 - mae: 8.3131 - val_loss: 167.6259 - val_mae: 9.0653
Epoch 8/20
289/289 [==============================] - 2s 7ms/step - loss: 125.4271 - mae: 8.2413 - val_loss: 91.6069 - val_mae: 7.1432
Epoch 9/20
289/289 [==============================] - 2s 7ms/step - loss: 115.9547 - mae: 7.9349 - val_loss: 360.8214 - val_mae: 16.3261
Epoch 10/20
289/289 [==============================] - 2s 7ms/step - loss: 114.0566 - mae: 7.8759 - val_loss: 91.5180 - val_mae: 7.0679
Epoch 11/20
289/289 [==============================] - 2s 7ms/step - loss: 109.1594 - mae: 7.6963 - val_loss: 89.7754 - val_mae: 7.0528
Epoch 12/20
289/289 [==============================] - 2s 7ms/step - loss: 108.4092 - mae: 7.6729 - val_loss: 114.1562 - val_mae: 7.5020
Epoch 13/20
289/289 [==============================] - 2s 7ms/step - loss: 101.9539 - mae: 7.4585 - val_loss: 90.7332 - val_mae: 7.0118
Epoch 14/20
289/289 [==============================] - 2s 7ms/step - loss: 99.1472 - mae: 7.3852 - val_loss: 98.3200 - val_mae: 7.3761
Epoch 15/20
289/289 [==============================] - 2s 7ms/step - loss: 97.9239 - mae: 7.2816 - val_loss: 95.5993 - val_mae: 7.1487
Epoch 16/20
289/289 [==============================] - 2s 7ms/step - loss: 96.4016 - mae: 7.2069 - val_loss: 133.9391 - val_mae: 8.3842
Epoch 17/20
289/289 [==============================] - 2s 7ms/step - loss: 93.8121 - mae: 7.1570 - val_loss: 111.9285 - val_mae: 8.0800
Epoch 18/20
289/289 [==============================] - 2s 7ms/step - loss: 91.6607 - mae: 7.0240 - val_loss: 126.1024 - val_mae: 8.0239
Epoch 19/20
289/289 [==============================] - 2s 7ms/step - loss: 89.8514 - mae: 6.9840 - val_loss: 123.5792 - val_mae: 7.9130
Epoch 20/20
289/289 [==============================] - 2s 7ms/step - loss: 86.8418 - mae: 6.8865 - val_loss: 128.4659 - val_mae: 8.0786
###Markdown
Examining the Predictions We create the make_predictions function to make separate predictions and test our model.
###Code
import matplotlib.image as mpimg
from tensorflow.keras.preprocessing import image as image_utils
from tensorflow.keras.applications.imagenet_utils import preprocess_input
def show_image(image_path):
image = mpimg.imread(image_path)
plt.imshow(image)
def make_predictions(image_path):
show_image(image_path)
image = image_utils.load_img(image_path, target_size=(48, 48), color_mode='grayscale')
image = image_utils.img_to_array(image)
image = image.reshape(1,48,48,1) # Reshaping the image
image = image / 255 # Normalizing the image
preds = model.predict(image)
return preds
###Output
_____no_output_____
###Markdown
Age detection function Finally, we create the age_detector function, which makes use of the make_predictions function and presents the result in a more legible way.
###Code
import math
def age_detector(image_path):
pred = make_predictions(image_path)
print("Your age is: ", math.floor(pred), " years old")
age_detector('data/leonor-15.jpg')
age_detector('data/anciana-90.jpg')
###Output
Your age is: 52 years old
###Markdown
Clear the Memory We clear the memory, in case we need it.
###Code
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____ |
exploratory_code/1_dataset_creation.ipynb | ###Markdown
Loading Companies...
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Let's import the data
###Code
companies = pd.read_csv('data/companies.csv')
#Having a look to the companies data structure
companies[:3]
#Let's first remove non USA companies, since they usually have a lot of missing data
companies_USA = companies[companies['country_code'] == 'USA']
#Check if there are any missing data for state_code in USA based companies
companies_USA['state_code'].unique()
#companies_USA['state_code'].value_counts()
# No nan values for state_code
#Let's maintain region and city in the dataset but probably they are not gonna be used
#companies_USA['city'].value_counts()
###Output
_____no_output_____
###Markdown
Converting categories to dummy variables (selecting top 50)
###Code
from operator import methodcaller
#Let's analyze category_list and probably expand it as dummy variables
#get a unique list of the categories
categories = list(companies_USA['category_list'].astype('str').unique())
#split each categori by |
categories = map(methodcaller("split", "|"), categories)
#flatten the list of sub categories
categories = [item for sublist in categories for item in sublist]
#total of 60k different categories
#categories
len(categories)
#We'll need to select the most important categories (that appears most of the times, and use Other for the rest)
companies_series = companies_USA['category_list'].astype('str')
categories_splitted_count = companies_series.str.split('|').apply(lambda x: pd.Series(x).value_counts()).sum()
#dummies
dummies = companies_series.str.get_dummies(sep='|')
########### Count of categories splitted first 50)###########
top50categories = list(categories_splitted_count.sort_values(ascending=False).index[:50])
##### Create a dataframe with the 50 top categories to be concatenated later to the complete dataframe
categories_df = dummies[top50categories]
categories_df = categories_df.add_prefix('Category_')
###Output
_____no_output_____
###Markdown
Comparing investments.csv and rounds.csv
###Code
#Let's start by comparing and understanding the difference between investments.csv and rounds.csv
df_investments = pd.read_csv('data/investments.csv')
df_investments = df_investments[df_investments['company_country_code'] == 'USA']
df_rounds = pd.read_csv('data/rounds.csv')
df_rounds = df_rounds[df_rounds['company_country_code'] == 'USA']
#companies_USA[companies_USA['permalink'] == '/organization/0xdata']
#df_investments[df_investments['company_permalink'] == '/organization/0xdata' ]
#df_rounds[df_rounds['company_permalink'] == '/organization/0xdata' ]
###Output
_____no_output_____
###Markdown
The difference between investments and rounds is that investments provides information about where the money came from: it records which investors paid in each round. Rounds, on the other hand, groups and totals the information by funding round.
Analyzing rounds.csv
###Code
#df_rounds
#Prepare an aggregated rounds dataframe grouped by company and funding type
rounds_agg = df_rounds.groupby(['company_permalink', 'funding_round_type'])['raised_amount_usd'].agg({'amount': [ pd.Series.sum, pd.Series.count]})
#Get available unique funding types
funding_types = list(rounds_agg.index.levels[1])
funding_types
#Prepare the dataframe where all the dummy features for each funding type will be added (number of rounds and total sum for each type)
rounds_df = companies_USA[['permalink']]
rounds_df = rounds_df.rename(columns = {'permalink':'company_permalink'})
#Iterate over each kind of funding type, and add two new features for each into the dataframe
def add_dummy_for_funding_type(df, aggr_rounds, funding_type):
funding_df = aggr_rounds.iloc[aggr_rounds.index.get_level_values('funding_round_type') == funding_type].reset_index()
funding_df.columns = funding_df.columns.droplevel()
funding_df.columns = ['company_permalink', funding_type, funding_type+'_funding_total_usd', funding_type+'_funding_rounds']
funding_df = funding_df.drop(funding_type,1)
new_df = pd.merge(df, funding_df, on='company_permalink', how='left')
new_df = new_df.fillna(0)
return new_df
#rounds_agg was generated a few steps above
for funding_type in funding_types:
rounds_df = add_dummy_for_funding_type(rounds_df, rounds_agg, funding_type)
#remove the company_permalink variable, since it's already available in the companies dataframe
rounds_df = rounds_df.drop('company_permalink', 1)
#set rounds_df to have the same index of the other dataframes
rounds_df.index = companies_USA.index
rounds_df[:3]
###Output
_____no_output_____
###Markdown
Merging 3 dataframes (companies, categories and rounds)
###Code
startups_df = pd.concat([companies_USA, categories_df, rounds_df], axis=1, ignore_index=False)
startups_df[:3]
###Output
_____no_output_____
###Markdown
Write resulting dataframe to csv file
###Code
startups_df.index = list(startups_df['permalink'])
startups_df = startups_df.drop('permalink', 1)
startups_df.to_csv('data/startups_1.csv')
#startups_df
###Output
_____no_output_____ |
examples/contrib/shale_oil_conductance_v2.8/shale_oil_conductance_v2.8.ipynb | ###Markdown
shale oil conductance calculation and permeability
Ke Wang presented a method for shale oil conductance calculation[^1]. I have also used it in some of my own work. Here I want to show how to implement this method with OpenPNM. Both of us use OpenPNM, and I have permission from Ke Wang to add this method to an OpenPNM release, if possible. Before adding it as a physics model in `hydraulic_conductance.py`, I think it is better to write it as a standalone function and have it tested by other users and developers.
how to calculate shale oil conductance
I think it is worth briefly introducing the method they use to calculate shale oil conductance before showing the code.
Afsharpoor presented an equation to calculate liquid slip flow in a network of shale noncircular nanopores[^2].
$$Q=\frac{A^{2}}{\mu L}\left(a+b L_{\mathrm{sd}}+c G+d L_{\mathrm{sd}}^{2}+e G^{2}+f L_{\mathrm{sd}} G\right) \Delta P$$
Wang considers the adsorption effect and introduces an adsorption layer and an adsorption layer viscosity:
$$A_a = A - A_b \\\mu_r = \frac{\mu(A - A_a) + \mu \alpha A_a}{A} \\\alpha = \frac{\mu_a}{\mu}$$
Finally, the equation takes the form
$$Q=\frac{A^{3}}{\left(A-A_{\mathrm{a}}+\alpha A_{\mathrm{a}}\right) \times \mu L}\left(a+b L_{\mathrm{sd}}+c G+d L_{\mathrm{sd}}^{2}+e G^{2}+f L_{\mathrm{sd}} G\right) \Delta P$$
Please find the physical meaning of the symbols in the references.
[^1]: Yang Y., Wang K., Zhang L., et al. Pore-scale simulation of shale oil flow based on pore network model. Fuel, 2019, 251: 683-692.
[^2]: Afsharpoor A., Javadpour F. Liquid slip flow in a network of shale noncircular nanopores. Fuel, 2016, 180: 580-590.
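As a rough illustration of how the final equation maps to code, a per-throat conductance function could look like the sketch below. The coefficients a-f are placeholders for the fitted values in ref [2], and this is only a sketch -- it is not the `slip_shale_conductance` function imported later in this notebook.
###Code
import numpy as np

def slip_conductance_sketch(A, A_a, G, L, L_sd, mu, alpha,
                            a=0.0, b=0.0, c=0.0, d=0.0, e=0.0, f=0.0):
    """Hydraulic conductance g such that Q = g * dP, following the equation above.

    All inputs are per-throat numpy arrays; a-f must be replaced by the
    fitted coefficients from ref [2].
    """
    poly = a + b * L_sd + c * G + d * L_sd ** 2 + e * G ** 2 + f * L_sd * G
    return A ** 3 * poly / ((A - A_a + alpha * A_a) * mu * L)
###Output
_____no_output_____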
###Code
import openpnm as op
import random
import numpy as np
from bimodal_distribution import bimodal_distribution
import matplotlib.pyplot as plt
import openpnm.models as mods
ws = op.Workspace()
ws.clear()
###Output
_____no_output_____
###Markdown
Pore number, throat number, and network size are taken from Wang's paper (ref [1]), to make sure that the pore and throat volume densities are the same.
###Code
np.random.seed(0)
pn = op.network.Delaunay(num_points=1415, shape=[4.036e-6, 4.036e-6, 4.036e-6])
pn.project.name = 'shale'
ts = random.sample(list(pn.Ts), 3158)
trim_ts = np.setdiff1d(pn.Ts, ts)
op.topotools.trim(pn, pores=[], throats=trim_ts)
###Output
_____no_output_____
###Markdown
Checking data health is veeeeeeery important. I forgot this step once and got stuck for more than a month.
###Code
# check data health and trim
health = pn.check_network_health()
#op.topotools.trim(network=pn, pores=health["trim_pores"])
op.topotools.trim(network=pn, pores=health["isolated_pores"])
# pnm geometry
geo = op.geometry.GenericGeometry(network=pn, pores=pn.pores(), throats=pn.throats())
###Output
_____no_output_____
###Markdown
Pore size is set in the range of 25-250 $\mu$m, to produce the same PSD in fig 2(a). It is packed as a function `bimodal_distribution`.
###Code
# add properties to geo
geo['pore.diameter'] = bimodal_distribution(pn.Np)
geo.add_model(propname='throat.max_size',
model=mods.misc.from_neighbor_pores,
mode='min',
prop='pore.diameter')
geo.add_model(propname='throat.diameter',
model=mods.misc.scaled,
factor=0.5,
prop='throat.max_size')
geo['throat.radius'] = geo['throat.diameter'] / 2
geo.add_model(propname='pore.cross_sectional_area',
model=mods.geometry.pore_cross_sectional_area.sphere,
pore_diameter='pore.diameter')
geo.add_model(propname='pore.volume',
model=mods.geometry.pore_volume.sphere)
geo.add_model(propname='throat.length',
model=mods.geometry.throat_length.spheres_and_cylinders,
pore_diameter='pore.diameter',
throat_diameter='throat.diameter')
geo.add_model(propname='throat.cross_sectional_area',
model=mods.geometry.throat_cross_sectional_area.cylinder,
throat_diameter='throat.diameter')
geo.add_model(propname='throat.volume',
model=mods.geometry.throat_volume.cylinder,
throat_diameter='throat.diameter',
throat_length='throat.length')
geo.add_model(propname='throat.cross_sectional_area',
model=mods.geometry.throat_cross_sectional_area.cylinder,
throat_diameter='throat.diameter')
geo.add_model(propname='throat.conduit_lengths',
model=mods.geometry.conduit_lengths.spheres_and_cylinders,
pore_diameter='pore.diameter',
throat_diameter='throat.diameter')
geo.add_model(propname='throat.surface_area',
model=mods.geometry.throat_surface_area.cylinder)
# shape factor
geo.add_model(propname='throat.perimeter',
model=mods.geometry.throat_perimeter.cylinder,
throat_diameter='throat.diameter')
geo.add_model(propname='throat.shape_factor',
model=mods.geometry.throat_capillary_shape_factor.mason_morrow,
throat_perimeter='throat.perimeter',
throat_area='throat.cross_sectional_area')
geo['pore.perimeter'] = np.pi * geo['pore.diameter']
geo['pore.shape_factor'] = geo['pore.cross_sectional_area'] / geo['pore.perimeter'] ** 2
# organic pores
geo['pore.organic'] = geo['pore.diameter'] <= 50e-9
organic_pores = [pore_index for pore_index in geo.pores()
if geo['pore.organic'][pore_index]==True]
organic_pores = np.array(organic_pores)
# organic throats
from find_organic_throats import find_organic_throats
organic_throats = find_organic_throats(organic_pores,
geo['throat.conns'], geo.Nt)
organic_thro = np.array(organic_throats[0])
organic_thro_indx = np.array(organic_throats[1])
geo['throat.organic'] = geo.tomask(throats=organic_thro_indx)
fig = plt.hist(geo['pore.diameter'], bins=25,
density=False, edgecolor='k', alpha=0.5)
#plt.savefig('shale-diameter.png')
###Output
_____no_output_____
###Markdown
According to ref [^1], organic pores are mostly less than 50 nm in diameter, so 50 nm is taken as the boundary between organic and inorganic pores. Organic throats are those whose two end pores are both organic; I wrote the function `find_organic_throats` to do this job. The viscosity ratio ($\alpha$) is set to 1.1 in organic pores and 0.9 in inorganic pores.
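The imported `find_organic_throats` is not shown in this notebook; a minimal sketch of the idea, matching how its return value is unpacked above, might look like the following (the actual helper may structure its output differently).

```python
import numpy as np

def find_organic_throats_sketch(organic_pores, conns, Nt):
    """A throat is 'organic' when BOTH of its end pores are organic.
    Returns (conns of the organic throats, their throat indices)."""
    both_organic = (np.isin(conns[:, 0], organic_pores) &
                    np.isin(conns[:, 1], organic_pores))
    idx = np.arange(Nt)[both_organic]
    return conns[both_organic], idx
```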
###Code
# physics
water = op.phases.Water(network=geo)
# viscosity settings
alpha_o = 1.1 # viscosity ratio in organic pores = \mu_a / \mu, range(1-2.5)
alpha_i = 0.9 # viscosity ratio in inorganic pores, range(0.5-1)
# initialize
water['pore.viscosity'] = 3.6e-3
water['throat.viscosity'] = 3.6e-3
water['pore.viscosity_a'] = water['pore.viscosity'] * alpha_i
water['throat.viscosity_a'] = water['throat.viscosity'] * alpha_i
###Output
_____no_output_____
###Markdown
Slip length and viscosity are different in organic and inorganic pores.
###Code
# slip length
Ls_o = 60e-9 # organic slip length, range 0-250 nm
Ls_i = 50e-9 # inorganic slip length, range 0-60 nm
# slip length of inorganic pores and throats
water['pore.l_sd'] = Ls_i / np.sqrt(geo['pore.cross_sectional_area'])
water['throat.l_sd'] = Ls_i / np.sqrt(geo['throat.cross_sectional_area'])
if organic_pores.size!=0:
# dimensionless slip length of organic pores and throats
water['pore.l_sd'][organic_pores] =\
Ls_o / np.sqrt(geo['pore.cross_sectional_area'][organic_pores])
#excute when organic pores exist.
water['pore.viscosity_a'][organic_pores] =\
water['pore.viscosity'][organic_pores] * alpha_o
if organic_thro.size!=0:
water['throat.l_sd'][organic_thro_indx] =\
Ls_o / np.sqrt(geo['throat.cross_sectional_area'][organic_thro_indx])
water['throat.viscosity_a'][organic_thro_indx] =\
water['throat.viscosity'][organic_thro_indx] * alpha_o
ha = 1.8e-9 # adsorption layer thickness
geo['pore.cross_sectional_area_a'] = geo['pore.cross_sectional_area'] - \
np.pi * (geo['pore.diameter'] / 2 - ha) ** 2
geo['throat.cross_sectional_area_a'] = geo['throat.cross_sectional_area'] - \
np.pi * (geo['throat.radius'] - ha) ** 2
###Output
_____no_output_____
###Markdown
The `if`/`else` statement below switches between the shale oil slip conductance and the classical hydraulic conductance (Hagen-Poiseuille) assumption, so the two can be compared.
###Code
slip = True
# hydraulic conductance
if slip==True:
# hydraulic conductance
from conductance_calculas import slip_shale_conductance
water['throat.hydraulic_conductance'] = slip_shale_conductance(water)
else:
water.add_model(propname='throat.hydraulic_conductance',
model=op.models.physics.hydraulic_conductance.hagen_poiseuille)
# permeability
flow = op.algorithms.StokesFlow(network=pn, phase=water)
flow.set_value_BC(pores=pn['pore.left'], values=20) # inlet
flow.set_value_BC(pores=pn['pore.right'], values=10) # outlet
flow.run()
Q1 = flow.rate(pores=geo['pore.left'])
Q2 = flow.rate(pores=geo['pore.right'])
side_len = 4.036e-6
domain_area = side_len **2
domain_length = side_len
K2 = flow.calc_effective_permeability(inlets=geo['pore.left'], outlets=geo['pore.right'],
domain_area=domain_area, domain_length=domain_length)
K1 = flow.calc_effective_permeability()
print(K2)
print(K1)
# op.io.VTK.export_data(network=pn, phases=[flow], filename=r'shale_test')
###Output
_____no_output_____ |
Babu_Aravind_Sivamani_cse519_hw3_bond_james_123456789.ipynb | ###Markdown
Setting up the project on Google Drive
###Code
from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/MyDrive/DSF-HW3/
# !git init
# !git clone https://github.com/babuaravind/
# !git clone https://babuaravind:<token>@github.com/babuaravind/Rossman-Store-Sales.git
%cd Rossman-Store-Sales/
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
/content/drive/MyDrive/DSF-HW3
/content/drive/MyDrive/DSF-HW3/Rossman-Store-Sales
###Markdown
Github Initialization Commands with SSH Auth Token
###Code
# !git remote -v
# !git config --list
# git_user = input("Enter username: ")
# from getpass import getpass
# password = getpass("Enter Password: ")
# !git remote rm origin
# !git remote add origin https://$git_user:[email protected]/babuaravind/Rossman-Store-Sales.git
# !git remote set-url origin https://<token>@github.com/babuaravind/Rossman-Store-Sales.git
# !git push origin main
# !git push origin
def git_push(message):
msg = message
!git add Babu_Aravind_Sivamani_cse519_hw3_bond_james_123456789.ipynb
!git config --global user.email "[email protected]"
!git config --global user.name "babuaravind"
!git commit -m msg
!git push origin
###Output
_____no_output_____
###Markdown
Checking if all files exist in proper order
###Code
%ls
###Output
Babu_Aravind_Sivamani_cse519_hw3_bond_james_123456789.ipynb store.csv
gitignore store.gsheet
hw3_bond_james_123456789.ipynb test.csv
knn_submission.csv train.csv
LICENSE train.gsheet
rf_submission.csv xg_submission.csv
sample_submission.csv
###Markdown
**Section 1: Library and Data Imports (Q1)**- Import your libraries and read the data into a dataframe. Print the head of the dataframe.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import sklearn.metrics as metrics
from sklearn.metrics import mean_squared_error, r2_score
import warnings
warnings.filterwarnings('ignore')
#fill in dtypes dict for efficient memory utilization
dtypes = {}
df = pd.read_csv('train.csv')
sf = pd.read_csv('store.csv')
###Output
_____no_output_____
###Markdown
Answer 1) Combining the store and train.csv dataframes into a single merged dataframe.
###Code
print(df.shape, sf.shape)
print("Train Dataframe \n", df.head(), '\n -----------------------------------------------------')
print("Store Dataframe \n", sf.head())
merged_frame = sf.merge(df, on = "Store", how = "inner")
merged_frame.head()
###Output
_____no_output_____
###Markdown
Answer 1. The above dataframe is a merged dataframe of store.csv and train.csv
###Code
#finding out the total sales value for each store
df['total_sales'] = df.groupby(["Store"])["Sales"].transform(sum)
###Output
_____no_output_____
###Markdown
**Section 2: Effect of Holidays (Q2)**
###Code
sales_christmas = df[df['StateHoliday'] == 'c'].sum()['Sales']
sales_easter = df[df['StateHoliday'] == 'b'].sum()['Sales']
sales_public = df[df['StateHoliday'] == 'a'].sum()['Sales']
total_sales_holiday = sales_christmas + sales_easter
total_sales_workday = df[df['StateHoliday'] == 0].sum()['Sales']
total_sales = total_sales_holiday + total_sales_holiday
# print(sales_christmas, sales_easter, sales_public, total_sales_holiday, total_sales_workday, total_sales)
#df[df['StateHoliday'] == 'b']['Date'].value_counts()
#sales_christmas = df[df['StateHoliday'] == 'c']['Sales'].sum()
#before_christmas = df[(df['StateHoliday'] == 'c') & (df['Sales'] > 0) & (df['Date'] == '2013-12-25')]['Sales'].sum()
#print(before_christmas)
#df[[df['StateHoliday'] == 'a'] & [df['Date'] == '2015-12-25']]
sales_before_easter = 0
# Sales 1 Day before Easter
sales_before_easter += df[df['Date'] == '2014-04-17']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2013-03-31']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2015-04-02']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2015-04-05']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2013-03-28']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2014-04-20']['Sales'].sum()
sales_easter_one_day = sales_before_easter
# Sales 2 Days before Easter
sales_before_easter += df[df['Date'] == '2014-04-16']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2013-03-30']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2015-04-01']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2015-04-04']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2013-03-27']['Sales'].sum()
sales_before_easter += df[df['Date'] == '2014-04-19']['Sales'].sum()
sales_before_christmas = 0
# Sales 1 Day before Christmas
sales_before_christmas += df[df['Date'] == '2013-12-24']['Sales'].sum()
sales_before_christmas += df[df['Date'] == '2014-12-24']['Sales'].sum()
sales_christmas_one_day = sales_before_christmas
# Sales 2 Day before Christmas
sales_before_christmas += df[df['Date'] == '2013-12-23']['Sales'].sum()
sales_before_christmas += df[df['Date'] == '2014-12-23']['Sales'].sum()
total_sales_before_holiday = sales_before_christmas + sales_before_easter
explode = (0.2, 0.2)
plt.pie([sales_christmas_one_day, sales_christmas], startangle=35, radius=1.9, colors=["green", "red"], shadow=True, explode=explode, textprops = {"fontsize": 20}, autopct = "%0.2f%%")
plt.title('Total Sales: 1 Day before vs Christmas Day', bbox={'facecolor':'0.8', 'pad':4})
plt.show()
plt.pie([sales_before_christmas, sales_christmas], startangle=21, radius=1.9, colors=["green", "red"], shadow=True, explode=explode, textprops = {"fontsize": 20}, autopct = "%0.2f%%")
plt.title('Total Sales: 2 Days before vs Christmas Day', bbox={'facecolor':'0.8', 'pad':4})
plt.show()
###Output
_____no_output_____
###Markdown
Answer 2.A) Even from just one day of sales data before Christmas, it is evident that more people shop before the holiday than on Christmas Day itself. When we widen the "before holiday" window to two days, this trend grows even stronger. Red represents sales on Christmas Day; green represents sales one or two days before Christmas.
###Code
explode = (0.2, 0.2)
plt.pie([sales_easter_one_day, sales_easter], startangle=35, radius=1.9, colors=["lightblue", "violet"], shadow=True, explode=explode, textprops = {"fontsize": 20}, autopct = "%0.2f%%")
plt.title('Total Sales: 1 Day before vs Easter', bbox={'facecolor':'0.8', 'pad':4})
plt.show()
explode = (0.2, 0.1)
plt.pie([sales_before_easter, sales_easter], startangle=15, radius=1.9, colors=["lightblue", "magenta"], shadow=True, explode=explode, textprops = {"fontsize": 20}, autopct = "%0.2f%%")
plt.title('Total Sales: 2 Days before vs Easter', bbox={'facecolor':'0.8', 'pad':4})
plt.show()
###Output
_____no_output_____
###Markdown
We see a similar trend for Easter as well: more people shop on the day before the holiday than on the holiday itself.
###Code
holiday_fig = plt.figure()
ax = holiday_fig.add_axes([0,0,1,1])
ax.set_title('Total Sales: Holiday vs 2 Days before Holiday')
langs = ['Holiday', 'Before Holiday']
x_val = [total_sales_holiday, total_sales_before_holiday]
ax.bar(langs,x_val)
plt.show()
###Output
_____no_output_____
###Markdown
As the figure above shows, overwhelmingly more purchases are made before the holidays than on them.
###Code
mean_christmas = df[df['StateHoliday'] == 'c'].mean()['Sales']
mean_easter = df[df['StateHoliday'] == 'b'].mean()['Sales']
mean_public = df[df['StateHoliday'] == 'a'].mean()['Sales']
sale_fig = plt.figure()
ax = sale_fig.add_axes([0,0,1,1])
ax.set_title('Holidays affecting Sale')
langs = ['Christmas', 'Easter', 'Other Public Holidays']
x_val = [mean_christmas, mean_easter, mean_public]
ax.bar(langs, x_val)
plt.show()
###Output
_____no_output_____
###Markdown
Answer 2B) Looking at the above plot, we can see that the more important the holiday, the lower the stores' average sales on that day.
###Code
mean_christmas = df[df['StateHoliday'] == 'c'].mean()['Sales']
mean_easter = df[df['StateHoliday'] == 'b'].mean()['Sales']
mean_public = df[df['StateHoliday'] == 'a'].mean()['Sales']
mean_non_holiday = df[df['StateHoliday'] == 0].mean()['Sales']
sale_fig = plt.figure()
ax = sale_fig.add_axes([0,0,1,1])
ax.set_title('Holidays affecting Sale')
langs = ['Christmas', 'Easter', 'Other Public Holidays', 'Non-Holiday']
x_val = [mean_christmas, mean_easter, mean_public, mean_non_holiday]
ax.bar(langs, x_val)
plt.show()
###Output
_____no_output_____
###Markdown
This is even more evident when we compare average sales on holidays and non-holidays: items do not sell well on holidays (holidays affect sales negatively). **Section 3: Most and Least Selling Stores (Q3a & Q3b)**
###Code
df['total_sales'] = df.groupby(["Store"])["Sales"].transform(sum)
# Adding column to dataframe that stores a comulative count of days a store has oepened
# df['days_opened'] = df.groupby(["Sales", "Open"])["Open"].transform(sum)
# df['days_opened'] = df.groupby(["Store"])["Open"].count()
# df[(df.Sales > 0) & (df.Open == 1) & (df.days_opened >=180)]['Sales']
#tf = pd.DataFrame()
tf = df.groupby(["Store"]).Sales.sum().reset_index()
tf['open_count'] = df.groupby(["Store", "Open"]).Open.count()[:,1]
###Output
_____no_output_____
###Markdown
List the IDs of the stores with the highest and lowest cumulative sales
###Code
tf['Store'] = df.groupby(["Store"]).Sales.sum()
tf['open_count'] = df.groupby(["Store", "Open"]).Open.count()[:,1]
top_stores = (tf[(tf['open_count'] > 179)].Sales.nlargest(n=5).index + 1).tolist()
bot_stores = (tf[(tf['open_count'] > 179)].Sales.nsmallest(n=5).index + 1).tolist()
print("IDs of top five stores with the highest cumulative sales: ", top_stores)
print('----------------------------------------------')
print("IDs of top five stores with the highest cumulative sales: ", bot_stores)
tsf_id = top_stores.pop()
tsf = df[(df['Store'] == tsf_id)]
tsf.drop(['Store', 'StateHoliday', 'SchoolHoliday', 'Open', 'Promo', 'total_sales'], axis=1, inplace=True)
for store in top_stores:
temp_df = df[(df['Store'] == store)]
temp_df.drop(['Store', 'DayOfWeek', 'StateHoliday', 'SchoolHoliday', 'Open', 'Promo', 'total_sales'], axis=1, inplace=True)
tsf = tsf.set_index('Date').add(temp_df.set_index('Date'), fill_value=0).reset_index()
tsf = tsf[::-1]
tsf = tsf.iloc[6: , :]
tsf = tsf.iloc[:-5]
#BSF
bsf_id = bot_stores.pop()
bsf = df[(df['Store'] == bsf_id)]
bsf.drop(['Store', 'StateHoliday', 'SchoolHoliday', 'Open', 'Promo', 'total_sales'], axis=1, inplace=True)
for store in bot_stores:
temp_df2 = df[(df['Store'] == store)]
temp_df2.drop(['Store', 'DayOfWeek', 'StateHoliday', 'SchoolHoliday', 'Open', 'Promo', 'total_sales'], axis=1, inplace=True)
bsf = bsf.set_index('Date').add(temp_df2.set_index('Date'), fill_value=0).reset_index()
#bsf = bsf[::-1]
bsf = bsf.iloc[6: , :]
bsf = bsf.iloc[:-5]
## TSF Weekly
weekly_sales = []
weekly_sum = 0
counter = 0
for i in range(tsf.shape[0] + 1):
temp = tsf.iloc[i-1:i, -1]
weekly_sum += temp.median()
#print(counter, i, temp.max(), weekly_sum)
counter+=1
if counter > 7:
weekly_sales.append(weekly_sum)
counter = 1
weekly_sum = 0
## BSF Weekly
weekly_sales_bot = []
weekly_sum_bot = 0
counter = 0
for i in range(bsf.shape[0] + 1):
temp_bot = bsf.iloc[i-1:i, -1]
weekly_sum_bot += temp_bot.median()
#print(counter, i, temp_bot.max(), weekly_sum)
counter+=1
if counter > 7:
weekly_sales_bot.append(weekly_sum_bot)
counter = 1
weekly_sum_bot = 0
weekly_sales_bot.pop(0)
weekly_sales.pop(0)
from matplotlib.pyplot import figure
figure(figsize=(25,10), dpi=80)
x = [*range(1, 133, 1)]
plt.bar(np.array(x)-0.15, weekly_sales, width = 0.3)
plt.bar(np.array(x)+0.15, weekly_sales_bot, width = 0.3)
plt.xticks(range(min(x), max(x)+1))
plt.xlabel("The Week Number")
plt.ylabel('Weekly Sales')
plt.show()
###Output
_____no_output_____
###Markdown
The above plot shows weekly sales over time for the two sets of stores: the top-5 stores are shown in blue and the bottom-5 stores in orange.
###Code
figure(figsize=(25,10), dpi=80)
plt.bar(np.array(x)-0.15, np.log2(weekly_sales), width = 0.3)
plt.bar(np.array(x)+0.15, np.log2(weekly_sales_bot), width = 0.3)
plt.xticks(range(min(x), max(x)+1))
plt.xlabel("The Week Number")
plt.ylabel('Weekly Sales')
plt.show()
###Output
_____no_output_____
###Markdown
The same plot is rescaled for visual clarity by taking the log of the weekly sales. The patterns turn out to be quite similar across weeks: a rise in sales for the top stores almost always comes with a rise in sales for the low-end stores as well. **Section 4: Closest Competitor: Distance and Age (Q4a & Q4b)**
###Code
unqiue_total_sales_series = df['total_sales'].unique()
sf['total_sales'] = unqiue_total_sales_series.tolist()
sf['weekly_sales_avg'] = (sf['total_sales'] / 134).astype(int)
stores_unique_id = [*range(1, 1116, 1)]
figure(figsize=(25,10), dpi=80)
plt.scatter(sf['CompetitionDistance'],sf['weekly_sales_avg'])
plt.xlabel('Distance')
plt.ylabel('Weekly Sales')
plt.show()
###Output
_____no_output_____
###Markdown
Answer 4A) Looking at the spread, we immediately notice a cluster where competition distance appears to make little difference. Looking more closely at the tail end of the distance axis, however, stores with farther competitors tend to have higher weekly sales than the majority of clustered stores. The trend is not drastic, but it does exist. **Section 5: Pearson Correlation of Features (Q5)**
###Code
from numpy import cov
import seaborn as sns
from scipy.stats import pearsonr
from scipy.stats import spearmanr
cf = df.copy()
cf.drop(['Date', 'StateHoliday', 'Store', 'Open'], axis=1, inplace=True)
dayofweek_ = df.iloc[ :, 1]
sales_ = df.iloc[ :, 3]
customers_ = df.iloc[ :, 4]
promo_ = df.iloc[ :, 6]
school_holiday_ = df.iloc[ :, 8]
##Pearson
print('LIST ALL POSSIBLE FEATURE PAIRS')
print('-----------------------------')
print('Pearsons')
print('-----------------------------')
#School - 4
sch_day, _ = pearsonr(school_holiday_, dayofweek_)
print('school_holiday and DayofWeek Pearsons correlation: %.3f' % sch_day)
sch_sal, _ = pearsonr(school_holiday_, sales_)
print('school_holiday and Sales Pearsons correlation: %.3f' % sch_sal)
sch_cus, _ = pearsonr(school_holiday_, customers_)
print('school_holiday and Customers Pearsons correlation: %.3f' % sch_cus)
sch_pro, _ = pearsonr(school_holiday_, promo_)
print('school_holiday and Promos Pearsons correlation: %.3f' % sch_pro)
#Promo - 3
pro_day, _ = pearsonr(promo_, dayofweek_)
print('Promos and DayofWeek Pearsons correlation: %.3f' % pro_day)
pro_sal, _ = pearsonr(promo_, sales_)
print('Promos and Sales Pearsons correlation: %.3f' % pro_sal)
pro_cus, _ = pearsonr(promo_, customers_)
print('Promos and Customer Pearsons correlation: %.3f' % pro_cus)
#Customer - 2
cus_day, _ = pearsonr(customers_, dayofweek_)
print('Customer and DayofWeek Pearsons correlation: %.3f' % cus_day)
cus_sal, _ = pearsonr(customers_, sales_)
print('Customer and Sales Pearsons correlation: %.3f' % cus_sal)
#Sales - 1
sal_day, _ = pearsonr(sales_, dayofweek_)
print('Sales and DayofWeek Pearsons correlation: %.3f' % sal_day)
#print(max(heat))
print('-----------------------------')
print('Spearman Coef')
print('-----------------------------')
#Spearman
#School - 4
sch_dayS, _ = spearmanr(school_holiday_, dayofweek_)
print('school_holiday and DayofWeek Pearsons correlation: %.3f' % sch_dayS)
sch_salS, _ = spearmanr(school_holiday_, sales_)
print('school_holiday and Sales Pearsons correlation: %.3f' % sch_salS)
sch_cusS, _ = spearmanr(school_holiday_, customers_)
print('school_holiday and Customers Pearsons correlation: %.3f' % sch_cusS)
sch_proS, _ = spearmanr(school_holiday_, promo_)
print('school_holiday and Promos Pearsons correlation: %.3f' % sch_proS)
#Promo - 3
pro_dayS, _ = spearmanr(promo_, dayofweek_)
print('Promos and DayofWeek Pearsons correlation: %.3f' % pro_dayS)
pro_salS, _ = spearmanr(promo_, sales_)
print('Promos and Sales Pearsons correlation: %.3f' % pro_salS)
pro_cusS, _ = spearmanr(promo_, customers_)
print('Promos and Customer Pearsons correlation: %.3f' % pro_cusS)
#Customer - 2
cus_dayS, _ = spearmanr(customers_, dayofweek_)
print('Customer and DayofWeek Pearsons correlation: %.3f' % cus_dayS)
cus_salS, _ = spearmanr(customers_, sales_)
print('Customer and Sales Pearsons correlation: %.3f' % cus_salS)
#Sales - 1
sal_dayS, _ = spearmanr(sales_, dayofweek_)
print('Sales and DayofWeek Pearsons correlation: %.3f' % sal_dayS)
print("*********")
print('The above statistic is just for checking/testing purposes, Actual Answer is listed in the next cell:')
###Output
LIST ALL POSSIBLE FEATURE PAIRS
-----------------------------
Pearsons
-----------------------------
school_holiday and DayofWeek Pearsons correlation: -0.205
school_holiday and Sales Pearsons correlation: 0.085
school_holiday and Customers Pearsons correlation: 0.072
school_holiday and Promos Pearsons correlation: 0.067
Promos and DayofWeek Pearsons correlation: -0.393
Promos and Sales Pearsons correlation: 0.452
Promos and Customer Pearsons correlation: 0.316
Customer and DayofWeek Pearsons correlation: -0.386
Customer and Sales Pearsons correlation: 0.895
Sales and DayofWeek Pearsons correlation: -0.462
-----------------------------
Spearman Coef
-----------------------------
school_holiday and DayofWeek Pearsons correlation: -0.205
school_holiday and Sales Pearsons correlation: 0.083
school_holiday and Customers Pearsons correlation: 0.079
school_holiday and Promos Pearsons correlation: 0.067
Promos and DayofWeek Pearsons correlation: -0.393
Promos and Sales Pearsons correlation: 0.490
Promos and Customer Pearsons correlation: 0.377
Customer and DayofWeek Pearsons correlation: -0.431
Customer and Sales Pearsons correlation: 0.903
Sales and DayofWeek Pearsons correlation: -0.451
*********
The above statistic is just for checking/testing purposes, Actual Answer is listed in the next cell:
###Markdown
Answer 5) Compute the Pearson and spearman Corr
###Code
corr_df = df.copy()
corr_df.drop(['Store', 'Date', 'StateHoliday','total_sales', 'SchoolHoliday'], axis=1, inplace=True)
print("Pearson's Correlation: ")
pear_corr = corr_df.corr(method = 'pearson')
print(pear_corr)
sns.heatmap(pear_corr)
print("Spearman's Correlation: ")
spear_corr = corr_df.corr(method = 'spearman')
print(spear_corr)
sns.heatmap(spear_corr)
###Output
Spearman's Correlation:
DayOfWeek Sales Customers Open Promo
DayOfWeek 1.000000 -0.450717 -0.430877 -0.528344 -0.392785
Sales -0.450717 1.000000 0.903353 0.652013 0.489565
Customers -0.430877 0.903353 1.000000 0.652015 0.377257
Open -0.528344 0.652013 0.652015 1.000000 0.295042
Promo -0.392785 0.489565 0.377257 0.295042 1.000000
###Markdown
For most of the variable relationships, there is no drastic difference between the two correlation metrics on this dataset. The feature pairs with the strongest correlation are: 1) Sales and Customers, with a Spearman correlation of 0.90; 2) Sales and Open, with a Spearman correlation of 0.65; 3) Customers and Open, with a similar Spearman score of 0.65. The feature "Customers" correlates best with "Sales". **Section 6: Permutation Testing (Q6)**
###Code
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
y_sale = np.asarray(df.loc[:, 'Sales'].copy())
## Single Var linear model X = Customers
X_customers = np.asarray(df.loc[:, 'Customers'].copy())
print(y_sale.shape, X_customers.shape)
X_train, X_test, y_train, y_test = train_test_split(X_customers, y_sale, test_size=0.2)
X_train= X_train.reshape(-1, 1)
y_train= y_train.reshape(-1, 1)
X_test = X_test.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
customer_lin_reg = LinearRegression()
customer_lin_reg.fit(X_train, y_train)
y_pred = customer_lin_reg.predict(X_test)
#print(y_pred.shape, y_test.shape, X_test.shape, X_train.shape)
#print('Coefficients: \n', customer_lin_reg.coef_)
# The mean squared error
#print("RMSE: ", mean_squared_error(y_test, y_pred, squared=False))
# The coefficient of determination: 1 is perfect prediction
#print("Customer Var: R2: ", r2_score(y_test, y_pred))
# ------
cv = StratifiedKFold(2, shuffle=True, random_state=0)
score_cust, perm_scores_cust, pvalue_cust = permutation_test_score(
customer_lin_reg, X_test, y_test, scoring="neg_mean_squared_log_error", cv=cv, n_permutations=100)
print(perm_scores_cust)
print('-------------------------')
print("Customer Var: Score", score_cust**1/2)
print("Customer Var P Value:", pvalue_cust)
## Single Var linear Regression X = Promo
X_promos = np.asarray(df.loc[:, 'Promo'].copy())
print(y_sale.shape, X_promos.shape)
X_train, X_test, y_train, y_test = train_test_split(X_promos, y_sale, test_size=0.2)
print(y_pred.shape, y_test.shape, X_test.shape, X_train.shape)
X_train= X_train.reshape(-1, 1)
y_train= y_train.reshape(-1, 1)
X_test = X_test.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
promos_lin_reg = LinearRegression()
promos_lin_reg.fit(X_train, y_train)
y_pred = promos_lin_reg.predict(X_test)
#print(y_pred.shape, y_test.shape, X_test.shape, X_train.shape)
#print('Promos: Coefficients: \n', customer_lin_reg.coef_)
# The mean squared error
print("Promos: RMSE: ", mean_squared_error(y_test, y_pred, squared=False))
# The coefficient of determination: 1 is perfect prediction
#print("Promos R2: ", r2_score(y_test, y_pred))
cv = StratifiedKFold(2, shuffle=True, random_state=0)
score_promo, perm_scores_promo, pvalue_promo = permutation_test_score(
promos_lin_reg, X_test, y_test, scoring="neg_mean_squared_log_error", cv=cv, n_permutations=100)
print(perm_scores_promo)
print('-------------------------')
print("Promos Var: Score", score_promo**1/2)
print("Promos Var P Value:", pvalue_promo)
## Single Var linear Regression X = DayOfWeek
X_week = np.asarray(df.loc[:, 'DayOfWeek'].copy())
X_train, X_test, y_train, y_test = train_test_split(X_week, y_sale, test_size=0.2)
X_train= X_train.reshape(-1, 1)
y_train= y_train.reshape(-1, 1)
X_test = X_test.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
week_lin_reg = LinearRegression()
week_lin_reg.fit(X_train, y_train)
y_pred = week_lin_reg.predict(X_test)
print(y_pred.shape, y_test.shape, X_test.shape, X_train.shape)
print('Week: Coefficients: \n', week_lin_reg.coef_)
# The mean squared error
print("Week: RMSE: ", mean_squared_error(y_test, y_pred, squared=False))
# The coefficient of determination: 1 is perfect prediction
print("Week R2: ", r2_score(y_test, y_pred))
cv = StratifiedKFold(2, shuffle=True, random_state=0)
score_week, perm_scores_week, pvalue_week = permutation_test_score(
week_lin_reg, X_test, y_test, scoring="neg_mean_squared_log_error", cv=cv, n_permutations=100)
print(perm_scores_week)
print('-------------------------')
print("DayOfWeek Var: Score", score_week**1/2)
print("DayOfWeek Var P Value:", pvalue_week)
###Output
(203442, 1) (203442, 1) (203442, 1) (813767, 1)
Week: Coefficients:
[[-891.43580264]]
Week: RMSE: 3417.4595149002416
Week R2: 0.2123347836554197
[-12.90809448 -12.9080907 -12.90811308 -12.90810444 -12.90810692
-12.90806641 -12.90809217 -12.90809957 -12.90809795 -12.9081309
-12.9081022 -12.90805943 -12.90803952 -12.90808752 -12.90808876
-12.90810294 -12.90821792 -12.90812896 -12.90800829 -12.90814111
-12.90829587 -12.90808291 -12.90807184 -12.90808693 -12.90812623
-12.90809163 -12.90809946 -12.90814466 -12.90807444 -12.90809243
-12.90805008 -12.908099 -12.90813488 -12.90812659 -12.9082245
-12.90812113 -12.90812349 -12.908094 -12.90810153 -12.90810102
-12.9080971 -12.90808948 -12.90810182 -12.90812962 -12.90824729
-12.90807565 -12.90809173 -12.90814806 -12.90811342 -12.90801731
-12.9081057 -12.90809142 -12.90797785 -12.90810833 -12.90812627
-12.90812373 -12.90811087 -12.90813002 -12.90809288 -12.90811265
-12.90809236 -12.90810062 -12.90810836 -12.90810539 -12.90812705
-12.90814154 -12.9080665 -12.90812051 -12.90812218 -12.90809096
-12.90807747 -12.90810941 -12.90805477 -12.90811901 -12.90810789
-12.90811141 -12.90810145 -12.90810338 -12.90810596 -12.90813568
-12.90805732 -12.90812217 -12.90811447 -12.90811519 -12.90809991
-12.90808125 -12.90808563 -12.9081035 -12.9081234 -12.90811496
-12.90811432 -12.90808131 -12.90813242 -12.90810539 -12.90816273
-12.90810256 -12.9080987 -12.9081356 -12.90800848 -12.90809265]
-------------------------
DayOfWeek Var: Score -5.764132739303347
DayOfWeek Var P Value: 0.009900990099009901
###Markdown
Answer 6) (i) Customer variable - likely good. (ii) DayOfWeek - meaningless. (iii) Promo - random. Compare how the model ranks with each of the 3 variables against Sales:
###Code
print(' ---------------------------------------')
print(' Score(RMSE-Log) P-Value ')
print('Customer-Sales ', round(score_cust**1/2, 2),"% ", round(pvalue_cust, 22),"%")
print('DayOfWeek-Sales ', round(score_week**1/2, 2),"% ", round(pvalue_week, 22),"%")
print('Promos-Sales ', round(score_promo**1/2, 2), "% ", round(pvalue_promo, 22),"%")
print(' ---------------------------------------')
###Output
---------------------------------------
Score(RMSE-Log) P-Value
Customer-Sales -4.17 % 0.009900990099009901 %
DayOfWeek-Sales -5.76 % 0.009900990099009901 %
Promos-Sales -6.12 % 0.009900990099009901 %
---------------------------------------
###Markdown
**Section 7: Interesting Findings (Q7)** Interesting finding (i): It turns out that almost all store types have roughly the same average competition distance. Type B stores, however, are generally located a little farther away from their competitors.
###Code
bars = sf.groupby('StoreType')['CompetitionDistance'].mean()
barplotter = bars.plot.bar(x="Store Type", y="Competition Distance", rot=0, figsize=(14,5))
bars2 = sf.groupby('StoreType')['total_sales'].mean()
barplotter2 = bars2.plot.bar(x="Store Type", y="total sale", rot=0, figsize=(14,5))
###Output
_____no_output_____
###Markdown
(ii) Interesting finding: In the plot below, very few or almost no competitor stores open during January. This is very likely because competitors do not see fit to open branches right after Christmas/New Year. Each marker on the graph represents a competitor opening at a certain distance; the ticks 1-12 represent the months January-December.
###Code
figure(figsize=(25,10), dpi=80)
plt.scatter(sf['CompetitionOpenSinceMonth'],sf['CompetitionDistance'])
plt.xlabel('Competition Open Since (Month)')
plt.ylabel('Competition Distance')
plt.show()
###Output
_____no_output_____
###Markdown
(iii) Interesting finding (line plot): The number of customers tends to stay clustered around a sweet spot when promos are offered. For these stores, promos tend to increase customer counts only marginally rather than being a driving factor for people to shop during that time.
###Code
figure(figsize=(25,10), dpi=80)
plt.plot(df['Customers'].head(5500), df['Promo'].head(5500), '-o')
plt.show()
###Output
_____no_output_____
###Markdown
**Section 8: Train Test Split and Modelling (Q8)** Answer 8) Creating a training and validation set. (i) The test set is taken as all data from May 2015 to July 2015; the remainder is used as the training set.
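Equivalently, the same split can be expressed directly on the `Date` column, which does not depend on the row order of train.csv; a short sketch (the `_alt` variable names are mine):

```python
# Date-based alternative to the positional split in the next cell
dates = pd.to_datetime(df['Date'])
test_mask = (dates >= '2015-05-01') & (dates <= '2015-07-31')
test_set_alt = df[test_mask]      # May-July 2015
train_set_alt = df[~test_mask]    # everything earlier
```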
###Code
test_index = (1115*31*3) -1115
test_set = df.iloc[0:test_index, :]
test_set = test_set[::-1]
print(test_set)
#Rest of the data will be considered as training data.
train_set = df.iloc[test_index:, :]
train_set = train_set[::-1]
print(train_set)
###Output
Store DayOfWeek Date ... StateHoliday SchoolHoliday total_sales
1017208 1115 2 2013-01-01 ... a 1 4922229
1017207 1114 2 2013-01-01 ... a 1 16202585
1017206 1113 2 2013-01-01 ... a 1 5196242
1017205 1112 2 2013-01-01 ... a 1 7974294
1017204 1111 2 2013-01-01 ... a 1 4091076
... ... ... ... ... ... ... ...
102584 5 4 2015-04-30 ... 0 0 3642818
102583 4 4 2015-04-30 ... 0 0 7556507
102582 3 4 2015-04-30 ... 0 0 5408261
102581 2 4 2015-04-30 ... 0 0 3883858
102580 1 4 2015-04-30 ... 0 0 3716854
[914629 rows x 10 columns]
###Markdown
Data Preprocessing
###Code
from sklearn.preprocessing import LabelEncoder
basic_model_y = train_set.loc[:, 'Sales'].copy()
le = LabelEncoder()
basic_model_X = train_set.copy()
basic_model_X['StateHoliday'] = basic_model_X['StateHoliday'].astype(str)
#basic_model_X['StateHoliday'] = basic_model_X['StateHoliday'].astype('category')
#basic_model_X.loc[:, ["StateHoliday"]] = le.fit_transform(basic_model_X.loc[:, ["StateHoliday"]])
basic_model_X.loc[:, ["DayOfWeek"]] = le.fit_transform(basic_model_X.loc[:, ["DayOfWeek"]])
basic_model_X.drop(['Store', 'Date', 'Sales', 'total_sales', 'Customers', 'StateHoliday'], axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Building a basic linear regression model as a starting point (not counted as one of the two models).
###Code
from sklearn.ensemble import RandomForestRegressor
import xgboost; print(xgboost.__version__)
from xgboost import XGBRegressor
from sklearn.metrics import accuracy_score
from sklearn import neighbors
X_train, X_test, y_train, y_test = train_test_split(basic_model_X, basic_model_y, test_size=0.2)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred = lin_reg.predict(X_test)
print('Coefficients: \n', lin_reg.coef_)
# The mean squared error
print("Linear Regression RMSE Score: ", mean_squared_error(y_test, y_pred, squared=False))
# The coefficient of determination: 1 is perfect prediction
print("Linear Regression R2 Score : ", r2_score(y_test, y_pred))
###Output
0.90
Coefficients:
[-103.15553028 5829.09091857 2086.12541704 80.40768542]
Linear Regression RMSE Score: 2634.494440436304
Linear Regression R2 Score : 0.5303840648483539
###Markdown
Trying out KNearestNeighbors with several K values (not counted as one of the two prediction models)
###Code
rmse_list = []
for K in range(5): #11
K=K+2
model = neighbors.KNeighborsRegressor(n_neighbors = K, leaf_size = 55)
model.fit(X_train, y_train)
knn_pred = model.predict(X_test)
error = mean_squared_error(y_test, knn_pred, squared=False)
rmse_list.append(error)
print('RMSE val for K=', K, ' is ', error)
print("R2: ", r2_score(y_test, knn_pred))
curve = pd.DataFrame(rmse_list)
curve.plot()
###Output
RMSE val for K= 2 is 4254.104424675183
R2: -0.22451622721496078
RMSE val for K= 3 is 3782.0576245282846
R2: 0.032157842124608815
RMSE val for K= 4 is 3482.3616811000115
R2: 0.17946709291604268
RMSE val for K= 5 is 3277.8381213042057
R2: 0.27301867306431815
RMSE val for K= 6 is 3140.9306198480595
R2: 0.33247899084811217
###Markdown
Model 1 - XGBoost Regressor. Approach: we take a brute-force approach to find good values for the parameters n_estimators and max_depth, stopping once they no longer give significant improvements.
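As an aside, the same search could be run more systematically with scikit-learn's `GridSearchCV`; a minimal sketch with hypothetical parameter ranges is shown below. The brute-force loop in the next cell is what this notebook actually uses.

```python
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor

# hypothetical grid - the ranges are illustrative, not tuned values
param_grid = {'n_estimators': [25, 50, 85, 150],
              'max_depth': [10, 20, 40]}
search = GridSearchCV(XGBRegressor(), param_grid, scoring='r2', cv=3)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)
```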
###Code
N = 5
M = 8
max = 0
for i in range(30): #30
N += 5
M+=2
xgmodel=xgboost.XGBRegressor(n_estimators=N, max_depth=M)
xgmodel.fit(X_train, y_train)
xg_pred = xgmodel.predict(X_test)
accuracy_test = r2_score(y_test, xg_pred)
if accuracy_test > max:
max = accuracy_test
print('--------------------')
print(i," N and M = ", N, " ", M, "Acc: ", accuracy_test)
print('--------------------')
print("Highest Acc", max)
print(xg_model.get_params())
###Output
{'base_score': 0.5, 'booster': 'gbtree', 'colsample_bylevel': 1, 'colsample_bynode': 1, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.1, 'max_delta_step': 0, 'max_depth': 40, 'min_child_weight': 1, 'missing': None, 'n_estimators': 85, 'n_jobs': 1, 'nthread': None, 'objective': 'reg:linear', 'random_state': 0, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'seed': None, 'silent': None, 'subsample': 1, 'verbosity': 1}
###Markdown
Hyperparameter explanation (XGBoost):
- max_depth: the maximum depth of the trees.
- gamma: the minimum loss reduction needed to make a split.
- min_child_weight: the minimum sum of observation weights required in a child.
- n_estimators: the number of trees the model builds before aggregating their predictions; according to our testing there is an optimal value.

Model 2 - RandomForest Regressor. Approach: we take a similar approach to find good values for the n_estimators and max_depth parameters, and again stop the loop once they no longer give significant accuracy improvements. Random Forest is similar to decision trees and works well with categorical values; the parameters we tuned are max_depth and n_estimators. Hyperparameters explained for the RandomForestRegressor:
- n_estimators: the number of trees the model builds before averaging their predictions; according to our testing there is an optimal value.
- max_depth: determines the actual depth of each generated tree.
###Code
from scipy import stats
N = 50
M = 10
rg_max = 0
for K in range(11):
N += 3
M += 1
rf = RandomForestRegressor(n_estimators=N, max_depth=13)
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
rf_r2 = r2_score(rf_pred, y_test)
if rf_r2 > rg_max:
rg_max = rf_r2
rf_rmse = mean_squared_error(rf_pred, y_test, squared=False)
print('--------------------')
print("N and M = ", N , " and ", M)
print("Acc and Error = ", rf_r2, " ", rf_rmse)
print(stats.ttest_ind(rf_pred, y_test))
print('--------------------')
print("Highest Acc:", rg_max)
###Output
--------------------
N and M = 53 and 11
Acc and Error = 0.19364069623977576 2570.7420593438405
Ttest_indResult(statistic=0.7780730282345326, pvalue=0.43652646543604934)
--------------------
--------------------
N and M = 56 and 12
Acc and Error = 0.1938435275582744 2570.7610095019018
Ttest_indResult(statistic=0.8348715437027235, pvalue=0.40379058351300356)
--------------------
--------------------
N and M = 59 and 13
Acc and Error = 0.19373948857019707 2570.7444281340886
Ttest_indResult(statistic=0.8223914298278749, pvalue=0.4108546913660588)
--------------------
--------------------
N and M = 62 and 14
Acc and Error = 0.19343198651073668 2570.752562723342
Ttest_indResult(statistic=0.7540112017778682, pvalue=0.4508429807673576)
--------------------
--------------------
N and M = 65 and 15
Acc and Error = 0.19340395132068533 2570.7493450622487
Ttest_indResult(statistic=0.7217265614997439, pvalue=0.47046306954614225)
--------------------
--------------------
N and M = 68 and 16
Acc and Error = 0.19327655830425317 2570.736784891019
Ttest_indResult(statistic=0.7194848386528978, pvalue=0.47184270010001317)
--------------------
--------------------
N and M = 71 and 17
Acc and Error = 0.19372117940023403 2570.7497519932117
Ttest_indResult(statistic=0.8385571355501967, pvalue=0.40171842079748)
--------------------
--------------------
N and M = 74 and 18
Acc and Error = 0.19358526061704961 2570.7607867541687
Ttest_indResult(statistic=0.7946020338358833, pvalue=0.4268455503369052)
--------------------
--------------------
N and M = 77 and 19
Acc and Error = 0.1936524393460206 2570.74064458504
Ttest_indResult(statistic=0.8240603422679021, pvalue=0.40990580946587296)
--------------------
--------------------
N and M = 80 and 20
Acc and Error = 0.19361160304149383 2570.7318368843994
Ttest_indResult(statistic=0.7772107424178882, pvalue=0.4370349478922575)
--------------------
--------------------
N and M = 83 and 21
Acc and Error = 0.19392408843055897 2570.7533563726493
Ttest_indResult(statistic=0.8656763258171261, pvalue=0.3866682538736119)
--------------------
Highest Acc: 0.19392408843055897
###Markdown
Finally, running the KNN, XGB, and RandomForest algorithms with the optimal parameter values.
###Code
knn_model = neighbors.KNeighborsRegressor(n_neighbors = 8, leaf_size = 55)
knn_model.fit(X_train, y_train)
knn_pred = knn_model.predict(X_test)
knn_r2 = r2_score(y_test, knn_pred)
knn_rmse = mean_squared_error(y_test, knn_pred, squared=False)
print("KNN R2 Score: ", knn_r2, " ", "KNN RMSE: ", knn_rmse)
xg_model=xgboost.XGBRegressor(n_estimators=85, max_depth=40)
xg_model.fit(X_train, y_train)
xg_pred1 = xg_model.predict(X_test)
xg_r2 = r2_score(y_test, xg_pred1)
xg_rmse = mean_squared_error(y_test, xg_pred1, squared=False)
print("XGB R2 Score: ", xg_r2, " ", "XGB RMSE: ", xg_rmse)
rf = RandomForestRegressor(n_estimators=68, max_depth=16)
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
rf_r2 = r2_score(rf_pred, y_test)
rf_rmse = mean_squared_error(rf_pred, y_test, squared=False)
print(rf_r2, " ", rf_rmse)
print("RF R2 Score: ", rf_r2, " ", "RF RMSE: ", rf_rmse)
###Output
0.1937249108891005 2570.751080960518
RF R2 Score: 0.1937249108891005 RF RMSE: 2570.751080960518
###Markdown
Calculating the Root Mean Square Percentage Error (RMSPE) for our two models, the XGB and RandomForest regressors.
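For reference, the metric computed below is

$$\mathrm{RMSPE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\left(\frac{y_i - \hat{y}_i}{y_i}\right)^2}$$

with rows where the denominator blows up (e.g. zero sales) handled by the `fillna`/`replace` calls.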
###Code
XGB_rmspe = np.square((y_test - xg_pred1) / y_test).fillna((y_test - xg_pred1).mean())
XGB_rmspe = XGB_rmspe.replace([np.inf, -np.inf], XGB_rmspe.median())
XGB_rmspe = np.sqrt(np.mean(XGB_rmspe))
rf_rmspe = np.square((y_test - rf_pred) / y_test).fillna((y_test - rf_pred).mean())
rf_rmspe = rf_rmspe.replace([np.inf, -np.inf], rf_rmspe.median())
rf_rmspe = np.absolute(rf_rmspe.mean())
rf_rmspe = np.sqrt(rf_rmspe)
print("----------------------------------")
print("XGBoost RMSPE: ", round(XGB_rmspe*100, 3), "%")
print("----------------------------------")
print("Random Forest RMSPE: ", round(rf_rmspe*100, 3), "%")
print("----------------------------------")
###Output
----------------------------------
XGBoost RMSPE: 57.832 %
----------------------------------
Random Forest RMSPE: 104.828 %
----------------------------------
###Markdown
**Section 9: t-test (Q9)** In the previous question we actually looked at 4 models, but for this evaluation we settle on 2 models: (i) the XGBoost regression model and (ii) the Random Forest regression model. We perform a t-test to evaluate whether their predictions are significantly different. We get a p-value of 0.998 (close to 1), which indicates that the two sets of predictions are very similar and not significantly different; a p-value of 1 would indicate identical sample means. Just for completeness, we also test each model's predictions against the ground truth.
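For reference, `stats.ttest_ind` (with its default equal-variance assumption) computes the pooled two-sample t statistic

$$t = \frac{\bar{x}_1 - \bar{x}_2}{s_p\sqrt{\frac{1}{n_1} + \frac{1}{n_2}}}, \qquad s_p^2 = \frac{(n_1 - 1)s_1^2 + (n_2 - 1)s_2^2}{n_1 + n_2 - 2},$$

and the reported p-value is the two-sided probability of a statistic at least this extreme under the null hypothesis of equal means.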
###Code
print('--------------------------------------------------------------------------')
print('T-Test Evaluation for two models')
print(stats.ttest_ind(rf_pred, xg_pred1))
print('--------------------------------------------------------------------------')
print('')
print('')
print('Checking P Value against the ground truth(y_test) for both models')
print('--------------------------------------------------------------------------')
print("XGBoost Model")
print(stats.ttest_ind(xg_pred1, y_test))
print('--------------------------------------------------------------------------')
print("Random Forest Model")
print(stats.ttest_ind(rf_pred, y_test))
print('--------------------------------------------------------------------------')
###Output
--------------------------------------------------------------------------
T-Test Evaluation for two models
Ttest_indResult(statistic=-0.002019779092585319, pvalue=0.9983884516427544)
--------------------------------------------------------------------------
Checking P Value against the ground truth(y_test) for both models
--------------------------------------------------------------------------
XGBoost Model
Ttest_indResult(statistic=0.7450498442638124, pvalue=0.4562420666969361)
--------------------------------------------------------------------------
Random Forest Model
Ttest_indResult(statistic=0.7433030652079579, pvalue=0.45729869354432895)
--------------------------------------------------------------------------
###Markdown
The predictions are very similar, as the p-value is around 0.998. **Section 10: Screenshots (Q10)**
###Code
ts = pd.read_csv('test.csv')
ts.loc[:, ["DayOfWeek"]] = le.fit_transform(ts.loc[:, ["DayOfWeek"]])
submission = pd.DataFrame()
submission['Id'] = ts.loc[:, 'Id'].copy()
ts.drop(['Store', 'Date', 'StateHoliday', 'Id'], axis=1, inplace=True)
ts.Open.fillna(method='ffill', inplace=True)
##RF
submission['Sales'] = rf.predict(ts)
submission.to_csv('rf_submission.csv', index=False)
##XG
submission['Sales'] = xg_model.predict(ts)
submission.to_csv('xg_submission.csv', index=False)
##KNN
submission['Sales'] = knn_model.predict(ts)
submission.to_csv('knn_submission.csv', index=False)
###Output
_____no_output_____
###Markdown
Public Score & Highest Rank: 0.57550. Private Score & Highest Rank: 0.53184. Kaggle profile link: https://www.kaggle.com/babuaravindsivamani. Screenshot(s): [Image Link](https://i.imgur.com/Gzpl74m.png) or click: https://i.imgur.com/Gzpl74m.png
###Code
###Output
_____no_output_____ |
Code/2_Tokenize_And_Train.ipynb | ###Markdown
Tokenize and Train Notebook. This notebook takes a text corpus and uses it to train a skip-gram (word2vec) embedding model.
###Code
# Import the dataset
with open('./tempfiles/cleanParts.pkl', 'rb') as f:
words = pickle.load(f)
# Inspect the dataset
print(type(words))
words[0:10]
###Output
<class 'list'>
###Markdown
Generate Training Batches. This function will be called during training to generate minibatches for the skip-gram model.
###Code
# Map words to indices
word2index_map = {}
index = 0
for sent in words:
for word in sent.lower().split():
if word not in word2index_map:
word2index_map[word] = index
index += 1
index2word_map = {index: word for word, index in word2index_map.items()}
vocabulary_size = len(index2word_map)
print("Vocab size:", vocabulary_size)
# print("Word Index:", index2word_map)
# Inspect the top of the dictionary
dict(list(index2word_map.items())[0:5])
# Generate skip-gram pairs for each sentence using a sliding window
# Initialize the skip-gram pairs list
skip_gram_pairs = []
# Set the skip-gram window size
window_size = 2
for sent in words:
tokenized_sent = sent.split()
# Set the target index
for tgt_idx in range(0, len(tokenized_sent)):
# Set range for the sentence
max_idx = len(tokenized_sent) - 1
# Define range around target
lo_idx = max(tgt_idx - window_size, 0)
hi_idx = min(tgt_idx + window_size, max_idx) + 1
# List the indices in the skip-gram outputs (removing target index)
number_list = range(lo_idx, hi_idx)
output_matches = list(filter(lambda x: x != tgt_idx, number_list))
# Generate skip-gram pairs
pairs = [[word2index_map[tokenized_sent[tgt_idx]], word2index_map[tokenized_sent[out]]] for out in output_matches]
# print(pairs)
for p in pairs:
skip_gram_pairs.append(p)
# Inspect some output:
skip_gram_pairs[0:12]
###Output
_____no_output_____
###Markdown
Now define a function to sample batches from the skipgram pairs during training.
###Code
def get_skipgram_batch(start_index, end_index):
instance_indices = list(range(len(skip_gram_pairs)))
# np.random.shuffle(instance_indices)
batch = instance_indices[start_index:end_index]
x = [skip_gram_pairs[i][0] for i in batch]
y = [[skip_gram_pairs[i][1]] for i in batch]
return x, y
# batch example
x_batch, y_batch = get_skipgram_batch(0,8)
print("X Batch: ", [index2word_map[word] for word in x_batch])
print("Y Batch: ", [index2word_map[word[0]] for word in y_batch])
###Output
X Batch: ['cleaner', 'band', 'odx', 'odx', 'al', 'al', 'tube', 'tube']
Y Batch: ['band', 'cleaner', 'al', 'tube', 'odx', 'tube', 'odx', 'al']
###Markdown
Training. To do: add the following features: * Number of epochs to train.
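A minimal sketch of the requested epochs feature, expressed on top of the variables defined in the next cell (`skip_gram_pairs`, `batch_size`); `n_epochs` is an assumed user choice:

```python
import math

n_epochs = 5                                   # assumption: desired epoch count
steps_per_epoch = math.ceil(len(skip_gram_pairs) / batch_size)
n_iterations = n_epochs * steps_per_epoch      # would replace the hard-coded ~2-epoch value
```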
###Code
batch_size = 128
embedding_dimension = 128
negative_samples = 64
n_iterations = int(round(2 * len(skip_gram_pairs) / batch_size,0))
LOG_DIR = "logs/word2vec_cab"
print("There are ", len(skip_gram_pairs), " skip-gram pairs")
print("The chosen iteration and batch size parameters will yield ",
round((batch_size * n_iterations)/len(skip_gram_pairs),2), " epochs.")
graph = tf.Graph()
# This may work with GPU
with graph.as_default(), tf.device('/cpu:0'), tf.name_scope("embeddings"):
# Input data, labels
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# Embedding lookup table
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_dimension],
-1.0, 1.0), name='embedding')
# This is essentialy a lookup table
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Create variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_dimension],
stddev=1.0 / math.sqrt(embedding_dimension)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
with tf.device('/cpu:0'):
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
inputs=embed,
labels=train_labels,
num_sampled=negative_samples,
num_classes=vocabulary_size))
tf.summary.scalar("NCE_loss", loss)
# Learning rate decay
global_step = tf.Variable(0, trainable=False)
learningRate = tf.train.exponential_decay(learning_rate=0.1,
global_step=global_step,
decay_steps=1000,
decay_rate=0.95,
staircase=True)
train_step = tf.train.GradientDescentOptimizer(learningRate).minimize(loss)
merged = tf.summary.merge_all()
max_index = len(skip_gram_pairs)
start_index = 0
end_index = min(start_index + batch_size, max_index)
with tf.Session(graph=graph) as sess:
train_writer = tf.summary.FileWriter(LOG_DIR,
graph=tf.get_default_graph())
saver = tf.train.Saver()
with open(os.path.join(LOG_DIR, 'metadata.tsv'), "w") as metadata:
metadata.write('Name\tClass\n')
for k, v in index2word_map.items():
metadata.write('%s\t%d\n' % (v, k))
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embeddings.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = os.path.join(LOG_DIR, 'metadata.tsv')
projector.visualize_embeddings(train_writer, config)
tf.global_variables_initializer().run()
for step in range(n_iterations):
x_batch, y_batch = get_skipgram_batch(start_index, end_index)
summary, _ = sess.run([merged, train_step],
feed_dict={train_inputs: x_batch,
train_labels: y_batch})
train_writer.add_summary(summary, step)
if start_index >= max_index:
start_index = 0
else:
start_index = start_index + batch_size + 1
end_index = min(start_index + batch_size, max_index)
if step % 10 == 0:
print("Completed ", step, " of ", n_iterations)
saver.save(sess, os.path.join(LOG_DIR, "w2v_model.ckpt"), step)
loss_value = sess.run(loss,
feed_dict={train_inputs: x_batch,
train_labels: y_batch})
print("Loss at %d: %.5f" % (step, loss_value))
# Normalize embeddings before using
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
normalized_embeddings_matrix = sess.run(normalized_embeddings)
###Output
Completed 0 of 141989
Loss at 0: 180.20447
Completed 10 of 141989
Loss at 10: 199.43880
Completed 20 of 141989
Loss at 20: 190.76051
Completed 30 of 141989
Loss at 30: 150.21616
|
Use_Cases/other/Social_Media_Emotion_Detection/src/notebooks/Analysis.ipynb | ###Markdown
Read from database
###Code
connection_users = pg.connect("host='1c_postgres_db' port=5432 dbname=postgres user=postgres")
connection_updates = pg.connect("host='3c_postgres_db' port=5432 dbname=postgres user=postgres")
df_users = pd.read_sql_query('select * from twitter_users',
con=connection_users)
df = pd.read_sql_query('select * from twitter_updates',
con=connection_updates,
parse_dates=['status_created_at'],)
###Output
_____no_output_____
###Markdown
Data overview: User data
###Code
df_users.head(5)
df_users.describe().drop('count')
df_users.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 42 entries, 0 to 41
Data columns (total 9 columns):
user_id 42 non-null object
user_name 42 non-null object
user_location 42 non-null object
account_created_at 42 non-null datetime64[ns]
statuses_count 42 non-null int64
favorites_count 42 non-null int64
followers_count 42 non-null int64
friends_count 42 non-null int64
verified 42 non-null bool
dtypes: bool(1), datetime64[ns](1), int64(4), object(3)
memory usage: 2.8+ KB
###Markdown
User updates
###Code
rows, columns = df.shape
print(rows,'Rows x ', columns, 'Columns')
df.info()
df.describe().drop('count').drop(['retweet_count'], axis=1).style.format({'anger': "{:.1%}", 'disgust': "{:.1%}", 'fear': "{:.1%}", 'joy': "{:.1%}", 'sadness': "{:.1%}", 'surprise': "{:.1%}"})
df.head()
###Output
_____no_output_____
###Markdown
Number of users
###Code
df_user_id_count = df['user_id'].nunique()
df_user_id_count
###Output
_____no_output_____
###Markdown
Number of tweets per user
###Code
df['nr_of_status_in_dataset'] = df[['user_id', 'status_id']].groupby('user_id').transform('count')
df.head()
df_user_id = df[['user_id', 'status_id']].groupby('user_id').agg('count')
df_user_id.plot(kind='hist', legend=False, bins=32, figsize=(17,5))
sns.violinplot(x=df_user_id, inner='quartiles', figsize=(25,5))
sns.violinplot(y=df_user_id)
df_user_id.describe()
df_user_id.tail()
###Output
_____no_output_____
###Markdown
Number of unique status updates
###Code
rows, columns = df.shape
df_status_id_count = df['status_id'].nunique()
print(df_status_id_count, '/', rows, 'status updates are unique')
###Output
1303 / 1303 status updates are unique
###Markdown
Emotions of all users over time
###Code
cm = sns.light_palette("green", as_cmap=True)
df_avg_emotions_per_year = df.drop(['retweet_count', 'text', 'nr_of_status_in_dataset'], axis=1)
df_avg_emotions_per_year['year'] = df_avg_emotions_per_year['status_created_at'].dt.year
df_avg_emotions_per_year = df_avg_emotions_per_year.groupby(['year']).agg('mean')
df_avg_emotions_per_year.style.background_gradient(cmap=cm, axis=1)
ax = df_avg_emotions_per_year.plot(figsize=(15,5))
ax.set_xticks(df_avg_emotions_per_year.index)
ax.set_xticklabels(df_avg_emotions_per_year.index, rotation=90)
df_avg_emotions_over_time = df.drop(['retweet_count', 'text', 'nr_of_status_in_dataset'], axis=1)
df_avg_emotions_over_time['year'] = df_avg_emotions_over_time['status_created_at'].dt.year
df_avg_emotions_over_time['month'] = df_avg_emotions_over_time['status_created_at'].dt.month
df_all_emotions = df_avg_emotions_over_time.groupby(['year', 'month']).agg('mean')
df_all_emotions.plot(figsize=(15,5))
###Output
_____no_output_____ |
Face emotion recognition pipelines1.ipynb | ###Markdown
Loading the model for making the predictions
###Code
model4_test=load_learner(path=r"D:\Data science\Alma better\DL Facial emotion recognition\Images\images\train",file='fastai_emojis_model4.pkl')
model4_test.dl
os.chdir(r'D:\Data science\Alma better\DL Facial emotion recognition\Images\images\validation\surprise')
###Output
_____no_output_____
###Markdown
Some test predictions on an image
###Code
test1=cv2.imread('./330.jpg')
t = pil2tensor(test1, dtype=np.float32) # converts to numpy tensor
#t = t.permute(2,0,1) # Move num_channels as first dimension
t = t.float()/255.0
im = Image(t) # Convert to fastAi Image - this class has "apply_tfms"
model4_test.predict(im)
show_image(im)
model4_test.predict(im)[0]
type(model4_test.predict(im)[0])
model4_test
os.getcwd()
os.chdir('D:/Data science/Alma better/DL Facial emotion recognition/Images/images/validation/happy')
#path='./531.jpg'
img = cv2.imread('./531.jpg')
plt.imshow(img)
###Output
_____no_output_____
###Markdown
Testing on different category images
###Code
os.chdir('D:/Data science/Alma better/DL Facial emotion recognition/Images/images/validation/surprise')
a3=cv2.imread('./10162.jpg')
t = pil2tensor(a3, dtype=np.float32) # converts to numpy tensor
t = t.float()/255.0
#t = t.permute(2,0,1) # Move num_channels as first dimension
im = Image(t) # Convert to fastAi Image - this class has "apply_tfms"
pred0=model4_test.predict(im)
print(pred0)
print(str(pred0[0]))
a1=cv2.imread('./10097.jpg')
plt.imshow(a1)
#not used just for experimentation
Emojis_dict = {'Category tensor(0)':"Angry", 'Category tensor(1)':"Disgust", 'Category tensor(2)':"Fear", 'Category tensor(3)':"Happy",\
'Category tensor(4)':"Neutral", 'Category tensor(5)':"Sad", 'Category tensor(6)':"Surprise"}
###Output
_____no_output_____
###Markdown
Image emotion detection pipeline
###Code
def prediction(img1):
predictions = []
predictions = model4_test.predict(img1)
predictions[0]
#print(predictions)
#type(predictions)
prediction1=[]
prediction1=str(predictions[0])
#emotion = []
#emotion = Emojis_dict[predictions1]
if prediction1 == 'angry':
print("The person here is angry")
elif prediction1 == 'disgust':
print("The person here is disgusted")
elif prediction1 == 'fear':
print("The person here is in fear")
elif prediction1 == 'happy':
print("The person here is happy")
elif prediction1 == 'neutral':
print("The person here is neutral")
elif prediction1 == 'sad':
print("The person here is sad")
elif prediction1 == 'surprise':
print("The person here is surprised")
else:
print("Cannot detect")
#cv2.destroyWindow("preview")
def return_prediction(path):
#converting image to gray scale and save it
img = cv2.imread(path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite(path, gray)
#detect face in image, crop it then resize it then save it
#face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
img = cv2.imread(path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
face_clip = img[y:y+h, x:x+w]
cv2.imwrite(path, cv2.resize(face_clip, (350, 350)))
#read the processed image then make prediction and display the result
read_image = cv2.imread(path)
    t = pil2tensor(read_image, dtype=np.float32) # convert the image array to a torch tensor
t = t.float()/255.0
#t = t.permute((2,0,1))
#t=t.transpose((2,0,1))
img1 = Image(t) # Convert to fastAi Image - this class has "apply_tfms"
model_pred1 = model4_test.predict(img1)[0]
predicted=prediction(img1) #uncomment when above type of display text is required for image outputs
plt.imshow(img) #uncomment if image has to be displayed
return str(model_pred1)
t.shape
a5 = t.float()/255.0
a5.shape
a9=a5.permute(2,0,1).shape
a9
return_prediction('./10259.jpg')
os.getcwd()
return_prediction('./10306.jpg')
c=Emojis_dict['Category tensor(3)']
#experimentation
for i in range(len(Emojis_dict)):
if c=='Happy':
print('Yes Happy face')
else:
print('Cannot detect')
prediction1=str('Category tensor(3)')
type(prediction1)
#btn_upload = widgets.FileUpload()
#out_pl = widgets.Output()
#lbl_pred = widgets.Label()
###Output
_____no_output_____
###Markdown
Emotion detection pipeline for videos
###Code
def test_rerun(text, cap):
while(True):
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, "The last phase of the person's Emotion was recorded "+str(text), (95,30), font, 1.0, (255, 0, 0), 2, cv2.LINE_AA)
cv2.putText(img, "Press SPACE: Detecting", (5,470), font, 0.7, (255, 0, 0), 2, cv2.LINE_AA)
cv2.putText(img, "Hold Q: To Quit😎", (460,470), font, 0.7, (255, 0, 0), 2, cv2.LINE_AA)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for x,y,w,h in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (255, 0, 0), 2)
cv2.imshow("Image", img)
if cv2.waitKey(1) == ord(' '):
cv2.imwrite("test6.jpg", img)
text = return_prediction("test6.jpg")
test_video_pred(text, cap)
break
if cv2.waitKey(1) == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
os.getcwd()
os.chdir(r'D:\Data science\Alma better\DL Facial emotion recognition\Images\images\Input and output')
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
#cap = cv2.VideoCapture('./pexels-tiger-lily-7149007.mp4')
def test_video_pred(text, cap):
while(True):
ret, img = cap.read()
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, "The last phase of person's emotion was recorded: "+str(text), (95,30), font, 1.0, (255, 0, 0), 2, cv2.LINE_AA)
cv2.putText(img, "Press SPACE: For detection", (5,470), font, 0.7, (255, 0, 0), 2, cv2.LINE_AA)
cv2.putText(img, "Hold Q: To Quit😎", (460,470), font, 0.7, (255, 0, 0), 2, cv2.LINE_AA)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for x,y,w,h in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (255, 0, 0), 2)
cv2.imshow("Image", img)
if cv2.waitKey(1) == ord(' '):
cv2.imwrite("test6.jpg", img)
text = return_prediction("test6.jpg")
test_rerun(text, cap)
#plt.imshow(img)
break
if cv2.waitKey(1) == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
###Output
_____no_output_____
###Markdown
Examples
###Code
cap = cv2.VideoCapture('./pexels-tiger-lily-7149007.mp4')
test_video_pred('None',cap)
cap=cv2.VideoCapture('./pexels-yan-krukov-7693411.mp4')
test_video_pred('None',cap)
###Output
I guess you are neutral
I guess you are neutral
I guess you are neutral
I guess you are neutral
I guess you are neutral
I guess you are neutral
I guess you are neutral
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are neutral
I guess you are neutral
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are neutral
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
I guess you are happy
###Markdown
🤩😃 - It says the people in the video are in neutral and happy emotions. Yes!! Yayy!!! - It's working well
###Code
cap=cv2.VideoCapture('./pexels-yan-krukov-7640073.mp4')
test_video_pred('None',cap)
test_video_pred('None',cap)
os.chdir(r'D:\Data science\Alma better\DL Facial emotion recognition\Images\images\validation\fear')
return_prediction('./10099.jpg')
###Output
The person here is in fear
###Markdown
Live video emotion detection
###Code
cap=cv2.VideoCapture(0)
test_video_pred('None',cap)
cap=cv2.VideoCapture(0)
test_video_pred('None',cap)
###Output
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is sad
The person here is neutral
The person here is neutral
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is sad
The person here is sad
The person here is sad
The person here is sad
The person here is happy
The person here is happy
The person here is happy
The person here is happy
The person here is angry
The person here is happy
The person here is angry
The person here is happy
The person here is angry
The person here is angry
The person here is sad
The person here is neutral
The person here is angry
The person here is angry
The person here is neutral
The person here is angry
The person here is angry
The person here is angry
The person here is angry
The person here is angry
The person here is sad
The person here is neutral
The person here is angry
The person here is sad
The person here is neutral
The person here is angry
The person here is neutral
The person here is sad
The person here is angry
The person here is sad
The person here is sad
The person here is sad
The person here is sad
|
project-ukrainian-coffee-shops/coffee_shops.ipynb | ###Markdown
DataCamp Certification Case Study Project Brief: You are on the data science team for a coffee company that is looking to expand their business into Ukraine. They want to get an understanding of the existing coffee shop market there. You have a dataset from Google businesses. It contains information about coffee shops in Ukraine. The marketing manager wants to identify the key coffee shop segments. They will use this to construct their marketing plan. In their current location, they split the market into 5 segments. The marketing manager wants to know how many segments are in this new market, and their key features. You will be presenting your findings to the Marketing Manager, who has no data science background. The data you will use for this analysis can be accessed here: `"data/coffee_shops.csv"` Table of Content: * [Getting to know the dataset](getting-to-know) * [Regrouping](regroup)
###Code
# import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
%matplotlib inline
###Output
_____no_output_____
###Markdown
Getting to know the dataset: From the case study description document, we see that the missing values in the delivery, dine-in and takeout option columns actually mean False. Therefore, let's take care of that!
###Code
shops = pd.read_csv('data/coffee_shops.csv')
shops[['Delivery option', 'Dine in option', 'Takeout option']] = shops[['Delivery option',
'Dine in option',
'Takeout option']].fillna(False, axis=1)
shops.head(3)
print('Number of rows and columns:', shops.shape)
shops.info()
# taking care of duplicates and/or misspelled words
def correct_names(places: list):
for idx, place in enumerate(places.lower()):
if place.startswith('dim ka'):
places[idx] = 'dim kavy'
elif 'gangster' in place:
places[idx] = 'gangster coffee shop'
elif 'aroma' in place:
places[idx] = 'aroma kava (coffee)'
elif 'art coffee' in place:
places[idx] = 'art coffee'
elif place.startswith('смажимо каву'):
places[idx] = 'смажимо каву'
elif 'my coffee' in place:
pla
shops['Place name'].unique().tolist()
###Output
_____no_output_____ |
Lessons&CourseWorks/3.ObjectTracking&Localization/5.RepresentingState&Motion/2. Car, playground.ipynb | ###Markdown
Car playground 1: This notebook provides some initial variables and creates one car object, but, here, you are encouraged to play around with the car movement code! This playground is especially useful if you **modify the car.py file** and want to test out some new car property or function! So, your only tasks for this notebook are to use it as your own personal testing ground. Some ideas: 1. Create multiple cars, with different initial states, and visualize them using `display_world()`. 2. Add a function in car.py (by navigating back to it via the orange Jupyter icon), and then call that function here (a sketch of one possible helper follows below). 3. Add a new initial variable to `__init__` in car.py and use it!
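As a starting point for idea 2, here is a minimal sketch of a helper method you could add to the `Car` class in car.py. It is an assumption, not part of the original file: it only relies on the `turn_left()` method that this notebook already calls on car objects, and the name `turn_around` is purely illustrative.

```python
# Hypothetical method to add inside the Car class in car.py (not in the original file).
# It assumes the class already defines turn_left(), which this notebook uses below.
def turn_around(self):
    """Rotate the car 180 degrees by turning left twice."""
    self.turn_left()
    self.turn_left()
```

After saving car.py, the `%autoreload` magic above should pick up the change, and you could then call something like `carla.turn_around()` in a new cell.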
###Code
import numpy as np
import car
%matplotlib inline
# Auto-reload function so that this notebook keeps up with
# changes in the class file
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Create a new car object
###Code
# Create a 2D world of 0's
height = 4
width = 6
world = np.zeros((height, width))
# Define the initial car state
initial_position = [0, 0] # [y, x] (top-left corner)
velocity = [0, 1] # [vy, vx] (moving to the right)
# Create a car with initial params
carla = car.Car(initial_position, velocity, world)
carla.display_world()
## Create multiple cars and visualize them
car1 = car.Car(initial_position,velocity,world,'r')
car2 = car.Car(initial_position,velocity,world,'g')
car3 = car.Car(initial_position,velocity,world,'b')
car4 = car.Car(initial_position,velocity,world,'m')
car1.move()
car2.move()
car3.move()
car4.move()
car1.move()
car2.turn_right()
car3.turn_left()
car4.move()
car1.turn_right()
car1.move()
car2.move()
car3.move()
car4.move()
car1.display_world()
car2.display_world()
car3.display_world()
car4.display_world()
###Output
_____no_output_____ |
how_to_improve_your_chance_of_getting_mortgage_loan.ipynb | ###Markdown
How to improve your chances of getting a Mortgage Loan Context: * This is a publicly available dataset that contains information about mortgage loans. * The Home Mortgage Disclosure Act (HMDA) requires many financial institutions to maintain, report, and publicly disclose information about mortgages. These public data are important because they help show whether lenders are serving the housing needs of their communities; they help authorities detect predatory lending practices; they give public officials information that helps them make decisions and policies; and they shed light on lending patterns that could be discriminatory. * E.g., a reported increase in mortgage borrowing by blacks and Hispanics as of 1993. Project Goal: Discovering the main factors affecting applicants' mortgage loan approval rate using Python. Content: This project consists mainly of exploratory data analysis. All the technical steps have been explained with comments. 1.0.0) EXPLORATORY DATA ANALYSIS SECTION We will start off by importing various libraries for our analysis. The CSS code below will create border lines around our dataframes.
###Code
%%HTML
<style type='text/css'>
table.dataframe td,table.dataframe th{
border: 1px solid black !important;
color: solid black !important
}
</style>
# create a class to print different font properties in python
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Importing various libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['agg.path.chunksize'] = 10000
import string
import warnings
warnings.filterwarnings('ignore')
sns.set_style('whitegrid')
# Load the dataset from a directory on local machine
df_main = pd.read_csv('Washington_State_HDMA.csv',low_memory=False)
###Output
_____no_output_____
###Markdown
**1.0.1)** An Overview * The shape of the data is (466566, 47), which means it contains 466,566 data entries and 47 attributes/features/columns. * The dataset has 34 features with object datatype, 9 with float datatype and 4 with integer datatype.
###Code
# Get the dataset information
df_main.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 466566 entries, 0 to 466565
Data columns (total 47 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 tract_to_msamd_income 465951 non-null float64
1 rate_spread 8638 non-null float64
2 population 465956 non-null float64
3 minority_population 465956 non-null float64
4 number_of_owner_occupied_units 465944 non-null float64
5 number_of_1_to_4_family_units 465955 non-null float64
6 loan_amount_000s 466566 non-null int64
7 hud_median_family_income 465960 non-null float64
8 applicant_income_000s 404533 non-null float64
9 state_name 466566 non-null object
10 state_abbr 466566 non-null object
11 sequence_number 466566 non-null int64
12 respondent_id 466566 non-null object
13 purchaser_type_name 466566 non-null object
14 property_type_name 466566 non-null object
15 preapproval_name 466566 non-null object
16 owner_occupancy_name 466566 non-null object
17 msamd_name 428292 non-null object
18 loan_type_name 466566 non-null object
19 loan_purpose_name 466566 non-null object
20 lien_status_name 466566 non-null object
21 hoepa_status_name 466566 non-null object
22 edit_status_name 74505 non-null object
23 denial_reason_name_3 1246 non-null object
24 denial_reason_name_2 6746 non-null object
25 denial_reason_name_1 34499 non-null object
26 county_name 466199 non-null object
27 co_applicant_sex_name 466566 non-null object
28 co_applicant_race_name_5 14 non-null object
29 co_applicant_race_name_4 21 non-null object
30 co_applicant_race_name_3 105 non-null object
31 co_applicant_race_name_2 1862 non-null object
32 co_applicant_race_name_1 466566 non-null object
33 co_applicant_ethnicity_name 466566 non-null object
34 census_tract_number 465960 non-null float64
35 as_of_year 466566 non-null int64
36 application_date_indicator 466566 non-null int64
37 applicant_sex_name 466566 non-null object
38 applicant_race_name_5 46 non-null object
39 applicant_race_name_4 68 non-null object
40 applicant_race_name_3 297 non-null object
41 applicant_race_name_2 4478 non-null object
42 applicant_race_name_1 466566 non-null object
43 applicant_ethnicity_name 466566 non-null object
44 agency_name 466566 non-null object
45 agency_abbr 466566 non-null object
46 action_taken_name 466566 non-null object
dtypes: float64(9), int64(4), object(34)
memory usage: 167.3+ MB
###Markdown
1.0.2) This is the overall description of some numerical features of the dataset. > `Loan amount`: the amount of money the applicant applied for. > `as_of_year`: the reporting year of the HMDA record. * The average loan amount received by applicants is `$`$298.26k$, while the average income of applicants is `$`$112.98k$, which is well below the average loan amount. * The minimum loan amount received by applicants is `$`$1k$ USD (United States Dollars), which matches the minimum annual gross income of applicants, also `$`$1k$. * The median `tract_to_msamd_income` (the percentage of the median family income for the tract compared to the median family income for the MSA/MD) is $104.53$%. > * **Tract or Census Tract:** a neighborhood consisting of a population between 2,800 and 8,000. > * **Metropolitan statistical areas (MSA):** are delineated by the U.S. Office of Management and Budget (OMB) as having at least one urbanized area with a minimum population of 50,000. * The minimum, average and maximum interest `rate_spread` are 1.50, 1.72 and 13.66, respectively. > The interest rate spread is what the company charges on a loan compared to its cost of money. * The average `hud_median_family_income` is `$`$76797.148$. > * `hud_median_family_income` is the median family income in dollars for the MSA/MD in which the tract is located. * `as_of_year` is the year the HMDA data was given to the federal agency.
###Code
# show data decription
df_main.describe()
###Output
_____no_output_____
###Markdown
1.0.3) Describing the non-numerical features * Most of the mortgage applicants are male, representing $60.76$%, while females represent $23.61$%. The remaining $15.63$% of applicants didn't provide their gender status. * All the properties are located in Washington state, from which this entire mortgage dataset was collected. * The `loan_type_name` provides information on the institutions or programs that guaranteed the loan: government programs offered by the Federal Housing Administration (FHA), the Department of Veterans Affairs (VA), or the Department of Agriculture's Rural Housing Service (RHS) or Farm Service Agency (FSA). All other loans are classified as conventional. For this data, most loans were guaranteed conventionally, representing $71.30$%, and the rest $29.70$%. * In the loan process, $99.997$% of loans were not subjected to the Home Ownership and Equity Protection Act (HOEPA). > **The Home Ownership and Equity Protection Act** is a federal law that discourages banks and other financial institutions from predatory lending when they fund mortgages and home equity loans. * Per this dataset, the top reason for loan denial is the `Debt_to_income ratio`.
###Code
# display only object datatype
df_main.describe(include='O').iloc[:,20:]
# know how the data features correlate with each other
df_corr = df_main.corr().abs().unstack().sort_values(ascending=False)
df_corr = df_corr.reset_index()
df_corr.columns = ['feat_one','feat_two','count']
# keep every other row, since each correlated pair appears twice after unstacking
df_corr = df_corr[df_corr['count'] != 1.0][1::2]
# get features with correlation of more than 0.3
df_corr[df_corr['count'] > 0.3]
###Output
_____no_output_____
###Markdown
1.0.4) Understanding our label `action_taken_name`: Loan origination is the process by which a borrower applies for a new loan, and a lender processes that application. This process ends when the loan is approved or denied. In our case, `loan originated` means the loan has been approved for disbursement. What is `loan purchased by institution`? Most lenders sell loans to the **secondary market** for liquidity reasons, meaning they either don't want the loans on their balance sheet or they sell loans so they can lend to more borrowers. The **secondary market** is basically an institution willing to buy loans from the primary lenders. When this happens, either the secondary market assumes the servicing rights (meaning consumers have to make payments to the secondary market) or the servicing rights are retained by the original lender. **NB:** In our case, we will remove all loans that were sold to secondary institutions since we want to deal directly with primary lenders. We will also take out loans that were withdrawn by applicants (`Application withdrawn by applicant`).
###Code
print(df_main['action_taken_name'].unique())
df_main = df_main[(df_main['action_taken_name'] != 'Loan purchased by the institution') &\
(df_main['action_taken_name'] != 'Application withdrawn by applicant')]
df_main['action_taken_name'].unique()
###Output
_____no_output_____
###Markdown
_We will also drop some selected columns that have too many missing values and are not really of much importance to this project._ 1.0.5) What is the main reason why applicants' loans are denied? * The `denial_reason_name_1`, `denial_reason_name_2` and `denial_reason_name_3` columns contain the various reasons why an applicant's loan application is denied. Each applicant has at most three reasons to be denied a loan. > **NB:** There is a huge amount of missing data: only a total of **42491** values are available while **1031065** are missing across the three columns. A decision about this will be made in the feature engineering section. * Since we want the overall reasons that cut across all three columns, the three features will be joined together into one column. * After that we find the percentage of each reason compared to the others. * From the graph below, the most significant reasons why applicants are denied loans are the `Debt-to-income` ratio and bad `Credit History`, at $23$% and $22$% respectively. > The debt-to-income ratio is the percentage of your gross monthly income that goes to paying your monthly debt payments. Most lenders don't want it to be above 36%. > Credit history is a record of a consumer's ability to repay debts and demonstrated responsibility in repaying debts. * The most avoidable one is the incomplete loan application. A number of $6,184$ applications, representing $1.73$ `%` of the total loan applications, were denied because of incomplete applications.
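Since the debt-to-income ratio is the leading denial reason, here is a minimal illustrative sketch of how that ratio is usually computed; the function name, the dollar figures and the 36% cut-off are assumptions for illustration and are not taken from this dataset.

```python
# Illustrative debt-to-income (DTI) calculation; all numbers are hypothetical.
def debt_to_income_ratio(monthly_debt_payments, gross_monthly_income):
    """Return the DTI as a percentage of gross monthly income."""
    return 100 * monthly_debt_payments / gross_monthly_income

dti = debt_to_income_ratio(monthly_debt_payments=2000, gross_monthly_income=5000)
print(f"DTI = {dti:.1f}%")                                  # DTI = 40.0%
print("Likely too high" if dti > 36 else "Within the usual limit")
```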
###Code
print(df_main[['denial_reason_name_1',
'denial_reason_name_2',
'denial_reason_name_3']].isna().sum(),'\n\n')
print('Total available values = {}'.format(df_main[['denial_reason_name_1',
'denial_reason_name_2',
'denial_reason_name_3']].notna().sum().sum()))
print('Total missing values = {}'.format(df_main[['denial_reason_name_1',
'denial_reason_name_2',
'denial_reason_name_3']].isna().sum().sum()))
# select only the three denial reasons from the dataset
df_loan_denial = df_main[['denial_reason_name_1','denial_reason_name_2','denial_reason_name_3']]
# compute the percentage of denials
df_loan_denial = pd.DataFrame(pd.concat([df_loan_denial['denial_reason_name_1'],
df_loan_denial['denial_reason_name_2'],
df_loan_denial['denial_reason_name_3']],
ignore_index=True).value_counts(normalize=True),
columns=['denial_reason_count'])
# Declare a matplotlib figure and plot the denial reasons
fig,ax = plt.subplots(figsize=(10,5))
sns.barplot(y=df_loan_denial.index,x=df_loan_denial.denial_reason_count,orient='h');
plt.ylabel('Denial Reasons',fontdict={'fontsize':15,'fontstyle':'italic','fontweight':'bold'})
plt.xlabel('Reasons Count(%)',fontdict={'fontsize':15,'fontstyle':'italic','fontweight':'bold'})
plt.tick_params(axis='both',labelsize=15)
plt.subplots_adjust(left=0.3)
plt.title('Various reasons why applicants loan are denied(%)',
fontdict={'fontsize':16,'fontstyle':'italic','fontweight':'bold'});
plt.savefig('denial_reasons',bbox_inches="tight")
###Output
_____no_output_____
###Markdown
1.0.6) Does an applicant's gross income play a role in his/her loan attainment? Figure **1.0.5** shows clearly that the debt-to-income ratio is the main reason why applicants are denied loans. This brings up the question: do high-income applicants have an edge over the lower-income groups? This turned out to be true! We will create a new binary column called `loan_approved`, where `1` means the loan was approved and `0` means it was denied. Then we will divide the entire dataset into two dataframes, `loan_approved` and `loan_denied`, and print out the mean and median income of both categories. * The bar plot shows that the mean and median income of those with their loans approved are slightly higher than of those with their loans denied.
###Code
# creating loan_approved column from the column 'action_taken_name' columns
df_main['loan_approved'] = df_main['action_taken_name'].apply(lambda x: 1 if x == 'Loan originated' else 0)
# loan_approved = 1 and loan_denied = 0
loan_approved,loan_denied = df_main[df_main['loan_approved'] == 1],df_main[df_main['loan_approved'] == 0]
print('Approved mean {} and median {}'.format(loan_approved['applicant_income_000s'].mean(),
loan_approved['applicant_income_000s'].median()))
print('Denied mean {} and median {}'.format(loan_denied['applicant_income_000s'].mean(),
loan_denied['applicant_income_000s'].median()))
# Find the mean and median of applicants gross income
app_loan_mean,app_loan_med = loan_approved['applicant_income_000s'].mean(),loan_approved['applicant_income_000s'].median()
den_loan_mean,den_loan_med = loan_denied['applicant_income_000s'].mean(),loan_denied['applicant_income_000s'].median()
# Plot the results
fig,ax = plt.subplots(1,1,sharey=True,figsize=(10,6))
ax.bar(x=['mean gross income','median gross income'],height=(app_loan_mean,app_loan_med),color='b',alpha=1,width=0.1)
ax.bar(x=['mean gross income','median gross income'],height=(den_loan_mean,den_loan_med),color='red',width=0.1)
plt.legend(['Loan Approved: mean = {} median = {}'.format(np.round(app_loan_mean,2),app_loan_med),
'Loan Denied: mean= {} median = {}'.format(np.round(den_loan_mean,2),den_loan_med)],
fontsize=13,title='Loan Status',title_fontsize=15)
ax.tick_params(axis='x',labelsize=20)
plt.title(' \n Bar plot shows the mean and median gross income \n of the approved and denied loan status',fontsize=20,pad=20);
# save the plot onto memory
plt.savefig('gross_income',bbox_inches="tight")
###Output
_____no_output_____
###Markdown
1.0.7) Can a low or high neighborhood family income where the property is located affect an applicant's chances of getting a loan? * In the HMDA dataset, lenders disclose the census tract of applicants instead of making their addresses public; the tract is part of the community where the property is located. * Each census tract is located in a Metropolitan Statistical Area/Metropolitan Division (MSA/MD). * The `hud_median_family_income` is the median family income in dollars for the MSA/MD in which the tract is located. * Basically, you might expect that for a loan to be approved the applicant's income must be similar to or above the neighborhood median family income. * Well, this is in some sense true! But wait, we will see how applicants whose income is GREATER OR LESS than the mean neighborhood median income of where the property is located are affected.
###Code
# Lets take away the null values from the applicant_income and msamd_name
df_hud_appincome = df_main[df_main['applicant_income_000s'].notna()]
df_hud_appincome = df_hud_appincome[df_hud_appincome['msamd_name'].notna()]  # filter on its own column, not df_main
#The trailing zeros in the column name indicates that the actual
# amount must be multiplied by 1000 to get the actual values
df_hud_appincome['applicant_income_000s'] = df_hud_appincome['applicant_income_000s']*1000
# Form a dictionary of neighborhood name and its corresponding average median income
msamd_name_mean = dict(df_hud_appincome.groupby('msamd_name').mean()['hud_median_family_income'])
series_list = []
for name,val in msamd_name_mean.items():
df = df_hud_appincome[df_hud_appincome['msamd_name'] == name]
greater = df[df['applicant_income_000s'] > val]
greater['mean_status'] = 'greater'
series_list.append(greater['mean_status'])
equal = df[df['applicant_income_000s'] == val]
equal['mean_status'] = 'equal'
series_list.append(equal['mean_status'])
less = df[df['applicant_income_000s'] < val]
less['mean_status'] = 'lesser'
series_list.append(less['mean_status'])
# concatenate the list of series and add it to the dataframe
df_transformed = pd.concat(series_list)
combined = pd.concat([df_hud_appincome,df_transformed],axis=1)
df_grouped = (combined.reset_index().groupby(['msamd_name',
'mean_status',
'loan_approved']).count()['index']).reset_index()
# declare empty list
percentage_list = []
# calculate all the percentage of its corresponding
for indx in range(0,df_grouped.shape[0],2):
curr_val,next_val = df_grouped.loc[indx,'index'],df_grouped.loc[indx+1,'index']
curr_next_val = curr_val + next_val
curr_perc,next_perc = (curr_val/curr_next_val)*100,(next_val/curr_next_val)*100
percentage_list.append(np.round(curr_perc,2)),percentage_list.append(np.round(next_perc,2))
df_grouped = pd.concat([df_grouped,pd.Series(percentage_list,name='percentage %')],axis=1)
# Because the plots are many, we shorten the names to allow space
# df_grouped.rename({'msamd_name':'name','mean_status':'status'},axis=1,inplace=True)
# df_grouped['status'] = df_grouped['status'].map({-1:'low',0:'equal',1:'above'})
# creating a facetgrid with MSA/MD, Mean Status,loan_approved, percentage %
sns.set(font_scale=1)
# Take a random sample of ten neighborhood
# samp_10_neighborhood = df_grouped.sample(10,random_state = 7)
grid = sns.FacetGrid(df_grouped, row='msamd_name', col='mean_status', height=2.2, aspect=2)
grid.map(sns.barplot, 'loan_approved', 'percentage %', alpha=.5, ci=None,order=[0,1],palette='deep')
grid.add_legend();
grid.tight_layout()
grid.fig.subplots_adjust(top=0.9,wspace=1.0)
grid.fig.suptitle('Applicants median income compared to the average median income \n of each neigborhood where the property is located',
fontproperties={'size':20,'weight':'bold'})
axes = plt.gca()
from matplotlib.lines import Line2D
cmap = plt.cm.coolwarm
custom_lines = [Line2D([0], [0], color='#dab9aa', lw=10),
Line2D([0], [0], color='#a1afca', lw=10)]
axes.legend(custom_lines, ['Loan Approved', 'Loan Denied'],title='Loan Approval Status',
loc='best', bbox_to_anchor=(0.85, 0.2, 0.9, 10.5));
plt.savefig('neighborhood',bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Reading plot* On the x-axis, `0` and `1` means `loan_denied` and `loan_approved` respectively* Each row is a unique neighborhood* On each row, read from left towards right* Lesser status means applicant median income is Lower compared to the average median income of the neighborhood where the property is located.* Greater status means applicant median income is Above compared to the average median income of the neighborhood where the property is located.**FINDINGS*** As you view the greater and lesser mean status graphs on each row, you could see either slight decrease in the number of `loan_denied` or increased in the number of `approved loans` for almost all the plots.* This implies, applicants with higher median income than the average median income of the neigbhorhood where the property is located, have got their loan approved compared to appplicants with lower median income. 1.0.8) What property type has the highest loan approval rate? **1-4 family dwelling, multifamily dwelling and manufactured housing are the three property types.** **Manufactured homes:** Housing that is essentially ready for occupancy upon leaving the factory and being transported to a building site.**Multifamily dwelling:** Any housing unit where two (2) or more dwellings are separated by a common wall, floor or ceiling, including but not limited to apartments, condominiums and townhouses. Finding* Applicants for multifamily dwelling housing property has the highest percentage of qualifying for loans.Such properties can produce cashflow of decent rental income for payment at the earlier stage and that is what most lenders want. * Most applicants of multifamily dwelling are mostly investors and they obviously have good credit-score history and they also provide decent down payment.* It's tougher to get a loan for manufactured housing. This is because manufactured housing tends to depreciate, while traditional home values tend to increase over time.
###Code
# creating a dataframe with `loan_approved` grouped by property_type_name as index
df_cross = pd.crosstab(df_main['property_type_name'],df_main['loan_approved'])
# creating a dictionary of property_type_name and its percentage
perc_dict = {}
for indx in range(df_cross.shape[0]):
# calculate percentage
percentage = df_cross.loc[df_cross.index[indx],1]/(df_cross.loc[df_cross.index[indx],0] + df_cross.loc[df_cross.index[indx],1])
perc_dict[df_cross.index[indx]] = np.round(percentage*100,2)
# getting the x and y values for the plot
x = list(perc_dict.keys())
y = list(perc_dict.values())
# creating a barplot of a property type and its rate of approval
sns.barplot(y,x,orient='h',order=sorted(perc_dict,key=lambda x: x[1],reverse=True),palette='Accent');
plt.xlabel('Rate of Approval (%)',fontstyle='italic')
plt.ylabel('Property Type',fontstyle='italic')
plt.xlim([0,100])
# title
plt.title('Types of Properties and its Rate (%) of approval',pad=20,fontweight='bold');
plt.savefig('Property_type',bbox_inches='tight')
###Output
_____no_output_____
###Markdown
1.0.9) Which of the loan types has a better chance of being approved? There are loans that are insured or guaranteed by government programs offered by: 1) Federal Housing Administration (FHA) 2) Department of Veterans Affairs (VA) 3) Department of Agriculture's Rural Housing Service (RHS) or Farm Service Agency (FSA). All other loans are classified as conventional.
###Code
# creating a dataframe with loan_type_name and loan_approved
df_main['loan_type_name'].unique()
# a cross table of loan type and loan approval status
df_loan_type = pd.crosstab(df_main['loan_type_name'],df_main['loan_approved'],normalize='index')
# A function to create barplot
def rate_barplot(df,label1=None,label2=None,title=None,x_label=None,y_label=None):
"""This return a barplot with a well labelled axis"""
# getting the x values from the length of dataframe
x = np.arange(df.shape[0])
# index of the df as a label
labels = list(df.index)
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
width = 0.35
ret1 = ax.bar(x-width/2,df[0],width=width,label=label1)
ret2 = ax.bar(x+width/2,df[1],width=width,label=label2)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.set_ylim([0,1])
ax.set_title(title,fontsize=15,fontweight='bold')
ax.set_ylabel(y_label,fontstyle='italic')
ax.set_xlabel(x_label,fontstyle='italic')
ax.grid(True,which='minor',axis='y')
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}%'.format(np.round(height*100,2)),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(ret1)
autolabel(ret2)
# position the legend outside the main axis
plt.legend(loc='best', bbox_to_anchor=(0.85, 0.5, 0.5, 0.5))
plt.setp(ax.get_xticklabels(),rotation=45,ha='right');
plt.savefig('loan_type_rate',bbox_inches='tight')
plt.show()
# creating barplot with the `rate_barplot` function
rate_barplot(df_loan_type,'Loan Denied','Loan Approved','Loan-Type Rate % of Approval','Loan Type','Rate(%)')
###Output
_____no_output_____
###Markdown
* From the above graph, FSA/RHS has the highest rate of loan approval while FHA has the lowest. * The FSA/RHS are agencies set up to help low-income rural residents and farmers get loans. Applicants guaranteed under these agencies need not worry about credit history or present income since they are exempted from such criteria, but they should be able to make payments on their loan, taxes and insurance. * The FSA takes on up to 95% of the loss. 1.1.0) WHICH COUNTY HAS THE HIGHEST NUMBER OF LOAN APPLICANTS AND HIGHEST RATE OF LOAN APPROVAL?
###Code
df_main['county_name'].value_counts().head()
pd.crosstab(df_main['county_name'],df_main['loan_approved'],normalize='index').sort_values(by=[1],ascending=False).head(5)[1]
###Output
_____no_output_____
###Markdown
**King County has the highest number of loan applicants** * According to the 2019 US population and housing census, King County has a whopping 2,252,782 residents, which makes it the most populous county in Washington and the 13th most populous county in America. * The second highest is Pierce County, with a population of 904,980 residents, making it the second and 61st most populous county in Washington and the United States respectively. * At least these two give a clear indication of how the number of applicants correlates with the population of that county. * `King County` has the highest rate of loan approval, followed by Snohomish County. 1.1.1) Can the purpose of the loan help you? Loan applications are intended for `Refinancing`, `Home Purchase` and `Home Improvement`. Home Purchase and Home Improvement are self-explanatory. `Refinancing:` Refinancing means getting a new mortgage to replace the original. Refinancing is done to allow a borrower to obtain a better interest term and rate.
###Code
df_purpose = pd.crosstab(df_main['loan_purpose_name'],df_main['loan_approved'],normalize='index')
rate_barplot(df_purpose,'Loan Denied','Loan Approved','Loan Purpose vs Rate (%) of Approval',
'Loan Purpose','Purpose Rate(%)')
plt.savefig('loan_purpose',bbox_inches='tight')
###Output
_____no_output_____
###Markdown
* Applying for a loan to purchase a home has a significantly higher rate of approval than the other two. 1.1.2) How do `owner_occupancy_name` and `hoepa_status_name` affect loan approval? `HOEPA:` The Home Ownership and Equity Protection Act (HOEPA) was enacted in 1994 as an amendment to the Truth in Lending Act (TILA) to address abusive practices in refinances and closed-end home equity loans with high interest rates or high fees. So `hoepa_status_name` shows whether or not a loan was subjected to HOEPA regulations. `Owner_occupancy_name:` This shows the owner-occupancy status of the property. Second homes, vacation homes, and rental properties are classified as "not owner-occupied as a principal dwelling". For multifamily dwellings (housing five or more families), and any dwellings located outside MSA/MDs, or in MSA/MDs where an institution does not have home or branch offices, an institution may enter `not applicable`. Most of these properties are for investment purposes.
###Code
# grouping the dataframe with three columns being the index and loan_approved values
df_hoepa_grp = pd.crosstab([df_main['owner_occupancy_name'],
df_main['hoepa_status_name']],df_main['loan_approved'],
normalize='index').reset_index()
# create loan_approved columns filled with zeros indicating loan denied
df_hoepa_grp['loan_approved'] = 0
df_hoepa_grp_0 = df_hoepa_grp.drop(1,axis=1).rename(columns={0:'percentage'})
# Create loan_approved columns filled with ones indicating loan approved
df_hoepa_grp['loan_approved'] = 1
df_hoepa_grp_1 = df_hoepa_grp.drop(0,axis=1).rename(columns={1:'percentage'})
# combine the dataframes
df_hoepa_grp_combine = pd.concat([df_hoepa_grp_0,df_hoepa_grp_1],axis=0,ignore_index=True,sort=False)
# In order to have a well layed out plot, lets shorten the name the columns and the owner_occupancy_name
df_hoepa_grp_combine.rename(columns={'owner_occupancy_name':'owner_occupy',
'hoepa_status_name':'hoepa_st','percentage':'rate(%)'},inplace=True)
df_hoepa_grp_combine['owner_occupy'] = df_hoepa_grp_combine['owner_occupy'].map({'Not owner-occupied as a principal dwelling':'not_prin_dweller',
'Owner-occupied as a principal dwelling':'prin_dweller',
'Not applicable':'not_applic'})
# Plot the graph
sns.set(font_scale=1.2)
grid = sns.FacetGrid(df_hoepa_grp_combine, row='owner_occupy', col='hoepa_st',height=2.95, aspect=2,margin_titles=False)
grid.map(sns.barplot,'loan_approved', 'rate(%)', alpha=.5, ci=None,order=[0,1],palette='deep')
grid.add_legend()
grid.fig.subplots_adjust(top=0.9,wspace=1.0)
axes = plt.gca()
from matplotlib.lines import Line2D
cmap = plt.cm.coolwarm
custom_lines = [Line2D([0], [0], color='#dab9aa', lw=10),
Line2D([0], [0], color='#a1afca', lw=10)]
axes.legend(custom_lines, ['Loan Approved', 'Loan Denied'],title='Loan Approval Status',
loc='best', bbox_to_anchor=(0.85, 0.5, 0.6, 3));
plt.savefig('hoepa',bbox_inches='tight')
###Output
_____no_output_____
###Markdown
* None of the loans that were indicated as `Not applicable` were subjected to HOEPA status. * All the loans subjected to HOEPA status stand at a $100$% rate of approval. * Multifamily dwellings (housing five or more families), which were indicated as `not_applicable`, also have a comparatively higher rate of approval. * Owner-occupied houses have a slight edge over the non-owner-occupied homes. 1.1.3) DOES THE LIEN STATUS PLAY A ROLE IN THE LOAN APPROVAL? * A lien is a claim or legal right against assets that are typically used as collateral to satisfy a debt. A lien serves to guarantee an underlying obligation, such as the repayment of a loan. * A typical example is a payment agreement for a home loan. The documents include provisions that allow the lender to keep you from selling the house until you pay what you (the debtor) owe. * Let's say that an investor is seeking a loan to buy real estate with an assessed value of `$`2,000,000. The first lender (first lien) approved a loan of `$`1,500,000, and the remaining `$`500,000 was approved by a second lender (subordinate lien).
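To make the lien example concrete, here is a minimal sketch of the arithmetic implied by the figures above; the loan-to-value (LTV) framing is illustrative and not part of the original analysis.

```python
# Worked version of the lien example above (values taken from the text).
assessed_value = 2_000_000
first_lien_loan = 1_500_000    # primary lender, repaid first if the debtor defaults
second_lien_loan = 500_000     # subordinate lender, repaid only after the first lien

first_ltv = first_lien_loan / assessed_value
combined_ltv = (first_lien_loan + second_lien_loan) / assessed_value
print(f"First-lien LTV: {first_ltv:.0%}, combined LTV: {combined_ltv:.0%}")
# First-lien LTV: 75%, combined LTV: 100%
```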
###Code
pd.crosstab(df_main['lien_status_name'],df_main['loan_approved'],normalize='index')
ax = sns.pointplot(x='lien_status_name',y='loan_approved',data=df_main)
plt.setp(ax.get_xticklabels(),rotation=20,ha='right');
plt.ylim([0,1]);
plt.title('How lien status affects the approval rate',fontsize=15,fontweight='bold');
###Output
_____no_output_____
###Markdown
**Results:** * Loans secured by a `first lien` have the highest rate of approval ($74$%). This is because such a loan prevents the debtor from selling the property until the full loan repayment has been made; in this case there is relatively lower risk for lenders. * Loans secured by a `second lien` have the second highest rate of approval, and this is understandable because when the debtor defaults and there is a forced liquidation of assets, the subordinate lien will only be paid once the first (primary) lien's money has been paid in full. In this case the second lien carries a degree of risk. * `Not secured by a lien` has the lowest rate of approval ($0.55$%). Remember, the higher the risk that you will not repay the loan, the lower your chances of getting it. 1.1.4) IS THE LENDING PROCESS DISCRIMINATORY??? i) We will start by comparing how the minority-population percentage of a county, and whether the county is above or below the overall median county approval rate, affect loan approval. * The United States is the third-most populous country in the world, with an estimated population of 329,227,746 as of January 28, 2020. * White people constitute the majority of the U.S. population, with a total of about 234,370,202 or 73%; "Non-Hispanic Whites" make up 60.7% of the country's population. * Hispanic/Latino Americans and African/Black Americans are minorities in the USA. * Properties located in census tracts with a minority population between 13-32% recorded the highest rate of loan approval, both above and below the overall median approval rate of all counties. Such counties are racially mixed. * Applications for properties located in counties that have a HIGHER minority population and are also below the overall median approval rate of all counties have a lower rate of loan approval than the LOWER-minority-population areas. This can basically be attributed to the fact that people in those counties have low income, which makes lenders think that a loan to invest in those areas may be riskier, so they tend not to approve it. In all four categories, counties that are below the `median approval rate of all counties` have a lower rate of loan approval.
###Code
# make a copy of the main. Name the new df as df_county_population
df_county_population = df_main.copy()
# Take out all nan counties
df_county_population = df_county_population[df_county_population['county_name'].notna()]
# make a dataframe of county name as index and loan_approved as values. get the median approval rate for all the counties
med_county_appr_rate = np.round((pd.crosstab(df_county_population['county_name'],
df_county_population['loan_approved'],normalize='index')[1]).median(),2)
df_county_rate = pd.crosstab(df_county_population['county_name'],df_county_population['loan_approved'],normalize='index')
# Get names of all counties above the median county approval rate
count_name_above_roa = df_county_rate.loc[df_county_rate[1] >= med_county_appr_rate].index.tolist()
# create 'Aboveall_county_median_appr_rate' column to hold whether the county is above or below the overall median county rate of
# approval. above=1 and below=0
df_county_population['Aboveall_county_median_appr_rate'] = df_county_population['county_name'].isin(count_name_above_roa).astype(int)
# list of all county names
county_name_list = df_county_population['county_name'].unique().tolist()
# county names with its respective average minority population rate
avg_popu_dict = df_county_population.groupby('county_name').mean()['minority_population'].to_dict()
# Fillin the null values in the minority_population with the average minority_population percentage of its particular county
for county_name in county_name_list:
df_county_population.loc[(df_county_population['county_name'] == county_name) & (df_county_population['minority_population'].isna()),
'minority_population'] = avg_popu_dict[county_name]
# Break the minority_population into four quantiles
df_county_population['minority_population'] = pd.qcut(df_county_population['minority_population'],4)
# Make a dataframe of loan approval rate
df_county_pop = pd.crosstab([df_county_population['Aboveall_county_median_appr_rate'],
df_county_population['minority_population']],df_county_population['loan_approved'],
normalize='index').reset_index()
fig,ax = plt.subplots(figsize=(12,6))
sns.barplot(x='minority_population',y=1,data=df_county_pop,hue='Aboveall_county_median_appr_rate')
plt.ylim([0,1])
plt.legend(title='Aboveall_county_median_appr_rate',loc='best', bbox_to_anchor=(0.85, 0.5, 0.52, 0.52))
ax.set_xlabel('Minority population range(%)')
ax.set_ylabel('Loan approval rate(%)')
ax.set_title('Minority population categories VS Rate of loan approval',fontweight='bold');
###Output
_____no_output_____
###Markdown
ii) Can an applicant's gender influence his/her loan? **NB:** Because our main focus is on the two gender categories `male` and `female`, we will take all other values out of the `applicant_sex_name` and `co_applicant_sex_name` columns.
###Code
# Make a copy of the main df into another called df_sex_grouped
df_sex_grouped = df_main.copy()
# Take only male and female sex from both main applicants and co-applicants columns
df_sex_grouped = df_sex_grouped[((df_sex_grouped['applicant_sex_name'] == 'Female') | (df_sex_grouped['applicant_sex_name'] == 'Male')) & ((df_sex_grouped['co_applicant_sex_name'] == 'Female') | (df_sex_grouped['co_applicant_sex_name'] == 'Male'))]
# create a pivot table with the dataframe
df_sex_grouped_pivot = pd.pivot_table(df_sex_grouped,values='loan_approved',index='applicant_sex_name',
columns='co_applicant_sex_name',aggfunc='mean')
# Plot a heatmap
ax = sns.heatmap(df_sex_grouped_pivot,annot=True,center=0,cbar=False)
# add title to the plot
plt.title('Main applicants and Co-applicants gender influence over loan approval',pad=20,fontweight='bold');
bottom,top = ax.get_ylim()
# add 1/2 each to make the edge full
ax.set_ylim(bottom+0.5,top-0.5);
###Output
_____no_output_____
###Markdown
* It's recorded that male main applicants with female co-applicants have a better chance of qualifying for a loan than all the remaining combinations. This suggests that couples have a better chance than single applicants. This is confirmed in a research article published by Forbes. [You can get the details here](https://www.forbes.com/sites/trulia/2016/08/31/how-your-relationship-status-affects-mortgage-worthiness/27b17b7e6114). * As a single applicant, you have to be earning a good income on your own in order to maintain a good debt-to-income ratio and a decent credit score (which are the main reasons applicants' loans are denied). * Couples can raise this income more easily if they combine their money; this gives them a good debt-to-income ratio and credit profile, which gives them an edge over single applicants. * The heatmap also shows that main and co-applicants of the same sex have slightly lower chances than the other way round. iii) Can an applicant's loan be denied because of his/her race? The issue of racism has been a problem in the United States since the foundation of the nation. `Not applicable` is the label given to institutions acting as applicants and co-applicants.
###Code
# make a pivot table with various features of the main dataframe.
df_applicants_pivot = pd.pivot_table(df_main,values='loan_approved',
index='co_applicant_race_name_1',
columns='applicant_race_name_1')
# create a figure
fig,ax = plt.subplots(figsize=(12,6))
# draw a heatmap
sns.heatmap(df_applicants_pivot,annot=True,annot_kws={"size": 15},linewidths=2, linecolor='yellow')
# get the bottom and top size of the map
bottom,top = ax.get_ylim()
# add 1/2 each to make the edge full
ax.set_ylim(bottom+0.5,top+0.5)
plt.setp(ax.get_xticklabels(), rotation=30, ha="right",
rotation_mode="anchor");
plt.title('Main applicant Vs Co-applicant race')
bottom,top = ax.get_ylim()
# add 1/2 each to make the edge full
ax.set_ylim(bottom+0.5,top-0.5);
plt.ylabel('Co-applicant race')
plt.xlabel('Main applicant race');
plt.savefig('race',bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
**Table 2**: Each column value is the mean approval rate of the co-applicant race (column name) vs all other races. Example: assume the co-applicant race is `WHITE`; as shown on the heatmap, we compare it with all the main applicant race values and then find the average. Again, after doing this calculation, `Black or African American` happens to have the lowest average rate of loan approval, $66$%, compared to the `White` race (majority), $78$%. * `Asian Americans`, a minority group, happen to have the highest rate of loan approval, $76$% and $79$% in Tables 1 and 2 respectively. A research article published by the Pew Research Center shows that Blacks and Hispanics face more challenges in getting home loans than the other races in America. The reasons lenders cite for turning down mortgage applications show different patterns depending on racial or ethnic group. Among whites, Hispanics and Asians rejected for conventional home loans, for instance, the most frequently cited reason was that their debt-to-income ratio was too high (25%, 26% and 29%, respectively). Among blacks, the most often cited reason was a poor credit history (31%). According to the research, Blacks and Hispanics generally put less money down on houses relative to total value than other groups. This makes lenders more likely to deny them a loan since it reflects the income status of the applicant and their ability to make repayments. [Click to read more on these interesting findings](https://www.pewresearch.org/fact-tank/2017/01/10/blacks-and-hispanics-face-extra-challenges-in-getting-home-loans/) * The heatmap shows the rate of loan approval for the various race combinations. A more summarized version is shown in the tables: we take a particular race of the main or co-applicant, combine it with the approval rates across all other races, and find the average.
###Code
column_names = df_applicants_pivot.columns.tolist()
df = pd.DataFrame({column_name:np.round(df_applicants_pivot[column_name].mean(),2) for column_name in column_names},
index=['All_other_race'])
# set seaborn color map
cm = sns.light_palette("green", as_cmap=True)
def highlight(s):
return 'background-color: yellow'
# add highlight and caption to the table
df.style.applymap(highlight,subset=['Black or African American','White'])\
.set_caption('MEAN VALUE OF MAIN APPLICANT RACE NAME VS ALL OTHER RACE')
###Output
_____no_output_____
###Markdown
**Table 1**: Each column value is the mean approval rate of the main applicant race (column name) vs all other races. Example: assume the main applicant race is `WHITE`; as shown on the heatmap, we compare it with all the co-applicant race values and then find the average. After doing this calculation, `Black or African American` happens to have the lowest average rate of loan approval, $65$%, compared to the `White` race (majority), $75$%.
###Code
column_names = df_applicants_pivot.columns.tolist()
df = pd.DataFrame({column_name:np.round(df_applicants_pivot.loc[column_name,:].mean(),2) for column_name in column_names},
index=['All_other_race'])
cm = sns.light_palette("green", as_cmap=True)
def highlight(s):
return 'background-color: yellow'
df.style.applymap(highlight,subset=['Black or African American','White'])\
.set_caption('MEAN OF CO-APPLICANT RACE VS ALL OTHER RACE')
# plt.savefig('table2',bbox_inches='tight')
###Output
_____no_output_____ |
05-ODEs/SecondOrderODEs/SecondOrderODEs-Full.ipynb | ###Markdown
Second Order ODEs in Python: Our last assignment consisted of a series of first order ODEs. This assignment will be similarly structured; however, you will be using the same techniques to solve second order ODEs. In particular, the examples I will give will be for a damped spring-mass system and a coupled spring-mass system; then the problems you will solve will be for a single damped pendulum and a coupled pendulum. ODEs in Python: In this class, we will make use of the [scipy.integrate toolbox](https://docs.scipy.org/doc/scipy/reference/integrate.html). Primarily, we will be accessing [solve_ivp](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.htmlscipy.integrate.solve_ivp) (for *initial* value problems) and [solve_bvp](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_bvp.htmlscipy.integrate.solve_bvp) (for *boundary* value problems). Through these two functions, Scipy seeks the best type of solver for a particular system. We'll import these and other necessary elements below.
###Code
# Python Imports
# 3rd Party Numerical Imports
import numpy as np
from scipy.integrate import solve_ivp as ivp
# 3rd Party Plotting Utilities
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
Examples The Damped Spring-Mass SystemYou all may remember that the equation of motion for a damped spring-mass system is\begin{equation} m \ddot{x} + c \dot{x} + k x = 0\end{equation}where $m$ is the mass, $c$ is the damping constant and $k$ is the spring constant. Just like we did in the first ODE assignment, we will have to rewrite this equation in terms of the highest order term.\begin{equation} \ddot{x} = -2\gamma \dot{x} - \omega_0^2 x\end{equation}where $2\gamma = c/m$ is the normalized damping coefficient and $\omega_0^2 = k/m$ is the natural frequency of the system. Now, the only problem with this equation is that we still have a term of $\dot{x}$ which we will have to write out as a second equation. This way, the differential equation solver will know how $\ddot{x}$, $\dot{x}$ and $x$ are related. We know that $\dot{x} = v$. Therefore, we will tell the solver this information by using a *system* of equations which we will write as\begin{align} \dot{x} &= v \\ \dot{v} &= -2\gamma v - \omega_0^2 x.\end{align}Now, as it did on the last assignment, the ODE solver in Python expects as its first argument a function that packages all the necessary information about the problem we are trying to solve. And, the specified function is meant to take the independent variable as the first argument ($t$ in this case), the dependent variables as a tuple in the second argument ($X$ in this case) and any other variables after that. To see this visually, check the example and input below.```pythondef odefunc(indepVar, depVars, *otherArgs)```
###Code
def dshm(t, X, gamma, omega0Sq):
'''A function for the ODE solver that specifies the conditions for damped, simple harmonic oscillations.'''
# Unpack the variables
pos, vel = X
# Return the Output of the Equations
return [
vel,
-2*gamma*vel - omega0Sq*pos
]
###Output
_____no_output_____
###Markdown
Note from the equations and functions above that, as we have it written, the first output will be the *integration* of $\dot{x}$ with respect to time, which is the position, and the second output will be the integration of $\dot{v} = \ddot{x}$ with respect to time, which is the velocity. Essentially, ODE solvers do not know how to handle anything other than a first order ODE. Therefore, we have to reframe our higher-order problems as systems of first order ODEs. At this point, we are ready to begin testing this equation against different initial conditions. Before doing so, let's take a look at the necessary arguments for Scipy's IVP solver. Their documentation states that the function call looks like ```python solve_ivp(odefunc, tSpan, y0, args=None, tEval=None)``` The function call takes other arguments which we will not utilize here. However, those listed above are explained below. * `odefunc`: This represents the name of the function to be evaluated. The IVP solver will call this function to create the solution. * `tSpan`: This is a two-tuple representing the start and stop times. * `y0`: The initial condition(s) of the system as a tuple or list. * `args`: A tuple of arguments meant to be passed on to the `odefunc`. Note: **they must be in the same order as they appear in the `odefunc`**. * `tEval`: Used to specify the specific times at which to evaluate the solution. Now that we're aware of these, let's solve the damped oscillator problem under various initial conditions.
###Code
# Set the values of the variables
m = 1
c = 0.5
k = 2
gamma = c/(2*m)
omega0Sq = k/m
# Set the initial value of the position and velocity
X0 = [1, 0] # pos = 1, vel = 0
# Set Time Specs
tSpan = (0, 10) # The simulation times for our system
tEval = np.linspace(tSpan[0], tSpan[1], 101)
# Solve the Problem
sol = ivp(dshm, tSpan, X0, args=(gamma, omega0Sq), t_eval=tEval)
# Plot the Solution
fig, ax = plt.subplots(figsize=(10, 7))
_ = ax.plot(sol.t, sol.y[0], linewidth=2, label='Position (m)') # Plot the Position (which is the zeroth solution in the solution list)
_ = ax.plot(sol.t, sol.y[1], linewidth=2, label='Velocity (m/s)') # Plot the Velocity (which is the first solution in the solution list)
_ = ax.set_xlim(tSpan)
_ = ax.set_xlabel('Time (s)', fontsize=14)
_ = ax.set_ylabel('Amplitude', fontsize=14)
_ = ax.set_title('Damped Harmonic Oscillator', fontsize=16)
_ = ax.grid(True)
_ = ax.legend()
###Output
_____no_output_____
###Markdown
Now, let's instead assume that the mass starts at the origin and is given some initial kick in the positive direction.
###Code
# Set the values of the variables
m = 1
c = 0.5
k = 2
gamma = c/(2*m)
omega0Sq = k/m
# Set the initial value of the position and velocity
X0 = [0, 2] # pos = 0, vel = 2
# Set Time Specs
tSpan = (0, 10) # The simulation times for our system
tEval = np.linspace(tSpan[0], tSpan[1], 101)
# Solve the Problem
sol = ivp(dshm, tSpan, X0, args=(gamma, omega0Sq), t_eval=tEval)
# Plot the Solution
fig, ax = plt.subplots(figsize=(10, 7))
_ = ax.plot(sol.t, sol.y[0], linewidth=2, label='Position (m)') # Plot the Position (which is the zeroth solution in the solution list)
_ = ax.plot(sol.t, sol.y[1], linewidth=2, label='Velocity (m/s)') # Plot the Velocity (which is the first solution in the solution list)
_ = ax.set_xlim(tSpan)
_ = ax.set_xlabel('Time (s)', fontsize=14)
_ = ax.set_ylabel('Amplitude', fontsize=14)
_ = ax.set_title('Damped Harmonic Oscillator', fontsize=16)
_ = ax.grid(True)
_ = ax.legend()
###Output
_____no_output_____
###Markdown
Finally, let's take a look at the *critically damped* case when $\gamma = \omega_0$.
###Code
# Set the values of the variables
gamma = 1
omega0 = 1
omega0Sq = omega0**2
# Set the initial value of the position and velocity
X0 = [1, -3] # pos = 1, vel = -3
# Set Time Specs
tSpan = (0, 10) # The simulation times for our system
tEval = np.linspace(tSpan[0], tSpan[1], 101)
# Solve the Problem
sol = ivp(dshm, tSpan, X0, args=(gamma, omega0Sq), t_eval=tEval)
# Plot the Solution
fig, ax = plt.subplots(figsize=(10, 7))
_ = ax.plot(sol.t, sol.y[0], linewidth=2, label='Position (m)') # Plot the Position (which is the zeroth solution in the solution list)
_ = ax.plot(sol.t, sol.y[1], linewidth=2, label='Velocity (m/s)') # Plot the Velocity (which is the first solution in the solution list)
_ = ax.set_xlim(tSpan)
_ = ax.set_xlabel('Time (s)', fontsize=14)
_ = ax.set_ylabel('Amplitude', fontsize=14)
_ = ax.set_title('Critically Damped Harmonic Oscillator', fontsize=16)
_ = ax.grid(True)
_ = ax.legend()
###Output
_____no_output_____
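###Markdown
As a quick illustrative cross-check, the critically damped case has the known analytic solution $x(t) = \left(x_0 + (v_0 + \gamma x_0)t\right)e^{-\gamma t}$, so we can compare it against the numerical result from the cell above.
###Code
# Illustrative check only: compare the numerical solution with the analytic
# critically damped solution x(t) = (x0 + (v0 + gamma*x0)*t)*exp(-gamma*t),
# reusing X0, gamma and sol from the previous cell.
x0, v0 = X0
xAnalytic = (x0 + (v0 + gamma*x0)*sol.t) * np.exp(-gamma*sol.t)
print('Maximum absolute difference:', np.max(np.abs(sol.y[0] - xAnalytic)))
###Output
_____no_output_____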
###Markdown
You are welcome to play around with the three cells above to see how changing $k$, $m$, $c$ or the initial conditions affects the system. Coupled, Damped Harmonic Oscillators Now, let's consider the case where one mass is connected by a spring to the left wall, a second mass is connected by a spring to the right wall, and a third spring connects the two masses in between. We can write the equations of motion for the system as\begin{align} m_1 \ddot{x}_1 &= -c \dot{x}_1 - k_1 x_1 - k_2 \left( x_1 - x_2 \right) \\ m_2 \ddot{x}_2 &= -c \dot{x}_2 - k_2 \left( x_2 - x_1 \right) - k_3 x_2\end{align}which we can then rewrite as\begin{align} \dot{x}_1 &= v_1 \\ \dot{x}_2 &= v_2 \\ \dot{v}_1 &= -2\gamma v_1 - \omega_0^2 x_1 - \omega_0^2 \left( x_1 - x_2 \right) \\ \dot{v}_2 &= -2\gamma v_2 - \omega_0^2 \left( x_2 - x_1 \right) - \omega_0^2 x_2\end{align}assuming $k_1=k_2=k_3=k$ and $m_1=m_2=m$.
###Code
# The Coupled, Damped Harmonic Oscillator Equations
def cdhm(t, X, gamma, omega0Sq):
'''A function for the ODE solver that specifies the conditions for damped, simple harmonic oscillations.'''
# Unpack the variables
pos1, pos2, vel1, vel2 = X
# Return the Output of the Equations
return [
vel1,
vel2,
-2*gamma*vel1 - omega0Sq*pos1 - omega0Sq*(pos1 - pos2),
-2*gamma*vel2 - omega0Sq*(pos2 - pos1) - omega0Sq*pos2
]
# Set the values of the variables
m = 1
c = 0.5
k = 2
gamma = c/(2*m)
omega0Sq = k/m
# Set the initial value of the position and velocity
X0 = [0, -1, 0, 0] # pos1 = 0, pos2 = -1, vel1 = vel2 = 0
# Set Time Specs
tSpan = (0, 10) # The simulation times for our system
tEval = np.linspace(tSpan[0], tSpan[1], 301)
# Solve the Problem
sol = ivp(cdhm, tSpan, X0, args=(gamma, omega0Sq), t_eval=tEval)
# Plot the Solution
fig, ax = plt.subplots(2, 1, figsize=(10, 12), sharex=True)
_ = ax[0].plot(sol.t, sol.y[0], linewidth=2, label='$x_1$') # Plot the Position1 (which is the zeroth solution in the solution list)
_ = ax[0].plot(sol.t, sol.y[1], linewidth=2, label='$x_2$') # Plot the Position2 (which is the first solution in the solution list)
_ = ax[1].plot(sol.t, sol.y[2], linewidth=2, label='$v_1$') # Plot the Velocity1 (which is the second solution in the solution list)
_ = ax[1].plot(sol.t, sol.y[3], linewidth=2, label='$v_2$') # Plot the Velocity2 (which is the third solution in the solution list)
_ = ax[0].set_xlim(tSpan)
_ = ax[1].set_xlim(tSpan)
_ = ax[1].set_xlabel('Time (s)', fontsize=14)
_ = ax[0].set_ylabel('Position (m)', fontsize=14)
_ = ax[1].set_ylabel('Velocity (m/s)', fontsize=14)
_ = fig.suptitle('Coupled, Damped Harmonic Oscillator', fontsize=16)
_ = ax[0].grid(True)
_ = ax[1].grid(True)
_ = ax[0].legend()
_ = ax[1].legend()
# Set the values of the variables
m = 1
c = 0.5
k = 2
gamma = c/(2*m)
omega0Sq = k/m
# Set the initial value of the position and velocity
X0 = [0, 0, 0, -2] # pos1 = pos2 = 0, vel1 = 0, vel2 = -2
# Set Time Specs
tSpan = (0, 10) # The simulation times for our system
tEval = np.linspace(tSpan[0], tSpan[1], 301)
# Solve the Problem
sol = ivp(cdhm, tSpan, X0, args=(gamma, omega0Sq), t_eval=tEval)
# Plot the Solution
fig, ax = plt.subplots(2, 1, figsize=(10, 12), sharex=True)
_ = ax[0].plot(sol.t, sol.y[0], linewidth=2, label='$x_1$') # Plot the Position1 (which is the zeroth solution in the solution list)
_ = ax[0].plot(sol.t, sol.y[1], linewidth=2, label='$x_2$') # Plot the Position2 (which is the first solution in the solution list)
_ = ax[1].plot(sol.t, sol.y[2], linewidth=2, label='$v_1$') # Plot the Velocity1 (which is the second solution in the solution list)
_ = ax[1].plot(sol.t, sol.y[3], linewidth=2, label='$v_2$') # Plot the Velocity2 (which is the third solution in the solution list)
_ = ax[0].set_xlim(tSpan)
_ = ax[1].set_xlim(tSpan)
_ = ax[1].set_xlabel('Time (s)', fontsize=14)
_ = ax[0].set_ylabel('Position (m)', fontsize=14)
_ = ax[1].set_ylabel('Velocity (m/s)', fontsize=14)
_ = fig.suptitle('Coupled, Damped Harmonic Oscillator', fontsize=16)
_ = ax[0].grid(True)
_ = ax[1].grid(True)
_ = ax[0].legend()
_ = ax[1].legend()
###Output
_____no_output_____
###Markdown
Assignment The PendulumThe equation of motion for a damped pendulum is\begin{equation} l \ddot{\theta} + c \dot{\theta} + g \sin \theta = 0\end{equation}which can be rewritten as\begin{align} \dot{\theta} &= \omega \\ \dot{\omega} &= -\frac{c}{l} \omega - \frac{g}{l} \sin \theta\end{align}Write the function for Scipy below.
###Code
def dpen(t, X, c, l, g):
"""Damped Pendulum"""
# Unpack
theta, omega = X
# Return Solutions
return [
omega,
-(c/l)*omega - (g/l)*np.sin(theta)
]
###Output
_____no_output_____
###Markdown
Write the code to solve the equation here for the initial conditions\begin{align} \theta(0) &= \frac{9\pi}{10} \\ \dot{\theta}(0) &= 0\end{align}and when $c=0.5$, $g=9.8$ and $l=1$.
###Code
# Set the values of the variables
c = 0.5
g = 9.8
l = 1.0
# Set the initial value of the position and velocity
X0 = [9*np.pi/10, 0] # theta = 9*pi/10, omega = 0
# Set Time Specs
tSpan = (0, 10) # The simulation times for our system
tEval = np.linspace(tSpan[0], tSpan[1], 301)
# Solve the Problem
sol = ivp(dpen, tSpan, X0, args=(c, l, g), t_eval=tEval)
# Plot the Solution
fig, ax = plt.subplots(figsize=(10, 7))
_ = ax.plot(sol.t, sol.y[0], linewidth=2, label=r'$\theta$ (rad)') # Plot the angle (which is the zeroth solution in the solution list)
_ = ax.plot(sol.t, sol.y[1], linewidth=2, label=r'$\omega$ (rad/s)') # Plot the angular velocity (which is the first solution in the solution list)
_ = ax.set_xlim(tSpan)
_ = ax.set_xlabel('Time (s)', fontsize=14)
_ = ax.set_ylabel('Amplitude', fontsize=14)
_ = ax.set_title('Damped Pendulum', fontsize=16)
_ = ax.grid(True)
_ = ax.legend()
###Output
_____no_output_____
###Markdown
Coupled PendulumsIn the case when two pendula are connected to each other via a spring, the equations of motion are\begin{align} \ddot{\theta}_1 + \frac{g}{l} \sin \theta_1 + \frac{k}{m} (\theta_1 - \theta_2) &= 0 \\ \ddot{\theta}_2 + \frac{g}{l} \sin \theta_2 + \frac{k}{m} (\theta_2 - \theta_1) &= 0 \\\end{align}which can be written as\begin{align} \dot{\theta}_1 &= \omega_1 \\ \dot{\theta}_2 &= \omega_2 \\ \dot{\omega}_1 &= - \frac{g}{l} \sin \theta_1 - \frac{k}{m} (\theta_1 - \theta_2) \\ \dot{\omega}_2 &= - \frac{g}{l} \sin \theta_2 - \frac{k}{m} (\theta_2 - \theta_1) \\\end{align}Write the function for Scipy below.
###Code
def cpen(t, X, g, l, k, m):
"""Coupled Pendulums"""
# Unpack
th1, th2, om1, om2 = X
# Return Solutions
return [
om1,
om2,
-(g/l)*np.sin(th1) - (k/m)*(th1 - th2),
-(g/l)*np.sin(th2) - (k/m)*(th2 - th1),
]
###Output
_____no_output_____
###Markdown
And write the code to solve the differential equation below for the initial conditions\begin{align} \theta_1(0) &= 0 \\ \theta_2(0) &= \frac{\pi}{6} \\ \dot{\theta}_1(0) &= 0 \\ \dot{\theta}_2(0) &= 0 \\\end{align}and when $k=1$, $m=1$, $g=9.8$ and $l=2$.
###Code
# Set the values of the variables
g = 9.8
l = 2 # the assignment above specifies l = 2
k = 1
m = 1
# Set the initial value of the position and velocity
X0 = [0, np.pi/6, 0, 0]
# Set Time Specs
tSpan = (0, 30) # The simulation times for our system
tEval = np.linspace(tSpan[0], tSpan[1], 1001)
# Solve the Problem
sol = ivp(cpen, tSpan, X0, args=(g, l, k, m), t_eval=tEval)
# Plot the Solution
fig, ax = plt.subplots(2, 1, figsize=(10, 12), sharex=True)
_ = ax[0].plot(sol.t, sol.y[0], linewidth=2, label=r'$\theta_1$') # Plot the angle of pendulum 1 (the zeroth solution in the solution list)
_ = ax[0].plot(sol.t, sol.y[1], linewidth=2, label=r'$\theta_2$') # Plot the angle of pendulum 2 (the first solution in the solution list)
_ = ax[1].plot(sol.t, sol.y[2], linewidth=2, label=r'$\omega_1$') # Plot the angular velocity of pendulum 1 (the second solution in the solution list)
_ = ax[1].plot(sol.t, sol.y[3], linewidth=2, label=r'$\omega_2$') # Plot the angular velocity of pendulum 2 (the third solution in the solution list)
_ = ax[0].set_xlim(tSpan)
_ = ax[1].set_xlim(tSpan)
_ = ax[1].set_xlabel('Time (s)', fontsize=14)
_ = ax[0].set_ylabel('Angle (rad)', fontsize=14)
_ = ax[1].set_ylabel('Angular Velocity (rad/s)', fontsize=14)
_ = fig.suptitle('Coupled Pendulums', fontsize=16)
_ = ax[0].grid(True)
_ = ax[1].grid(True)
_ = ax[0].legend()
_ = ax[1].legend()
###Output
_____no_output_____ |
01 - Python basics.ipynb | ###Markdown
I can write text like in a report Here I can write in bold, or italics This goes in a new paragraph Title Smaller title An even smaller one $\theta_i := \theta_i + \sum_{i=0}^{\infty}x^2+2$$$\theta_i := \theta_i + \sum_{i=0}^{\infty}x^2+2$$
###Code
print(type(5))
print(type(5.5))
print(type(True))
print(type('Hola'))
x = 10
print(x)
print(type(x))
x = 'Hola'
print(x)
print(type(x))
x = -5
if x > 0:
    print(f'{x} is positive')
elif x == 0:
    print(f'{x} is zero')
else:
    print(f'{x} is negative')
def determina_signo(x):
if x > 0:
return 1
elif x == 0:
return 0
else:
return -1
def determina_signo(x):
return 1 if x > 0 else 0 if x == 0 else -1
x = 10
sign = determina_signo(x)
if sign == 1:
    print(f'{x} is positive')
elif sign == 0:
    print(f'{x} is zero')
else:
    print(f'{x} is negative')
a = 5
b = 10
if a > b:
max = a
else:
max = b
print(max)
a = 50
b = 10
max = a if a > b else b
print(max)
for i in range(0,10,2):
print(i)
for i in range(0,10,2):
print(i)
if i == 8:
break
else:
    print('the loop has finished')
x = range(5)
print(x)
print(type(x))
x = list(range(5))
print(x)
print(type(x))
for val in [4, 5.68, "Hola", True, [1,2,3]]:
print(val)
data = [4, 6, -7, 0, 52, -7, 11, 26, -3]
data[5]
len(data)
data[len(data)-1]
data[-1]
data[2:5]
data[5:2]
data[-3:-1]
data
data[2:5] = 25 # note: this raises a TypeError, since assigning to a slice requires an iterable (see the next line)
data[2:5] = [22,1,0,10,12,23]
data
data.append(5)
data
data.append([-1,0])
data
data.extend([-1,0])
data
data[-3:-2] = []
data
data.remove(22)
data
data.sort(key = lambda val: abs(val))
data
data.sort(key = lambda val: val**8 if val < 0 else val, reverse=True)
data
square = lambda n: n*n
square(5)
data = [4, 6, -7, 0, 52, -9, 11, 26, -3, 13, -5]
def filter_negatives(data):
neg_data = []
for val in data:
if val < 0:
neg_data.append(val)
return neg_data
def filter_negatives(data):
return [val for val in data if val < 0] # list comprehension
filtered_data = filter_negatives(data)
filtered_data
###Output
_____no_output_____ |
_notebooks/2020-11-30-Feature-Selection.ipynb | ###Markdown
Feature Selection In The Machine Learning Process> The Relevance and Impact of Model-Driven Feature Selection - toc:true - branch:master- badges:true- comments:false- categories: [jupyter] Introduction Recently I had the rare privilege of encountering a brilliant and concise perspective on machine learning from an equally experienced practitioner in the field. According to my mentor, machine learning is the "semi-automatic process of extracting information from Data". I have seen similar definitions and perspectives on the concept of machine learning, as there is no universally accepted definition yet. However, this particular definition caught my attention because of the phrase `semi-automatic`! Understandably, I was equally curious about the meaning of `semi-automatic` in the context of the machine learning process. By way of summary, machine learning is `semi-automatic` because the process involves two parts: qualitative and quantitative. The qualitative activities are those we often refer to as pre-modelling activities: more or less the activities that fall under the realm of the 'art' of data science, subjectively carried out to pre-process and shape the data in a manner amenable to a model, the quantitative aspect of the machine learning process. Some of these qualitative processes, like 'Feature Engineering', create another problem of high dimensionality, which invariably leads to increased complexity, over-fitting and, consequently, poor performance of the model. There is a plethora of ways devised to handle the problem of the high dimensionality of data. Feature selection is one such useful method. Feature selection, simply put, is the process of pruning the features or variables of a dataset down to the 'necessary few' with high explanatory power. You can do feature selection in three ways:- Univariate Statistics Process- Iterative Process (Recursive Feature Elimination-RFE)- Model-Based Feature Selection Process Given our sub-topic, we shall dwell on the `Model-driven Process` of feature selection in this post. The Model Process of Feature Selection Performing feature selection with the aid of algorithms is one of the most straightforward and direct approaches to feature selection. I must stress here that while it is one of the more convenient ways of doing feature selection, not all algorithms are suited for it. In essence, model-driven feature selection leaves the decision process of feature selection and de-selection to the whims and caprices of tree-based algorithms. Model-driven feature selection is done in the following simple steps:- Have your dataset at hand- Split your dataset into training and test sets- Initialize your model- Fit the model with the training set of your dataset- Extract the important features- Adjust the dataset accordingly and proceed to the model building stage Model-Driven Feature Selection - Demonstration We need a dataset and an algorithm to demonstrate model-based feature selection effectively. To this end, we shall look to the versatile and comprehensive scikit-learn Python package. Import the 'SelectFromModel' class and other necessary modules
###Code
import sys;sys.path.extend([r"/Users/user/anaconda3/envs/wrangling_data/lib/python3.7/site-packages"])
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import matplotlib.pyplot as plt
cancer = load_breast_cancer()
cancer.data.shape
#get deterministic random numbers
make_up_num = np.random.RandomState(42)
noise = make_up_num.normal(size=(len(cancer.data),50))
#first thirty features are from the data whereas the next 50 features are noise
stak_noise = np.hstack([cancer.data, noise])
X_train,X_test, y_train, y_test = train_test_split(stak_noise, cancer.target, random_state=42, test_size=.50)
cancer.feature_names
select = SelectFromModel(RandomForestClassifier(n_estimators=100, random_state=42))
#The SelectFromModel class selects all features whose importance measure
#(as provided by the supervised model) is greater than the provided threshold
select.fit(X_train, y_train)
trans_xtrain = select.transform(X_train)
print(f'X_train.shape:{X_train.shape}')
print(f'trans_xtrain.shape:{trans_xtrain.shape}')
###Output
X_train.shape:(284, 80)
trans_xtrain.shape:(284, 20)
###Markdown
The Model Discards Useless and Irrelevant Features From the above operation, our original data had 30 features, and we generated an additional 50 fake features to test the efficacy of the process. As the result above shows, upon transforming the training set the number of features was drastically reduced to 20 attributes. In other words, the model not only did away with the 50 fictitious features, it also discarded 10 of our original 30 features!
###Code
mask = select.get_support()
#Visualize the mask: black is True and white is False
plt.matshow(mask.reshape(1,-1), cmap='gray_r')
plt.xlabel('Sample index')
plt.yticks(())
###Output
_____no_output_____
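###Markdown
As a brief illustrative sketch of the remaining steps listed in the introduction (adjust the dataset and proceed to the model building stage), we can transform the held-out set with the fitted selector and train a classifier on the reduced feature space; the variable names reuse those from the cells above.
###Code
# Illustrative sketch: transform the test set with the already-fitted selector
# and fit a classifier on the selected features only.
X_test_selected = select.transform(X_test)
clf = RandomForestClassifier(n_estimators=100, random_state=42)
clf.fit(trans_xtrain, y_train)
print('Test accuracy on selected features: {:.3f}'.format(clf.score(X_test_selected, y_test)))
###Output
_____no_output_____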
###Markdown
Plot feature importances from the trained model Another way is to use the model's `feature_importances_` attribute to visualize the importance of each feature. From the bar graph below we can see that the most important feature is `worst concave points`, whereas the least important feature is `symmetry error`.
###Code
cancer = load_breast_cancer()
X_train,X_test,y_train,y_test = train_test_split(cancer.data, cancer.target,random_state=42)
forest = RandomForestClassifier(n_estimators=100, random_state=42)
forest.fit(X_train,y_train)
forest.feature_importances_
def plot_feature_importance_cancer(model):
n_features = cancer.data.shape[1]
fig, ax = plt.subplots(figsize=(12,8))
plt.barh(range(n_features),model.feature_importances_, align='center')
plt.yticks(np.arange(n_features),cancer.feature_names)
plt.xlabel('Feature Importance')
plt.ylabel('Feature')
plot_feature_importance_cancer(forest)
###Output
_____no_output_____ |
baseline analysis/Baseline RF SVM LR Analysis .ipynb | ###Markdown
0 Load in Actual SEEG Dataset And Run Algorithm Cells Load in an actual SEEG dataset and then run the analysis with the baseline classifiers
###Code
# Imports assumed by the cells below.
import os
from time import time
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import spearmanr, pearsonr
from sklearn import linear_model, metrics, svm
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (accuracy_score, classification_report, confusion_matrix,
                             f1_score, precision_score, r2_score, recall_score)
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import util  # project-local module assumed to provide DataHandler

datadir = '/Volumes/ADAM LI/pydata/dnn/testdata/fft/'
datafiles = []
for root, dirs, files in os.walk(datadir):
for file in files:
datafiles.append(os.path.join(root, file))
print(len(datafiles))
datahandler = util.DataHandler()
numcomp = 50
# PCA
sklearn_pca = PCA(n_components=numcomp)
print("testing datadir is: ", datadir)
###Output
1
testing datadir is: /Volumes/ADAM LI/pydata/dnn/testdata/fft/
###Markdown
1 Load Data and Concatenate This assumes we can't make use of the time dimension. We apply PCA on the frequency feature space, expecting n_components to capture sufficient variability in the data to match the variability that will be seen by the deep neural networks.- Load data- PCA (n_components)- split training/testing data
###Code
# keep track of the explained variance
# load datafiles into features
for idx, datafile in enumerate(datafiles):
data = np.load(datafile)
power = data['power']
seizonsets = data['seizonsets']
seizoffsets = data['seizoffsets']
timepoints = data['timepoints']
if idx == 0:
features = sklearn_pca.fit_transform(power.reshape(-1, power.shape[-1]).T)
exp_var = sklearn_pca.explained_variance_ratio_[:, np.newaxis]
Xdata = features
ydata = datahandler.computelabels(seizonsets,seizoffsets, timepoints)
else:
try:
features = sklearn_pca.fit_transform(power.reshape(-1, power.shape[-1]).T)
ylabels = datahandler.computelabels(seizonsets,seizoffsets, timepoints)
exp_var = np.concatenate((exp_var, sklearn_pca.explained_variance_ratio_[:, np.newaxis]), axis=1)
if len(seizonsets) > 0 and seizonsets[0] != np.nan:
Xdata = np.concatenate((Xdata, features), axis=0)
ydata = np.concatenate((ydata,ylabels), axis=0)
else:
print(datafile)
except IndexError:
print(datafile)
print(exp_var.shape)
print(Xdata.shape)
print(ydata.shape)
print(exp_var.shape)
# plot explained var ratio
cumsum = np.cumsum(exp_var, axis=0)
sns.set(font_scale=2)
fig = plt.figure(figsize=(15,10))
plt.plot(cumsum, 'k-o')
ax = plt.gca()
ax.set_xticks(np.arange(0,50,2))
ax.set_xticklabels(np.arange(1,51,2))
ax.set_ylabel("Cumulative Explained Variance")
ax.set_xlabel("Number of Components")
ax.set_title("Scree Plot of FFT Features From PCA Transf.")
b = plt.axhline(0.95, color='black', linestyle='--', label='95% Explained Var')
ax.legend(handles=[b], loc=4)
# plot explained var ratio
cumsum = np.cumsum(exp_var, axis=0)
# cumsum_df = pd.DataFrame(index=)
sns.set(font_scale=2)
fig = plt.figure(figsize=(15,10))
sns.pointplot(data=cumsum.T, ci=95)
ax = plt.gca()
ax.set_xticks(np.arange(0,50,2))
ax.set_xticklabels(np.arange(1,51,2))
ax.set_ylabel("Cumulative Explained Variance")
ax.set_xlabel("Number of Components")
ax.set_title("Scree Plot of FFT Features From PCA Transf.")
b = plt.axhline(0.95, color='black', linestyle='--', label='95% Explained Var')
ax.legend(handles=[b], loc=4)
print(np.mean(cumsum[-1,:]))
###Output
0.990209666692169
###Markdown
2 Split Into Training and Testing Data
###Code
randstate = 42 # for reproducability
X_train, X_test, y_train, y_test = train_test_split(Xdata, ydata, test_size=0.33, random_state=randstate)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
print(rf)
print(estimator)
rf_test = rf.predict(Xdata)
ytrue = ydata
print('Out-of-bag score estimate: %.3f' % rf.oob_score_)
print('Mean accuracy score: %.3f' % accuracy_score(ytrue, rf_test))
print('F1 score:', f1_score(ytrue, rf_test))
print('Recall:', recall_score(ytrue, rf_test))
print('Precision:', precision_score(ytrue, rf_test))
print('\n classification report:\n', classification_report(ytrue, rf_test))
print('\n confusion matrix:\n',confusion_matrix(ytrue, rf_test))
fig = plt.figure()
cm = pd.DataFrame(confusion_matrix(ytrue, rf_test), columns=['norm','seiz'], index=['norm','seiz'])
sns.heatmap(cm, annot=True, fmt='g')
ax = plt.gca()
ax.set_title('Random Forest Baseline On Test Data Confusion Matrix')
ax.set_ylabel('True Label')
ax.set_xlabel('Predicted Label')
plt.show()
lr_test = estimator.predict(Xdata)
ytrue = ydata
print('Out-of-bag score estimate: %.3f' % rf.oob_score_)
print('Mean accuracy score: %.3f' % accuracy_score(ytrue, lr_test))
print('F1 score:', f1_score(ytrue, lr_test))
print('Recall:', recall_score(ytrue, lr_test))
print('Precision:', precision_score(ytrue, lr_test))
print('\n classification report:\n', classification_report(ytrue, lr_test))
print('\n confusion matrix:\n',confusion_matrix(ytrue, lr_test))
fig = plt.figure()
cm = pd.DataFrame(confusion_matrix(ytrue, lr_test), columns=['norm','seiz'], index=['norm','seiz'])
sns.heatmap(cm, annot=True, fmt='g')
ax = plt.gca()
ax.set_title('Logistic Regression Baseline Confusion Matrix')
ax.set_ylabel('True Label')
ax.set_xlabel('Predicted Label')
plt.show()
plt.figure()
sns.distplot(ydata, kde=False, axlabel='Seizure or Not', label='Testing Dataset')
ax=plt.gca()
ax.set_title('Training Data Distribution')
ax.set_xticks([0,1])
ax.set_xticklabels(['Normal', 'Seizure'])
ax.legend()
print("Baseline testing prediction of all seizure: ", np.sum(ydata)/len(ydata))
plt.figure()
sns.distplot(y_train, kde=False, axlabel='Seizure or Not', label='Train')
sns.distplot(y_test, kde=False, label='Test')
ax=plt.gca()
ax.set_title('Training Data Distribution')
ax.set_xticks([0,1])
ax.set_xticklabels(['Normal', 'Seizure'])
ax.legend()
print("Baseline training prediction of all seizure: ", np.sum(y_train)/len(y_train))
print("Baseline testing prediction of all seizure: ", np.sum(y_test)/len(y_test))
###Output
Baseline training prediction of all seizure: 0.5480838088197926
Baseline testing prediction of all seizure: 0.5486821123721978
###Markdown
3 Run Models- Random Forest- SVM- Logistic Regression
###Code
%%time
rf = RandomForestClassifier(n_estimators=100, oob_score=True, random_state=randstate)
res = rf.fit(X_train, y_train.ravel())
print(res)
predicted = rf.predict(X_test)
print('Out-of-bag score estimate: %.3f' % rf.oob_score_)
print('Mean accuracy score: %.3f' % accuracy_score(y_test, predicted))
print('F1 score:', f1_score(y_test, predicted))
print('Recall:', recall_score(y_test, predicted))
print('Precision:', precision_score(y_test, predicted))
print('\n classification report:\n', classification_report(y_test,predicted))
print('\n confusion matrix:\n',confusion_matrix(y_test, predicted))
cm = pd.DataFrame(confusion_matrix(y_test, predicted), columns=['norm','seiz'], index=['norm','seiz'])
sns.heatmap(cm, annot=True, fmt='g')
ax = plt.gca()
ax.set_title('Random Forest Baseline Confusion Matrix')
ax.set_ylabel('True Label')
ax.set_xlabel('Predicted Label')
%%time
logistic = linear_model.LogisticRegression()
scaler = StandardScaler()
# set pipeline to run things using sklearn
pipe = Pipeline(steps=[('scaler', scaler), ('logistic', logistic)])
# Prediction
Cs = np.logspace(-4, 4, 3)
penalty = ['l1', 'l2']
max_iter = [100, 1000, 2000]
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(logistic__penalty=penalty,
logistic__C=Cs,
logistic__max_iter=max_iter
))
result = estimator.fit(X_train, y_train.ravel())
print(result)
predicted = estimator.predict(X_test)
y_true = y_test
print('Mean accuracy score: %.3f' % accuracy_score(y_test, predicted))
print('F1 score:', f1_score(y_test, predicted))
print('Recall:', recall_score(y_test, predicted))
print('Precision:', precision_score(y_test, predicted))
print('\n classification report:\n', classification_report(y_test,predicted, target_names=['norm', 'onset']))
print('\n confusion matrix:\n',confusion_matrix(y_test, predicted))
cm = pd.DataFrame(confusion_matrix(y_test, predicted), columns=['norm','seiz'], index=['norm','seiz'])
sns.heatmap(cm, annot=True, fmt='g')
ax = plt.gca()
ax.set_title('Logistic Regression Baseline Confusion Matrix')
ax.set_ylabel('True Label')
ax.set_xlabel('Predicted Label')
%%time
clf = svm.SVC()
scaler = StandardScaler()
# set pipeline to run things using sklearn
pipe = Pipeline(steps=[('scaler', scaler), ('svm', clf)])
# Prediction
Cs = [1]
kernel=['linear']
# shrinking = [1, 0]
# max_iter = [100, 1000, 2000]
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(
svm__C=Cs,
svm__kernel=kernel,
# svm__shrinking=shrinking
))
result = estimator.fit(X_train, y_train.ravel())
print(result)
predicted = estimator.predict(X_test)
y_true = y_test
print('Mean accuracy score: %.3f' % accuracy_score(y_test, predicted))
print('F1 score:', f1_score(y_test, predicted))
print('Recall:', recall_score(y_test, predicted))
print('Precision:', precision_score(y_test, predicted))
print('\n classification report:\n', classification_report(y_test,predicted, target_names=['norm', 'onset']))
print('\n confusion matrix:\n',confusion_matrix(y_test, predicted))
cm = pd.DataFrame(confusion_matrix(y_test, predicted), columns=['norm','seiz'], index=['norm','seiz'])
sns.heatmap(cm, annot=True)
# predicted_train = rf.predict(X_train)
# predicted_test = rf.predict(X_test)
# test_score = r2_score(y_test, predicted_test)
# spearman = spearmanr(y_test, predicted_test)
# pearson = pearsonr(y_test, predicted_test)
# print('Out-of-bag R-2 score estimate: %.3f ' % rf.oob_score_)
# print('Test data R-2 score: %.3f ' % test_score)
# print('Test data Spearman correlation: %.3f ' % spearman[0])
# print('Test data Pearson correlation: %.3f ' % pearson[0])
# run KMeans many times
kclust = 2
# NOTE: X and labels are not defined earlier in this notebook; here we assume
# the PCA-transformed features and their labels from above are what is meant.
X = Xdata
labels = ydata.ravel()
numsamples = X.shape[0]
kmeans = KMeans(n_clusters=kclust).fit(X)
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
# print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=numsamples),
metrics.accuracy_score(labels, estimator.labels_)
))
print(82 * '_')
# thomo\tcompl\tv-meas\tARI\tAMI\
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette\taccuracy')
bench_k_means(KMeans(init='k-means++', n_clusters=kclust, n_init=10),
name="k-means++", data=X)
bench_k_means(KMeans(init='random', n_clusters=kclust, n_init=10),
name="random", data=X)
cm = pd.DataFrame(confusion_matrix(y_test, predicted), columns=['norm','onset'], index=['norm','onset'])
sns.heatmap(cm, annot=True)
predicted_train = rf.predict(X_train)
predicted_test = rf.predict(X_test)
test_score = r2_score(y_test, predicted_test)
spearman = spearmanr(y_test, predicted_test)
pearson = pearsonr(y_test, predicted_test)
print('Out-of-bag R-2 score estimate: %.3f ' % rf.oob_score_)
print('Test data R-2 score: %.3f ' % test_score)
print('Test data Spearman correlation: %.3f ' % spearman[0])
print('Test data Pearson correlation: %.3f ' % pearson[0])
###Output
_____no_output_____ |
notebook/mix-8.ipynb | ###Markdown
Objective Create a combined network of autoencoder and classifier to classify CIFAR10 data. Constraints 1. Use only 50% of the data for the classes bird, deer and truck 2. Use the output of the encoder part of the autoencoder as input to the classifier Approach - Create a custom dataset which reads only 50% of bird, deer and truck, and 100% of the remaining classes- Create a sampler which assigns weights to different classes to deal with the imbalanced dataset- Use image augmentation to enhance the performance of the classifier- Use a stacked convolutional autoencoder to get the most important features of the input image- Use a convolutional neural network to classify images- Use the encoded output of the autoencoder as input to the CNN
###Code
import torch
import numpy as np
import torch.nn as nn
import torch.cuda as cuda
import matplotlib.pyplot as plt
from torchvision import datasets
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import Subset
from torch.utils.data import Dataset
from torch.nn import functional as F
###Output
_____no_output_____
###Markdown
Dataset The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.  Image augmentation
###Code
# transformation for validation data
transform_valid = transforms.Compose([transforms.Resize((32,32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# transformation for training data
transform_train = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.RandomAffine(0, shear=10, scale=(0.8,1.2)),
transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
transforms.Resize((32,32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
###Output
_____no_output_____
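###Markdown
A quick illustrative way to sanity-check the augmentation pipeline is to apply `transform_train` to a single CIFAR-10 image several times and look at the results.
###Code
# Illustrative check: visualize a few augmented versions of one training image
# (assumes the CIFAR-10 data can be downloaded to ../data, as in the cells below).
check_ds = datasets.CIFAR10('../data', train=True, download=True)
img, _ = check_ds[0]
fig, axes = plt.subplots(1, 5, figsize=(15, 3))
for ax in axes:
    aug = transform_train(img) # tensor normalized to roughly [-1, 1]
    ax.imshow((aug.permute(1, 2, 0) * 0.5 + 0.5).clamp(0, 1).numpy()) # undo normalization for display
    ax.axis('off')
plt.show()
###Output
_____no_output_____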
###Markdown
Custom dataset for filtering classes
###Code
class CifarCustomTrainDataset(Dataset):
def __init__(self, transforms=None):
'''
Create a dataset with only 50% of bird, deer and truck classes, remaining classes with 100%
'''
self.transforms = transforms
self.cifar10_train = datasets.CIFAR10('../data', train=True, download=True)
self.truck_indices, self.deer_indices, self.bird_indices, self.other_indices = [], [], [], []
truck_idx, deer_idx, bird_idx = self.cifar10_train.class_to_idx['truck'], self.cifar10_train.class_to_idx['deer'], self.cifar10_train.class_to_idx['bird']
for i in range(len(self.cifar10_train)):
current_class = self.cifar10_train[i][1]
if current_class == truck_idx:
self.truck_indices.append(i)
elif current_class == deer_idx:
self.deer_indices.append(i)
elif current_class == bird_idx:
self.bird_indices.append(i)
else:
self.other_indices.append(i)
self.truck_indices = self.truck_indices[:int(0.5 * len(self.truck_indices))]
self.deer_indices = self.deer_indices[:int(0.5 * len(self.deer_indices))]
self.bird_indices = self.bird_indices[:int(0.5 * len(self.bird_indices))]
self.cifar_10 = Subset(self.cifar10_train, self.truck_indices+self.deer_indices+self.bird_indices+self.other_indices)
def __len__(self):
return len(self.cifar_10)
def __getitem__(self, idx):
data = self.cifar_10[idx]
if self.transforms is not None:
data = (self.transforms(data[0]),data[1])
return data
# image counter function for weight calculation
def get_label(self, idx):
data = self.cifar_10[idx]
return data[1]
###Output
_____no_output_____
###Markdown
training and validation dataset
###Code
valid_dataset = datasets.CIFAR10('../data', train=False, download=True, transform=transform_valid)
train_dataset = CifarCustomTrainDataset(transform_train)
print("train data size : ", len(train_dataset))
print("valid data size : ", len(valid_dataset))
print("truck size : ", len(train_dataset.truck_indices))
print("deer size : ", len(train_dataset.deer_indices))
print("bird size : ", len(train_dataset.bird_indices))
print("other size : ", len(train_dataset.other_indices))
###Output
Files already downloaded and verified
Files already downloaded and verified
train data size : 42500
valid data size : 10000
truck size : 2500
deer size : 2500
bird size : 2500
other size : 35000
###Markdown
Data sampler to handle the imbalanced dataset
###Code
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
"""
def __init__(self, dataset, indices=None, num_samples=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
return dataset.get_label(idx)
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
sampler = ImbalancedDatasetSampler(train_dataset)
###Output
_____no_output_____
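###Markdown
As a quick illustrative check that the weighted sampler rebalances the classes, we can count how often each class appears among the first few thousand sampled indices; each class should be drawn roughly equally often.
###Code
# Illustrative check: count the classes of the first 5000 indices drawn by the
# sampler, using the get_label helper defined above.
from collections import Counter
from itertools import islice
sample_labels = [train_dataset.get_label(i) for i in islice(iter(sampler), 5000)]
class_names = train_dataset.cifar10_train.classes
for lbl, cnt in sorted(Counter(sample_labels).items()):
    print('{:>10s}: {}'.format(class_names[lbl], cnt))
###Output
_____no_output_____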
###Markdown
**Some hyper parameters**
###Code
batch_size = 100
learning_rate = 0.0001
num_epochs = 120
weight_decay=0.001
###Output
_____no_output_____
###Markdown
**Create the dataloader**
###Code
cifar10_train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=1, sampler=sampler)
cifar10_valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
###Output
_____no_output_____
###Markdown
**Utility to display the original and output image of autoencoder**
###Code
def show_img(orig, denoised):
fig=plt.figure()
orig = orig.swapaxes(0, 1).swapaxes(1, 2)
denoised = denoised.swapaxes(0, 1).swapaxes(1, 2)
# Normalize for display purpose
orig = (orig - orig.min()) / (orig.max() - orig.min())
denoised = (denoised - denoised.min()) / (denoised.max() - denoised.min())
fig.add_subplot(1, 3, 1, title='Original')
plt.imshow(orig)
fig.add_subplot(1, 3, 3, title='Denoised')
plt.imshow(denoised)
fig.subplots_adjust(wspace = 0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Classifier (CNN)
###Code
class CNN(nn.Module):
def __init__(self):
super().__init__()
# 8 x 8 x 200 input
        self.conv1 = nn.Conv2d(200, 512, 3, 1, padding=1) # 8 x 8 x 512
        self.bn1 = nn.BatchNorm2d(512)
        self.do1 = nn.Dropout(0.5)
        self.mp1 = nn.MaxPool2d(2, stride=2) # 4 x 4 x 512
        self.conv2 = nn.Conv2d(512, 1024, 3, 1, padding=1) # 4 x 4 x 1024
        self.bn2 = nn.BatchNorm2d(1024)
        self.do2 = nn.Dropout(0.5)
        self.mp2 = nn.MaxPool2d(2, stride=2) # 2 x 2 x 1024
self.fc3 = nn.Linear(2*2*1024, 512)
self.bn3 = nn.BatchNorm1d(512)
self.do3 = nn.Dropout(0.5)
self.fc4 = nn.Linear(512, 512)
self.bn4 = nn.BatchNorm1d(512)
self.do4 = nn.Dropout(0.5)
self.fc5 = nn.Linear(512, 256)
self.bn5 = nn.BatchNorm1d(256)
self.do5 = nn.Dropout(0.5)
self.fc6 = nn.Linear(256, 10)
def forward(self, x):
x = self.mp1(self.do1(self.bn1(F.relu(self.conv1(x)))))
x = self.mp2(self.do2(self.bn2(F.relu(self.conv2(x)))))
x = x.view(-1, 2*2*1024)
x = self.do3(self.bn3(F.relu(self.fc3(x))))
x = self.do4(self.bn4(F.relu(self.fc4(x))))
x = self.do5(self.bn5(F.relu(self.fc5(x))))
x = self.fc6(x)
return x
###Output
_____no_output_____
###Markdown
Convolutional Autoencoder
###Code
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
# 32 x 32 x 3 (input)
self.conv1 = nn.Conv2d(3, 100, 5, stride=1, padding=2) # 32 x 32 x 100
self.bn1 = nn.BatchNorm2d(100)
        self.mp1e = nn.MaxPool2d(2, stride=2, return_indices=True) # 16 x 16 x 100
self.conv2 = nn.Conv2d(100, 150, 5, stride=1, padding=2) # 16 x 16 x 150
self.bn2 = nn.BatchNorm2d(150)
        self.mp2e = nn.MaxPool2d(2, stride=2, return_indices=True) # 8 x 8 x 150
self.conv3 = nn.Conv2d(150, 200, 3, stride=1, padding=1) # 8 x 8 x 200
        self.conv4 = nn.ConvTranspose2d(200, 150, 3, stride=1, padding=1) # 8 x 8 x 150
        self.mp1d = nn.MaxUnpool2d(2) # 16 x 16 x 150
        self.bn3 = nn.BatchNorm2d(150)
        self.conv5 = nn.ConvTranspose2d(150, 100, 5, stride=1, padding=2) # 16 x 16 x 100
        self.mp2d = nn.MaxUnpool2d(2) # 32 x 32 x 100
        self.bn4 = nn.BatchNorm2d(100)
        self.conv6 = nn.ConvTranspose2d(100, 3, 5, stride=1, padding=2) # 32 x 32 x 3
def forward(self, x):
# Encoder
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x, i_mp1e = self.mp1e(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
x, i_mp2e = self.mp2e(x)
features = self.conv3(x)
# Decoder
y = self.conv4(features)
y = self.bn3(y)
y = F.relu(y)
y = self.mp1d(y, i_mp2e)
y = self.conv5(y)
y = self.bn4(y)
y = F.relu(y)
        y = self.mp2d(y, i_mp1e)
output = self.conv6(y)
return features, output
###Output
_____no_output_____
###Markdown
**The Denoising Autoencoder**
###Code
autoencoder = Autoencoder().cuda()
parameters = list(autoencoder.parameters())
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(parameters, lr=learning_rate, weight_decay=weight_decay)
autoencoder
cnn = CNN().cuda()
parameters_cnn = list(cnn.parameters())
loss_func_cnn = nn.CrossEntropyLoss()
optimizer_cnn = torch.optim.Adam(parameters_cnn, lr=learning_rate, weight_decay=weight_decay)
cnn
train_loss_aen = []
valid_loss_aen = []
train_loss_cnn = []
valid_loss_cnn = []
train_acc_cnn = []
valid_acc_cnn = []
for epoch in range(num_epochs):
# Let's train the model
epoch_train_loss_aen = 0.0
epoch_train_loss_cnn = 0.0
epoch_train_iter = 0
epoch_val_loss_aen = 0.0
epoch_val_loss_cnn = 0.0
epoch_val_iter = 0
running_train_corrects_cnn = 0.0
running_val_corrects_cnn = 0.0
total_train = 0
total_val = 0
autoencoder.train()
cnn.train()
for image, label in cifar10_train_loader:
#print(label.size(0))
# autoencoder training
image = Variable(image).cuda()
optimizer.zero_grad()
encoder_output, output = autoencoder(image)
loss_aen = loss_func(output, image)
loss_aen.backward()
optimizer.step()
total_train += label.size(0)
epoch_train_iter += 1
epoch_train_loss_aen += loss_aen.data.item()
# cnn training
encoder_output = Variable(encoder_output).cuda()
optimizer_cnn.zero_grad()
output_cnn = cnn(encoder_output)
label = Variable(label).cuda()
loss_cnn = loss_func_cnn(output_cnn, label)
loss_cnn.backward()
optimizer_cnn.step()
_, preds = torch.max(output_cnn, 1)
running_train_corrects_cnn += torch.sum(preds == label.data)
epoch_train_loss_cnn += loss_cnn.data.item()
    # Let's visualize the first image of the last training batch of this epoch
if epoch % 30 == 0:
original = image[0].cpu()
decoded = output[0].cpu()
original = original.data.numpy()
decoded = decoded.data.numpy()
show_img(original, decoded)
autoencoder.eval()
cnn.eval()
for image, label in cifar10_valid_loader:
image = Variable(image).cuda()
encoder_output, output = autoencoder(image)
loss = loss_func(output, image)
total_val += label.size(0)
epoch_val_iter += 1
epoch_val_loss_aen += loss.data.item()
# cnn training
encoder_output = Variable(encoder_output).cuda()
output_cnn = cnn(encoder_output)
label = Variable(label).cuda()
_, preds = torch.max(output_cnn, 1)
running_val_corrects_cnn += torch.sum(preds == label.data)
loss_cnn = loss_func_cnn(output_cnn, label)
epoch_val_loss_cnn += loss_cnn.data.item()
if epoch % 30 == 0:
print('aen loss epoch [{}/{}], train:{:.4f}, valid:{:.4f}'.format(epoch+1, num_epochs, epoch_train_loss_aen, epoch_val_loss_aen))
print('cnn loss epoch [{}/{}], train:{:.4f}, valid:{:.4f}'.format(epoch+1, num_epochs, epoch_train_loss_cnn/epoch_train_iter, epoch_val_loss_cnn/epoch_val_iter))
print('cnn acc epoch [{}/{}], train:{:.2f}%, valid:{:.2f}%'.format(epoch+1, num_epochs, running_train_corrects_cnn.float()/total_train*100, running_val_corrects_cnn.float()/total_val*100))
train_loss_aen.append(epoch_train_loss_aen / epoch_train_iter)
valid_loss_aen.append(epoch_val_loss_aen / epoch_val_iter)
train_loss_cnn.append(epoch_train_loss_cnn / epoch_train_iter)
valid_loss_cnn.append(epoch_val_loss_cnn / epoch_val_iter)
train_acc_cnn.append(running_train_corrects_cnn.float()/total_train*100)
valid_acc_cnn.append(running_val_corrects_cnn.float()/total_val*100)
# Save the model
#torch.save(autoencoder.state_dict(), "./5.autoencoder.pth")
fig = plt.figure(figsize=(10, 7))
plt.plot(train_loss_aen, label='Train loss')
plt.plot(valid_loss_aen, label='Validation loss')
plt.legend()
plt.show()
fig = plt.figure(figsize=(10, 7))
plt.plot(train_loss_cnn, label='Train loss')
plt.plot(valid_loss_cnn, label='Validation loss')
plt.legend()
plt.show()
fig = plt.figure(figsize=(10, 7))
plt.plot(train_acc_cnn, label='Train acc')
plt.plot(valid_acc_cnn, label='Validation acc')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Observation: Looking at the accuracy graph, we can see that the model is overfitting. Let's do standalone inference
###Code
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def im_convert(tensor):
image = tensor.cpu().clone().detach().numpy()
image = image.transpose(1, 2, 0)
image = image * np.array((0.5, 0.5, 0.5)) + np.array((0.5, 0.5, 0.5))
image = image.clip(0, 1)
return image
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dataiter = iter(cifar10_valid_loader)
images, labels = next(dataiter)
images = images.to(device)
labels = labels.to(device)
encoder_output, denoised = autoencoder(images)
encoder_output = encoder_output.to(device)
label = cnn(encoder_output)
_, preds = torch.max(label, 1)
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(10):
ax = fig.add_subplot(2, 10, idx+1, xticks=[], yticks=[])
plt.imshow(im_convert(images[idx]))
ax.set_title("{} ({})".format(str(classes[preds[idx].item()]), str(classes[labels[idx].item()])), color=("green" if preds[idx]==labels[idx] else "red"))
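# Illustrative addition: a quick per-class accuracy check over the whole
# validation set, useful for verifying that the under-sampled classes
# (bird, deer, truck) keep up with the others.
autoencoder.eval(); cnn.eval()
class_correct = np.zeros(10)
class_total = np.zeros(10)
with torch.no_grad():
    for val_images, val_labels in cifar10_valid_loader:
        val_images, val_labels = val_images.to(device), val_labels.to(device)
        feats, _ = autoencoder(val_images)
        _, val_preds = torch.max(cnn(feats), 1)
        for lbl, prd in zip(val_labels, val_preds):
            class_total[lbl.item()] += 1
            class_correct[lbl.item()] += int(prd.item() == lbl.item())
for i, name in enumerate(classes):
    print('{:>6s}: {:.1f}%'.format(name, 100 * class_correct[i] / class_total[i]))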
###Output
_____no_output_____ |
analysis/01__mpra/05__mpranalyze_comp/05__mpranalyze_compare.ipynb | ###Markdown
05__mpranalyze_compare In this notebook, I run MPRAnalyze in 'compare' mode to get log2 fold changes and p-values between (a) sequence orthologs and (b) cell types.
###Code
# # install MPRAnalyze
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# BiocManager::install("MPRAnalyze", version = "3.8")
# # install RCurl
# install.packages("RCurl")
# # install biocparallel
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# BiocManager::install("BiocParallel")
# load the package
library(MPRAnalyze)
library(tidyr)
library(BiocParallel)
###Output
_____no_output_____
###Markdown
1. load data first load data for library depth correction
###Code
dna_counts_depth <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/dna_counts.for_depth_estimation.mpranalyze.txt", sep="\t", header=TRUE)
# since we only have 1 dna replicate -- add another so code doesn't crash (expects matrix)
dna_counts_depth["dna_2"] <- dna_counts_depth["dna_1"]
row.names(dna_counts_depth) <- dna_counts_depth$element
dna_counts_depth <- dna_counts_depth[ , !(names(dna_counts_depth) %in% c("element")), drop=FALSE]
dna_counts_depth <- as.matrix(dna_counts_depth)
rna_counts_depth <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/rna_counts.for_depth_estimation.mpranalyze.txt", sep="\t", header=TRUE)
row.names(rna_counts_depth) <- rna_counts_depth$element
rna_counts_depth <- rna_counts_depth[ , !(names(rna_counts_depth) %in% c("element")), drop=FALSE]
rna_counts_depth <- as.matrix(rna_counts_depth)
dna_cols_depth <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/dna_col_ann.for_depth_estimation.mpranalyze.txt", sep="\t", header=TRUE)
names(dna_cols_depth) <- c("id", "condition", "sample")
# add second row to dna_cols_depth
row2 <- data.frame(id="dna_2", condition="dna", sample="2")
dna_cols_depth <- rbind(dna_cols_depth, row2)
row.names(dna_cols_depth) <- dna_cols_depth$id
rna_cols_depth <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/rna_col_ann.for_depth_estimation.mpranalyze.txt", sep="\t", header=TRUE)
names(rna_cols_depth) <- c("id", "condition", "sample")
row.names(rna_cols_depth) <- rna_cols_depth$id
rna_cols_depth
# make sure everything is a factor
dna_cols_depth$condition <- as.factor(dna_cols_depth$condition)
rna_cols_depth$condition <- as.factor(rna_cols_depth$condition)
rna_cols_depth$sample <- as.factor(rna_cols_depth$sample)
rna_cols_depth
###Output
_____no_output_____
###Markdown
then data to model: first, DNA (same for all models)
###Code
all_comp_dna_counts <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/dna_counts.all_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(all_comp_dna_counts) <- all_comp_dna_counts$comp_id
all_comp_dna_counts <- all_comp_dna_counts[ , !(names(all_comp_dna_counts) %in% c("comp_id"))]
all_comp_dna_counts <- as.matrix(all_comp_dna_counts)
all_comp_dna_cols <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/dna_col_ann.all_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(all_comp_dna_cols) <- all_comp_dna_cols$X
head(all_comp_dna_cols)
all_comp_dna_cols$barcode <- as.factor(all_comp_dna_cols$barcode)
all_comp_dna_cols$seq <- as.factor(all_comp_dna_cols$seq)
all_comp_dna_cols$condition <- as.factor(all_comp_dna_cols$condition)
all_comp_dna_cols
###Output
_____no_output_____
###Markdown
then controls (same for all models)
###Code
all_comp_ctrls <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/ctrl_status.all_comp.mpranalyze.txt", sep="\t", header=TRUE)
all_comp_ctrls <- as.logical(all_comp_ctrls$ctrl_status)
head(all_comp_ctrls)
length(all_comp_ctrls)
###Output
_____no_output_____
###Markdown
then data to model: native effects
###Code
native_rna_counts <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/native_rna_counts.seq_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(native_rna_counts) <- native_rna_counts$comp_id
native_rna_counts <- native_rna_counts[ , !(names(native_rna_counts) %in% c("comp_id"))]
native_rna_counts <- as.matrix(native_rna_counts)
native_rna_cols <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/native_col_ann.seq_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(native_rna_cols) <- native_rna_cols$X
head(native_rna_cols)
# make sure everything is a factor
native_rna_cols$barcode <- as.factor(native_rna_cols$barcode)
native_rna_cols$seq <- as.factor(native_rna_cols$seq)
native_rna_cols$condition <- as.factor(native_rna_cols$condition)
head(native_rna_cols)
###Output
_____no_output_____
###Markdown
cis+ trans interactions
###Code
all_rna_counts <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/all_rna_counts.seq_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(all_rna_counts) <- all_rna_counts$comp_id
all_rna_counts <- all_rna_counts[ , !(names(all_rna_counts) %in% c("comp_id"))]
all_rna_counts <- as.matrix(all_rna_counts)
all_rna_cols <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/all_col_ann.seq_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(all_rna_cols) <- all_rna_cols$X
# make sure everything is a factor
all_rna_cols$barcode <- as.factor(all_rna_cols$barcode)
all_rna_cols$seq <- as.factor(all_rna_cols$seq)
all_rna_cols$condition <- as.factor(all_rna_cols$condition)
head(all_rna_cols)
###Output
_____no_output_____
###Markdown
human/mouse data for cis effects in each cell line
###Code
# for seq comparisons, load each cell line data separately
hues64_rna_counts <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/HUES64_rna_counts.seq_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(hues64_rna_counts) <- hues64_rna_counts$comp_id
hues64_rna_counts <- hues64_rna_counts[ , !(names(hues64_rna_counts) %in% c("comp_id"))]
hues64_rna_counts <- as.matrix(hues64_rna_counts)
mesc_rna_counts <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/mESC_rna_counts.seq_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(mesc_rna_counts) <- mesc_rna_counts$comp_id
mesc_rna_counts <- mesc_rna_counts[ , !(names(mesc_rna_counts) %in% c("comp_id"))]
mesc_rna_counts <- as.matrix(mesc_rna_counts)
hues64_rna_cols <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/HUES64_col_ann.seq_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(hues64_rna_cols) <- hues64_rna_cols$X
mesc_rna_cols <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/mESC_col_ann.seq_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(mesc_rna_cols) <- mesc_rna_cols$X
# make sure everything is a factor
hues64_rna_cols$barcode <- as.factor(hues64_rna_cols$barcode)
mesc_rna_cols$barcode <- as.factor(mesc_rna_cols$barcode)
hues64_rna_cols$seq <- as.factor(hues64_rna_cols$seq)
mesc_rna_cols$seq <- as.factor(mesc_rna_cols$seq)
hues64_rna_cols$condition <- as.factor(hues64_rna_cols$condition)
mesc_rna_cols$condition <- as.factor(mesc_rna_cols$condition)
head(hues64_rna_cols)
###Output
_____no_output_____
###Markdown
human/mouse data for trans effects of each sequence
###Code
# for cell comparisons, load each cell line data separately
human_rna_counts <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/human_rna_counts.cell_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(human_rna_counts) <- human_rna_counts$comp_id
human_rna_counts <- human_rna_counts[ , !(names(human_rna_counts) %in% c("comp_id"))]
human_rna_counts <- as.matrix(human_rna_counts)
mouse_rna_counts <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/mouse_rna_counts.cell_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(mouse_rna_counts) <- mouse_rna_counts$comp_id
mouse_rna_counts <- mouse_rna_counts[ , !(names(mouse_rna_counts) %in% c("comp_id"))]
mouse_rna_counts <- as.matrix(mouse_rna_counts)
human_rna_cols <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/human_col_ann.cell_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(human_rna_cols) <- human_rna_cols$X
mouse_rna_cols <- read.table("../../../data/02__mpra/01__counts/mpranalyze_files/mouse_col_ann.cell_comp.mpranalyze.txt", sep="\t", header=TRUE)
row.names(mouse_rna_cols) <- mouse_rna_cols$X
# make sure everything is a factor
human_rna_cols$barcode <- as.factor(human_rna_cols$barcode)
mouse_rna_cols$barcode <- as.factor(mouse_rna_cols$barcode)
human_rna_cols$seq <- as.factor(human_rna_cols$seq)
mouse_rna_cols$seq <- as.factor(mouse_rna_cols$seq)
human_rna_cols$condition <- as.factor(human_rna_cols$condition)
mouse_rna_cols$condition <- as.factor(mouse_rna_cols$condition)
head(human_rna_cols)
###Output
_____no_output_____
###Markdown
2. estimate library depth for each sample/condition pair Since our library has many more sequences than just the TSSs we want to model, estimate the library depth based on the full sequencing output (or downsampled counts, in the HUES64 case), then manually provide this estimate to the model
###Code
# create MPRA object
depth_obj <- MpraObject(dnaCounts = dna_counts_depth, rnaCounts = rna_counts_depth,
dnaAnnot = dna_cols_depth, rnaAnnot = rna_cols_depth)
# estimate depth factors using uq -- here, a sample/condition pair == 1 library
depth_obj <- estimateDepthFactors(depth_obj, lib.factor = c("sample", "condition"), depth.estimator='uq',
which.lib = "dna")
depth_obj <- estimateDepthFactors(depth_obj, lib.factor = c("id"),
depth.estimator='uq', which.lib = "rna")
rna_depths <- rnaDepth(depth_obj)
rna_depths
rna_cols_depth$depth <- rna_depths
rna_cols_depth
###Output
_____no_output_____
###Markdown
3. run model to compare native effects
###Code
nrow(rna_cols_depth)
nrow(native_rna_cols)
head(native_rna_cols)
# first need to set the dnadepths and rnadepths manually
all_comp_dna_cols$depth <- rep(1, nrow(all_comp_dna_cols))
# note 13 will change depending how many barcodes there are per element
native_rna_cols$depth <- rep(rna_depths, each=13)
# create MPRA object
obj <- MpraObject(dnaCounts = all_comp_dna_counts, rnaCounts = native_rna_counts,
dnaAnnot = all_comp_dna_cols, rnaAnnot = native_rna_cols, controls = all_comp_ctrls,
BPPARAM = SnowParam(workers=16,type="SOCK"))
obj <- setDepthFactors(obj, dnaDepth = all_comp_dna_cols$depth, rnaDepth = native_rna_cols$depth)
obj <- analyzeComparative(obj = obj,
dnaDesign = ~ barcode,
rnaDesign = ~ seq,
reducedDesign = ~ 1)
native_res <- testLrt(obj)
head(native_res)
hist(native_res[all_comp_ctrls,]$pval)
hist(native_res[!all_comp_ctrls,]$pval)
write.table(native_res, file = "../../../data/02__mpra/02__activs/native_results.txt", sep = "\t",
quote = FALSE)
###Output
_____no_output_____
###Markdown
4. run model to compare seq / condition interactions
###Code
# note 13 will change depending how many barcodes there are per element
all_rna_cols$depth <- rep(rna_depths, each=26)
# create MPRA object
obj1 <- MpraObject(dnaCounts = all_comp_dna_counts, rnaCounts = all_rna_counts,
dnaAnnot = all_comp_dna_cols, rnaAnnot = all_rna_cols, controls = all_comp_ctrls,
BPPARAM = SnowParam(workers=16,type="SOCK"))
obj1 <- setDepthFactors(obj1, dnaDepth = all_comp_dna_cols$depth, rnaDepth = all_rna_cols$depth)
head(all_rna_cols)
obj1 <- analyzeComparative(obj = obj1,
dnaDesign = ~ barcode,
rnaDesign = ~ seq + condition+ seq:condition,
reducedDesign = ~ seq + condition)
int_res <- testLrt(obj1)
head(int_res)
hist(int_res[all_comp_ctrls,]$pval)
hist(int_res[!all_comp_ctrls,]$pval)
write.table(int_res, file = "../../../data/02__mpra/02__activs/cis_trans_interaction_results.txt", sep = "\t",
quote = FALSE)
###Output
_____no_output_____
###Markdown
5. run model to compare seqs in HUES64
###Code
rna_cols_depth
hues64_depths <- rna_depths[1:3]
hues64_depths
# note 13 will change depending how many barcodes there are per element
hues64_rna_cols$depth <- rep(hues64_depths, each=26)
# create MPRA object
obj2 <- MpraObject(dnaCounts = all_comp_dna_counts, rnaCounts = hues64_rna_counts,
dnaAnnot = all_comp_dna_cols, rnaAnnot = hues64_rna_cols, controls = all_comp_ctrls,
BPPARAM = SnowParam(workers=16,type="SOCK"))
obj2 <- setDepthFactors(obj2, dnaDepth = all_comp_dna_cols$depth, rnaDepth = hues64_rna_cols$depth)
obj2 <- analyzeComparative(obj = obj2,
dnaDesign = ~ barcode,
rnaDesign = ~ seq,
reducedDesign = ~ 1)
hues64_res <- testLrt(obj2)
head(hues64_res)
hist(hues64_res[all_comp_ctrls,]$pval)
hist(hues64_res[!all_comp_ctrls,]$pval)
write.table(hues64_res, file = "../../../data/02__mpra/02__activs/HUES64_cis_results.txt", sep = "\t",
quote = FALSE)
###Output
_____no_output_____
###Markdown
6. run model to compare seqs in mESC
###Code
# note 13 will change depending how many barcodes there are per element
mesc_depths <- rna_depths[4:6]
mesc_rna_cols$depth <- rep(mesc_depths, each=26)
# create MPRA object
obj3 <- MpraObject(dnaCounts = all_comp_dna_counts, rnaCounts = mesc_rna_counts,
dnaAnnot = all_comp_dna_cols, rnaAnnot = mesc_rna_cols, controls = all_comp_ctrls,
BPPARAM = SnowParam(workers=16,type="SOCK"))
obj3 <- setDepthFactors(obj3, dnaDepth = all_comp_dna_cols$depth, rnaDepth = mesc_rna_cols$depth)
obj3 <- analyzeComparative(obj = obj3,
dnaDesign = ~ barcode,
rnaDesign = ~ seq,
reducedDesign = ~ 1)
mesc_res <- testLrt(obj3)
head(mesc_res)
hist(mesc_res[all_comp_ctrls,]$pval)
hist(mesc_res[!all_comp_ctrls,]$pval)
write.table(mesc_res, file = "../../../data/02__mpra/02__activs/mESC_cis_results.txt", sep = "\t",
quote = FALSE)
###Output
_____no_output_____
###Markdown
7. run model to compare cells for human seq
###Code
# note 13 will change depending how many barcodes there are per element
human_rna_cols$depth <- rep(rna_depths, each=13)
# create MPRA object
obj4 <- MpraObject(dnaCounts = all_comp_dna_counts, rnaCounts = human_rna_counts,
dnaAnnot = all_comp_dna_cols, rnaAnnot = human_rna_cols, controls = all_comp_ctrls,
BPPARAM = SnowParam(workers=16,type="SOCK"))
obj4 <- setDepthFactors(obj4, dnaDepth = all_comp_dna_cols$depth, rnaDepth = human_rna_cols$depth)
obj4 <- analyzeComparative(obj = obj4,
dnaDesign = ~ barcode,
rnaDesign = ~ condition,
reducedDesign = ~ 1)
human_trans_res <- testLrt(obj4)
head(human_trans_res)
hist(human_trans_res[all_comp_ctrls,]$pval)
hist(human_trans_res[!all_comp_ctrls,]$pval)
write.table(human_trans_res, file = "../../../data/02__mpra/02__activs/human_trans_results.txt", sep = "\t",
quote = FALSE)
###Output
_____no_output_____
###Markdown
8. run model to compare cells for mouse seq
###Code
# note 13 will change depending how many barcodes there are per element
mouse_rna_cols$depth <- rep(rna_depths, each=13)
# create MPRA object
obj5 <- MpraObject(dnaCounts = all_comp_dna_counts, rnaCounts = mouse_rna_counts,
dnaAnnot = all_comp_dna_cols, rnaAnnot = mouse_rna_cols, controls = all_comp_ctrls,
BPPARAM = SnowParam(workers=16,type="SOCK"))
obj5 <- setDepthFactors(obj5, dnaDepth = all_comp_dna_cols$depth, rnaDepth = mouse_rna_cols$depth)
obj5 <- analyzeComparative(obj = obj5,
dnaDesign = ~ barcode,
rnaDesign = ~ condition,
reducedDesign = ~ 1)
mouse_trans_res <- testLrt(obj5)
head(mouse_trans_res)
hist(mouse_trans_res[all_comp_ctrls,]$pval)
hist(mouse_trans_res[!all_comp_ctrls,]$pval)
write.table(mouse_trans_res, file = "../../../data/02__mpra/02__activs/mouse_trans_results.txt", sep = "\t",
quote = FALSE)
###Output
_____no_output_____ |
python/d2l-en/pytorch/chapter_convolutional-modern/densenet.ipynb | ###Markdown
Densely Connected Networks (DenseNet) ResNet significantly changed the view of how to parametrize the functions in deep networks. *DenseNet* (dense convolutional network) is to some extent the logical extension of this :cite:`Huang.Liu.Van-Der-Maaten.ea.2017`. To understand how to arrive at it, let us take a small detour to mathematics. From ResNet to DenseNet Recall the Taylor expansion for functions. For the point $x = 0$ it can be written as $$f(x) = f(0) + f'(0) x + \frac{f''(0)}{2!} x^2 + \frac{f'''(0)}{3!} x^3 + \ldots.$$ The key point is that it decomposes a function into increasingly higher order terms. In a similar vein, ResNet decomposes functions into $$f(\mathbf{x}) = \mathbf{x} + g(\mathbf{x}).$$ That is, ResNet decomposes $f$ into a simple linear term and a more complex nonlinear one. What if we want to capture (not necessarily add) information beyond two terms? One solution was DenseNet :cite:`Huang.Liu.Van-Der-Maaten.ea.2017`. (figure :label:`fig_densenet_block`: ResNet vs. DenseNet cross-layer connections — addition vs. concatenation) As shown in :numref:`fig_densenet_block`, the key difference between ResNet and DenseNet is that in the latter case outputs are *concatenated* (denoted by $[,]$) rather than added. As a result, we perform a mapping from $\mathbf{x}$ to its values after applying an increasingly complex sequence of functions: $$\mathbf{x} \to \left[\mathbf{x}, f_1(\mathbf{x}), f_2([\mathbf{x}, f_1(\mathbf{x})]), f_3([\mathbf{x}, f_1(\mathbf{x}), f_2([\mathbf{x}, f_1(\mathbf{x})])]), \ldots\right].$$ In the end, all these functions are combined in MLP to reduce the number of features again. In terms of implementation this is quite simple: rather than adding terms, we concatenate them. The name DenseNet arises from the fact that the dependency graph between variables becomes quite dense. The last layer of such a chain is densely connected to all previous layers. (figure :label:`fig_densenet`: dense connections in DenseNet) The dense connections are shown in :numref:`fig_densenet`. The main components that compose a DenseNet are *dense blocks* and *transition layers*. The former define how the inputs and outputs are concatenated, while the latter control the number of channels so that it is not too large. [**Dense Blocks**] DenseNet uses the modified "batch normalization, activation, and convolution" structure of ResNet (see the exercise in :numref:`sec_resnet`). First, we implement this convolution block structure.
###Code
import torch
from torch import nn
from d2l import torch as d2l
def conv_block(input_channels, num_channels):
return nn.Sequential(
nn.BatchNorm2d(input_channels), nn.ReLU(),
nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))
###Output
_____no_output_____
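###Markdown
To make the contrast concrete, here is a minimal illustrative sketch (not part of the original chapter): ResNet-style addition keeps the channel count fixed, while DenseNet-style concatenation grows it.
###Code
# Hypothetical tensors standing in for a block input and its output
import torch

X = torch.randn(4, 3, 8, 8)
fX = torch.randn(4, 3, 8, 8)
resnet_style = X + fX                        # addition: shape stays (4, 3, 8, 8)
densenet_style = torch.cat((X, fX), dim=1)   # concatenation: channels grow to (4, 6, 8, 8)
resnet_style.shape, densenet_style.shape
###Output
_____no_output_____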
###Markdown
A *dense block* consists of multiple convolution blocks, each using the same number of output channels. In the forward propagation, however, we concatenate the input and output of each convolution block on the channel dimension.
###Code
class DenseBlock(nn.Module):
def __init__(self, num_convs, input_channels, num_channels):
super(DenseBlock, self).__init__()
layer = []
for i in range(num_convs):
layer.append(conv_block(
num_channels * i + input_channels, num_channels))
self.net = nn.Sequential(*layer)
def forward(self, X):
for blk in self.net:
Y = blk(X)
# Concatenate the input and output of each block on the channel
# dimension
X = torch.cat((X, Y), dim=1)
return X
###Output
_____no_output_____
###Markdown
In the following example, we [**define a `DenseBlock` instance**] with 2 convolution blocks of 10 output channels. When using an input with 3 channels, we will get an output with $3+2\times 10=23$ channels. The number of convolution block channels controls the growth in the number of output channels relative to the number of input channels. This is also referred to as the *growth rate*.
###Code
blk = DenseBlock(2, 3, 10)
X = torch.randn(4, 3, 8, 8)
Y = blk(X)
Y.shape
###Output
_____no_output_____
###Markdown
[**Transition Layers**] Since each dense block will increase the number of channels, adding too many of them will lead to an excessively complex model. A *transition layer* is used to control the complexity of the model. It reduces the number of channels by using a $1\times 1$ convolutional layer and halves the height and width via average pooling with a stride of 2, further reducing the complexity of the model.
###Code
def transition_block(input_channels, num_channels):
return nn.Sequential(
nn.BatchNorm2d(input_channels), nn.ReLU(),
nn.Conv2d(input_channels, num_channels, kernel_size=1),
nn.AvgPool2d(kernel_size=2, stride=2))
###Output
_____no_output_____
###Markdown
[**Apply a transition layer**] with 10 channels to the output of the dense block in the previous example. This reduces the number of output channels to 10, and halves the height and width.
###Code
blk = transition_block(23, 10)
blk(Y).shape
###Output
_____no_output_____
###Markdown
[**DenseNet Model**] Next, we will construct a DenseNet model. DenseNet first uses the same single convolutional layer and maximum pooling layer as in ResNet.
###Code
b1 = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
nn.BatchNorm2d(64), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
###Output
_____no_output_____
###Markdown
Then, similar to the four modules made up of residual blocks that ResNet uses, DenseNet uses four dense blocks. Similar to ResNet, we can set the number of convolutional layers used in each dense block. Here, we set it to 4, consistent with the ResNet-18 model in :numref:`sec_resnet`. Furthermore, we set the number of channels (i.e., growth rate) for the convolutional layers in the dense block to 32, so 128 channels will be added to each dense block. In ResNet, the height and width are reduced between each module by a residual block with a stride of 2. Here, we use the transition layer to halve the height and width and halve the number of channels.
###Code
# `num_channels`: the current number of channels
num_channels, growth_rate = 64, 32
num_convs_in_dense_blocks = [4, 4, 4, 4]
blks = []
for i, num_convs in enumerate(num_convs_in_dense_blocks):
blks.append(DenseBlock(num_convs, num_channels, growth_rate))
# This is the number of output channels in the previous dense block
num_channels += num_convs * growth_rate
# A transition layer that halves the number of channels is added between
# the dense blocks
if i != len(num_convs_in_dense_blocks) - 1:
blks.append(transition_block(num_channels, num_channels // 2))
num_channels = num_channels // 2
###Output
_____no_output_____
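###Markdown
As a quick check of the channel bookkeeping (an illustrative addition, not part of the original chapter): each dense block adds $4 \times 32 = 128$ channels and each transition layer halves the count, ending at 248 channels before the classification head.
###Code
# Trace how `num_channels` evolves through the four dense blocks and three transition layers
channels, growth_rate = 64, 32
for i, num_convs in enumerate([4, 4, 4, 4]):
    channels += num_convs * growth_rate      # dense block adds num_convs * growth_rate channels
    print(f'after dense block {i + 1}: {channels}')
    if i != 3:
        channels //= 2                       # transition layer halves the channel count
        print(f'after transition layer {i + 1}: {channels}')
###Output
_____no_output_____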
###Markdown
Similar to ResNet, a global pooling layer and a fully-connected layer are connected at the end to produce the output.
###Code
net = nn.Sequential(
b1, *blks,
nn.BatchNorm2d(num_channels), nn.ReLU(),
nn.AdaptiveMaxPool2d((1, 1)),
nn.Flatten(),
nn.Linear(num_channels, 10))
###Output
_____no_output_____
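###Markdown
As a quick sanity check (an illustrative addition, not part of the original notebook), we can pass a dummy single-channel $96 \times 96$ image through the assembled network and confirm that it produces 10 class scores.
###Code
# Dummy input with the same shape as the resized Fashion-MNIST images used below
X = torch.randn(1, 1, 96, 96)
net(X).shape  # expected: torch.Size([1, 10])
###Output
_____no_output_____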
###Markdown
[**Training**] Since we are using a deeper network here, in this section, we will reduce the input height and width from 224 to 96 to simplify the computation.
###Code
lr, num_epochs, batch_size = 0.1, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
###Output
loss 0.156, train acc 0.943, test acc 0.825
5535.8 examples/sec on cuda:0
|
Worksheet_set_1/Python worksheet 1.ipynb | ###Markdown
Q11 to Q15 11. Write a Python program to find the factorial of a number.
###Code
def fact(n):
    # base case: 0! = 1! = 1 (the original n*1 returned 0 for n=0)
    return 1 if n <= 1 else n * fact(n - 1)
fact(6)
###Output
_____no_output_____
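###Markdown
As a cross-check (an illustrative addition, not part of the worksheet answer), the same result can be computed iteratively or with the standard library.
###Code
import math

def fact_iter(n):
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result

fact_iter(6), math.factorial(6)  # both give 720
###Output
_____no_output_____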
###Markdown
12. Write a python program to find whether a number is prime or composite.
###Code
def check_prime(n):
if(n<2):return "neither prime number nor composite number."
elif(n==2):return "only even prime number"
else:
for i in range(2,n):
if(n%i==0):
ret="composite number"
break
else: ret="prime number"
return ret
for i in range(0,30+1):
print(i,":",check_prime(i))
###Output
0 : neither prime number nor composite number.
1 : neither prime number nor composite number.
2 : only even prime number
3 : prime number
4 : composite number
5 : prime number
6 : composite number
7 : prime number
8 : composite number
9 : composite number
10 : composite number
11 : prime number
12 : composite number
13 : prime number
14 : composite number
15 : composite number
16 : composite number
17 : prime number
18 : composite number
19 : prime number
20 : composite number
21 : composite number
22 : composite number
23 : prime number
24 : composite number
25 : composite number
26 : composite number
27 : composite number
28 : composite number
29 : prime number
30 : composite number
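###Markdown
An illustrative alternative (not part of the worksheet answer): trial division only needs to check divisors up to the square root of n, which is much faster for large numbers.
###Code
def is_prime(n):
    """Return True if n is prime, testing odd divisors only up to sqrt(n)."""
    if n < 2:
        return False
    if n < 4:
        return True              # 2 and 3 are prime
    if n % 2 == 0:
        return False
    for i in range(3, int(n ** 0.5) + 1, 2):
        if n % i == 0:
            return False
    return True

[n for n in range(2, 31) if is_prime(n)]  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
###Output
_____no_output_____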
###Markdown
13. Write a python program to check whether a given string is palindrome or not.
###Code
def palindrome(st):
size=len(st)
if size<2:return "size is too small"
for i in range(1,size+1):
if st[i-1]==st[size-i]:ret="given string is palindrome"
else:return "given string is not palindrome"
return ret
vals= [ 'a','SoloS','RotatoR','saurabh','SagaS','elephant','TenT','RepapeR','CiviC','KayaK','Lever','MadaM','python']
for i in vals:print(i,":",palindrome(i))
###Output
a : size is too small
SoloS : given string is palindrome
RotatoR : given string is palindrome
saurabh : given string is not palindrome
SagaS : given string is palindrome
elephant : given string is not palindrome
TenT : given string is not palindrome
RepapeR : given string is palindrome
CiviC : given string is palindrome
KayaK : given string is palindrome
Lever : given string is not palindrome
MadaM : given string is palindrome
python : given string is not palindrome
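###Markdown
An illustrative one-line alternative (not part of the worksheet answer): reverse the string with slicing and compare it to the original.
###Code
def is_palindrome(st):
    # st[::-1] is the reversed string; equality means st is a palindrome
    return st == st[::-1]

[w for w in ['SoloS', 'saurabh', 'MadaM', 'python'] if is_palindrome(w)]  # ['SoloS', 'MadaM']
###Output
_____no_output_____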
###Markdown
14. Write a Python program to get the third side of a right-angled triangle from two given sides.
###Code
import math
def hypo(s1,s2):
return math.sqrt((s1)**2+(s2)**2)
hypo(3,4)
import math
def hyp(s1=0,s2=0,hyp=0):
print("Kindly enter zero to find that side")
s1=int(input("Enter a first side: "))
s2=int(input("Enter a second side: "))
hyp=int(input("Enter a hypo side: "))
if(s1==0):return math.sqrt((hyp)**2-(s2)**2)
elif(s2==0):return math.sqrt((hyp)**2-(s1)**2)
else:return math.sqrt((s1)**2+(s2)**2)
hyp()
###Output
Kindly enter zero to find that side
Enter a first side: 3
Enter a second side: 0
Enter a hypo side: 5
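###Markdown
An illustrative shortcut (not part of the worksheet answer): the standard library computes the hypotenuse directly.
###Code
import math

math.hypot(3, 4)  # 5.0
###Output
_____no_output_____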
###Markdown
15. Write a python program to print the frequency of each of the characters present in a given string
###Code
def count_String(st):
count={}
for i in st:
if i in count:count[i]=count[i]+1
else:count[i]=1
return count
count_String('Saurabh Amareshchandra Pathak')
###Output
_____no_output_____ |
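###Markdown
An illustrative alternative (not part of the worksheet answer): `collections.Counter` returns the same character frequencies in one call.
###Code
from collections import Counter

Counter('Saurabh Amareshchandra Pathak')
###Output
_____no_output_____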
colab/collab_donkeycar_keras.ipynb | ###Markdown
Install TensorFlow
###Code
# Newest nightly version. Didn't work for training. Missing 'ConfigProto' library
#!pip uninstall tensorflow
!pip install tensorflow-gpu==1.13.1
# Install tensor flow gpu in case it's not installed
#!pip install tensorflow-gpu==1.14.0
# Check if TF installed
#!pip install tensorflow==2.0.0-rc1
!python3 -c 'import tensorflow as tf; print(tf.__version__)'
# Check if GPU in computer
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
###Output
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
###Markdown
Installation DonkeyCar
###Code
!git clone https://github.com/autorope/donkeycar.git
%cd /content/donkeycar
!git checkout dev
!pip3 install -e .[pc]
!donkey createcar --path /content/d2/
###Output
Cloning into 'donkeycar'...
remote: Enumerating objects: 18, done.[K
remote: Counting objects: 100% (18/18), done.[K
remote: Compressing objects: 100% (10/10), done.[K
remote: Total 11700 (delta 8), reused 15 (delta 8), pack-reused 11682[K
Receiving objects: 100% (11700/11700), 59.79 MiB | 46.70 MiB/s, done.
Resolving deltas: 100% (7319/7319), done.
/content/donkeycar
Already on 'dev'
Your branch is up to date with 'origin/dev'.
Obtaining file:///content/donkeycar
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (1.17.5)
Requirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (6.2.2)
Requirement already satisfied: docopt in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (0.6.2)
Requirement already satisfied: tornado in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (4.5.3)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (2.21.0)
Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (2.8.0)
Requirement already satisfied: moviepy in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (0.2.3.5)
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (0.25.3)
Requirement already satisfied: PrettyTable in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (0.7.2)
Collecting paho-mqtt
[?25l Downloading https://files.pythonhosted.org/packages/59/11/1dd5c70f0f27a88a3a05772cd95f6087ac479fac66d9c7752ee5e16ddbbc/paho-mqtt-1.5.0.tar.gz (99kB)
[K |████████████████████████████████| 102kB 3.3MB/s
[?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from donkeycar==3.1.1) (3.1.2)
Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->donkeycar==3.1.1) (2.8)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->donkeycar==3.1.1) (1.24.3)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->donkeycar==3.1.1) (3.0.4)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->donkeycar==3.1.1) (2019.11.28)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from h5py->donkeycar==3.1.1) (1.12.0)
Requirement already satisfied: decorator<5.0,>=4.0.2 in /usr/local/lib/python3.6/dist-packages (from moviepy->donkeycar==3.1.1) (4.4.1)
Requirement already satisfied: imageio<3.0,>=2.1.2 in /usr/local/lib/python3.6/dist-packages (from moviepy->donkeycar==3.1.1) (2.4.1)
Requirement already satisfied: tqdm<5.0,>=4.11.2 in /usr/local/lib/python3.6/dist-packages (from moviepy->donkeycar==3.1.1) (4.28.1)
Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas->donkeycar==3.1.1) (2.6.1)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->donkeycar==3.1.1) (2018.9)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->donkeycar==3.1.1) (2.4.6)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->donkeycar==3.1.1) (1.1.0)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->donkeycar==3.1.1) (0.10.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib->donkeycar==3.1.1) (42.0.2)
Building wheels for collected packages: paho-mqtt
Building wheel for paho-mqtt (setup.py) ... [?25l[?25hdone
Created wheel for paho-mqtt: filename=paho_mqtt-1.5.0-cp36-none-any.whl size=61416 sha256=c0b0b5453d06b1aa07ff7874e2fae0afbc34d095b9fbaccd5d881b7191f8bedf
Stored in directory: /root/.cache/pip/wheels/02/94/6c/8474137cb7a5a3e001d70a22c8ff919caee69435376bccce79
Successfully built paho-mqtt
Installing collected packages: paho-mqtt, donkeycar
Running setup.py develop for donkeycar
Successfully installed donkeycar paho-mqtt-1.5.0
using donkey v3.1.1 ...
Creating car folder: /content/d2/
making dir /content/d2/
Creating data & model folders.
making dir /content/d2/models
making dir /content/d2/data
making dir /content/d2/logs
Copying car application template: complete
Copying car config defaults. Adjust these before starting your car.
Copying train script. Adjust these before starting your car.
Copying my car config overrides
Donkey setup complete.
###Markdown
Upload files **From PC**
###Code
#for Zipping in pc with linux
#zip -r ~/donkeycar/d2/data/data.zip *
import os
from google.colab import files
if(os.path.exists("/content/data.zip")):
os.remove("/content/data.zip")
if(os.path.exists("/content/d2/data/data.zip")):
os.remove("/content/d2/data/data.zip")
uploaded = files.upload()
WORK_FOLDER = "/content/d2/data/"
if(os.path.exists(WORK_FOLDER) == False):
os.makedirs(WORK_FOLDER)
!mv /content/data.zip /content/d2/data/
%cd /content/d2/data/
!unzip -o data.zip
#for Zipping in pc with linux
#zip -r ~/donkeycar/d2/data/data.zip *
import os
from google.colab import files
if(os.path.exists("/content/data.zip")):
os.remove("/content/data.zip")
if(os.path.exists("/content/d2/data/data.zip")):
os.remove("/content/d2/data/data.zip")
uploaded = files.upload()
WORK_FOLDER = "/content/d2/data/"
if(os.path.exists(WORK_FOLDER) == False):
os.makedirs(WORK_FOLDER)
#!mv /content/data.zip /content/d2/data/
#%cd /content/d2/data/
#!unzip -o data.zip
###Output
_____no_output_____
###Markdown
**From Github**
###Code
%cd /content/d2/data/
!rm -r * #Erase in case of old data
!git clone https://github.com/JuanFuriaz/donkey_car_data.git # get the latest data
!mv /content/d2/data/donkey_car_data/* /content/d2/data/ #
!rm -r donkey_car_data
!rm -r tubs-bigline-december/
###Output
/content/d2/data
Cloning into 'donkey_car_data'...
remote: Enumerating objects: 40174, done.[K
remote: Total 40174 (delta 0), reused 0 (delta 0), pack-reused 40174[K
Receiving objects: 100% (40174/40174), 120.24 MiB | 40.55 MiB/s, done.
Resolving deltas: 100% (14036/14036), done.
Checking out files: 100% (34526/34526), done.
###Markdown
Training
###Code
!python /content/d2/manage.py train --type linear --model /content/d2/models/lin_aug2_collab.h5
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
Update Models via GitHub **Initialize Folder Models in GitHub**
###Code
%cd /content/d2/models
!git config --global user.name "JuanFuriaz"
!git config --global user.email "[email protected]"
!git config --global credential.helper 'cache --timeout=100800'
!git config --global user.password "oaDCpWTF2019!"
!git init
#!git add *
#!git commit -m "initialize in cloud"
!git remote add origin https://JuanFuriaz:[email protected]/JuanFuriaz/donkey_car_models.git
!git pull origin master --allow-unrelated-histories
###Output
/content/d2/models
Initialized empty Git repository in /content/d2/models/.git/
remote: Enumerating objects: 28, done.[K
remote: Counting objects: 100% (28/28), done.[K
remote: Compressing objects: 100% (26/26), done.[K
remote: Total 112 (delta 5), reused 25 (delta 2), pack-reused 84[K
Receiving objects: 100% (112/112), 85.23 MiB | 38.03 MiB/s, done.
Resolving deltas: 100% (28/28), done.
From https://github.com/JuanFuriaz/donkey_car_models
* branch master -> FETCH_HEAD
* [new branch] master -> origin/master
###Markdown
**Upload after Initialization**
###Code
%cd /content/d2/models
!git add *
!git commit -m "model commit"
!git push -u origin master
###Output
_____no_output_____
###Markdown
Removing Folders or files via GitHub
###Code
#!unzip /content/d2/data/data.zip -d /content/d2/
!rm -r /content/d2/data/data.zip
###Output
_____no_output_____
###Markdown
Creating Keras HEATMAP video
###Code
%cd /content/
from keras.layers import Input
from keras.models import Model, load_model
from keras.layers import Convolution2D
import tensorflow as tf
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from glob import glob
from keras import backend as K
#plt.rcParams['animation.ffmpeg_path'] = '/home/jm/bin/ffmpeg' # explicit path for finding ffmpeg in my computer
def compute_visualisation_mask(img, functor, layers_kernels, layers_strides):
activations = functor([np.array([img])])
upscaled_activation = np.ones((3, 6))
for layer in [4, 3, 2, 1, 0]:
averaged_activation = np.mean(activations[layer], axis=3).squeeze(axis=0) * upscaled_activation
if layer > 0:
output_shape = (activations[layer - 1].shape[1], activations[layer - 1].shape[2])
else:
output_shape = (120, 160)
x = tf.constant(
np.reshape(averaged_activation, (1,averaged_activation.shape[0],averaged_activation.shape[1],1)),
tf.float32
)
conv = tf.nn.conv2d_transpose(
x, layers_kernels[layer],
output_shape=(1,output_shape[0],output_shape[1], 1),
strides=layers_strides[layer],
padding='VALID'
)
with tf.Session() as session:
result = session.run(conv)
upscaled_activation = np.reshape(result, output_shape)
final_visualisation_mask = upscaled_activation
return (final_visualisation_mask - np.min(final_visualisation_mask))/(np.max(final_visualisation_mask) - np.min(final_visualisation_mask))
def save_movie_mp4(image_array, video_name = "example.mp4"):
writer = animation.FFMpegFileWriter(fps=20, metadata=dict(artist='Me'), bitrate=1800)
dpi = 72.0
xpixels, ypixels = image_array[0].shape[0], image_array[0].shape[1]
fig = plt.figure(figsize=(ypixels/dpi, xpixels/dpi), dpi=dpi)
im = plt.figimage(image_array[0])
def animate(i):
im.set_array(image_array[i])
return (im,)
plt.show()
ani = animation.FuncAnimation(fig, animate, frames=len(image_array))
ani.save(video_name, writer=writer)
def get_video_array(video_limit=500, data_path = 'my/path/to/imgs/*.jpg', functor= None, layers_kernels = None, layers_strides = None):
def numericalSort(value):
parts = value.split("/")[-1]
parts = int(parts.split("_")[0])
return parts
imgs = []
alpha = 0.004
beta = 1.0 - alpha
counter = 0
for path in sorted(glob(data_path), key=numericalSort):
img = cv2.imread(path)
salient_mask = compute_visualisation_mask(img, functor, layers_kernels, layers_strides)
salient_mask_stacked = np.dstack((salient_mask,salient_mask))
salient_mask_stacked = np.dstack((salient_mask_stacked,salient_mask))
blend = cv2.addWeighted(img.astype('float32'), alpha, salient_mask_stacked, beta, 0.0)
imgs.append(blend)
counter += 1
if video_limit is not None:
if counter >= video_limit:
return imgs
return imgs
def get_keras_functor(model_path="my/path/to/model.h5"):
"""
Create CNN-model structure for Heatmap
"""
custom_objects = {"GlorotUniform": tf.keras.initializers.glorot_uniform}
model = load_model(model_path, custom_objects)
img_in = Input(shape=(120, 160, 3), name='img_in')
x = img_in
x = Convolution2D(24, (5, 5), strides=(2, 2), activation='relu', name='conv2d_1')(x)
x = Convolution2D(32, (5, 5), strides=(2, 2), activation='relu', name='conv2d_2')(x)
x = Convolution2D(64, (5, 5), strides=(2, 2), activation='relu', name='conv2d_3')(x)
x = Convolution2D(64, (3, 3), strides=(2, 2), activation='relu', name='conv2d_4')(x)
conv_5 = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu', name='conv2d_5')(x)
convolution_part = Model(inputs=[img_in], outputs=[conv_5])
for layer_num in ('1', '2', '3', '4', '5'):
convolution_part.get_layer('conv2d_' + layer_num).set_weights(
model.get_layer('conv2d_' + layer_num).get_weights())
inp = convolution_part.input # input placeholder
outputs = [layer.output for layer in convolution_part.layers][1:] # all layer outputs
functor = K.function([inp], outputs)
return functor
def main(video_limit = 100, data_path = 'my/path/to/imgs/*.jpg', model_path="my/path/to/model.h5", video_name = "example.mp4"):
functor = get_keras_functor(model_path= model_path)
kernel_3x3 = tf.constant(np.array([
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]]
]), tf.float32)
kernel_5x5 = tf.constant(np.array([
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]]
]), tf.float32)
layers_kernels = {4: kernel_3x3, 3: kernel_3x3, 2: kernel_5x5, 1: kernel_5x5, 0: kernel_5x5}
layers_strides = {4: [1, 1, 1, 1], 3: [1, 2, 2, 1], 2: [1, 2, 2, 1], 1: [1, 2, 2, 1], 0: [1, 2, 2, 1]}
imgs = get_video_array(video_limit= video_limit, data_path = data_path, functor= functor, layers_kernels = layers_kernels, layers_strides = layers_strides)
save_movie_mp4(imgs, video_name)
if __name__ == '__main__':
main(700, 'd2/data/tub_2_19-12-22/*.jpg',"d2/models/mod_lin_1.h5", "lin_mod_tub2_700.mp4" )
###Output
_____no_output_____ |
site/public/courses/DS-2.2/Notebooks/.ipynb_checkpoints/linear_regression_slides-checkpoint.ipynb | ###Markdown
Linear Regression In regression, we are interested in predicting a scalar-valued target, such as the price of a stock. By linear, we mean that the target must be predicted as a linear function of the inputs. Simple Linear Regression What elements are present in the diagram? The black dots are the observed values of x and y. The blue line is our least squares line. The red lines are the residuals, which are the distances between the observed values and the least squares line. Mathematical formulation for linear regression Linear regression is an approach for predicting a quantitative response using a feature or multiple features. It takes this form: $y_i = \beta_0 + \beta_1 x_i + \epsilon_i$. As we have $N$ samples, we can write it down in vector representation: ${\bf y} = \beta_0 + \beta_1 {\bf x} + {\bf \epsilon}$. The goal: obtain $\beta_0$ and $\beta_1$ from ${\bf x}$ and ${\bf y}$ such that we have the minimum error for $1/N \sum_{i=1}^{N}({\hat y}_i - y_i)^2$. Activity: Write Python code to obtain the intercept and coefficient for a simple regression problem. Task: for the given ${\bf x}$ and ${\bf y}$, obtain $\beta_0$ and $\beta_1$, and plot the result. Steps: 1- `X = np.array([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 7.042,10.791,5.313,7.997,5.654,9.27,3.1]) Y = np.array([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 2.827,3.465,1.65,2.904,2.42,2.94,1.3])` 2- Use numpy `np.polyfit` and `np.poly1d` to obtain the linear regression model coefficients 3- Use sklearn `from sklearn.linear_model import LinearRegression` to obtain the linear regression model coefficients
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(seed=0)
X = np.array([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
7.042,10.791,5.313,7.997,5.654,9.27,3.1])
Y = np.array([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
2.827,3.465,1.65,2.904,2.42,2.94,1.3])
plt.scatter(X, Y)
print(np.polyfit(X, Y, 1))
print(np.poly1d(np.polyfit(X, Y, 1)))
print(np.unique(X))
plt.plot(np.unique(X), np.poly1d(np.polyfit(X, Y, 1))(np.unique(X)), color='red', linewidth=5)
plt.show()
reg = LinearRegression().fit(X.reshape(-1, 1), Y.reshape(-1, 1))
print(reg.coef_)
print(reg.intercept_)
###Output
[0.25163494 0.79880123]
0.2516 x + 0.7988
[ 2.167 3.1 3.3 4.168 4.4 5.313 5.5 5.654 6.182 6.71
6.93 7.042 7.59 7.997 9.27 9.779 10.791]
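###Markdown
To connect the fitted line back to the least-squares objective above (an illustrative sketch, not part of the original notebook), the coefficients can also be computed in closed form: $\beta_1 = \sum_i (x_i - \bar{x})(y_i - \bar{y}) / \sum_i (x_i - \bar{x})^2$ and $\beta_0 = \bar{y} - \beta_1 \bar{x}$, which should reproduce the slope and intercept reported by `np.polyfit` and sklearn.
###Code
# Closed-form simple least squares from sample means and centered sums
x_mean, y_mean = X.mean(), Y.mean()
beta_1 = np.sum((X - x_mean) * (Y - y_mean)) / np.sum((X - x_mean) ** 2)
beta_0 = y_mean - beta_1 * x_mean
print(beta_1, beta_0)  # expected to match the polyfit result above
###Output
_____no_output_____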
###Markdown
Multiple Linear Regression This approach and model can be extended to multiple features: ${\bf y} = \beta_0 + \beta_1 {\bf x}_1 + \beta_2 {\bf x}_2 + \ldots + {\bf \epsilon}$. Read: https://github.com/justmarkham/DAT4/blob/master/notebooks/08_linear_regression.ipynb Activity: Obtain the linear regression parameters for the advertising data. Steps: 1- Load `Advertising.csv` 2- Use `from sklearn.linear_model import LinearRegression` and `import statsmodels.formula.api as smf` 3- Compare the sklearn result with ordinary least squares (OLS) from statsmodels 4- Good resource for OLS: https://www.statsmodels.org/stable/regression.html
###Code
import pandas as pd
from sklearn.linear_model import LinearRegression
import numpy as np
data = pd.read_csv('Advertising.csv')
# create X and y
feature_cols = ['TV', 'radio', 'newspaper']
X = data[feature_cols]
y = data.sales
# follow the usual sklearn pattern: import, instantiate, fit
lm = LinearRegression()
lm.fit(X, y)
# print intercept and coefficients
print(lm.intercept_)
print(lm.coef_)
print(lm.predict([[100, 25, 25]]))
import statsmodels.formula.api as smf
# create a fitted model with all three features
lm = smf.ols(formula='sales ~ TV + radio + newspaper', data=data).fit()
# print the coefficients
lm.params
###Output
_____no_output_____
###Markdown
How to evaluate a linear regression model? The linear regression model can be evaluated by: 1- Mean Absolute Error (MAE) 2- Mean Squared Error (MSE) 3- Root Mean Squared Error (RMSE) 4- R-Squared -> https://github.com/cs109/2015/blob/master/Lectures/07-BiasAndRegression.pdf Read: https://www.ritchieng.com/machine-learning-evaluate-linear-regression-model/ Read: https://www.graphpad.com/guides/prism/7/curve-fitting/r2_ameasureofgoodness_of_fitoflinearregression.htm?toc=0&printWindow Activity: For the advertising data, compute the R-squared
###Code
from sklearn.metrics import r2_score
print(reg.score(X.reshape(-1, 1), Y.reshape(-1, 1)))
print(r2_score(Y.reshape(-1, 1), reg.predict(X.reshape(-1, 1))))
###Output
0.6928760302783603
0.6928760302783603
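###Markdown
For completeness (an illustrative addition, not part of the original activity, and assuming `X`, `Y`, and `reg` still refer to the simple-regression fit from the first cell), the other metrics listed above can be computed with `sklearn.metrics`.
###Code
from sklearn.metrics import mean_absolute_error, mean_squared_error
import numpy as np

y_pred = reg.predict(X.reshape(-1, 1))
mae = mean_absolute_error(Y.reshape(-1, 1), y_pred)
mse = mean_squared_error(Y.reshape(-1, 1), y_pred)
rmse = np.sqrt(mse)
print(mae, mse, rmse)
###Output
_____no_output_____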
|