path | concatenated_notebook
---|---|
Charts_in_detail/BarChart.ipynb | ###Markdown
Preparing the Data
###Code
import numpy as np
import matplotlib.pyplot as plt

age = np.random.randint(20, 80, 100) #Parameters: 1) lowest (inclusive), 2) highest (exclusive), 3) size (quantity)
income = np.random.randint(5000, 50000, 100)
#Divide "age" and "income" into groups as follows
age_20_30, age_30_40, age_40_50, age_50_60, age_60_80 = [], [], [], [], []
for i in age:
if i<30:
age_20_30.append(i)
elif 30<=i<40:
age_30_40.append(i)
elif 40<=i<50:
age_40_50.append(i)
elif 50<=i<60:
age_50_60.append(i)
else:
age_60_80.append(i)
income_10k, income_20k, income_30k, income_40k, income_50k = [], [], [], [], []
for i in income:
if i<10000:
income_10k.append(i)
elif 10000<=i<20000:
income_20k.append(i)
elif 20000<=i<30000:
income_30k.append(i)
elif 30000<=i<40000:
income_40k.append(i)
else:
income_50k.append(i)
###Output
_____no_output_____
###Markdown
Initial Step
###Code
fig, ax1 = plt.subplots()
labels = '20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80'
sizes = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)]
ax1.bar(labels, sizes)#getting the bar chart
plt.ylabel('Frequency') #assigning name for Y-axis (optional)
plt.xlabel('Age')
plt.title('') #assigning name for the bar chart (optional)
plt.show() #displaying the bar chart
fig, ax1 = plt.subplots()
labels = '20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80' #x-axis(age)
sizes = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
ax1.bar(labels, sizes)#getting the bar chart
###Output
_____no_output_____
###Markdown
1. Width of bars
###Code
fig, ax1 = plt.subplots()
labels = '20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80' #x-axis(age)
sizes = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
ax1.bar(labels, sizes, width=0.5) ###getting the bar chart
###Output
_____no_output_____
###Markdown
2. Labelling axes
###Code
fig, ax1 = plt.subplots()
labels = '20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80' #x-axis(age)
sizes = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
plt.xlabel('Age') ###X-axis label
plt.ylabel('Frequency') ###Y-axis label
ax1.bar(labels, sizes, width=0.9)#getting the bar chart
###Output
_____no_output_____
###Markdown
3. Customizing color
###Code
fig, ax1 = plt.subplots()
labels = '20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80' #x-axis(age)
sizes = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
plt.xlabel('Age') #X-axis label
plt.ylabel('Frequency') #Y-axis label
ax1.bar(labels, sizes, width=0.9, color='r') ### color=''-changes color of the bar
###Output
_____no_output_____
###Markdown
4. Customizing edges
###Code
fig, ax1 = plt.subplots()
labels = '20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80' #x-axis(age)
sizes = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
plt.xlabel('Age') #X-axis label
plt.ylabel('Frequency') #Y-axis label
ax1.bar(labels, sizes, width=0.9, color='r', linewidth=5, edgecolor='b') ###edgecolor=''-changes colors of bar edges, linewidth is the width of edges
###Output
_____no_output_____
###Markdown
5. Horizontal bars
###Code
fig, ax1 = plt.subplots()
labels = '20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80' #x-axis(age)
sizes = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
plt.xlabel('Frequency') #X-axis label
plt.ylabel('Age') #Y-axis label
ax1.barh(labels, sizes) #Use "barh" to plot horizontal bars.
###Output
_____no_output_____
###Markdown
6. Two bars
###Code
#Now we compare two different variables (age and income)
fig, ax2 = plt.subplots()
labels = ['20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80']
index = np.arange(len(labels)) ###we need 'index' (numeric x-positions) while creating the grouped bars
age = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
income = [len(income_10k), len(income_20k), len(income_30k), len(income_40k), len(income_50k)]
plt.xlabel('Age and Income')
plt.ylabel('Frequency')
ax2.bar(index, age, width=0.4, color='r')
ax2.bar(index+0.4, income, width=0.4, color='g')
plt.xticks(index+0.2, labels) ###Naming groups
###Output
_____no_output_____
###Markdown
7. Legend and title
###Code
#Give labels for each bar as parameters and call "plt.legend()"
#To give title, type plt.title('')
fig, ax2 = plt.subplots()
labels = ['20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80']
index = np.arange(len(labels)) #we need 'index' while creating the bars
age = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
income = [len(income_10k), len(income_20k), len(income_30k), len(income_40k), len(income_50k)]
plt.xlabel('Age and Income')
plt.ylabel('Frequency')
ax2.bar(index, age, width=0.4, color='r', label="Age")
ax2.bar(index+0.4, income, width=0.4, color='g', label="Income")
ax2.legend()
plt.title('Data science') ###Giving title for the graph
plt.xticks(index+0.2, labels)
###Output
_____no_output_____
###Markdown
8. Separating bars
###Code
fig, ax2 = plt.subplots()
labels = ['20 to 30', '30 to 40', '40 to 50', '50 to 60', '60 to 80']
index = np.arange(len(labels)) ###we need 'index' while creating the bars
age = [len(age_20_30), len(age_30_40), len(age_40_50), len(age_50_60), len(age_60_80)] #y-axis(frequency)
income = [len(income_10k), len(income_20k), len(income_30k), len(income_40k), len(income_50k)]
plt.xlabel('Age and Income')
plt.ylabel('Frequency')
ax2.bar(index, age, width=0.4-0.1, color='r', label="Age") #decrease the bar_width by 0.1
ax2.bar(index+0.4, income, width=0.4-0.1, color='g', label="Income") #decrease the bar_width by 0.1
ax2.legend()
plt.xticks(index+0.2, labels)
###Output
_____no_output_____ |
26-50/p38.ipynb | ###Markdown
Pandigital multiples

Take the number 192 and multiply it by each of 1, 2, and 3:

192 × 1 = 192
192 × 2 = 384
192 × 3 = 576

By concatenating each product we get the 1 to 9 pandigital, 192384576. We will call 192384576 the concatenated product of 192 and (1,2,3).

The same can be achieved by starting with 9 and multiplying by 1, 2, 3, 4, and 5, giving the pandigital, 918273645, which is the concatenated product of 9 and (1,2,3,4,5).

What is the largest 1 to 9 pandigital 9-digit number that can be formed as the concatenated product of an integer with (1, 2, ..., n) where n > 1?

---

Idea

From the above example:

- 192384576 is generated by $ 192 * (1 * 10^6 + 2 * 10^3 + 3 * 10^0) $
- 918273645 is generated by $ 9 * (1*10^8 + 2*10^6 + 3*10^4 + 4*10^2 + 5*10^0) $

Now we can see 918273645 is currently the largest such pandigital number, so we can start from 918273645 and check whether any pandigital multiple is greater than this one.

Also, the first digit of the integer must be 9 if the result is to be greater than 918273645. So start from $ 9x_1, 9x_1x_2, 9x_1x_2x_3, ... $

$ 9x_1...x_i $ takes $ i+1 $ digits, while $ k * 9x_1...x_i $ for $ k \in [2, 3, ..., 9] $ takes $ i+2 $ digits. So if we want to take exactly 9 digits, $i$ can only be 0 or 3. $i=0$ is the example above, so we only need to check $i=3$.

---
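As a quick sanity check (not part of the original notebook), the multiplier `100002` used in the solution below really does concatenate a 4-digit `n` with `2n`, because `2n` has 5 digits:

```python
n = 9267                                         # any 4-digit number starting with 9
assert 2 * n >= 10000                            # 2n has 5 digits, so n is shifted by 5 decimal places
assert n * 100002 == int(str(n) + str(2 * n))    # n * 10**5 + 2*n == n * 100002
print(n * 100002)                                # 926718534
```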
###Code
def is_pandigital(n):
    # digits of n must be distinct and non-zero
    return '0' not in str(n) and len(str(n)) == len(set(str(n)))
def solve():
    # start from the known pandigital 918273645, the concatenated product of 9 and (1,2,3,4,5)
    largest = (918273645, (9, 5))
    # only a 4-digit starter 9xyz with distinct non-zero digits can beat it (with n = 2)
    for n in filter(is_pandigital, range(9001, int(1e4))):
        # n * 100002 == int(str(n) + str(2 * n)), because 2n has 5 digits
        if is_pandigital(n * 100002):
            largest = max(largest, (n * 100002, 2))
    return largest
solve()
###Output
_____no_output_____ |
notebooks/scvi_amortised/cell2location_synthetic_data_scVI_amortised_10x_data_batch_1250_2500_dropout_rate05_layers2_n_hidden200_eval.ipynb | ###Markdown
Benchmarking cell2location pyro model using softplus/exp for scales
###Code
import sys, ast, os
#sys.path.insert(1, '/nfs/team205/vk7/sanger_projects/BayraktarLab/cell2location/')
sys.path.insert(1, '/nfs/team205/vk7/sanger_projects/BayraktarLab/scvi-tools/')
import scanpy as sc
import anndata
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
data_type='float32'
#import cell2location_model
#import cell2location_module_scvi
import scvi
import torch
from matplotlib import rcParams
rcParams['pdf.fonttype'] = 42 # enables correct plotting of text
import seaborn as sns
###Output
_____no_output_____
###Markdown
The purpose of the notebook is to benchmark several versions of the model using mouse brain data.
###Code
sc_data_folder = '/nfs/team205/vk7/sanger_projects/cell2location_paper/notebooks/selected_data/mouse_visium_snrna/'
sp_data_folder = '/nfs/team205/vk7/sanger_projects/cell2location_paper/notebooks/selected_results/benchmarking/with_tissue_zones/data/'
results_folder = '/nfs/team205/vk7/sanger_projects/cell2location_paper/notebooks/selected_results/benchmarking/with_tissue_zones/real_mg/pyro/'
###Output
_____no_output_____
###Markdown
Read datasets and train cell2location

Data can be downloaded as follows:

```bash
wget https://cell2location.cog.sanger.ac.uk/paper/synthetic_with_tissue_zones/synth_adata_real_mg_20210131.h5ad
wget https://cell2location.cog.sanger.ac.uk/paper/synthetic_with_tissue_zones/training_5705STDY8058280_5705STDY8058281_20210131.h5ad
```
###Code
adata_vis = anndata.read(f'{sp_data_folder}synth_adata_real_mg_20210131.h5ad')
adata_vis.uns['spatial'] = {'x': 'y'}
#adata_vis = adata_vis[adata_vis.obs['sample'].isin([f'exper{i}' for i in range(5,10)]),:]
adata_snrna_raw = anndata.read(f'{sp_data_folder}training_5705STDY8058280_5705STDY8058281_20210131.h5ad')
import scipy
adata_snrna_raw.X = scipy.sparse.csr_matrix(adata_snrna_raw.X)
###Output
_____no_output_____
###Markdown
Add counts matrix as `adata.raw`, compute per-cluster average expression and one-hot encode the sample labels.
###Code
adata_vis.X = scipy.sparse.csr_matrix(adata_vis.X)
adata_snrna_raw.raw = adata_snrna_raw
adata_vis.raw = adata_vis
# compute average expression for each cluster
aver = scvi.external.cell2location.compute_cluster_averages(adata_snrna_raw, 'annotation_1')
# make sure the order of genes matches between aver and x_data
aver = aver.loc[adata_vis.var_names,:]
# generate one-hot encoded matrix telling which obs belong to which sample
obs2sample_df = pd.get_dummies(adata_vis.obs['sample'])
adata_vis
###Output
_____no_output_____
###Markdown
Model training
###Code
adata_vis = scvi.external.cell2location.setup_anndata(adata=adata_vis, cell_state_df=aver, batch_key="sample")
adata_vis.uns['_scvi']
mod = scvi.external.Cell2location(adata_vis, batch_size=2500,
amortised=True,
encoder_kwargs={'n_layers': 2, 'n_hidden': 200,
'dropout_rate': 0.2,
'activation_fn': torch.nn.ReLU},
N_cells_per_location=8)
mod.train(max_epochs=1000, lr=0.01, use_gpu=True)
means = mod.posterior_median(use_gpu = True)
means['w_sf'].shape
mod_m = scvi.external.Cell2location(adata_vis, batch_size=1250,
amortised=True,
encoder_kwargs={'n_layers': 2, 'n_hidden': 200,
'dropout_rate': 0.2,
'activation_fn': torch.nn.ReLU},
N_cells_per_location=8)
mod_m.train(max_epochs=1000, lr=0.01, use_gpu=True)
means_m = mod_m.posterior_median(use_gpu = True)
###Output
_____no_output_____
###Markdown
test Predictive

num_samples = 5
predictive = mod_m.module.create_predictive(num_samples=num_samples, parallel=False)
from scvi.dataloaders import AnnDataLoader
train_dl = AnnDataLoader(adata_vis, shuffle=False, batch_size=500)
for tensor_dict in train_dl:
    args, kwargs = mod_m.module._get_fn_args_from_batch(tensor_dict)
    samples = {
        k: v.detach().cpu().numpy()
        for k, v in predictive(*args, **kwargs).items()
        if k != "obs"
    }

save Pyro param state

model_save_path = os.path.join(save_path, "model_params.pt")
torch.save(model.state_dict(), model_save_path)

amortised_plate_sites = {'name': "obs_plate",
                         'in': ['x_data'],
                         'sites': {
                             "n_s_cells_per_location": 1,
                             "y_s_groups_per_location": 1,
                             "z_sr_groups_factors": 5,
                             "w_sf": 4,
                             "l_s_add": 1,
                         }}
np.sum([np.sum(amortised_plate_sites['sites'][k]) for k in amortised_plate_sites['sites'].keys()]) * 2

create indices for loc and scales of each site

counter = 0
indices = dict()
for site, n_dim in amortised_plate_sites['sites'].items():
    indices[site] = {'locs': np.arange(counter, counter + n_dim),
                     'scales': np.arange(counter + n_dim, counter + n_dim * 2)}
    counter += n_dim * 2
indices

save model

mod_m.save(dir_path='./results/scvi/minibatch_1sample', overwrite=True, save_anndata=False)

load model

mod_m.load(dir_path='./results/scvi/minibatch_1sample', adata=adata_vis, use_gpu=True)
###Code
### Compare ELBO as training progresses
###Output
_____no_output_____
###Markdown
plt.plot(mod.module.history_['train_loss_epoch'].index[200:],
         np.array(mod.module.history_['train_loss_epoch'].values.flatten())[200:]);
plt.plot(mod_m.module.history_['train_loss_epoch'].index[200:],
         np.array(mod_m.module.history_['train_loss_epoch'].values.flatten())[200:]);
plt.legend(labels=['minibatch 2500/25000', 'minibatch 1250/25000']);
plt.xlim(0, len(mod_m.module.history_['train_loss_epoch']));
###Code
plt.plot(mod.module.history_['train_loss_epoch'].index[10:],
np.array(mod.module.history_['train_loss_epoch'].values.flatten())[10:]);
plt.legend(labels=['minibatch 2500/25000']);
plt.xlim(0, len(mod_m.module.history_['train_loss_epoch']));
plt.plot(mod_m.module.history_['train_loss_epoch'].index[40:],
np.array(mod_m.module.history_['train_loss_epoch'].values.flatten())[40:]);
plt.legend(labels=['minibatch 1250/25000']);
plt.xlim(0, len(mod_m.module.history_['train_loss_epoch']));
#plt.plot(range(1, 100), np.array(mod.module.history_)[1:100]);
plt.plot(mod_m.module.history_['train_loss_epoch'].index[1:100],
np.array(mod_m.module.history_['train_loss_epoch'].values.flatten())[1:100]);
plt.legend(labels=['minibatch 1250/25000']);
plt.xlim(0, 100);
###Output
_____no_output_____
###Markdown
Evaluate accuracy using $R^2$
###Code
from re import sub
cell_count = adata_vis.obs.loc[:, ['cell_abundances_' in i for i in adata_vis.obs.columns]]
cell_count.columns = [sub('cell_abundances_', '', i) for i in cell_count.columns]
cell_count_columns = cell_count.columns
cell_proportions = (cell_count.T / cell_count.sum(1)).T
infer_cell_count = pd.DataFrame(means['w_sf'], index=adata_vis.obs_names,
columns=aver.columns)
infer_cell_count = infer_cell_count[cell_count.columns]
infer_cell_proportions = (infer_cell_count.T / infer_cell_count.sum(1)).T
infer_cell_count_m = pd.DataFrame(means_m['w_sf'], index=adata_vis.obs_names,
columns=aver.columns)
infer_cell_count_m = infer_cell_count_m[cell_count.columns]
infer_cell_proportions_m = (infer_cell_count_m.T / infer_cell_count_m.sum(1)).T
infer_cell_count.iloc[0:5,0:5], infer_cell_count_m.iloc[0:5,0:5]
rcParams['figure.figsize'] = 4, 4
rcParams["axes.facecolor"] = "white"
plt.hist2d(cell_count.values.flatten(),
infer_cell_count.values.flatten(),# / np.mean(adata_vis_res.var['gene_level'].values),
bins=[50, 50], norm=mpl.colors.LogNorm());
plt.xlabel('Simulated cell abundance');
plt.ylabel('Estimated cell abundance');
plt.title(r'minibatch 2500/25000, $R^2$: ' \
+ str(np.round(np.corrcoef(cell_count.values.flatten(),
infer_cell_count.values.flatten()), 3)[0,1]));
#plt.gca().set_aspect('equal', adjustable='box')
plt.tight_layout()
#plt.savefig(fig_path + '/Cell_density_cor.pdf')
rcParams['figure.figsize'] = 4, 4
rcParams["axes.facecolor"] = "white"
plt.hist2d(cell_count.values.flatten(),
infer_cell_count_m.values.flatten(),# / np.mean(adata_vis_res.var['gene_level'].values),
bins=[50, 50], norm=mpl.colors.LogNorm());
plt.xlabel('Simulated cell abundance');
plt.ylabel('Estimated cell abundance');
plt.title(r'minibatch 1250/25000, $R^2$: ' \
+ str(np.round(np.corrcoef(cell_count.values.flatten(),
infer_cell_count_m.values.flatten()), 3)[0,1]));
#plt.gca().set_aspect('equal', adjustable='box')
plt.tight_layout()
#plt.savefig(fig_path + '/Cell_density_cor.pdf')
###Output
_____no_output_____
###Markdown
Original implementation of cell2location in pymc3 has $R^2 = 0.791$. Evaluate with PR curves
###Code
import matplotlib as mpl
from matplotlib import pyplot as plt
import numpy as np
from scipy import interpolate
with plt.style.context('seaborn'):
seaborn_colors = mpl.rcParams['axes.prop_cycle'].by_key()['color']
def compute_precision_recall(pos_cell_count, infer_cell_proportions, mode='macro'):
r""" Plot precision-recall curves on average and for each cell type.
:param pos_cell_count: binary matrix showing which cell types are present in which locations
:param infer_cell_proportions: inferred locations (the higher the more cells)
"""
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
### calculating ###
predictor = infer_cell_proportions.values + np.random.gamma(20, 1e-12,
infer_cell_proportions.shape)
# For each cell type
precision = dict()
recall = dict()
average_precision = dict()
for i, c in enumerate(infer_cell_proportions.columns):
precision[c], recall[c], _ = precision_recall_curve(pos_cell_count[:, i],
predictor[:, i])
average_precision[c] = average_precision_score(pos_cell_count[:, i], predictor[:, i], average=mode)
average_precision["averaged"] = average_precision_score(pos_cell_count, predictor,
average=mode)
# A "micro-average": quantifying score on all classes jointly
if mode == 'micro':
precision_, recall_, threshold = precision_recall_curve(pos_cell_count.ravel(),
predictor.ravel())
#precision_[threshold < 0.1] = 0
precision["averaged"], recall["averaged"] = precision_, recall_
elif mode == 'macro':
precisions = []
recall_grid = np.linspace(0, 1, 2000)
for i, c in enumerate(infer_cell_proportions.columns):
f = interpolate.interp1d(recall[c], precision[c])
precision_interp = f(recall_grid)
precisions.append(precision_interp)
precision["averaged"] = np.mean(precisions, axis=0)
recall['averaged'] = recall_grid
return precision, recall, average_precision
def compare_precision_recall(pos_cell_count, infer_cell_proportions,
method_title, title='',
legend_loc=(0, -.37),
colors=sc.pl.palettes.default_102,
mode='macro', curve='PR'):
r""" Plot precision-recall curves on average and for each cell type.
:param pos_cell_count: binary matrix showing which cell types are present in which locations
:param infer_cell_proportions: inferred locations (the higher the more cells),
list of inferred parameters for several methods
:param method_title: title for each infer_cell_proportions
:param title: plot title
"""
# setup plot details
from itertools import cycle
colors = cycle(colors)
lines = []
labels = []
roc = {}
### plotting ###
for i, color in zip(range(len(infer_cell_proportions)), colors):
if curve == 'PR':
precision, recall, average_precision = compute_precision_recall(pos_cell_count,
infer_cell_proportions[i],
mode=mode)
xlabel = 'Recall'
ylabel = 'Precision'
l, = plt.plot(recall["averaged"], precision["averaged"], color=color, lw=3)
elif curve == 'ROC':
FPR, TPR, average_precision = compute_roc(pos_cell_count,
infer_cell_proportions[i],
mode=mode)
xlabel = 'FPR'
ylabel = 'TPR'
l, = plt.plot(FPR["averaged"], TPR["averaged"], color=color, lw=3)
lines.append(l)
labels.append(method_title[i] + '(' + curve + ' score = {0:0.2f})'
''.format(average_precision["averaged"]))
roc[method_title[i]] = average_precision["averaged"]
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if legend_loc is not None:
plt.legend(lines, labels, loc=legend_loc, prop=dict(size=8))
#plt.show()
return roc
rcParams['figure.figsize'] = 6, 3
rcParams['font.size'] = 8
results = [
infer_cell_count,
infer_cell_count_m
]
results_proportion = [
infer_cell_proportions,
infer_cell_proportions_m
]
names = [
'minibatch 2500/25000 obs',
'minibatch 1250/25000 obs',
]
compare_precision_recall(cell_count.values > 0.1,
results,
method_title=names,
legend_loc=(1.1, 0.5))
plt.tight_layout();
plt.title('Absolute cell abundance');
plt.show();
compare_precision_recall(cell_count.values > 0.1,
results_proportion,
method_title=names,
legend_loc=(1.1, 0.5))
plt.tight_layout();
plt.title('Relative cell abundance');
plt.show();
###Output
_____no_output_____
###Markdown
Original implementation of cell2location in pymc3 has PR score = 0.66. $R^2$ stratified by abundance and regional pattern
###Code
from scipy.spatial.distance import jensenshannon
def hist_obs_sim(cell_count, infer_cell_count,
xlab='Simulated cell proportion',
ylab='Estimated cell proportion',
title='', compute_kl=True, equal=True, max_val=1):
cor = np.round(np.corrcoef(cell_count.values.flatten(),
infer_cell_count.values.flatten()), 3)[0,1]
title = title +'\n'+ r'$R^2$: ' + str(cor)
if compute_kl:
js = np.array([jensenshannon(cell_count.values[r,:], infer_cell_count.values[r,:])
for r in range(cell_count.shape[0])])
js = np.mean(js[~np.isnan(js)])
title = title + '\nAverage JSD: ' + str(np.round(js, 2))
plt.hist2d(cell_count.values.flatten(),
infer_cell_count.values.flatten(),
bins=[35, 35], norm=mpl.colors.LogNorm());
plt.xlabel(xlab);
plt.ylabel(ylab);
if equal:
plt.gca().set_aspect('equal', adjustable='box')
plt.xlim(0, max_val);
plt.ylim(0, max_val);
plt.title(title);
def hist_by_category(cell_count, infer_cell_count, design,
xlab='Simulated cell proportion',
ylab='Estimated cell proportion',
nrow=1, ncol=4, compute_kl=True, equal=True):
design_loc = design.loc[cell_count.columns,:]
max_val = np.array([cell_count.values.max(), infer_cell_count.values.max()]).max()
if max_val < 1:
max_val = 1
plt.subplot(nrow, ncol, 1)
ind = (design_loc['is_uniform'] * design_loc['is_high_density']).values.astype(bool)
hist_obs_sim(cell_count.loc[:,ind], infer_cell_count.loc[:,ind],
xlab=xlab,
ylab=ylab,
title=f'Uniform & high abundance ({ind.sum()})',
compute_kl=compute_kl, equal=equal, max_val=max_val)
plt.subplot(nrow, ncol, 2)
ind = (design_loc['is_uniform'] * (1 - design_loc['is_high_density'])).values.astype(bool)
hist_obs_sim(cell_count.loc[:,ind], infer_cell_count.loc[:,ind],
xlab=xlab,
ylab=ylab,
title=f'Uniform & low abundance ({ind.sum()})',
compute_kl=compute_kl, equal=equal, max_val=max_val)
plt.subplot(nrow, ncol, 3)
ind = ((1 - design_loc['is_uniform']) * design_loc['is_high_density']).values.astype(bool)
hist_obs_sim(cell_count.loc[:,ind], infer_cell_count.loc[:,ind],
xlab=xlab,
ylab=ylab,
title=f'Sparse & high abundance ({ind.sum()})',
compute_kl=compute_kl, equal=equal, max_val=max_val)
plt.subplot(nrow, ncol, 4)
ind = ((1 - design_loc['is_uniform']) * (1 - design_loc['is_high_density'])).values.astype(bool)
hist_obs_sim(cell_count.loc[:,ind], infer_cell_count.loc[:,ind],
xlab=xlab,
ylab=ylab,
title=f'Sparse & low abundance ({ind.sum()})',
compute_kl=compute_kl, equal=equal, max_val=max_val)
rcParams['figure.figsize'] = 18,4.5
rcParams["axes.facecolor"] = "white"
hist_by_category(cell_proportions, infer_cell_proportions, adata_vis.uns['design']['cell_types2zones'],
xlab='Simulated cell proportion',
ylab='Estimated cell proportion',
nrow=1, ncol=4, equal=True)
plt.tight_layout();
plt.show();
hist_by_category(cell_proportions, infer_cell_proportions_m, adata_vis.uns['design']['cell_types2zones'],
xlab='Simulated cell proportion',
ylab='Estimated cell proportion',
nrow=1, ncol=4, equal=True)
plt.tight_layout();
plt.show();
import sys
for module in sys.modules:
try:
print(module,sys.modules[module].__version__)
except:
try:
            if type(sys.modules[module].version) is str:
print(module,sys.modules[module].version)
else:
print(module,sys.modules[module].version())
except:
try:
print(module,sys.modules[module].VERSION)
except:
pass
###Output
ipykernel 5.3.4
ipykernel._version 5.3.4
json 2.0.9
re 2.2.1
IPython 7.20.0
IPython.core.release 7.20.0
logging 0.5.1.2
zlib 1.0
traitlets 5.0.5
traitlets._version 5.0.5
argparse 1.1
ipython_genutils 0.2.0
ipython_genutils._version 0.2.0
platform 1.0.8
pygments 2.7.4
pexpect 4.8.0
ptyprocess 0.7.0
decorator 4.4.2
pickleshare 0.7.5
backcall 0.2.0
prompt_toolkit 3.0.8
wcwidth 0.2.5
jedi 0.17.0
parso 0.8.1
colorama 0.4.4
ctypes 1.1.0
_ctypes 1.1.0
urllib.request 3.7
jupyter_client 6.1.7
jupyter_client._version 6.1.7
zmq 20.0.0
zmq.backend.cython 40303
zmq.backend.cython.constants 40303
zmq.sugar 20.0.0
zmq.sugar.constants 40303
zmq.sugar.version 20.0.0
jupyter_core 4.7.1
jupyter_core.version 4.7.1
_curses b'2.2'
dateutil 2.8.1
six 1.15.0
decimal 1.70
_decimal 1.70
distutils 3.7.9
scanpy 1.7.0
scanpy._metadata 1.7.0
packaging 20.9
packaging.__about__ 20.9
importlib_metadata 1.7.0
csv 1.0
_csv 1.0
numpy 1.20.0
numpy.core 1.20.0
numpy.core._multiarray_umath 3.1
numpy.lib 1.20.0
numpy.linalg._umath_linalg 0.1.5
scipy 1.6.0
anndata 0.7.5
anndata._metadata 0.7.5
h5py 3.1.0
cached_property 1.5.2
natsort 7.1.1
pandas 1.2.1
pytz 2021.1
pandas.compat.numpy.function 1.20.0
sinfo 0.3.1
stdlib_list v0.8.0
numba 0.52.0
yaml 5.3.1
llvmlite 0.35.0
pkg_resources._vendor.appdirs 1.4.3
pkg_resources.extern.appdirs 1.4.3
pkg_resources._vendor.packaging 20.4
pkg_resources._vendor.packaging.__about__ 20.4
pkg_resources.extern.packaging 20.4
pkg_resources._vendor.pyparsing 2.2.1
pkg_resources.extern.pyparsing 2.2.1
numba.misc.appdirs 1.4.1
sklearn 0.24.1
sklearn.base 0.24.1
joblib 1.0.0
joblib.externals.loky 2.9.0
joblib.externals.cloudpickle 1.6.0
scipy._lib.decorator 4.0.5
scipy.linalg._fblas b'$Revision: $'
scipy.linalg._flapack b'$Revision: $'
scipy.linalg._flinalg b'$Revision: $'
scipy.special.specfun b'$Revision: $'
scipy.ndimage 2.0
scipy.optimize.minpack2 b'$Revision: $'
scipy.sparse.linalg.isolve._iterative b'$Revision: $'
scipy.sparse.linalg.eigen.arpack._arpack b'$Revision: $'
scipy.optimize._lbfgsb b'$Revision: $'
scipy.optimize._cobyla b'$Revision: $'
scipy.optimize._slsqp b'$Revision: $'
scipy.optimize._minpack 1.10
scipy.optimize.__nnls b'$Revision: $'
scipy.linalg._interpolative b'$Revision: $'
scipy.integrate._odepack 1.9
scipy.integrate._quadpack 1.13
scipy.integrate._ode $Id$
scipy.integrate.vode b'$Revision: $'
scipy.integrate._dop b'$Revision: $'
scipy.integrate.lsoda b'$Revision: $'
scipy.interpolate._fitpack 1.7
scipy.interpolate.dfitpack b'$Revision: $'
scipy.stats.statlib b'$Revision: $'
scipy.stats.mvn b'$Revision: $'
sklearn.utils._joblib 1.0.0
leidenalg 0.8.3
igraph 0.8.3
texttable 1.6.3
igraph.version 0.8.3
matplotlib 3.3.4
pyparsing 2.4.7
cycler 0.10.0
kiwisolver 1.3.1
PIL 8.1.0
PIL._version 8.1.0
PIL.Image 8.1.0
xml.etree.ElementTree 1.3.0
cffi 1.14.4
tables 3.6.1
numexpr 2.7.2
legacy_api_wrap 1.2
get_version 2.1
scvi 0.0.0
torch 1.8.1+cu102
torch.version 1.8.1+cu102
tqdm 4.56.0
tqdm.cli 4.56.0
tqdm.version 4.56.0
tqdm._dist_ver 4.56.0
ipywidgets 7.6.3
ipywidgets._version 7.6.3
_cffi_backend 1.14.4
pycparser 2.20
pycparser.ply 3.9
pycparser.ply.yacc 3.10
pycparser.ply.lex 3.10
pyro 1.6.0+9e1fd393
opt_einsum v3.3.0
pyro._version 1.6.0+9e1fd393
pytorch_lightning 1.2.7
pytorch_lightning.info 1.2.7
torchmetrics 0.2.0
fsspec 0.8.5
tensorboard 2.4.1
tensorboard.version 2.4.1
google.protobuf 3.14.0
tensorboard.compat.tensorflow_stub stub
tensorboard.compat.tensorflow_stub.pywrap_tensorflow 0
seaborn 0.11.1
seaborn.external.husl 2.1.0
statsmodels 0.12.2
|
content/functions/functions_1.ipynb | ###Markdown
Introduction to Functions

- [Download the lecture notes](https://philchodrow.github.io/PIC16A/content/functions/functions_1.ipynb).

**Functions** are one of the most important constructs in computer programming. A function is a single command which, when executed, performs some operations and may return a value. You've already encountered functions in PIC10A, where they may have looked something like this:

```cpp
// Filename: boldy.cpp
#include <iostream>

int main() {
    std::cout << "To boldly go";
    return 0;
}
```

You'll notice the *type declaration* (`int`), the function name (`main`), the parameter declaration (`()`, i.e. no parameters in this case), and the *return value* (`0`). Python functions have a similar syntax. Instead of a type declaration, one uses the `def` keyword to denote function definition. One does not use `{}` braces, but one does use a `:` colon to initiate the body of the function and whitespace to indent the body. Since Python is interpreted rather than compiled, functions are ready to use as soon as they are defined.
###Code
def boldly_print(): # colon ends declaration and begins definition
print("To boldly go")
# return values are optional
boldly_print()
# ---
###Output
To boldly go
###Markdown
Parameters

Just as in C++, in Python we can pass *arguments* (or *parameters*) to functions in order to modify their behavior.
###Code
def boldly_print_2(k):
for i in range(k):
print("To boldly go")
boldly_print_2(3)
# ---
###Output
To boldly go
To boldly go
To boldly go
###Markdown
These arguments can be given *default* values, so that it is not necessary to specify each argument in each function call.
###Code
def boldly_print_3(k, verb="go"):
for i in range(k):
print("To boldly " + verb)
boldly_print_3(2)
# ---
###Output
To boldly go
To boldly go
###Markdown
It is often desirable to use *keyword arguments* so that your code clearly indicates which argument is being supplied which value:
###Code
boldly_print_3(3, "sing") # fine
# ---
boldly_print_3(k=3, verb="sing") # same as above, easier to read
# ---
###Output
To boldly sing
To boldly sing
To boldly sing
###Markdown
All keyword arguments must be supplied after all positional arguments:
###Code
boldly_print_3(k = 3, "sing")
# ---
###Output
_____no_output_____
###Markdown
Scope

The **global scope** is the set of all variables available for usage outside of any function.
###Code
x = 3 # available in global scope
x
###Output
_____no_output_____
###Markdown
Functions create a **local scope**. This means:

- Variables in the global scope are available within the function.
- Variables created within the function are **not** available within the global scope.
###Code
# variables within the global scope are available within the function
def print_x():
print(x)
print_x()
# ---
def print_y():
y = 2
print(y)
print_y()
# ---
y
# ---
###Output
_____no_output_____
###Markdown
Immutable variables in the global scope cannot be modified by functions, even if you use the same variable name.
###Code
def new_x():
x = 7
print(x)
new_x()
# ---
print(x)
# ---
###Output
3
###Markdown
On the other hand, *mutable* variables in global scope can be modified by functions. **This is usually a bad idea**, for reasons we'll discuss in another set of notes.
###Code
# this works, but it's a bad idea.
captains = ["Kirk", "Picard", "Janeway", "Sisko"]
def reverse_names():
for i in range(4):
captains[i] = captains[i][::-1]
reverse_names()
captains
###Output
_____no_output_____
###Markdown
Return values

So far, we've seen examples of functions that print but do not *return* anything. Usually, you will want your function to have one or more return values. These allow the output of a function to be used in future computations.
###Code
def boldly_return(k = 1, verb = "go"):
return(["to boldly " + verb for i in range(k)])
x = boldly_return(k = 2, verb = "dance")
x
###Output
_____no_output_____
###Markdown
Your function can return multiple values:
###Code
def double_your_number(j):
return(j, 2*j)
x, y = double_your_number(10)
###Output
_____no_output_____
###Markdown
The `return` statement *immediately* terminates the function's local scope, usually returning to global scope. So, for example, a `return` statement can be used to terminate a `while` loop, similar to a `break` statement.
###Code
def largest_power_below(a, upper_bound):
i = 1
while True:
i *= a
if a*i >= upper_bound:
return(i)
largest_power_below(3, 10000)
###Output
_____no_output_____ |
notebooks/Data_Attribute_Recommendation_Generic_Model_Template.ipynb | ###Markdown
Data Attribute Recommendation - Generic Model Template

Deep dive into the Python SDK for the Data Attribute Recommendation service to explore when and how to use the generic model template.

Business Scenario

We will consider a business scenario involving product master data. The creation and maintenance of this product master data requires the careful manual selection of the correct categories for a given product from a pre-defined list of product categories. In this workshop, we will explore how to automate this tedious manual task with the Data Attribute Recommendation service.

This workshop will cover:

* Data Upload
* Model Training and Deployment
* Inference Requests

We will work through a basic example of how to achieve these tasks using the [Python SDK for Data Attribute Recommendation](https://github.com/SAP/data-attribute-recommendation-python-sdk).

*Note: if you are doing several runs of this notebook on a trial account, you may see errors stating 'The resource can no longer be used. Usage limit has been reached'. It can be beneficial to [clean up the service instance](Cleaning-up-a-service-instance) to free up limited trial resources acquired by an earlier run of the notebook. [Some limits](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html) cannot be reset this way.*

Table of Contents

* [Exercise 01.1](Exercise-01.1) - Installing the SDK and preparing the service key
* [Exercise 01.2](Exercise-01.2) - Uploading the data
* [Exercise 01.3](Exercise-01.3) - Training the model
* [Exercise 01.4](Exercise-01.4) - Deploying the Model and predicting labels
* [Resources](Resources) - Additional reading
* [Cleaning up a service instance](Cleaning-up-a-service-instance) - Clean up all resources on the service instance
* [Optional Exercises](Optional-Exercises) - Optional exercises

Exercise 01.1

*Back to [table of contents](Table-of-Contents)*

In exercise 01.1, we will install the SDK and prepare the service key.

Installing the SDK

The Data Attribute Recommendation SDK is available from the Python package repository. It can be installed with the standard `pip` tool:
###Code
! pip install data-attribute-recommendation-sdk
###Output
_____no_output_____
###Markdown
*Note: If you are not using a Jupyter notebook, but instead a regular Python development environment, we recommend using a Python virtual environment to set up your development environment. Please see [the dedicated tutorial to learn how to install the SDK inside a Python virtual environment](https://developers.sap.com/tutorials/cp-aibus-dar-sdk-setup.html).*

Creating a service instance and key on BTP Trial

Please log in to your trial account: https://cockpit.eu10.hana.ondemand.com/trial/

In your global account screen, go to the "Boosters" tab.

*Boosters are only available on the Trial landscape. If you are using a production environment, please follow this tutorial to manually [create a service instance and a service key](https://developers.sap.com/tutorials/cp-aibus-dar-service-instance.html).*

In the Boosters tab, enter "Data Attribute Recommendation" into the search box. Then, select the service tile from the search results. The resulting screen shows details of the booster pack. Here, click the "Start" button and wait a few seconds. Once the booster is finished, click the "go to Service Key" link to obtain your service key. Finally, download the key and save it to disk.

Loading the service key into your Jupyter Notebook

Once you downloaded the service key from the Cockpit, upload it to your notebook environment. The service key must be uploaded to the same directory where the `Data_Attribute_Recommendation_*_Model_Template.ipynb` file is stored.

When using Jupyterlab, a file browser is visible to the left of the notebook view. Click the upload button there to upload the `default_key.json` file we downloaded earlier from the BTP Cockpit. Once you click the upload button, a file chooser dialog will open where you can select the `default_key.json`. After the upload finished successfully, you should see the `default_key.json` in the file browser.

**Make sure that the file name is `default_key.json`. If your service key file has a different name, this notebook will not work.**

The service key contains your credentials to access the service. Please treat this as carefully as you would treat any password. We keep the service key as a separate file outside this notebook to avoid leaking the secret credentials.

The service key is a JSON file. We will load this file once and use the credentials throughout this workshop.
###Code
# First, set up logging so we can see the actions performed by the SDK behind the scenes
import logging
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
from pprint import pprint # for nicer output formatting
import json
import os
if not os.path.exists("default_key.json"):
msg = "'default_key.json' is not found. Please follow instructions above to create a service key of"
msg += " Data Attribute Recommendation. Then, upload it into the same directory where"
msg += " this notebook is saved."
print(msg)
raise ValueError(msg)
with open("default_key.json") as file_handle:
key = file_handle.read()
SERVICE_KEY = json.loads(key)
print("Service URL: ")
pprint(SERVICE_KEY["url"])
print("Client ID:")
pprint(SERVICE_KEY["uaa"]["clientid"])
###Output
_____no_output_____
###Markdown
Summary Exercise 01.1In exercise 01.1, we have covered the following topics:* How to install the Python SDK for Data Attribute Recommendation* How to obtain a service key for the Data Attribute Recommendation service Exercise 01.2*Back to [table of contents](Table-of-Contents)**To perform this exercise, you need to execute the code in all previous exercises.*In exercise 01.2, we will upload our demo dataset to the service. The Dataset Obtaining the Data The dataset we use in this workshop is a CSV file containing scientific paper titles and their topic categories. This dataset is ideal to understand use cases where the labels are independent of one another. What this means is that the presence or absence of one label does not influence the others. Let's inspect the data:
###Code
# if you are experiencing an import error here, run the following in a new cell:
# ! pip install pandas
import pandas as pd
df = pd.read_csv("data/arxiv.csv")
df.head(5)
df.tail()
print()
print(f"Data has {df.shape[0]} rows and {df.shape[1]} columns.")
###Output
_____no_output_____
###Markdown
The CSV contains the titles of several scientific papers. For each title, the set of topics associated with the title is provided as labels. The following are the labels and their associated full forms:

- CSC: Computer Science
- STA: Statistics
- QFI: Quantitative Finance
- QBI: Quantitative Biology
- PHY: Physics

For example, the first instance of the data, `Contemporary machine learning: a guide for practitioners in the physical sciences`, has the following set of labels:

- Computer Science
- Physics

We will use the Data Attribute Recommendation service to predict the labels for a given paper based on its **title**. However, you can add other attributes, such as the length of the paper, the number of words, or the conference name and type, to help the classifier learn better.

Creating the DatasetSchema

We first have to describe the shape of our data by creating a DatasetSchema. This schema informs the service about the individual column types found in the CSV. We also describe which are the target columns used for training. These columns will be predicted later.

The service currently supports three column types: **TEXT**, **CATEGORY** and **NUMBER**. As labels to be predicted, only **CATEGORY** and **NUMBER** are currently supported.

A DatasetSchema for the Arxiv dataset looks as follows:

```json
{
    "features": [
        {"label": "title", "type": "TEXT"}
    ],
    "labels": [
        {"label": "label1", "type": "CATEGORY"},
        {"label": "label2", "type": "CATEGORY"},
        {"label": "label3", "type": "CATEGORY"},
        {"label": "label4", "type": "CATEGORY"},
        {"label": "label5", "type": "CATEGORY"}
    ],
    "name": "arxiv-multilabel-prediction"
}
```

We will now upload this DatasetSchema to the Data Attribute Recommendation service. The SDK provides the [`DataManagerClient.create_dataset_schema()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.data_manager_client.DataManagerClient.create_dataset_schema) method for this purpose.
###Code
from sap.aibus.dar.client.data_manager_client import DataManagerClient
dataset_schema = {
"features": [
{"label": "title", "type": "TEXT"},
],
"labels": [
{"label": "label1", "type": "CATEGORY"},
{"label": "label2", "type": "CATEGORY"},
{"label": "label3", "type": "CATEGORY"},
{"label": "label4", "type": "CATEGORY"},
{"label": "label5", "type": "CATEGORY"}
],
"name": "arxiv-multilabel-prediction",
}
data_manager = DataManagerClient.construct_from_service_key(SERVICE_KEY)
response = data_manager.create_dataset_schema(dataset_schema)
dataset_schema_id = response["id"]
print()
print("DatasetSchema created:")
pprint(response)
print()
print(f"DatasetSchema ID: {dataset_schema_id}")
# Compress file first for a faster upload
! gzip -9 -c data/arxiv.csv > arxiv.csv.gz
###Output
_____no_output_____
###Markdown
The API responds with the newly created DatasetSchema resource. The service assigned an ID to the schema. We save this ID in a variable, as we will need it when we upload the data.

Uploading the Data to the service

The [`DataManagerClient`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.data_manager_client.DataManagerClient) class is also responsible for uploading data to the service. This data must fit an existing DatasetSchema. After uploading the data, the service will validate the Dataset against the DatasetSchema in a background process. The data must be a CSV file which can optionally be `gzip` compressed.

We will now upload our `arxiv.csv.gz` file, using the DatasetSchema which we created earlier.

Data upload is a two-step process. We first create the Dataset using [`DataManagerClient.create_dataset()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.data_manager_client.DataManagerClient.create_dataset). Then we can upload data to the Dataset using the [`DataManagerClient.upload_data_to_dataset()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.data_manager_client.DataManagerClient.upload_data_to_dataset) method.
###Code
dataset_resource = data_manager.create_dataset("arxiv-category-dataset", dataset_schema_id)
dataset_id = dataset_resource["id"]
print()
print("Dataset created:")
pprint(dataset_resource)
print()
print(f"Dataset ID: {dataset_id}")
###Output
_____no_output_____
###Markdown
Note that the data upload can take a few minutes. Please do not restart the process while the cell is still running.
###Code
# Open in binary mode.
with open('arxiv.csv.gz', 'rb') as file_handle:
dataset_resource = data_manager.upload_data_to_dataset(dataset_id, file_handle)
print()
print("Dataset after data upload:")
print()
pprint(dataset_resource)
###Output
_____no_output_____
###Markdown
Note that the Dataset status changed from `NO_DATA` to `VALIDATING`. Dataset validation is a background process. The status will eventually change from `VALIDATING` to `SUCCEEDED`. The SDK provides the [`DataManagerClient.wait_for_dataset_validation()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.data_manager_client.DataManagerClient.wait_for_dataset_validation) method to poll for the Dataset validation.
###Code
dataset_resource = data_manager.wait_for_dataset_validation(dataset_id)
print()
print("Dataset after validation has finished:")
print()
pprint(dataset_resource)
###Output
_____no_output_____
###Markdown
If the status is `FAILED` instead of `SUCCEEDED`, then the `validationMessage` will contain details about the validation failure.

Summary Exercise 01.2

In exercise 01.2, we have covered the following topics:

* How to create a DatasetSchema
* How to upload a Dataset to the service

You can find optional exercises related to exercise 01.2 [below](Optional-Exercises-for-01.2).

Exercise 01.3

*Back to [table of contents](Table-of-Contents)*

*To perform this exercise, you need to execute the code in all previous exercises.*

In exercise 01.3, we will train the model.

Training the Model

The Dataset is now uploaded and has been validated successfully by the service. To train a machine learning model, we first need to select the correct model template.

Selecting the right ModelTemplate

The Data Attribute Recommendation service currently supports the following ModelTemplates:

| ID | Name | Description |
|----|------|-------------|
| d7810207-ca31-4d4d-9b5a-841a644fd81f | **Hierarchical template** | Recommended for the prediction of multiple classes that form a hierarchy. |
| 223abe0f-3b52-446f-9273-f3ca39619d2c | **Generic template** | Generic neural network for multi-label, multi-class classification. |
| 188df8b2-795a-48c1-8297-37f37b25ea00 | **AutoML template** | Finds the best machine learning model out of several traditional algorithms. Single output only. ([Blog post](https://blogs.sap.com/2021/04/28/how-does-automl-works-in-data-attribute-recommendation/)) |
| bdbcd699-4419-40a5-abb8-e7ad43dde49b | **Regression template** | Predict the numeric value of a field. Single output only. ([Blog post](https://blogs.sap.com/2021/11/14/solving-regression-use-cases-with-data-attribute-recommendation/)) |

We are building a model to predict labels which are independent of one another. The **Generic template** is correct for this scenario. Refer to the [official documentation on ModelTemplates](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/1e76e8c636974a06967552c05d40e066.html) to learn more. Additional model templates may be added over time, so check back regularly.

Starting the training

When working with models, we use the [`ModelManagerClient`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.model_manager_client.ModelManagerClient) class. To start the training, we need the IDs of the dataset and the desired model template. We also have to provide a name for the model. The [`ModelManagerClient.create_job()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.model_manager_client.ModelManagerClient.create_job) method launches the training Job.

*Only one model of a given name can exist. If you receive a message stating 'The model name specified is already in use', you either have to remove the model (and any deployment associated with it) first, or you have to change the `model_name` variable below. You can also [clean up the entire service instance](Cleaning-up-a-service-instance).*
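A minimal sketch of that cleanup step, assuming the `model_manager` client and the `DARHTTPException` import from the next cell, and assuming that looking up a missing model raises a `DARHTTPException`:

```python
# Sketch: remove a leftover model from an earlier run before training again.
# Any deployment referencing the model must be deleted first (see the cleanup section at the end).
try:
    model_manager.read_model_by_name(model_name)
    model_manager.delete_model_by_name(model_name)
    print(f"Deleted leftover model '{model_name}'.")
except DARHTTPException:
    print(f"No existing model named '{model_name}', nothing to delete.")
```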
###Code
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
from sap.aibus.dar.client.exceptions import DARHTTPException
model_manager = ModelManagerClient.construct_from_service_key(SERVICE_KEY)
model_template_id = "223abe0f-3b52-446f-9273-f3ca39619d2c" # multi-label template
model_name = "arxiv-multilabel-model"
job_resource = model_manager.create_job(model_name, dataset_id, model_template_id)
job_id = job_resource['id']
print()
print("Job resource:")
print()
pprint(job_resource)
print()
print(f"ID of submitted Job: {job_id}")
###Output
_____no_output_____
###Markdown
The job is now running in the background. Similar to the DatasetValidation, we have to poll the job until it succeeds. The SDK provides the [`ModelManagerClient.wait_for_job()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_job) method:
###Code
job_resource = model_manager.wait_for_job(job_id)
print()
print("Job resource after training is finished:")
pprint(job_resource)
###Output
_____no_output_____
###Markdown
Intermission

The model training will take between 5 and 10 minutes. In the meantime, we can explore the available [resources](Resources) for both the service and the SDK.

Inspecting the Model

Once the training job is finished successfully, we can inspect the model using [`ModelManagerClient.read_model_by_name()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.model_manager_client.ModelManagerClient.read_model_by_name).
###Code
model_resource = model_manager.read_model_by_name(model_name)
print()
pprint(model_resource)
###Output
_____no_output_____
###Markdown
In the model resource, the `validationResult` key provides information about model performance. You can also use these metrics to compare the performance of different [ModelTemplates](Selecting-the-right-ModelTemplate) or different datasets.

Summary Exercise 01.3

In exercise 01.3, we have covered the following topics:

* How to select the appropriate ModelTemplate
* How to train a Model from a previously uploaded Dataset

You can find optional exercises related to exercise 01.3 [below](Optional-Exercises-for-01.3).

Exercise 01.4

*Back to [table of contents](Table-of-Contents)*

*To perform this exercise, you need to execute the code in all previous exercises.*

In exercise 01.4, we will deploy the model and predict labels for some unlabeled data.

Deploying the Model

The training job has finished and the model is ready to be deployed. By deploying the model, we create a server process in the background on the Data Attribute Recommendation service which will serve inference requests. In the SDK, the [`ModelManagerClient.create_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlmodule-sap.aibus.dar.client.model_manager_client) method lets us create a Deployment.
###Code
deployment_resource = model_manager.create_deployment(model_name)
deployment_id = deployment_resource["id"]
print()
print("Deployment resource:")
print()
pprint(deployment_resource)
print(f"Deployment ID: {deployment_id}")
###Output
_____no_output_____
###Markdown
*Note: if you are using a trial account and you see errors such as 'The resource can no longer be used. Usage limit has been reached', consider [cleaning up the service instance](Cleaning-up-a-service-instance) to free up limited trial resources.*

Similar to the data upload and the training job, model deployment is an asynchronous process. We have to poll the API until the Deployment is in status `SUCCEEDED`. The SDK provides the [`ModelManagerClient.wait_for_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_deployment) method for this purpose.
###Code
deployment_resource = model_manager.wait_for_deployment(deployment_id)
print()
print("Finished deployment resource:")
print()
pprint(deployment_resource)
###Output
_____no_output_____
###Markdown
Once the Deployment is in status `SUCCEEDED`, we can run inference requests.

*For trial users: the deployment will be stopped after 8 hours. You can restart it by deleting the deployment and creating a new one for your model. The [`ModelManagerClient.ensure_deployment_exists()`](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html) method will delete and re-create automatically (a short sketch is shown at the end of this cell). Then, you need to poll until the deployment is succeeded using [`ModelManagerClient.wait_for_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_deployment) as above.*

Executing Inference requests

With a single inference request, we can send up to 50 objects to the service to predict the labels. The data sent to the service must match the `features` section of the DatasetSchema created earlier. The `labels` defined inside of the DatasetSchema will be predicted for each object and returned as a response to the request. In the SDK, the [`InferenceClient.create_inference_request()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.htmlsap.aibus.dar.client.inference_client.InferenceClient.create_inference_request) method handles submission of inference requests.
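A minimal sketch of the restart pattern from the trial-user note above. It assumes `ensure_deployment_exists()` accepts the model name and returns the new deployment resource; check the linked documentation for the exact signature.

```python
# Sketch: re-create the deployment after it was stopped (e.g. on a trial account).
# Assumes ensure_deployment_exists() takes the model name and returns the deployment resource.
deployment_resource = model_manager.ensure_deployment_exists(model_name)
deployment_resource = model_manager.wait_for_deployment(deployment_resource["id"])
print(deployment_resource["status"])
```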
###Code
from sap.aibus.dar.client.inference_client import InferenceClient
inference = InferenceClient.construct_from_service_key(SERVICE_KEY)
objects_to_be_classified = [
{
"features": [
{"name": "title", "value": "Not even wrong: The spurious link between biodiversity and ecosystem functioning"}
],
},
]
inference_response = inference.create_inference_request(model_name, objects_to_be_classified)
print()
print("Inference request processed. Response:")
print()
pprint(inference_response)
###Output
_____no_output_____
###Markdown
*Note: For trial accounts, you only have a limited number of objects which you can classify.* You can also try to come up with your own example:
###Code
my_own_items = [
{
"features": [
{"name": "title", "value": "EDIT THIS"}
],
},
]
inference_response = inference.create_inference_request(model_name, my_own_items)
print()
print("Inference request processed. Response:")
print()
pprint(inference_response)
###Output
_____no_output_____
###Markdown
You can also classify multiple objects at once. For each object, the `top_n` parameter determines how many predictions are returned.
###Code
objects_to_be_classified = [
{
"objectId": "optional-identifier-1",
"features": [
{"name": "title", "value": "Low-luminosity stellar wind accretion onto neutron stars in HMXBs"}
],
},
{
"objectId": "optional-identifier-2",
"features": [
{"name": "title", "value": "Super-speeds with Zero-RAM: Next Generation Large-Scale Optimization in Your Laptop"}
],
},
{
"objectId": "optional-identifier-3",
"features": [
{"name": "title", "value": "Why optional stopping is a problem for Bayesians"}
],
}
]
inference_response = inference.create_inference_request(model_name, objects_to_be_classified, top_n=3)
print()
print("Inference request processed. Response:")
print()
pprint(inference_response)
###Output
_____no_output_____
###Markdown
We can see that the service now returns the `n-best` predictions for each label as indicated by the `top_n` parameter.In some cases, the predicted category has the special value `nan`. In the `arxiv.csv` data set, not all records have the full set of categories. Some records only have one label and some having up to three. The model learns this fact from the data and will occasionally suggest that a record should not have a label. To learn how to execute inference calls without the SDK just using the underlying RESTful API, see [Inference without the SDK](Inference-without-the-SDK). Summary Exercise 01.4In exercise 01.4, we have covered the following topics:* How to deploy a previously trained model* How to execute inference requests against a deployed modelYou can find optional exercises related to exercise 01.4 [below](Optional-Exercises-for-01.4). Wrapping upIn this workshop, we looked into the following topics:* Installation of the Python SDK for Data Attribute Recommendation* Modelling data with a DatasetSchema* Uploading data into a Dataset* Training a model* Predicting labels for unlabelled dataUsing these tools, we are able to solve the problem of missing Master Data attributes starting from just a CSV file containing training data.Feel free to revisit the workshop materials at any time. The [resources](Resources) section below contains additional reading.If you would like to explore the additional capabilities of the SDK, visit the [optional exercises](Optional-Exercises) below. Cleanup During the course of the workshop, we have created several resources on the Data Attribute Recommendation Service:* DatasetSchema* Dataset* Job* Model* DeploymentThe SDK provides several methods to delete these resources. Note that there are dependencies between objects: you cannot delete a Dataset without deleting the Model beforehand.You will need to set `CLEANUP_SESSION = True` below to execute the cleanup.
###Code
# Clean up all resources created earlier
CLEANUP_SESSION = False
def cleanup_session():
model_manager.delete_deployment_by_id(deployment_id) # this can take a few seconds
model_manager.delete_model_by_name(model_name)
model_manager.delete_job_by_id(job_id)
data_manager.delete_dataset_by_id(dataset_id)
data_manager.delete_dataset_schema_by_id(dataset_schema_id)
print("DONE cleaning up!")
if CLEANUP_SESSION:
print("Cleaning up resources generated in this session.")
cleanup_session()
else:
print("Not cleaning up. Set 'CLEANUP_SESSION = True' above and run again!")
###Output
_____no_output_____
###Markdown
Resources

*Back to [table of contents](Table-of-Contents)*

Data Attribute Recommendation

* [SAP Help Portal](https://help.sap.com/viewer/product/Data_Attribute_Recommendation/SHIP/en-US)
* [API Reference](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/b45cf9b24fd042d082c16191aa938c8d.html)
* [Tutorials using Postman - interact with the service RESTful API directly](https://developers.sap.com/mission.cp-aibus-data-attribute.html)
* [Trial Account Limits](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html)
* [Metering and Pricing](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/1e093326a2764c298759fcb92c5b0500.html)
* [Blog Post: How does AutoML work in Data Attribute Recommendation?](https://blogs.sap.com/2021/04/28/how-does-automl-works-in-data-attribute-recommendation/)
* [Blog Post: Solving regression use-cases with Data Attribute Recommendation](https://blogs.sap.com/2021/11/14/solving-regression-use-cases-with-data-attribute-recommendation/)
* [All Blog Posts on Data Attribute Recommendation](https://blogs.sap.com/tags/73554900100800002858/)

SDK Resources

* [SDK source code on Github](https://github.com/SAP/data-attribute-recommendation-python-sdk)
* [SDK documentation](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/)
* [How to obtain support](https://github.com/SAP/data-attribute-recommendation-python-sdk/blob/master/README.mdhow-to-obtain-support)
* [Tutorials: Classify Data Records with the SDK for Data Attribute Recommendation](https://developers.sap.com/group.cp-aibus-data-attribute-sdk.html)

Addendum

Inference without the SDK

*Back to [table of contents](Table-of-Contents)*

The Data Attribute Recommendation service exposes a RESTful API. The SDK we use in this workshop uses this API to interact with the DAR service. For custom integration, you can implement your own client for the API. The tutorial "[Use Machine Learning to Classify Data Records]" is a great way to explore the Data Attribute Recommendation API with the Postman REST client. Beyond the tutorial, the [API Reference] is a comprehensive documentation of the RESTful interface.

[Use Machine Learning to Classify Data Records]: https://developers.sap.com/mission.cp-aibus-data-attribute.html
[API Reference]: https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/b45cf9b24fd042d082c16191aa938c8d.html

To demonstrate the underlying API, the next example uses the `curl` command line tool to perform an inference request against the Inference API. The example uses the `jq` command to extract the credentials from the service key. The authentication token is retrieved from the `uaa_url` and then used for the inference request.
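For comparison, here is a minimal Python sketch of the same flow using the third-party `requests` library. It is not part of the original notebook and simply mirrors the `curl` call in the next cell, reusing the `SERVICE_KEY` and `model_name` defined earlier.

```python
import requests

# obtain an OAuth token using the client credentials from the service key
uaa = SERVICE_KEY["uaa"]
token_response = requests.post(
    uaa["url"] + "/oauth/token",
    params={"grant_type": "client_credentials"},
    auth=(uaa["clientid"], uaa["clientsecret"]),
)
access_token = token_response.json()["access_token"]

# send one object to the deployed model's inference endpoint
inference_url = f"{SERVICE_KEY['url']}/inference/api/v3/models/{model_name}/versions/1"
payload = {"objects": [{"features": [{"name": "title", "value": "Why optional stopping is a problem for Bayesians"}]}]}
response = requests.post(
    inference_url,
    headers={"Authorization": f"Bearer {access_token}"},
    json=payload,
)
pprint(response.json())
```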
###Code
# If the following example gives you errors that the jq or curl commands cannot be found,
# you may be able to install them from conda by uncommenting one of the lines below:
#%conda install -q jq
#%conda install -q curl
%%bash -s "$model_name"
# Pass the Python model_name variable as the first argument to the shell script
model_name=$1
echo "Model: $model_name"
key=$(cat key.json)
url=$(echo $key | jq -r .url)
uaa_url=$(echo $key | jq -r .uaa.url)
clientid=$(echo $key | jq -r .uaa.clientid)
clientsecret=$(echo $key | jq -r .uaa.clientsecret)
echo "Service URL: $url"
token_url=${uaa_url}/oauth/token?grant_type=client_credentials
echo "Obtaining token with clientid $clientid from $token_url"
bearer_token=$(curl \
--silent --show-error \
--user $clientid:$clientsecret \
$token_url \
| jq -r .access_token
)
inference_url=${url}/inference/api/v3/models/${model_name}/versions/1
echo "Running inference request against endpoint $inference_url"
echo ""
# We pass the token in the Authorization header.
# The payload for the inference request is passed as
# the body of the POST request below.
# The output of the curl command is piped through `jq`
# for pretty-printing
curl \
--silent --show-error \
--header "Authorization: Bearer ${bearer_token}" \
--header "Content-Type: application/json" \
-XPOST \
${inference_url} \
-d '{
"objects": [
{
"features": [
{
"name": "manufacturer",
"value": "Energizer"
},
{
"name": "description",
"value": "Alkaline batteries; 1.5V"
},
{
"name": "price",
"value": "5.99"
}
]
}
]
}' | jq
###Output
_____no_output_____
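###Markdown
The same request can also be issued from Python. The following cell is a small sketch (not part of the official SDK) that mirrors the `curl` command above using the `requests` library: it reads the service key from `key.json`, obtains a bearer token with the client credentials, and then calls the versioned inference endpoint for `model_name` from the earlier cells. It assumes the `requests` package is available in the environment.
###Code
import json
import requests
with open("key.json") as file_handle:
    key = json.load(file_handle)
url = key["url"]
uaa_url = key["uaa"]["url"]
clientid = key["uaa"]["clientid"]
clientsecret = key["uaa"]["clientsecret"]
# Obtain a bearer token, mirroring the curl call above
token_response = requests.get(
    f"{uaa_url}/oauth/token",
    params={"grant_type": "client_credentials"},
    auth=(clientid, clientsecret),
)
bearer_token = token_response.json()["access_token"]
# Same payload as in the curl example above
payload = {
    "objects": [
        {
            "features": [
                {"name": "manufacturer", "value": "Energizer"},
                {"name": "description", "value": "Alkaline batteries; 1.5V"},
                {"name": "price", "value": "5.99"},
            ]
        }
    ]
}
inference_url = f"{url}/inference/api/v3/models/{model_name}/versions/1"
response = requests.post(
    inference_url,
    headers={"Authorization": f"Bearer {bearer_token}"},
    json=payload,
)
print(json.dumps(response.json(), indent=2))
###Output
_____no_output_____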
###Markdown
Cleaning up a service instance*Back to [table of contents](Table-of-Contents)*To clean all data on the service instance, you can run the following snippet. The code is self-contained and does not require you to execute any of the cells above. However, you will need to have the `key.json` containing a service key in place. You will need to set `CLEANUP_EVERYTHING = True` below to execute the cleanup. **NOTE: This will delete all data on the service instance!**
###Code
CLEANUP_EVERYTHING = False
def cleanup_everything():
import logging
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
import json
import os
if not os.path.exists("key.json"):
msg = "key.json is not found. Please follow instructions above to create a service key of"
msg += " Data Attribute Recommendation. Then, upload it into the same directory where"
msg += " this notebook is saved."
print(msg)
raise ValueError(msg)
with open("key.json") as file_handle:
key = file_handle.read()
SERVICE_KEY = json.loads(key)
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
model_manager = ModelManagerClient.construct_from_service_key(SERVICE_KEY)
for deployment in model_manager.read_deployment_collection()["deployments"]:
model_manager.delete_deployment_by_id(deployment["id"])
for model in model_manager.read_model_collection()["models"]:
model_manager.delete_model_by_name(model["name"])
for job in model_manager.read_job_collection()["jobs"]:
model_manager.delete_job_by_id(job["id"])
from sap.aibus.dar.client.data_manager_client import DataManagerClient
data_manager = DataManagerClient.construct_from_service_key(SERVICE_KEY)
for dataset in data_manager.read_dataset_collection()["datasets"]:
data_manager.delete_dataset_by_id(dataset["id"])
for dataset_schema in data_manager.read_dataset_schema_collection()["datasetSchemas"]:
data_manager.delete_dataset_schema_by_id(dataset_schema["id"])
print("Cleanup done!")
if CLEANUP_EVERYTHING:
print("Cleaning up all resources in this service instance.")
cleanup_everything()
else:
print("Not cleaning up. Set 'CLEANUP_EVERYTHING = True' above and run again.")
###Output
_____no_output_____ |
nb/structure-solver.ipynb | ###Markdown
Structure solving as meta-optimization (demo)This is going to be so cool! In the work of Senior et al. (2019), Yang et al. (2020), and others, static optimization constraints are predicted and then provided to a static, general-purpose optimization algorithm (with some amount of manual tuning of optimization parameters to the specific task). Fascinatingly, there is a broad modern literature on the use of neural networks to learn to optimize. For example, Andrychowicz et al. (2016) demonstrate the learning of a domain-specific optimization algorithm that was subsequently shown to outperform all of the best-in-class optimizers available for that problem (a legacy of painstaking effort over more than a decade). This is amazing because there's the potential to learn better and better optimizers from data, which can in turn save time and money for future work - but it's also quite interesting to think of how an optimizer might learn to become specialized to individual optimization problems (such as navigating the energy landscape of a protein structure). (Image [CC-BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0) / [Thomas Splettstoesser](commons.wikimedia.org/wiki/User:Splette); [original](https://commons.wikimedia.org/wiki/File:Folding_funnel_schematic.svg/media/File:Folding_funnel_schematic.svg)) Work in progressThe plan is to modify the [GraphNetEncoder](https://github.com/google/jax-md/blob/master/jax_md/nn.py#L650) and [EnergyGraphNet](https://github.com/google/jax-md/blob/master/jax_md/energy.py#L944) from jax-md to also accept evolutionary data as input and not to predict a single energy value but to predict several things including: 1. A future conformation, 2. A distance matrix, 3. Bond angles, and 4. Compound interaction strengths. The simplest way to include (1) in a loss seems to be to have one of the model outputs be a coordinate for each node that is passed to a conventional jax-md energy function, which is then used to incentivize input conformations being mapped to output conformations with lower energy. It looks like (2) and (3) would be straightforward if the model returned edge representations in some form. For now, it's possible to also accomplish (4) in this way. The philosophy regarding (4) is that when folding a new protein you could obtain its interaction profile fairly easily, and if your model was previously trained to use interaction profiles as a guide (in the same way as using evolutionary data as a guide), it might then be able to solve the structure more easily. Succeeding with that means architecting the model in a way consistent with that use case. This might be done in a variety of ways. In the spirit of our learned optimizer, we might wish to learn an optimizer that not only minimizes energy but also predicts conformations that are more and more consistent with interaction profiles against a set of compounds. To do this, it seems we may need to run a simulator of those structure/compound interactions (which would be computationally expensive but not impossible, especially for important structures).
The tendency of the learned energy minimizer to minimize energy could be fine-tuned based on the interactions of produced structures with compounds. Or, we might consider the compound interactions as simply a guide to better learning how to extract information from evolutionary data and ignore their predictions at structure inference time. Alternatively, we might consider compound-polymer interaction strengths as a type of input, like evolutionary data, that needs to be correctly encoded but need not be predicted by the network - it is simply yet another kind of input information that can help the model learn to predict low-energy structures. It's possible we might want to synergize with the energy-predicting approach of jax-md, given that the task of learning to predict structures of lower energy seems closely related to that of computing energies - so training node functions to compute partial energies might be nice pre-training for their learning to perform position updates that reduce energy. SetupEnsure the most recent version of Flatland is installed.
###Code
!pip install git+https://github.com/cayley-group/flatland.git --quiet
###Output
_____no_output_____
###Markdown
Loading examplesHere we use a [Tensorflow Datasets](https://github.com/tensorflow/datasets) definition of a dataset generated using the Flatland environment. This provides a simplified interface for obtaining a [tf.data](https://www.tensorflow.org/guide/data) Dataset, which has a variety of convenient methods for handling the input example stream (e.g. for batching, shuffling, caching, and pre-fetching). Let's load an example from the "flatland_mock" dataset to see what the structure and data type of the examples will be.
###Code
from absl import logging
logging.set_verbosity(logging.INFO)
import tensorflow as tf
import tensorflow_datasets as tfds
import flatland.dataset
ds = tfds.load('flatland_mock', split="train")
assert isinstance(ds, tf.data.Dataset)
ds = ds.cache().repeat()
for example in tfds.as_numpy(ds):
break
example
###Output
_____no_output_____
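###Markdown
As a quick illustration of the convenience methods mentioned above, the same dataset can be shuffled, batched and prefetched as sketched below (the buffer and batch sizes here are arbitrary choices for this demo, not values used later in the notebook).
###Code
# Build a simple input pipeline on top of the cached, repeated dataset
batched_ds = ds.shuffle(buffer_size=256).batch(16).prefetch(tf.data.AUTOTUNE)
for batch in tfds.as_numpy(batched_ds.take(1)):
    # Each element is a dictionary of features, as returned by tfds.load
    print({name: value.shape for name, value in batch.items()})
###Output
_____no_output_____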
###Markdown
Train demo solverHere we have a wrapper to train the demo solver. It currently only trains an energy-predicting model, which will subsequently be transfer-learned to predict lower-energy structures.
###Code
from flatland.train import train_demo_solver
from absl import logging
logging.set_verbosity(logging.INFO)
params = train_demo_solver(num_training_steps=1,
training_log_every=1,
batch_size=16)
from flatland.train import demo_example_stream, graph_network_neighbor_list
from flatland.train import OrigamiNet
from jax_md import space
from functools import partial
box_size = 10.862
batch_size = 16
iter_examples = demo_example_stream(
batch_size=batch_size, split="train")
positions, energies, forces = next(iter_examples)
_, polymer_length, polymer_dimensions = positions.shape
displacement, shift = space.periodic(box_size)
neighbor_fn, init_fn, apply_fn = graph_network_neighbor_list(
network=OrigamiNet,
displacement_fn=displacement,
box_size=box_size,
polymer_length=polymer_length,
polymer_dimensions=polymer_dimensions,
r_cutoff=3.0,
dr_threshold=0.0)
neighbor = neighbor_fn(positions[0], extra_capacity=6)
structure_fn = partial(apply_fn, params)
structure = structure_fn(positions[0], neighbor)[1:]
structure
# A polymer of length 10 and dimension 2
structure.shape
%timeit structure_fn(next(iter_examples)[0][0], neighbor)
###Output
150 ms ± 3.39 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
Long auto-regressive searchHere we will provide some minimal experimentation with using the model to actually optimize a structure by simply repeatedly applying the structure minimizer. We'll characterize what happens to the energy - e.g. does it consistently go down over time or does it diverge after a certain length of such a "rollout"?
###Code
# WIP
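# A minimal sketch of such a rollout (not a final implementation). It assumes an
# `energy_fn(positions)` for this system (e.g. one of the jax_md.energy potentials,
# which is not defined in this notebook yet), plus `structure_fn`, `neighbor_fn`
# and `positions` from the cells above.
import jax.numpy as jnp
def rollout(init_positions, num_steps=20):
    current = init_positions
    energies = []
    for _ in range(num_steps):
        # Rebuild the neighbor list for the current conformation
        neighbor = neighbor_fn(current, extra_capacity=6)
        # Apply the learned minimizer to get the predicted next conformation
        current = structure_fn(current, neighbor)[1:]
        energies.append(energy_fn(current))
    return current, jnp.stack(energies)
# Example usage (once energy_fn is defined):
# final_positions, energy_trace = rollout(positions[0])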
###Output
_____no_output_____
###Markdown
Genetic + short auto-regressivePresuming the previous approach won't be stable under long rollouts, we'll use it only over somewhat short rollouts (for the horizon over which these are stable), in conjunction with an evolutionary optimization approach, to progressively determine better and better optimization starting points.
###Code
# WIP
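# A sketch of the idea only (not a final implementation): an evolutionary outer loop
# wrapped around short rollouts. It assumes the `rollout` sketch from the previous
# section and an `energy_fn(positions)` that scores a single conformation; mutation
# here is simple Gaussian noise.
import jax
import jax.numpy as jnp
def evolve(rng, population, num_generations=5, rollout_steps=3, noise_scale=0.1):
    for _ in range(num_generations):
        # Refine each candidate with a short learned-minimizer rollout
        refined = [rollout(p, num_steps=rollout_steps)[0] for p in population]
        scores = jnp.array([energy_fn(p) for p in refined])
        # Keep the lower-energy half of the population as parents
        order = jnp.argsort(scores)
        parents = [refined[int(i)] for i in order[: len(refined) // 2]]
        # Refill the population by mutating the parents with Gaussian noise
        children = []
        for parent in parents:
            rng, key = jax.random.split(rng)
            children.append(parent + noise_scale * jax.random.normal(key, parent.shape))
        population = parents + children
    return population[0]  # best parent from the final generation
# Example usage (once energy_fn is defined):
# best = evolve(jax.random.PRNGKey(0), [positions[i] for i in range(8)])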
###Output
_____no_output_____ |
Complete-Python-3-Bootcamp-master/06-Errors and Exception Handling/01-Errors and Exceptions Handling.ipynb | ###Markdown
Errors and Exception HandlingIn this lecture we will learn about Errors and Exception Handling in Python. You've definitely already encountered errors by this point in the course. For example:
###Code
print('Hello)
###Output
_____no_output_____
###Markdown
Note how we get a SyntaxError, with the further description that it was an EOL (End of Line Error) while scanning the string literal. This is specific enough for us to see that we forgot a single quote at the end of the line. Understanding these various error types will help you debug your code much faster. This type of error and description is known as an Exception. Even if a statement or expression is syntactically correct, it may cause an error when an attempt is made to execute it. Errors detected during execution are called exceptions and are not unconditionally fatal.You can check out the full list of built-in exceptions [here](https://docs.python.org/3/library/exceptions.html). Now let's learn how to handle errors and exceptions in our own code. try and exceptThe basic terminology and syntax used to handle errors in Python are the try and except statements. The code which can cause an exception to occur is put in the try block and the handling of the exception is then implemented in the except block of code. The syntax follows: try: You do your operations here... ... except ExceptionI: If there is ExceptionI, then execute this block. except ExceptionII: If there is ExceptionII, then execute this block. ... else: If there is no exception then execute this block. We can also just check for any exception with just using except: To get a better understanding of all this let's check out an example: We will look at some code that opens and writes a file:
###Code
try:
f = open('testfile','w')
f.write('Test write this')
except IOError:
# This will only check for an IOError exception and then execute this print statement
print("Error: Could not find file or read data")
else:
print("Content written successfully")
f.close()
###Output
Content written successfully
###Markdown
Now let's see what would happen if we did not have write permission (opening only with 'r'):
###Code
try:
f = open('testfile','r')
f.write('Test write this')
except IOError:
# This will only check for an IOError exception and then execute this print statement
print("Error: Could not find file or read data")
else:
print("Content written successfully")
f.close()
###Output
Error: Could not find file or read data
###Markdown
Great! Notice how we only printed a statement! The code still ran and we were able to continue doing actions and running code blocks. This is extremely useful when you have to account for possible input errors in your code. You can be prepared for the error and keep running code, instead of your code just breaking as we saw above.We could have also just said except: if we weren't sure what exception would occur. For example:
###Code
try:
f = open('testfile','r')
f.write('Test write this')
except:
# This will check for any exception and then execute this print statement
print("Error: Could not find file or read data")
else:
print("Content written successfully")
f.close()
###Output
Error: Could not find file or read data
###Markdown
Great! Now we don't actually need to memorize that list of exception types! Now what if we kept wanting to run code after the exception occurred? This is where finally comes in. finallyThe finally: block of code will always be run regardless if there was an exception in the try code block. The syntax is: try: Code block here ... Due to any exception, this code may be skipped! finally: This code block would always be executed.For example:
###Code
try:
f = open("testfile", "w")
f.write("Test write statement")
f.close()
finally:
print("Always execute finally code blocks")
###Output
Always execute finally code blocks
###Markdown
We can use this in conjunction with except. Let's see a new example that will take into account a user providing the wrong input:
###Code
def askint():
try:
val = int(input("Please enter an integer: "))
except:
print("Looks like you did not enter an integer!")
finally:
print("Finally, I executed!")
print(val)
askint()
askint()
###Output
Please enter an integer: five
Looks like you did not enter an integer!
Finally, I executed!
###Markdown
Notice how we got an error when trying to print val (because it was never properly assigned). Let's remedy this by asking the user and checking to make sure the input type is an integer:
###Code
def askint():
try:
val = int(input("Please enter an integer: "))
except:
print("Looks like you did not enter an integer!")
val = int(input("Try again-Please enter an integer: "))
finally:
print("Finally, I executed!")
print(val)
askint()
###Output
Please enter an integer: five
Looks like you did not enter an integer!
Try again-Please enter an integer: four
Finally, I executed!
###Markdown
Hmmm...that only did one check. How can we continually keep checking? We can use a while loop!
###Code
def askint():
while True:
try:
val = int(input("Please enter an integer: "))
except:
print("Looks like you did not enter an integer!")
continue
else:
print("Yep that's an integer!")
break
finally:
print("Finally, I executed!")
print(val)
askint()
###Output
Please enter an integer: five
Looks like you did not enter an integer!
Finally, I executed!
Please enter an integer: four
Looks like you did not enter an integer!
Finally, I executed!
Please enter an integer: 3
Yep that's an integer!
Finally, I executed!
###Markdown
So why did our function print "Finally, I executed!" after each trial, yet it never printed `val` itself? This is because with a try/except/finally clause, any continue or break statements are reserved until *after* the try clause is completed. This means that even though a successful input of **3** brought us to the else: block, and a break statement was thrown, the try clause continued through to finally: before breaking out of the while loop. And since print(val) was outside the try clause, the break statement prevented it from running.Let's make one final adjustment:
###Code
def askint():
while True:
try:
val = int(input("Please enter an integer: "))
except:
print("Looks like you did not enter an integer!")
continue
else:
print("Yep that's an integer!")
print(val)
break
finally:
print("Finally, I executed!")
askint()
###Output
Please enter an integer: six
Looks like you did not enter an integer!
Finally, I executed!
Please enter an integer: 6
Yep that's an integer!
6
Finally, I executed!
|
tam deep_learning.ipynb | ###Markdown
Embedding layer
###Code
Embedding_Layer=Embedding(input_dim = words + 1, output_dim = 300,input_length=max_length)
###Output
_____no_output_____
###Markdown
CNN
###Code
model = Sequential()
model.add(Embedding_Layer)
model.add(Conv1D(filters =256,kernel_size=2, activation='relu'))
model.add(MaxPooling1D(2))
model.add(Conv1D(filters =512,kernel_size=3, activation='relu'))
model.add(MaxPooling1D(3,padding='same'))
model.add(Flatten())
model.add(Dense(32, activation = 'relu'))
#model.add(Dense(4, activation = 'relu'))
model.add(Dense(3, activation = 'sigmoid'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 20, 300) 9612600
_________________________________________________________________
conv1d (Conv1D) (None, 19, 256) 153856
_________________________________________________________________
max_pooling1d (MaxPooling1D) (None, 9, 256) 0
_________________________________________________________________
conv1d_1 (Conv1D) (None, 7, 512) 393728
_________________________________________________________________
max_pooling1d_1 (MaxPooling1 (None, 3, 512) 0
_________________________________________________________________
flatten (Flatten) (None, 1536) 0
_________________________________________________________________
dense (Dense) (None, 32) 49184
_________________________________________________________________
dense_1 (Dense) (None, 3) 99
=================================================================
Total params: 10,209,467
Trainable params: 10,209,467
Non-trainable params: 0
_________________________________________________________________
###Markdown
LSTM
###Code
model_1 = Sequential()
model_1.add(Embedding_Layer)
model_1.add(LSTM(256,return_sequences=False))
model_1.add(Flatten())
model_1.add(Dense(32, activation = 'relu'))
#model.add(Dense(4, activation = 'relu'))
model_1.add(Dense(3, activation = 'sigmoid'))
model_1.summary()
###Output
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 20, 300) 9612600
_________________________________________________________________
lstm (LSTM) (None, 256) 570368
_________________________________________________________________
flatten_1 (Flatten) (None, 256) 0
_________________________________________________________________
dense_2 (Dense) (None, 32) 8224
_________________________________________________________________
dense_3 (Dense) (None, 3) 99
=================================================================
Total params: 10,191,291
Trainable params: 10,191,291
Non-trainable params: 0
_________________________________________________________________
###Markdown
Bidirectional
###Code
model_2 = Sequential()
model_2.add(Embedding_Layer)
model_2.add(Bidirectional(LSTM(256,return_sequences=False)))
model_2.add(Flatten())
model_2.add(Dense(32, activation = 'relu'))
#model.add(Dense(4, activation = 'relu'))
model_2.add(Dense(3, activation = 'sigmoid'))
model_2.summary()
###Output
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 20, 300) 9612600
_________________________________________________________________
bidirectional (Bidirectional (None, 512) 1140736
_________________________________________________________________
flatten_2 (Flatten) (None, 512) 0
_________________________________________________________________
dense_4 (Dense) (None, 32) 16416
_________________________________________________________________
dense_5 (Dense) (None, 3) 99
=================================================================
Total params: 10,769,851
Trainable params: 10,769,851
Non-trainable params: 0
_________________________________________________________________
###Markdown
compile
###Code
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model_1.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model_2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
###Output
_____no_output_____
###Markdown
Training models
###Code
model.fit(padd,label,epochs=20,verbose=1,batch_size=64)
model_1.fit(padd,label,epochs=20,verbose=1,batch_size=64)
model_2.fit(padd,label,epochs=20,verbose=1,batch_size=64)
###Output
Epoch 1/20
253/253 [==============================] - 112s 428ms/step - loss: 0.2755 - acc: 0.8118
Epoch 2/20
253/253 [==============================] - 107s 423ms/step - loss: 0.0934 - acc: 0.9399
Epoch 3/20
253/253 [==============================] - 108s 425ms/step - loss: 0.0747 - acc: 0.9465
Epoch 4/20
253/253 [==============================] - 106s 418ms/step - loss: 0.0638 - acc: 0.9557
Epoch 5/20
253/253 [==============================] - 108s 428ms/step - loss: 0.0623 - acc: 0.9547
Epoch 6/20
253/253 [==============================] - 107s 424ms/step - loss: 0.0620 - acc: 0.9537
Epoch 7/20
253/253 [==============================] - 108s 427ms/step - loss: 0.0547 - acc: 0.9631
Epoch 8/20
253/253 [==============================] - 107s 423ms/step - loss: 0.0490 - acc: 0.9628
Epoch 9/20
253/253 [==============================] - 106s 420ms/step - loss: 0.0539 - acc: 0.9619
Epoch 10/20
253/253 [==============================] - 105s 417ms/step - loss: 0.0482 - acc: 0.9680
Epoch 11/20
253/253 [==============================] - 106s 417ms/step - loss: 0.0455 - acc: 0.9691
Epoch 12/20
253/253 [==============================] - 107s 422ms/step - loss: 0.0445 - acc: 0.9692
Epoch 13/20
253/253 [==============================] - 107s 421ms/step - loss: 0.0441 - acc: 0.9684
Epoch 14/20
253/253 [==============================] - 106s 418ms/step - loss: 0.0420 - acc: 0.9693
Epoch 15/20
253/253 [==============================] - 106s 421ms/step - loss: 0.0417 - acc: 0.9723
Epoch 16/20
253/253 [==============================] - 106s 417ms/step - loss: 0.0378 - acc: 0.9733
Epoch 17/20
253/253 [==============================] - 105s 413ms/step - loss: 0.0359 - acc: 0.9747
Epoch 18/20
253/253 [==============================] - 105s 415ms/step - loss: 0.0377 - acc: 0.9723
Epoch 19/20
253/253 [==============================] - 106s 418ms/step - loss: 0.0379 - acc: 0.9739
Epoch 20/20
253/253 [==============================] - 105s 415ms/step - loss: 0.0370 - acc: 0.9740
###Markdown
Development data
###Code
d_data=pd.read_csv('/content/drive/MyDrive/Ankit/dec 2020 codalab/Hope speech/Tamil/tamil_hope_first_dev.csv',names=['text','label','nan'],sep='\t')
d_data=d_data[["text","label"]]
d_data
#processing as training set
d_data['text']=d_data['text'].str.lower() #changing into lower case (remove and check acc too)
d_data['text']=d_data['text'].str.strip() #remove white spaces
d_data['text']=d_data['text'].str.replace(r'\d+','') #remove numbers
#d_data['text']=d_data['text'].apply(lambda x: x.encode('ascii', 'ignore').decode('ascii')) #removing emoji
d_data['text']=d_data['text'].str.replace('[^\w\s]','') #removing punct
d_data['text']=d_data['text'].apply(lambda x:remove_URL(x))
#d_data['text'] = d_data['text'].apply(lambda x: remove_sw(x))
###Output
_____no_output_____
###Markdown
###Code
encoded1 =tok1.texts_to_sequences(d_data['text'])
print(d_data['text'][0])
encoded1[0]
padded = sequence.pad_sequences(encoded1, maxlen=max_length, padding='post')
padded[0]
val_labelEncode=labelEncode.transform(d_data['label'])
val_label=to_categorical(np.asarray(val_labelEncode))
val_label[0]
###Output
_____no_output_____
###Markdown
prediction
###Code
# Development prediction of CNN
dev_predictions = model.predict(padded)
# Find which of the three output neurons has the highest probability;
# this gives the predicted class
# by converting the probabilities into a one-hot 0/1 vector
dev_predictions1 = np.zeros_like(dev_predictions)
dev_predictions1[np.arange(len(dev_predictions)), dev_predictions.argmax(1)] = 1
#print(dev_predictions)
#print(dev_predictions1)
# Development prediction of LSTM
l_dev_predictions = model_1.predict(padded)
# Find which of the three output neurons has the highest probability;
# this gives the predicted class
# by converting the probabilities into a one-hot 0/1 vector
l_dev_predictions1 = np.zeros_like(l_dev_predictions)
l_dev_predictions1[np.arange(len(l_dev_predictions)), l_dev_predictions.argmax(1)] = 1
#print(l_dev_predictions)
#print(l_dev_predictions1)
# Development prediction of BiLSTM
b_dev_predictions = model_2.predict(padded)
# Find which of the three output neurons has the highest probability;
# this gives the predicted class
# by converting the probabilities into a one-hot 0/1 vector
b_dev_predictions1 = np.zeros_like(b_dev_predictions)
b_dev_predictions1[np.arange(len(b_dev_predictions)), b_dev_predictions.argmax(1)] = 1
#print(b_dev_predictions)
#print(b_dev_predictions1)
###Output
_____no_output_____
###Markdown
Accuracy report
###Code
#Accuracy CNN
from sklearn.metrics import classification_report
print(classification_report(val_label,dev_predictions1))
#Accuracy LSTM
print(classification_report(val_label,l_dev_predictions1))
#Accuracy BiLSTM
print(classification_report(val_label,b_dev_predictions1))
###Output
_____no_output_____
###Markdown
Testing Data
###Code
t_data=pd.read_csv('/content/drive/MyDrive/Ankit/dec 2020 codalab/Hope speech/Tamil/tamil_hope_test.csv',names=['text'],sep=',')
t_data
###Output
_____no_output_____ |
CSESFI_13_GrayCode.ipynb | ###Markdown
###Code
n = int(input())
def addZeros(string, n):
    # Left-pad the binary string with zeros so that it has length n
    length = len(string)
    for i in range((n-length)):
        string = "0" + string
    return string
for i in range(1 << n):
    a = i ^ (i >> 1)  # Gray code: consecutive values differ in exactly one bit
    binary = bin(a)
    # print(binary)
    split_binary = binary.split('0b')[1]
    new_binary = addZeros(split_binary, n)
    print(new_binary)
###Output
Streaming output truncated to the last 5000 lines.
1001101001000100
1001101001000101
1001101001000111
1001101001000110
1001101001000010
1001101001000011
1001101001000001
1001101001000000
1001101011000000
1001101011000001
1001101011000011
1001101011000010
1001101011000110
1001101011000111
1001101011000101
1001101011000100
1001101011001100
1001101011001101
1001101011001111
1001101011001110
1001101011001010
1001101011001011
1001101011001001
1001101011001000
1001101011011000
1001101011011001
1001101011011011
1001101011011010
1001101011011110
1001101011011111
1001101011011101
1001101011011100
1001101011010100
1001101011010101
1001101011010111
1001101011010110
1001101011010010
1001101011010011
1001101011010001
1001101011010000
1001101011110000
1001101011110001
1001101011110011
1001101011110010
1001101011110110
1001101011110111
1001101011110101
1001101011110100
1001101011111100
1001101011111101
1001101011111111
1001101011111110
1001101011111010
1001101011111011
1001101011111001
1001101011111000
1001101011101000
1001101011101001
1001101011101011
1001101011101010
1001101011101110
1001101011101111
1001101011101101
1001101011101100
1001101011100100
1001101011100101
1001101011100111
1001101011100110
1001101011100010
1001101011100011
1001101011100001
1001101011100000
1001101010100000
1001101010100001
1001101010100011
1001101010100010
1001101010100110
1001101010100111
1001101010100101
1001101010100100
1001101010101100
1001101010101101
1001101010101111
1001101010101110
1001101010101010
1001101010101011
1001101010101001
1001101010101000
1001101010111000
1001101010111001
1001101010111011
1001101010111010
1001101010111110
1001101010111111
1001101010111101
1001101010111100
1001101010110100
1001101010110101
1001101010110111
1001101010110110
1001101010110010
1001101010110011
1001101010110001
1001101010110000
1001101010010000
1001101010010001
1001101010010011
1001101010010010
1001101010010110
1001101010010111
1001101010010101
1001101010010100
1001101010011100
1001101010011101
1001101010011111
1001101010011110
1001101010011010
1001101010011011
1001101010011001
1001101010011000
1001101010001000
1001101010001001
1001101010001011
1001101010001010
1001101010001110
1001101010001111
1001101010001101
1001101010001100
1001101010000100
1001101010000101
1001101010000111
1001101010000110
1001101010000010
1001101010000011
1001101010000001
1001101010000000
1001101110000000
1001101110000001
1001101110000011
1001101110000010
1001101110000110
1001101110000111
1001101110000101
1001101110000100
1001101110001100
1001101110001101
1001101110001111
1001101110001110
1001101110001010
1001101110001011
1001101110001001
1001101110001000
1001101110011000
1001101110011001
1001101110011011
1001101110011010
1001101110011110
1001101110011111
1001101110011101
1001101110011100
1001101110010100
1001101110010101
1001101110010111
1001101110010110
1001101110010010
1001101110010011
1001101110010001
1001101110010000
1001101110110000
1001101110110001
1001101110110011
1001101110110010
1001101110110110
1001101110110111
1001101110110101
1001101110110100
1001101110111100
1001101110111101
1001101110111111
1001101110111110
1001101110111010
1001101110111011
1001101110111001
1001101110111000
1001101110101000
1001101110101001
1001101110101011
1001101110101010
1001101110101110
1001101110101111
1001101110101101
1001101110101100
1001101110100100
1001101110100101
1001101110100111
1001101110100110
1001101110100010
1001101110100011
1001101110100001
1001101110100000
1001101111100000
1001101111100001
1001101111100011
1001101111100010
1001101111100110
1001101111100111
1001101111100101
1001101111100100
1001101111101100
1001101111101101
1001101111101111
1001101111101110
1001101111101010
1001101111101011
1001101111101001
1001101111101000
1001101111111000
1001101111111001
1001101111111011
1001101111111010
1001101111111110
1001101111111111
1001101111111101
1001101111111100
1001101111110100
1001101111110101
1001101111110111
1001101111110110
1001101111110010
1001101111110011
1001101111110001
1001101111110000
1001101111010000
1001101111010001
1001101111010011
1001101111010010
1001101111010110
1001101111010111
1001101111010101
1001101111010100
1001101111011100
1001101111011101
1001101111011111
1001101111011110
1001101111011010
1001101111011011
1001101111011001
1001101111011000
1001101111001000
1001101111001001
1001101111001011
1001101111001010
1001101111001110
1001101111001111
1001101111001101
1001101111001100
1001101111000100
1001101111000101
1001101111000111
1001101111000110
1001101111000010
1001101111000011
1001101111000001
1001101111000000
1001101101000000
1001101101000001
1001101101000011
1001101101000010
1001101101000110
1001101101000111
1001101101000101
1001101101000100
1001101101001100
1001101101001101
1001101101001111
1001101101001110
1001101101001010
1001101101001011
1001101101001001
1001101101001000
1001101101011000
1001101101011001
1001101101011011
1001101101011010
1001101101011110
1001101101011111
1001101101011101
1001101101011100
1001101101010100
1001101101010101
1001101101010111
1001101101010110
1001101101010010
1001101101010011
1001101101010001
1001101101010000
1001101101110000
1001101101110001
1001101101110011
1001101101110010
1001101101110110
1001101101110111
1001101101110101
1001101101110100
1001101101111100
1001101101111101
1001101101111111
1001101101111110
1001101101111010
1001101101111011
1001101101111001
1001101101111000
1001101101101000
1001101101101001
1001101101101011
1001101101101010
1001101101101110
1001101101101111
1001101101101101
1001101101101100
1001101101100100
1001101101100101
1001101101100111
1001101101100110
1001101101100010
1001101101100011
1001101101100001
1001101101100000
1001101100100000
1001101100100001
1001101100100011
1001101100100010
1001101100100110
1001101100100111
1001101100100101
1001101100100100
1001101100101100
1001101100101101
1001101100101111
1001101100101110
1001101100101010
1001101100101011
1001101100101001
1001101100101000
1001101100111000
1001101100111001
1001101100111011
1001101100111010
1001101100111110
1001101100111111
1001101100111101
1001101100111100
1001101100110100
1001101100110101
1001101100110111
1001101100110110
1001101100110010
1001101100110011
1001101100110001
1001101100110000
1001101100010000
1001101100010001
1001101100010011
1001101100010010
1001101100010110
1001101100010111
1001101100010101
1001101100010100
1001101100011100
1001101100011101
1001101100011111
1001101100011110
1001101100011010
1001101100011011
1001101100011001
1001101100011000
1001101100001000
1001101100001001
1001101100001011
1001101100001010
1001101100001110
1001101100001111
1001101100001101
1001101100001100
1001101100000100
1001101100000101
1001101100000111
1001101100000110
1001101100000010
1001101100000011
1001101100000001
1001101100000000
1001100100000000
1001100100000001
1001100100000011
1001100100000010
1001100100000110
1001100100000111
1001100100000101
1001100100000100
1001100100001100
1001100100001101
1001100100001111
1001100100001110
1001100100001010
1001100100001011
1001100100001001
1001100100001000
1001100100011000
1001100100011001
1001100100011011
1001100100011010
1001100100011110
1001100100011111
1001100100011101
1001100100011100
1001100100010100
1001100100010101
1001100100010111
1001100100010110
1001100100010010
1001100100010011
1001100100010001
1001100100010000
1001100100110000
1001100100110001
1001100100110011
1001100100110010
1001100100110110
1001100100110111
1001100100110101
1001100100110100
1001100100111100
1001100100111101
1001100100111111
1001100100111110
1001100100111010
1001100100111011
1001100100111001
1001100100111000
1001100100101000
1001100100101001
1001100100101011
1001100100101010
1001100100101110
1001100100101111
1001100100101101
1001100100101100
1001100100100100
1001100100100101
1001100100100111
1001100100100110
1001100100100010
1001100100100011
1001100100100001
1001100100100000
1001100101100000
1001100101100001
1001100101100011
1001100101100010
1001100101100110
1001100101100111
1001100101100101
1001100101100100
1001100101101100
1001100101101101
1001100101101111
1001100101101110
1001100101101010
1001100101101011
1001100101101001
1001100101101000
1001100101111000
1001100101111001
1001100101111011
1001100101111010
1001100101111110
1001100101111111
1001100101111101
1001100101111100
1001100101110100
1001100101110101
1001100101110111
1001100101110110
1001100101110010
1001100101110011
1001100101110001
1001100101110000
1001100101010000
1001100101010001
1001100101010011
1001100101010010
1001100101010110
1001100101010111
1001100101010101
1001100101010100
1001100101011100
1001100101011101
1001100101011111
1001100101011110
1001100101011010
1001100101011011
1001100101011001
1001100101011000
1001100101001000
1001100101001001
1001100101001011
1001100101001010
1001100101001110
1001100101001111
1001100101001101
1001100101001100
1001100101000100
1001100101000101
1001100101000111
1001100101000110
1001100101000010
1001100101000011
1001100101000001
1001100101000000
1001100111000000
1001100111000001
1001100111000011
1001100111000010
1001100111000110
1001100111000111
1001100111000101
1001100111000100
1001100111001100
1001100111001101
1001100111001111
1001100111001110
1001100111001010
1001100111001011
1001100111001001
1001100111001000
1001100111011000
1001100111011001
1001100111011011
1001100111011010
1001100111011110
1001100111011111
1001100111011101
1001100111011100
1001100111010100
1001100111010101
1001100111010111
1001100111010110
1001100111010010
1001100111010011
1001100111010001
1001100111010000
1001100111110000
1001100111110001
1001100111110011
1001100111110010
1001100111110110
1001100111110111
1001100111110101
1001100111110100
1001100111111100
1001100111111101
1001100111111111
1001100111111110
1001100111111010
1001100111111011
1001100111111001
1001100111111000
1001100111101000
1001100111101001
1001100111101011
1001100111101010
1001100111101110
1001100111101111
1001100111101101
1001100111101100
1001100111100100
1001100111100101
1001100111100111
1001100111100110
1001100111100010
1001100111100011
1001100111100001
1001100111100000
1001100110100000
1001100110100001
1001100110100011
1001100110100010
1001100110100110
1001100110100111
1001100110100101
1001100110100100
1001100110101100
1001100110101101
1001100110101111
1001100110101110
1001100110101010
1001100110101011
1001100110101001
1001100110101000
1001100110111000
1001100110111001
1001100110111011
1001100110111010
1001100110111110
1001100110111111
1001100110111101
1001100110111100
1001100110110100
1001100110110101
1001100110110111
1001100110110110
1001100110110010
1001100110110011
1001100110110001
1001100110110000
1001100110010000
1001100110010001
1001100110010011
1001100110010010
1001100110010110
1001100110010111
1001100110010101
1001100110010100
1001100110011100
1001100110011101
1001100110011111
1001100110011110
1001100110011010
1001100110011011
1001100110011001
1001100110011000
1001100110001000
1001100110001001
1001100110001011
1001100110001010
1001100110001110
1001100110001111
1001100110001101
1001100110001100
1001100110000100
1001100110000101
1001100110000111
1001100110000110
1001100110000010
1001100110000011
1001100110000001
1001100110000000
1001100010000000
1001100010000001
1001100010000011
1001100010000010
1001100010000110
1001100010000111
1001100010000101
1001100010000100
1001100010001100
1001100010001101
1001100010001111
1001100010001110
1001100010001010
1001100010001011
1001100010001001
1001100010001000
1001100010011000
1001100010011001
1001100010011011
1001100010011010
1001100010011110
1001100010011111
1001100010011101
1001100010011100
1001100010010100
1001100010010101
1001100010010111
1001100010010110
1001100010010010
1001100010010011
1001100010010001
1001100010010000
1001100010110000
1001100010110001
1001100010110011
1001100010110010
1001100010110110
1001100010110111
1001100010110101
1001100010110100
1001100010111100
1001100010111101
1001100010111111
1001100010111110
1001100010111010
1001100010111011
1001100010111001
1001100010111000
1001100010101000
1001100010101001
1001100010101011
1001100010101010
1001100010101110
1001100010101111
1001100010101101
1001100010101100
1001100010100100
1001100010100101
1001100010100111
1001100010100110
1001100010100010
1001100010100011
1001100010100001
1001100010100000
1001100011100000
1001100011100001
1001100011100011
1001100011100010
1001100011100110
1001100011100111
1001100011100101
1001100011100100
1001100011101100
1001100011101101
1001100011101111
1001100011101110
1001100011101010
1001100011101011
1001100011101001
1001100011101000
1001100011111000
1001100011111001
1001100011111011
1001100011111010
1001100011111110
1001100011111111
1001100011111101
1001100011111100
1001100011110100
1001100011110101
1001100011110111
1001100011110110
1001100011110010
1001100011110011
1001100011110001
1001100011110000
1001100011010000
1001100011010001
1001100011010011
1001100011010010
1001100011010110
1001100011010111
1001100011010101
1001100011010100
1001100011011100
1001100011011101
1001100011011111
1001100011011110
1001100011011010
1001100011011011
1001100011011001
1001100011011000
1001100011001000
1001100011001001
1001100011001011
1001100011001010
1001100011001110
1001100011001111
1001100011001101
1001100011001100
1001100011000100
1001100011000101
1001100011000111
1001100011000110
1001100011000010
1001100011000011
1001100011000001
1001100011000000
1001100001000000
1001100001000001
1001100001000011
1001100001000010
1001100001000110
1001100001000111
1001100001000101
1001100001000100
1001100001001100
1001100001001101
1001100001001111
1001100001001110
1001100001001010
1001100001001011
1001100001001001
1001100001001000
1001100001011000
1001100001011001
1001100001011011
1001100001011010
1001100001011110
1001100001011111
1001100001011101
1001100001011100
1001100001010100
1001100001010101
1001100001010111
1001100001010110
1001100001010010
1001100001010011
1001100001010001
1001100001010000
1001100001110000
1001100001110001
1001100001110011
1001100001110010
1001100001110110
1001100001110111
1001100001110101
1001100001110100
1001100001111100
1001100001111101
1001100001111111
1001100001111110
1001100001111010
1001100001111011
1001100001111001
1001100001111000
1001100001101000
1001100001101001
1001100001101011
1001100001101010
1001100001101110
1001100001101111
1001100001101101
1001100001101100
1001100001100100
1001100001100101
1001100001100111
1001100001100110
1001100001100010
1001100001100011
1001100001100001
1001100001100000
1001100000100000
1001100000100001
1001100000100011
1001100000100010
1001100000100110
1001100000100111
1001100000100101
1001100000100100
1001100000101100
1001100000101101
1001100000101111
1001100000101110
1001100000101010
1001100000101011
1001100000101001
1001100000101000
1001100000111000
1001100000111001
1001100000111011
1001100000111010
1001100000111110
1001100000111111
1001100000111101
1001100000111100
1001100000110100
1001100000110101
1001100000110111
1001100000110110
1001100000110010
1001100000110011
1001100000110001
1001100000110000
1001100000010000
1001100000010001
1001100000010011
1001100000010010
1001100000010110
1001100000010111
1001100000010101
1001100000010100
1001100000011100
1001100000011101
1001100000011111
1001100000011110
1001100000011010
1001100000011011
1001100000011001
1001100000011000
1001100000001000
1001100000001001
1001100000001011
1001100000001010
1001100000001110
1001100000001111
1001100000001101
1001100000001100
1001100000000100
1001100000000101
1001100000000111
1001100000000110
1001100000000010
1001100000000011
1001100000000001
1001100000000000
1000100000000000
1000100000000001
1000100000000011
1000100000000010
1000100000000110
1000100000000111
1000100000000101
1000100000000100
1000100000001100
1000100000001101
1000100000001111
1000100000001110
1000100000001010
1000100000001011
1000100000001001
1000100000001000
1000100000011000
1000100000011001
1000100000011011
1000100000011010
1000100000011110
1000100000011111
1000100000011101
1000100000011100
1000100000010100
1000100000010101
1000100000010111
1000100000010110
1000100000010010
1000100000010011
1000100000010001
1000100000010000
1000100000110000
1000100000110001
1000100000110011
1000100000110010
1000100000110110
1000100000110111
1000100000110101
1000100000110100
1000100000111100
1000100000111101
1000100000111111
1000100000111110
1000100000111010
1000100000111011
1000100000111001
1000100000111000
1000100000101000
1000100000101001
1000100000101011
1000100000101010
1000100000101110
1000100000101111
1000100000101101
1000100000101100
1000100000100100
1000100000100101
1000100000100111
1000100000100110
1000100000100010
1000100000100011
1000100000100001
1000100000100000
1000100001100000
1000100001100001
1000100001100011
1000100001100010
1000100001100110
1000100001100111
1000100001100101
1000100001100100
1000100001101100
1000100001101101
1000100001101111
1000100001101110
1000100001101010
1000100001101011
1000100001101001
1000100001101000
1000100001111000
1000100001111001
1000100001111011
1000100001111010
1000100001111110
1000100001111111
1000100001111101
1000100001111100
1000100001110100
1000100001110101
1000100001110111
1000100001110110
1000100001110010
1000100001110011
1000100001110001
1000100001110000
1000100001010000
1000100001010001
1000100001010011
1000100001010010
1000100001010110
1000100001010111
1000100001010101
1000100001010100
1000100001011100
1000100001011101
1000100001011111
1000100001011110
1000100001011010
1000100001011011
1000100001011001
1000100001011000
1000100001001000
1000100001001001
1000100001001011
1000100001001010
1000100001001110
1000100001001111
1000100001001101
1000100001001100
1000100001000100
1000100001000101
1000100001000111
1000100001000110
1000100001000010
1000100001000011
1000100001000001
1000100001000000
1000100011000000
1000100011000001
1000100011000011
1000100011000010
1000100011000110
1000100011000111
1000100011000101
1000100011000100
1000100011001100
1000100011001101
1000100011001111
1000100011001110
1000100011001010
1000100011001011
1000100011001001
1000100011001000
1000100011011000
1000100011011001
1000100011011011
1000100011011010
1000100011011110
1000100011011111
1000100011011101
1000100011011100
1000100011010100
1000100011010101
1000100011010111
1000100011010110
1000100011010010
1000100011010011
1000100011010001
1000100011010000
1000100011110000
1000100011110001
1000100011110011
1000100011110010
1000100011110110
1000100011110111
1000100011110101
1000100011110100
1000100011111100
1000100011111101
1000100011111111
1000100011111110
1000100011111010
1000100011111011
1000100011111001
1000100011111000
1000100011101000
1000100011101001
1000100011101011
1000100011101010
1000100011101110
1000100011101111
1000100011101101
1000100011101100
1000100011100100
1000100011100101
1000100011100111
1000100011100110
1000100011100010
1000100011100011
1000100011100001
1000100011100000
1000100010100000
1000100010100001
1000100010100011
1000100010100010
1000100010100110
1000100010100111
1000100010100101
1000100010100100
1000100010101100
1000100010101101
1000100010101111
1000100010101110
1000100010101010
1000100010101011
1000100010101001
1000100010101000
1000100010111000
1000100010111001
1000100010111011
1000100010111010
1000100010111110
1000100010111111
1000100010111101
1000100010111100
1000100010110100
1000100010110101
1000100010110111
1000100010110110
1000100010110010
1000100010110011
1000100010110001
1000100010110000
1000100010010000
1000100010010001
1000100010010011
1000100010010010
1000100010010110
1000100010010111
1000100010010101
1000100010010100
1000100010011100
1000100010011101
1000100010011111
1000100010011110
1000100010011010
1000100010011011
1000100010011001
1000100010011000
1000100010001000
1000100010001001
1000100010001011
1000100010001010
1000100010001110
1000100010001111
1000100010001101
1000100010001100
1000100010000100
1000100010000101
1000100010000111
1000100010000110
1000100010000010
1000100010000011
1000100010000001
1000100010000000
1000100110000000
1000100110000001
1000100110000011
1000100110000010
1000100110000110
1000100110000111
1000100110000101
1000100110000100
1000100110001100
1000100110001101
1000100110001111
1000100110001110
1000100110001010
1000100110001011
1000100110001001
1000100110001000
1000100110011000
1000100110011001
1000100110011011
1000100110011010
1000100110011110
1000100110011111
1000100110011101
1000100110011100
1000100110010100
1000100110010101
1000100110010111
1000100110010110
1000100110010010
1000100110010011
1000100110010001
1000100110010000
1000100110110000
1000100110110001
1000100110110011
1000100110110010
1000100110110110
1000100110110111
1000100110110101
1000100110110100
1000100110111100
1000100110111101
1000100110111111
1000100110111110
1000100110111010
1000100110111011
1000100110111001
1000100110111000
1000100110101000
1000100110101001
1000100110101011
1000100110101010
1000100110101110
1000100110101111
1000100110101101
1000100110101100
1000100110100100
1000100110100101
1000100110100111
1000100110100110
1000100110100010
1000100110100011
1000100110100001
1000100110100000
1000100111100000
1000100111100001
1000100111100011
1000100111100010
1000100111100110
1000100111100111
1000100111100101
1000100111100100
1000100111101100
1000100111101101
1000100111101111
1000100111101110
1000100111101010
1000100111101011
1000100111101001
1000100111101000
1000100111111000
1000100111111001
1000100111111011
1000100111111010
1000100111111110
1000100111111111
1000100111111101
1000100111111100
1000100111110100
1000100111110101
1000100111110111
1000100111110110
1000100111110010
1000100111110011
1000100111110001
1000100111110000
1000100111010000
1000100111010001
1000100111010011
1000100111010010
1000100111010110
1000100111010111
1000100111010101
1000100111010100
1000100111011100
1000100111011101
1000100111011111
1000100111011110
1000100111011010
1000100111011011
1000100111011001
1000100111011000
1000100111001000
1000100111001001
1000100111001011
1000100111001010
1000100111001110
1000100111001111
1000100111001101
1000100111001100
1000100111000100
1000100111000101
1000100111000111
1000100111000110
1000100111000010
1000100111000011
1000100111000001
1000100111000000
1000100101000000
1000100101000001
1000100101000011
1000100101000010
1000100101000110
1000100101000111
1000100101000101
1000100101000100
1000100101001100
1000100101001101
1000100101001111
1000100101001110
1000100101001010
1000100101001011
1000100101001001
1000100101001000
1000100101011000
1000100101011001
1000100101011011
1000100101011010
1000100101011110
1000100101011111
1000100101011101
1000100101011100
1000100101010100
1000100101010101
1000100101010111
1000100101010110
1000100101010010
1000100101010011
1000100101010001
1000100101010000
1000100101110000
1000100101110001
1000100101110011
1000100101110010
1000100101110110
1000100101110111
1000100101110101
1000100101110100
1000100101111100
1000100101111101
1000100101111111
1000100101111110
1000100101111010
1000100101111011
1000100101111001
1000100101111000
1000100101101000
1000100101101001
1000100101101011
1000100101101010
1000100101101110
1000100101101111
1000100101101101
1000100101101100
1000100101100100
1000100101100101
1000100101100111
1000100101100110
1000100101100010
1000100101100011
1000100101100001
1000100101100000
1000100100100000
1000100100100001
1000100100100011
1000100100100010
1000100100100110
1000100100100111
1000100100100101
1000100100100100
1000100100101100
1000100100101101
1000100100101111
1000100100101110
1000100100101010
1000100100101011
1000100100101001
1000100100101000
1000100100111000
1000100100111001
1000100100111011
1000100100111010
1000100100111110
1000100100111111
1000100100111101
1000100100111100
1000100100110100
1000100100110101
1000100100110111
1000100100110110
1000100100110010
1000100100110011
1000100100110001
1000100100110000
1000100100010000
1000100100010001
1000100100010011
1000100100010010
1000100100010110
1000100100010111
1000100100010101
1000100100010100
1000100100011100
1000100100011101
1000100100011111
1000100100011110
1000100100011010
1000100100011011
1000100100011001
1000100100011000
1000100100001000
1000100100001001
1000100100001011
1000100100001010
1000100100001110
1000100100001111
1000100100001101
1000100100001100
1000100100000100
1000100100000101
1000100100000111
1000100100000110
1000100100000010
1000100100000011
1000100100000001
1000100100000000
1000101100000000
1000101100000001
1000101100000011
1000101100000010
1000101100000110
1000101100000111
1000101100000101
1000101100000100
1000101100001100
1000101100001101
1000101100001111
1000101100001110
1000101100001010
1000101100001011
1000101100001001
1000101100001000
1000101100011000
1000101100011001
1000101100011011
1000101100011010
1000101100011110
1000101100011111
1000101100011101
1000101100011100
1000101100010100
1000101100010101
1000101100010111
1000101100010110
1000101100010010
1000101100010011
1000101100010001
1000101100010000
1000101100110000
1000101100110001
1000101100110011
1000101100110010
1000101100110110
1000101100110111
1000101100110101
1000101100110100
1000101100111100
1000101100111101
1000101100111111
1000101100111110
1000101100111010
1000101100111011
1000101100111001
1000101100111000
1000101100101000
1000101100101001
1000101100101011
1000101100101010
1000101100101110
1000101100101111
1000101100101101
1000101100101100
1000101100100100
1000101100100101
1000101100100111
1000101100100110
1000101100100010
1000101100100011
1000101100100001
1000101100100000
1000101101100000
1000101101100001
1000101101100011
1000101101100010
1000101101100110
1000101101100111
1000101101100101
1000101101100100
1000101101101100
1000101101101101
1000101101101111
1000101101101110
1000101101101010
1000101101101011
1000101101101001
1000101101101000
1000101101111000
1000101101111001
1000101101111011
1000101101111010
1000101101111110
1000101101111111
1000101101111101
1000101101111100
1000101101110100
1000101101110101
1000101101110111
1000101101110110
1000101101110010
1000101101110011
1000101101110001
1000101101110000
1000101101010000
1000101101010001
1000101101010011
1000101101010010
1000101101010110
1000101101010111
1000101101010101
1000101101010100
1000101101011100
1000101101011101
1000101101011111
1000101101011110
1000101101011010
1000101101011011
1000101101011001
1000101101011000
1000101101001000
1000101101001001
1000101101001011
1000101101001010
1000101101001110
1000101101001111
1000101101001101
1000101101001100
1000101101000100
1000101101000101
1000101101000111
1000101101000110
1000101101000010
1000101101000011
1000101101000001
1000101101000000
1000101111000000
1000101111000001
1000101111000011
1000101111000010
1000101111000110
1000101111000111
1000101111000101
1000101111000100
1000101111001100
1000101111001101
1000101111001111
1000101111001110
1000101111001010
1000101111001011
1000101111001001
1000101111001000
1000101111011000
1000101111011001
1000101111011011
1000101111011010
1000101111011110
1000101111011111
1000101111011101
1000101111011100
1000101111010100
1000101111010101
1000101111010111
1000101111010110
1000101111010010
1000101111010011
1000101111010001
1000101111010000
1000101111110000
1000101111110001
1000101111110011
1000101111110010
1000101111110110
1000101111110111
1000101111110101
1000101111110100
1000101111111100
1000101111111101
1000101111111111
1000101111111110
1000101111111010
1000101111111011
1000101111111001
1000101111111000
1000101111101000
1000101111101001
1000101111101011
1000101111101010
1000101111101110
1000101111101111
1000101111101101
1000101111101100
1000101111100100
1000101111100101
1000101111100111
1000101111100110
1000101111100010
1000101111100011
1000101111100001
1000101111100000
1000101110100000
1000101110100001
1000101110100011
1000101110100010
1000101110100110
1000101110100111
1000101110100101
1000101110100100
1000101110101100
1000101110101101
1000101110101111
1000101110101110
1000101110101010
1000101110101011
1000101110101001
1000101110101000
1000101110111000
1000101110111001
1000101110111011
1000101110111010
1000101110111110
1000101110111111
1000101110111101
1000101110111100
1000101110110100
1000101110110101
1000101110110111
1000101110110110
1000101110110010
1000101110110011
1000101110110001
1000101110110000
1000101110010000
1000101110010001
1000101110010011
1000101110010010
1000101110010110
1000101110010111
1000101110010101
1000101110010100
1000101110011100
1000101110011101
1000101110011111
1000101110011110
1000101110011010
1000101110011011
1000101110011001
1000101110011000
1000101110001000
1000101110001001
1000101110001011
1000101110001010
1000101110001110
1000101110001111
1000101110001101
1000101110001100
1000101110000100
1000101110000101
1000101110000111
1000101110000110
1000101110000010
1000101110000011
1000101110000001
1000101110000000
1000101010000000
1000101010000001
1000101010000011
1000101010000010
1000101010000110
1000101010000111
1000101010000101
1000101010000100
1000101010001100
1000101010001101
1000101010001111
1000101010001110
1000101010001010
1000101010001011
1000101010001001
1000101010001000
1000101010011000
1000101010011001
1000101010011011
1000101010011010
1000101010011110
1000101010011111
1000101010011101
1000101010011100
1000101010010100
1000101010010101
1000101010010111
1000101010010110
1000101010010010
1000101010010011
1000101010010001
1000101010010000
1000101010110000
1000101010110001
1000101010110011
1000101010110010
1000101010110110
1000101010110111
1000101010110101
1000101010110100
1000101010111100
1000101010111101
1000101010111111
1000101010111110
1000101010111010
1000101010111011
1000101010111001
1000101010111000
1000101010101000
1000101010101001
1000101010101011
1000101010101010
1000101010101110
1000101010101111
1000101010101101
1000101010101100
1000101010100100
1000101010100101
1000101010100111
1000101010100110
1000101010100010
1000101010100011
1000101010100001
1000101010100000
1000101011100000
1000101011100001
1000101011100011
1000101011100010
1000101011100110
1000101011100111
1000101011100101
1000101011100100
1000101011101100
1000101011101101
1000101011101111
1000101011101110
1000101011101010
1000101011101011
1000101011101001
1000101011101000
1000101011111000
1000101011111001
1000101011111011
1000101011111010
1000101011111110
1000101011111111
1000101011111101
1000101011111100
1000101011110100
1000101011110101
1000101011110111
1000101011110110
1000101011110010
1000101011110011
1000101011110001
1000101011110000
1000101011010000
1000101011010001
1000101011010011
1000101011010010
1000101011010110
1000101011010111
1000101011010101
1000101011010100
1000101011011100
1000101011011101
1000101011011111
1000101011011110
1000101011011010
1000101011011011
1000101011011001
1000101011011000
1000101011001000
1000101011001001
1000101011001011
1000101011001010
1000101011001110
1000101011001111
1000101011001101
1000101011001100
1000101011000100
1000101011000101
1000101011000111
1000101011000110
1000101011000010
1000101011000011
1000101011000001
1000101011000000
1000101001000000
1000101001000001
1000101001000011
1000101001000010
1000101001000110
1000101001000111
1000101001000101
1000101001000100
1000101001001100
1000101001001101
1000101001001111
1000101001001110
1000101001001010
1000101001001011
1000101001001001
1000101001001000
1000101001011000
1000101001011001
1000101001011011
1000101001011010
1000101001011110
1000101001011111
1000101001011101
1000101001011100
1000101001010100
1000101001010101
1000101001010111
1000101001010110
1000101001010010
1000101001010011
1000101001010001
1000101001010000
1000101001110000
1000101001110001
1000101001110011
1000101001110010
1000101001110110
1000101001110111
1000101001110101
1000101001110100
1000101001111100
1000101001111101
1000101001111111
1000101001111110
1000101001111010
1000101001111011
1000101001111001
1000101001111000
1000101001101000
1000101001101001
1000101001101011
1000101001101010
1000101001101110
1000101001101111
1000101001101101
1000101001101100
1000101001100100
1000101001100101
1000101001100111
1000101001100110
1000101001100010
1000101001100011
1000101001100001
1000101001100000
1000101000100000
1000101000100001
1000101000100011
1000101000100010
1000101000100110
1000101000100111
1000101000100101
1000101000100100
1000101000101100
1000101000101101
1000101000101111
1000101000101110
1000101000101010
1000101000101011
1000101000101001
1000101000101000
1000101000111000
1000101000111001
1000101000111011
1000101000111010
1000101000111110
1000101000111111
1000101000111101
1000101000111100
1000101000110100
1000101000110101
1000101000110111
1000101000110110
1000101000110010
1000101000110011
1000101000110001
1000101000110000
1000101000010000
1000101000010001
1000101000010011
1000101000010010
1000101000010110
1000101000010111
1000101000010101
1000101000010100
1000101000011100
1000101000011101
1000101000011111
1000101000011110
1000101000011010
1000101000011011
1000101000011001
1000101000011000
1000101000001000
1000101000001001
1000101000001011
1000101000001010
1000101000001110
1000101000001111
1000101000001101
1000101000001100
1000101000000100
1000101000000101
1000101000000111
1000101000000110
1000101000000010
1000101000000011
1000101000000001
1000101000000000
1000111000000000
1000111000000001
1000111000000011
1000111000000010
1000111000000110
1000111000000111
1000111000000101
1000111000000100
1000111000001100
1000111000001101
1000111000001111
1000111000001110
1000111000001010
1000111000001011
1000111000001001
1000111000001000
1000111000011000
1000111000011001
1000111000011011
1000111000011010
1000111000011110
1000111000011111
1000111000011101
1000111000011100
1000111000010100
1000111000010101
1000111000010111
1000111000010110
1000111000010010
1000111000010011
1000111000010001
1000111000010000
1000111000110000
1000111000110001
1000111000110011
1000111000110010
1000111000110110
1000111000110111
1000111000110101
1000111000110100
1000111000111100
1000111000111101
1000111000111111
1000111000111110
1000111000111010
1000111000111011
1000111000111001
1000111000111000
1000111000101000
1000111000101001
1000111000101011
1000111000101010
1000111000101110
1000111000101111
1000111000101101
1000111000101100
1000111000100100
1000111000100101
1000111000100111
1000111000100110
1000111000100010
1000111000100011
1000111000100001
1000111000100000
1000111001100000
1000111001100001
1000111001100011
1000111001100010
1000111001100110
1000111001100111
1000111001100101
1000111001100100
1000111001101100
1000111001101101
1000111001101111
1000111001101110
1000111001101010
1000111001101011
1000111001101001
1000111001101000
1000111001111000
1000111001111001
1000111001111011
1000111001111010
1000111001111110
1000111001111111
1000111001111101
1000111001111100
1000111001110100
1000111001110101
1000111001110111
1000111001110110
1000111001110010
1000111001110011
1000111001110001
1000111001110000
1000111001010000
1000111001010001
1000111001010011
1000111001010010
1000111001010110
1000111001010111
1000111001010101
1000111001010100
1000111001011100
1000111001011101
1000111001011111
1000111001011110
1000111001011010
1000111001011011
1000111001011001
1000111001011000
1000111001001000
1000111001001001
1000111001001011
1000111001001010
1000111001001110
1000111001001111
1000111001001101
1000111001001100
1000111001000100
1000111001000101
1000111001000111
1000111001000110
1000111001000010
1000111001000011
1000111001000001
1000111001000000
1000111011000000
1000111011000001
1000111011000011
1000111011000010
1000111011000110
1000111011000111
1000111011000101
1000111011000100
1000111011001100
1000111011001101
1000111011001111
1000111011001110
1000111011001010
1000111011001011
1000111011001001
1000111011001000
1000111011011000
1000111011011001
1000111011011011
1000111011011010
1000111011011110
1000111011011111
1000111011011101
1000111011011100
1000111011010100
1000111011010101
1000111011010111
1000111011010110
1000111011010010
1000111011010011
1000111011010001
1000111011010000
1000111011110000
1000111011110001
1000111011110011
1000111011110010
1000111011110110
1000111011110111
1000111011110101
1000111011110100
1000111011111100
1000111011111101
1000111011111111
1000111011111110
1000111011111010
1000111011111011
1000111011111001
1000111011111000
1000111011101000
1000111011101001
1000111011101011
1000111011101010
1000111011101110
1000111011101111
1000111011101101
1000111011101100
1000111011100100
1000111011100101
1000111011100111
1000111011100110
1000111011100010
1000111011100011
1000111011100001
1000111011100000
1000111010100000
1000111010100001
1000111010100011
1000111010100010
1000111010100110
1000111010100111
1000111010100101
1000111010100100
1000111010101100
1000111010101101
1000111010101111
1000111010101110
1000111010101010
1000111010101011
1000111010101001
1000111010101000
1000111010111000
1000111010111001
1000111010111011
1000111010111010
1000111010111110
1000111010111111
1000111010111101
1000111010111100
1000111010110100
1000111010110101
1000111010110111
1000111010110110
1000111010110010
1000111010110011
1000111010110001
1000111010110000
1000111010010000
1000111010010001
1000111010010011
1000111010010010
1000111010010110
1000111010010111
1000111010010101
1000111010010100
1000111010011100
1000111010011101
1000111010011111
1000111010011110
1000111010011010
1000111010011011
1000111010011001
1000111010011000
1000111010001000
1000111010001001
1000111010001011
1000111010001010
1000111010001110
1000111010001111
1000111010001101
1000111010001100
1000111010000100
1000111010000101
1000111010000111
1000111010000110
1000111010000010
1000111010000011
1000111010000001
1000111010000000
1000111110000000
1000111110000001
1000111110000011
1000111110000010
1000111110000110
1000111110000111
1000111110000101
1000111110000100
1000111110001100
1000111110001101
1000111110001111
1000111110001110
1000111110001010
1000111110001011
1000111110001001
1000111110001000
1000111110011000
1000111110011001
1000111110011011
1000111110011010
1000111110011110
1000111110011111
1000111110011101
1000111110011100
1000111110010100
1000111110010101
1000111110010111
1000111110010110
1000111110010010
1000111110010011
1000111110010001
1000111110010000
1000111110110000
1000111110110001
1000111110110011
1000111110110010
1000111110110110
1000111110110111
1000111110110101
1000111110110100
1000111110111100
1000111110111101
1000111110111111
1000111110111110
1000111110111010
1000111110111011
1000111110111001
1000111110111000
1000111110101000
1000111110101001
1000111110101011
1000111110101010
1000111110101110
1000111110101111
1000111110101101
1000111110101100
1000111110100100
1000111110100101
1000111110100111
1000111110100110
1000111110100010
1000111110100011
1000111110100001
1000111110100000
1000111111100000
1000111111100001
1000111111100011
1000111111100010
1000111111100110
1000111111100111
1000111111100101
1000111111100100
1000111111101100
1000111111101101
1000111111101111
1000111111101110
1000111111101010
1000111111101011
1000111111101001
1000111111101000
1000111111111000
1000111111111001
1000111111111011
1000111111111010
1000111111111110
1000111111111111
1000111111111101
1000111111111100
1000111111110100
1000111111110101
1000111111110111
1000111111110110
1000111111110010
1000111111110011
1000111111110001
1000111111110000
1000111111010000
1000111111010001
1000111111010011
1000111111010010
1000111111010110
1000111111010111
1000111111010101
1000111111010100
1000111111011100
1000111111011101
1000111111011111
1000111111011110
1000111111011010
1000111111011011
1000111111011001
1000111111011000
1000111111001000
1000111111001001
1000111111001011
1000111111001010
1000111111001110
1000111111001111
1000111111001101
1000111111001100
1000111111000100
1000111111000101
1000111111000111
1000111111000110
1000111111000010
1000111111000011
1000111111000001
1000111111000000
1000111101000000
1000111101000001
1000111101000011
1000111101000010
1000111101000110
1000111101000111
1000111101000101
1000111101000100
1000111101001100
1000111101001101
1000111101001111
1000111101001110
1000111101001010
1000111101001011
1000111101001001
1000111101001000
1000111101011000
1000111101011001
1000111101011011
1000111101011010
1000111101011110
1000111101011111
1000111101011101
1000111101011100
1000111101010100
1000111101010101
1000111101010111
1000111101010110
1000111101010010
1000111101010011
1000111101010001
1000111101010000
1000111101110000
1000111101110001
1000111101110011
1000111101110010
1000111101110110
1000111101110111
1000111101110101
1000111101110100
1000111101111100
1000111101111101
1000111101111111
1000111101111110
1000111101111010
1000111101111011
1000111101111001
1000111101111000
1000111101101000
1000111101101001
1000111101101011
1000111101101010
1000111101101110
1000111101101111
1000111101101101
1000111101101100
1000111101100100
1000111101100101
1000111101100111
1000111101100110
1000111101100010
1000111101100011
1000111101100001
1000111101100000
1000111100100000
1000111100100001
1000111100100011
1000111100100010
1000111100100110
1000111100100111
1000111100100101
1000111100100100
1000111100101100
1000111100101101
1000111100101111
1000111100101110
1000111100101010
1000111100101011
1000111100101001
1000111100101000
1000111100111000
1000111100111001
1000111100111011
1000111100111010
1000111100111110
1000111100111111
1000111100111101
1000111100111100
1000111100110100
1000111100110101
1000111100110111
1000111100110110
1000111100110010
1000111100110011
1000111100110001
1000111100110000
1000111100010000
1000111100010001
1000111100010011
1000111100010010
1000111100010110
1000111100010111
1000111100010101
1000111100010100
1000111100011100
1000111100011101
1000111100011111
1000111100011110
1000111100011010
1000111100011011
1000111100011001
1000111100011000
1000111100001000
1000111100001001
1000111100001011
1000111100001010
1000111100001110
1000111100001111
1000111100001101
1000111100001100
1000111100000100
1000111100000101
1000111100000111
1000111100000110
1000111100000010
1000111100000011
1000111100000001
1000111100000000
1000110100000000
1000110100000001
1000110100000011
1000110100000010
1000110100000110
1000110100000111
1000110100000101
1000110100000100
1000110100001100
1000110100001101
1000110100001111
1000110100001110
1000110100001010
1000110100001011
1000110100001001
1000110100001000
1000110100011000
1000110100011001
1000110100011011
1000110100011010
1000110100011110
1000110100011111
1000110100011101
1000110100011100
1000110100010100
1000110100010101
1000110100010111
1000110100010110
1000110100010010
1000110100010011
1000110100010001
1000110100010000
1000110100110000
1000110100110001
1000110100110011
1000110100110010
1000110100110110
1000110100110111
1000110100110101
1000110100110100
1000110100111100
1000110100111101
1000110100111111
1000110100111110
1000110100111010
1000110100111011
1000110100111001
1000110100111000
1000110100101000
1000110100101001
1000110100101011
1000110100101010
1000110100101110
1000110100101111
1000110100101101
1000110100101100
1000110100100100
1000110100100101
1000110100100111
1000110100100110
1000110100100010
1000110100100011
1000110100100001
1000110100100000
1000110101100000
1000110101100001
1000110101100011
1000110101100010
1000110101100110
1000110101100111
1000110101100101
1000110101100100
1000110101101100
1000110101101101
1000110101101111
1000110101101110
1000110101101010
1000110101101011
1000110101101001
1000110101101000
1000110101111000
1000110101111001
1000110101111011
1000110101111010
1000110101111110
1000110101111111
1000110101111101
1000110101111100
1000110101110100
1000110101110101
1000110101110111
1000110101110110
1000110101110010
1000110101110011
1000110101110001
1000110101110000
1000110101010000
1000110101010001
1000110101010011
1000110101010010
1000110101010110
1000110101010111
1000110101010101
1000110101010100
1000110101011100
1000110101011101
1000110101011111
1000110101011110
1000110101011010
1000110101011011
1000110101011001
1000110101011000
1000110101001000
1000110101001001
1000110101001011
1000110101001010
1000110101001110
1000110101001111
1000110101001101
1000110101001100
1000110101000100
1000110101000101
1000110101000111
1000110101000110
1000110101000010
1000110101000011
1000110101000001
1000110101000000
1000110111000000
1000110111000001
1000110111000011
1000110111000010
1000110111000110
1000110111000111
1000110111000101
1000110111000100
1000110111001100
1000110111001101
1000110111001111
1000110111001110
1000110111001010
1000110111001011
1000110111001001
1000110111001000
1000110111011000
1000110111011001
1000110111011011
1000110111011010
1000110111011110
1000110111011111
1000110111011101
1000110111011100
1000110111010100
1000110111010101
1000110111010111
1000110111010110
1000110111010010
1000110111010011
1000110111010001
1000110111010000
1000110111110000
1000110111110001
1000110111110011
1000110111110010
1000110111110110
1000110111110111
1000110111110101
1000110111110100
1000110111111100
1000110111111101
1000110111111111
1000110111111110
1000110111111010
1000110111111011
1000110111111001
1000110111111000
1000110111101000
1000110111101001
1000110111101011
1000110111101010
1000110111101110
1000110111101111
1000110111101101
1000110111101100
1000110111100100
1000110111100101
1000110111100111
1000110111100110
1000110111100010
1000110111100011
1000110111100001
1000110111100000
1000110110100000
1000110110100001
1000110110100011
1000110110100010
1000110110100110
1000110110100111
1000110110100101
1000110110100100
1000110110101100
1000110110101101
1000110110101111
1000110110101110
1000110110101010
1000110110101011
1000110110101001
1000110110101000
1000110110111000
1000110110111001
1000110110111011
1000110110111010
1000110110111110
1000110110111111
1000110110111101
1000110110111100
1000110110110100
1000110110110101
1000110110110111
1000110110110110
1000110110110010
1000110110110011
1000110110110001
1000110110110000
1000110110010000
1000110110010001
1000110110010011
1000110110010010
1000110110010110
1000110110010111
1000110110010101
1000110110010100
1000110110011100
1000110110011101
1000110110011111
1000110110011110
1000110110011010
1000110110011011
1000110110011001
1000110110011000
1000110110001000
1000110110001001
1000110110001011
1000110110001010
1000110110001110
1000110110001111
1000110110001101
1000110110001100
1000110110000100
1000110110000101
1000110110000111
1000110110000110
1000110110000010
1000110110000011
1000110110000001
1000110110000000
1000110010000000
1000110010000001
1000110010000011
1000110010000010
1000110010000110
1000110010000111
1000110010000101
1000110010000100
1000110010001100
1000110010001101
1000110010001111
1000110010001110
1000110010001010
1000110010001011
1000110010001001
1000110010001000
1000110010011000
1000110010011001
1000110010011011
1000110010011010
1000110010011110
1000110010011111
1000110010011101
1000110010011100
1000110010010100
1000110010010101
1000110010010111
1000110010010110
1000110010010010
1000110010010011
1000110010010001
1000110010010000
1000110010110000
1000110010110001
1000110010110011
1000110010110010
1000110010110110
1000110010110111
1000110010110101
1000110010110100
1000110010111100
1000110010111101
1000110010111111
1000110010111110
1000110010111010
1000110010111011
1000110010111001
1000110010111000
1000110010101000
1000110010101001
1000110010101011
1000110010101010
1000110010101110
1000110010101111
1000110010101101
1000110010101100
1000110010100100
1000110010100101
1000110010100111
1000110010100110
1000110010100010
1000110010100011
1000110010100001
1000110010100000
1000110011100000
1000110011100001
1000110011100011
1000110011100010
1000110011100110
1000110011100111
1000110011100101
1000110011100100
1000110011101100
1000110011101101
1000110011101111
1000110011101110
1000110011101010
1000110011101011
1000110011101001
1000110011101000
1000110011111000
1000110011111001
1000110011111011
1000110011111010
1000110011111110
1000110011111111
1000110011111101
1000110011111100
1000110011110100
1000110011110101
1000110011110111
1000110011110110
1000110011110010
1000110011110011
1000110011110001
1000110011110000
1000110011010000
1000110011010001
1000110011010011
1000110011010010
1000110011010110
1000110011010111
1000110011010101
1000110011010100
1000110011011100
1000110011011101
1000110011011111
1000110011011110
1000110011011010
1000110011011011
1000110011011001
1000110011011000
1000110011001000
1000110011001001
1000110011001011
1000110011001010
1000110011001110
1000110011001111
1000110011001101
1000110011001100
1000110011000100
1000110011000101
1000110011000111
1000110011000110
1000110011000010
1000110011000011
1000110011000001
1000110011000000
1000110001000000
1000110001000001
1000110001000011
1000110001000010
1000110001000110
1000110001000111
1000110001000101
1000110001000100
1000110001001100
1000110001001101
1000110001001111
1000110001001110
1000110001001010
1000110001001011
1000110001001001
1000110001001000
1000110001011000
1000110001011001
1000110001011011
1000110001011010
1000110001011110
1000110001011111
1000110001011101
1000110001011100
1000110001010100
1000110001010101
1000110001010111
1000110001010110
1000110001010010
1000110001010011
1000110001010001
1000110001010000
1000110001110000
1000110001110001
1000110001110011
1000110001110010
1000110001110110
1000110001110111
1000110001110101
1000110001110100
1000110001111100
1000110001111101
1000110001111111
1000110001111110
1000110001111010
1000110001111011
1000110001111001
1000110001111000
1000110001101000
1000110001101001
1000110001101011
1000110001101010
1000110001101110
1000110001101111
1000110001101101
1000110001101100
1000110001100100
1000110001100101
1000110001100111
1000110001100110
1000110001100010
1000110001100011
1000110001100001
1000110001100000
1000110000100000
1000110000100001
1000110000100011
1000110000100010
1000110000100110
1000110000100111
1000110000100101
1000110000100100
1000110000101100
1000110000101101
1000110000101111
1000110000101110
1000110000101010
1000110000101011
1000110000101001
1000110000101000
1000110000111000
1000110000111001
1000110000111011
1000110000111010
1000110000111110
1000110000111111
1000110000111101
1000110000111100
1000110000110100
1000110000110101
1000110000110111
1000110000110110
1000110000110010
1000110000110011
1000110000110001
1000110000110000
1000110000010000
1000110000010001
1000110000010011
1000110000010010
1000110000010110
1000110000010111
1000110000010101
1000110000010100
1000110000011100
1000110000011101
1000110000011111
1000110000011110
1000110000011010
1000110000011011
1000110000011001
1000110000011000
1000110000001000
1000110000001001
1000110000001011
1000110000001010
1000110000001110
1000110000001111
1000110000001101
1000110000001100
1000110000000100
1000110000000101
1000110000000111
1000110000000110
1000110000000010
1000110000000011
1000110000000001
1000110000000000
1000010000000000
1000010000000001
1000010000000011
1000010000000010
1000010000000110
1000010000000111
1000010000000101
1000010000000100
1000010000001100
1000010000001101
1000010000001111
1000010000001110
1000010000001010
1000010000001011
1000010000001001
1000010000001000
1000010000011000
1000010000011001
1000010000011011
1000010000011010
1000010000011110
1000010000011111
1000010000011101
1000010000011100
1000010000010100
1000010000010101
1000010000010111
1000010000010110
1000010000010010
1000010000010011
1000010000010001
1000010000010000
1000010000110000
1000010000110001
1000010000110011
1000010000110010
1000010000110110
1000010000110111
1000010000110101
1000010000110100
1000010000111100
1000010000111101
1000010000111111
1000010000111110
1000010000111010
1000010000111011
1000010000111001
1000010000111000
1000010000101000
1000010000101001
1000010000101011
1000010000101010
1000010000101110
1000010000101111
1000010000101101
1000010000101100
1000010000100100
1000010000100101
1000010000100111
1000010000100110
1000010000100010
1000010000100011
1000010000100001
1000010000100000
1000010001100000
1000010001100001
1000010001100011
1000010001100010
1000010001100110
1000010001100111
1000010001100101
1000010001100100
1000010001101100
1000010001101101
1000010001101111
1000010001101110
1000010001101010
1000010001101011
1000010001101001
1000010001101000
1000010001111000
1000010001111001
1000010001111011
1000010001111010
1000010001111110
1000010001111111
1000010001111101
1000010001111100
1000010001110100
1000010001110101
1000010001110111
1000010001110110
1000010001110010
1000010001110011
1000010001110001
1000010001110000
1000010001010000
1000010001010001
1000010001010011
1000010001010010
1000010001010110
1000010001010111
1000010001010101
1000010001010100
1000010001011100
1000010001011101
1000010001011111
1000010001011110
1000010001011010
1000010001011011
1000010001011001
1000010001011000
1000010001001000
1000010001001001
1000010001001011
1000010001001010
1000010001001110
1000010001001111
1000010001001101
1000010001001100
1000010001000100
1000010001000101
1000010001000111
1000010001000110
1000010001000010
1000010001000011
1000010001000001
1000010001000000
1000010011000000
1000010011000001
1000010011000011
1000010011000010
1000010011000110
1000010011000111
1000010011000101
1000010011000100
1000010011001100
1000010011001101
1000010011001111
1000010011001110
1000010011001010
1000010011001011
1000010011001001
1000010011001000
1000010011011000
1000010011011001
1000010011011011
1000010011011010
1000010011011110
1000010011011111
1000010011011101
1000010011011100
1000010011010100
1000010011010101
1000010011010111
1000010011010110
1000010011010010
1000010011010011
1000010011010001
1000010011010000
1000010011110000
1000010011110001
1000010011110011
1000010011110010
1000010011110110
1000010011110111
1000010011110101
1000010011110100
1000010011111100
1000010011111101
1000010011111111
1000010011111110
1000010011111010
1000010011111011
1000010011111001
1000010011111000
1000010011101000
1000010011101001
1000010011101011
1000010011101010
1000010011101110
1000010011101111
1000010011101101
1000010011101100
1000010011100100
1000010011100101
1000010011100111
1000010011100110
1000010011100010
1000010011100011
1000010011100001
1000010011100000
1000010010100000
1000010010100001
1000010010100011
1000010010100010
1000010010100110
1000010010100111
1000010010100101
1000010010100100
1000010010101100
1000010010101101
1000010010101111
1000010010101110
1000010010101010
1000010010101011
1000010010101001
1000010010101000
1000010010111000
1000010010111001
1000010010111011
1000010010111010
1000010010111110
1000010010111111
1000010010111101
1000010010111100
1000010010110100
1000010010110101
1000010010110111
1000010010110110
1000010010110010
1000010010110011
1000010010110001
1000010010110000
1000010010010000
1000010010010001
1000010010010011
1000010010010010
1000010010010110
1000010010010111
1000010010010101
1000010010010100
1000010010011100
1000010010011101
1000010010011111
1000010010011110
1000010010011010
1000010010011011
1000010010011001
1000010010011000
1000010010001000
1000010010001001
1000010010001011
1000010010001010
1000010010001110
1000010010001111
1000010010001101
1000010010001100
1000010010000100
1000010010000101
1000010010000111
1000010010000110
1000010010000010
1000010010000011
1000010010000001
1000010010000000
1000010110000000
1000010110000001
1000010110000011
1000010110000010
1000010110000110
1000010110000111
1000010110000101
1000010110000100
1000010110001100
1000010110001101
1000010110001111
1000010110001110
1000010110001010
1000010110001011
1000010110001001
1000010110001000
1000010110011000
1000010110011001
1000010110011011
1000010110011010
1000010110011110
1000010110011111
1000010110011101
1000010110011100
1000010110010100
1000010110010101
1000010110010111
1000010110010110
1000010110010010
1000010110010011
1000010110010001
1000010110010000
1000010110110000
1000010110110001
1000010110110011
1000010110110010
1000010110110110
1000010110110111
1000010110110101
1000010110110100
1000010110111100
1000010110111101
1000010110111111
1000010110111110
1000010110111010
1000010110111011
1000010110111001
1000010110111000
1000010110101000
1000010110101001
1000010110101011
1000010110101010
1000010110101110
1000010110101111
1000010110101101
1000010110101100
1000010110100100
1000010110100101
1000010110100111
1000010110100110
1000010110100010
1000010110100011
1000010110100001
1000010110100000
1000010111100000
1000010111100001
1000010111100011
1000010111100010
1000010111100110
1000010111100111
1000010111100101
1000010111100100
1000010111101100
1000010111101101
1000010111101111
1000010111101110
1000010111101010
1000010111101011
1000010111101001
1000010111101000
1000010111111000
1000010111111001
1000010111111011
1000010111111010
1000010111111110
1000010111111111
1000010111111101
1000010111111100
1000010111110100
1000010111110101
1000010111110111
1000010111110110
1000010111110010
1000010111110011
1000010111110001
1000010111110000
1000010111010000
1000010111010001
1000010111010011
1000010111010010
1000010111010110
1000010111010111
1000010111010101
1000010111010100
1000010111011100
1000010111011101
1000010111011111
1000010111011110
1000010111011010
1000010111011011
1000010111011001
1000010111011000
1000010111001000
1000010111001001
1000010111001011
1000010111001010
1000010111001110
1000010111001111
1000010111001101
1000010111001100
1000010111000100
1000010111000101
1000010111000111
1000010111000110
1000010111000010
1000010111000011
1000010111000001
1000010111000000
1000010101000000
1000010101000001
1000010101000011
1000010101000010
1000010101000110
1000010101000111
1000010101000101
1000010101000100
1000010101001100
1000010101001101
1000010101001111
1000010101001110
1000010101001010
1000010101001011
1000010101001001
1000010101001000
1000010101011000
1000010101011001
1000010101011011
1000010101011010
1000010101011110
1000010101011111
1000010101011101
1000010101011100
1000010101010100
1000010101010101
1000010101010111
1000010101010110
1000010101010010
1000010101010011
1000010101010001
1000010101010000
1000010101110000
1000010101110001
1000010101110011
1000010101110010
1000010101110110
1000010101110111
1000010101110101
1000010101110100
1000010101111100
1000010101111101
1000010101111111
1000010101111110
1000010101111010
1000010101111011
1000010101111001
1000010101111000
1000010101101000
1000010101101001
1000010101101011
1000010101101010
1000010101101110
1000010101101111
1000010101101101
1000010101101100
1000010101100100
1000010101100101
1000010101100111
1000010101100110
1000010101100010
1000010101100011
1000010101100001
1000010101100000
1000010100100000
1000010100100001
1000010100100011
1000010100100010
1000010100100110
1000010100100111
1000010100100101
1000010100100100
1000010100101100
1000010100101101
1000010100101111
1000010100101110
1000010100101010
1000010100101011
1000010100101001
1000010100101000
1000010100111000
1000010100111001
1000010100111011
1000010100111010
1000010100111110
1000010100111111
1000010100111101
1000010100111100
1000010100110100
1000010100110101
1000010100110111
1000010100110110
1000010100110010
1000010100110011
1000010100110001
1000010100110000
1000010100010000
1000010100010001
1000010100010011
1000010100010010
1000010100010110
1000010100010111
1000010100010101
1000010100010100
1000010100011100
1000010100011101
1000010100011111
1000010100011110
1000010100011010
1000010100011011
1000010100011001
1000010100011000
1000010100001000
1000010100001001
1000010100001011
1000010100001010
1000010100001110
1000010100001111
1000010100001101
1000010100001100
1000010100000100
1000010100000101
1000010100000111
1000010100000110
1000010100000010
1000010100000011
1000010100000001
1000010100000000
1000011100000000
1000011100000001
1000011100000011
1000011100000010
1000011100000110
1000011100000111
1000011100000101
1000011100000100
1000011100001100
1000011100001101
1000011100001111
1000011100001110
1000011100001010
1000011100001011
1000011100001001
1000011100001000
1000011100011000
1000011100011001
1000011100011011
1000011100011010
1000011100011110
1000011100011111
1000011100011101
1000011100011100
1000011100010100
1000011100010101
1000011100010111
1000011100010110
1000011100010010
1000011100010011
1000011100010001
1000011100010000
1000011100110000
1000011100110001
1000011100110011
1000011100110010
1000011100110110
1000011100110111
1000011100110101
1000011100110100
1000011100111100
1000011100111101
1000011100111111
1000011100111110
1000011100111010
1000011100111011
1000011100111001
1000011100111000
1000011100101000
1000011100101001
1000011100101011
1000011100101010
1000011100101110
1000011100101111
1000011100101101
1000011100101100
1000011100100100
1000011100100101
1000011100100111
1000011100100110
1000011100100010
1000011100100011
1000011100100001
1000011100100000
1000011101100000
1000011101100001
1000011101100011
1000011101100010
1000011101100110
1000011101100111
1000011101100101
1000011101100100
1000011101101100
1000011101101101
1000011101101111
1000011101101110
1000011101101010
1000011101101011
1000011101101001
1000011101101000
1000011101111000
1000011101111001
1000011101111011
1000011101111010
1000011101111110
1000011101111111
1000011101111101
1000011101111100
1000011101110100
1000011101110101
1000011101110111
1000011101110110
1000011101110010
1000011101110011
1000011101110001
1000011101110000
1000011101010000
1000011101010001
1000011101010011
1000011101010010
1000011101010110
1000011101010111
1000011101010101
1000011101010100
1000011101011100
1000011101011101
1000011101011111
1000011101011110
1000011101011010
1000011101011011
1000011101011001
1000011101011000
1000011101001000
1000011101001001
1000011101001011
1000011101001010
1000011101001110
1000011101001111
1000011101001101
1000011101001100
1000011101000100
1000011101000101
1000011101000111
1000011101000110
1000011101000010
1000011101000011
1000011101000001
1000011101000000
1000011111000000
1000011111000001
1000011111000011
1000011111000010
1000011111000110
1000011111000111
1000011111000101
1000011111000100
1000011111001100
1000011111001101
1000011111001111
1000011111001110
1000011111001010
1000011111001011
1000011111001001
1000011111001000
1000011111011000
1000011111011001
1000011111011011
1000011111011010
1000011111011110
1000011111011111
1000011111011101
1000011111011100
1000011111010100
1000011111010101
1000011111010111
1000011111010110
1000011111010010
1000011111010011
1000011111010001
1000011111010000
1000011111110000
1000011111110001
1000011111110011
1000011111110010
1000011111110110
1000011111110111
1000011111110101
1000011111110100
1000011111111100
1000011111111101
1000011111111111
1000011111111110
1000011111111010
1000011111111011
1000011111111001
1000011111111000
1000011111101000
1000011111101001
1000011111101011
1000011111101010
1000011111101110
1000011111101111
1000011111101101
1000011111101100
1000011111100100
1000011111100101
1000011111100111
1000011111100110
1000011111100010
1000011111100011
1000011111100001
1000011111100000
1000011110100000
1000011110100001
1000011110100011
1000011110100010
1000011110100110
1000011110100111
1000011110100101
1000011110100100
1000011110101100
1000011110101101
1000011110101111
1000011110101110
1000011110101010
1000011110101011
1000011110101001
1000011110101000
1000011110111000
1000011110111001
1000011110111011
1000011110111010
1000011110111110
1000011110111111
1000011110111101
1000011110111100
1000011110110100
1000011110110101
1000011110110111
1000011110110110
1000011110110010
1000011110110011
1000011110110001
1000011110110000
1000011110010000
1000011110010001
1000011110010011
1000011110010010
1000011110010110
1000011110010111
1000011110010101
1000011110010100
1000011110011100
1000011110011101
1000011110011111
1000011110011110
1000011110011010
1000011110011011
1000011110011001
1000011110011000
1000011110001000
1000011110001001
1000011110001011
1000011110001010
1000011110001110
1000011110001111
1000011110001101
1000011110001100
1000011110000100
1000011110000101
1000011110000111
1000011110000110
1000011110000010
1000011110000011
1000011110000001
1000011110000000
1000011010000000
1000011010000001
1000011010000011
1000011010000010
1000011010000110
1000011010000111
1000011010000101
1000011010000100
1000011010001100
1000011010001101
1000011010001111
1000011010001110
1000011010001010
1000011010001011
1000011010001001
1000011010001000
1000011010011000
1000011010011001
1000011010011011
1000011010011010
1000011010011110
1000011010011111
1000011010011101
1000011010011100
1000011010010100
1000011010010101
1000011010010111
1000011010010110
1000011010010010
1000011010010011
1000011010010001
1000011010010000
1000011010110000
1000011010110001
1000011010110011
1000011010110010
1000011010110110
1000011010110111
1000011010110101
1000011010110100
1000011010111100
1000011010111101
1000011010111111
1000011010111110
1000011010111010
1000011010111011
1000011010111001
1000011010111000
1000011010101000
1000011010101001
1000011010101011
1000011010101010
1000011010101110
1000011010101111
1000011010101101
1000011010101100
1000011010100100
1000011010100101
1000011010100111
1000011010100110
1000011010100010
1000011010100011
1000011010100001
1000011010100000
1000011011100000
1000011011100001
1000011011100011
1000011011100010
1000011011100110
1000011011100111
1000011011100101
1000011011100100
1000011011101100
1000011011101101
1000011011101111
1000011011101110
1000011011101010
1000011011101011
1000011011101001
1000011011101000
1000011011111000
1000011011111001
1000011011111011
1000011011111010
1000011011111110
1000011011111111
1000011011111101
1000011011111100
1000011011110100
1000011011110101
1000011011110111
1000011011110110
1000011011110010
1000011011110011
1000011011110001
1000011011110000
1000011011010000
1000011011010001
1000011011010011
1000011011010010
1000011011010110
1000011011010111
1000011011010101
1000011011010100
1000011011011100
1000011011011101
1000011011011111
1000011011011110
1000011011011010
1000011011011011
1000011011011001
1000011011011000
1000011011001000
1000011011001001
1000011011001011
1000011011001010
1000011011001110
1000011011001111
1000011011001101
1000011011001100
1000011011000100
1000011011000101
1000011011000111
1000011011000110
1000011011000010
1000011011000011
1000011011000001
1000011011000000
1000011001000000
1000011001000001
1000011001000011
1000011001000010
1000011001000110
1000011001000111
1000011001000101
1000011001000100
1000011001001100
1000011001001101
1000011001001111
1000011001001110
1000011001001010
1000011001001011
1000011001001001
1000011001001000
1000011001011000
1000011001011001
1000011001011011
1000011001011010
1000011001011110
1000011001011111
1000011001011101
1000011001011100
1000011001010100
1000011001010101
1000011001010111
1000011001010110
1000011001010010
1000011001010011
1000011001010001
1000011001010000
1000011001110000
1000011001110001
1000011001110011
1000011001110010
1000011001110110
1000011001110111
1000011001110101
1000011001110100
1000011001111100
1000011001111101
1000011001111111
1000011001111110
1000011001111010
1000011001111011
1000011001111001
1000011001111000
1000011001101000
1000011001101001
1000011001101011
1000011001101010
1000011001101110
1000011001101111
1000011001101101
1000011001101100
1000011001100100
1000011001100101
1000011001100111
1000011001100110
1000011001100010
1000011001100011
1000011001100001
1000011001100000
1000011000100000
1000011000100001
1000011000100011
1000011000100010
1000011000100110
1000011000100111
1000011000100101
1000011000100100
1000011000101100
1000011000101101
1000011000101111
1000011000101110
1000011000101010
1000011000101011
1000011000101001
1000011000101000
1000011000111000
1000011000111001
1000011000111011
1000011000111010
1000011000111110
1000011000111111
1000011000111101
1000011000111100
1000011000110100
1000011000110101
1000011000110111
1000011000110110
1000011000110010
1000011000110011
1000011000110001
1000011000110000
1000011000010000
1000011000010001
1000011000010011
1000011000010010
1000011000010110
1000011000010111
1000011000010101
1000011000010100
1000011000011100
1000011000011101
1000011000011111
1000011000011110
1000011000011010
1000011000011011
1000011000011001
1000011000011000
1000011000001000
1000011000001001
1000011000001011
1000011000001010
1000011000001110
1000011000001111
1000011000001101
1000011000001100
1000011000000100
1000011000000101
1000011000000111
1000011000000110
1000011000000010
1000011000000011
1000011000000001
1000011000000000
1000001000000000
1000001000000001
1000001000000011
1000001000000010
1000001000000110
1000001000000111
1000001000000101
1000001000000100
1000001000001100
1000001000001101
1000001000001111
1000001000001110
1000001000001010
1000001000001011
1000001000001001
1000001000001000
1000001000011000
1000001000011001
1000001000011011
1000001000011010
1000001000011110
1000001000011111
1000001000011101
1000001000011100
1000001000010100
1000001000010101
1000001000010111
1000001000010110
1000001000010010
1000001000010011
1000001000010001
1000001000010000
1000001000110000
1000001000110001
1000001000110011
1000001000110010
1000001000110110
1000001000110111
1000001000110101
1000001000110100
1000001000111100
1000001000111101
1000001000111111
1000001000111110
1000001000111010
1000001000111011
1000001000111001
1000001000111000
1000001000101000
1000001000101001
1000001000101011
1000001000101010
1000001000101110
1000001000101111
1000001000101101
1000001000101100
1000001000100100
1000001000100101
1000001000100111
1000001000100110
1000001000100010
1000001000100011
1000001000100001
1000001000100000
1000001001100000
1000001001100001
1000001001100011
1000001001100010
1000001001100110
1000001001100111
1000001001100101
1000001001100100
1000001001101100
1000001001101101
1000001001101111
1000001001101110
1000001001101010
1000001001101011
1000001001101001
1000001001101000
1000001001111000
1000001001111001
1000001001111011
1000001001111010
1000001001111110
1000001001111111
1000001001111101
1000001001111100
1000001001110100
1000001001110101
1000001001110111
1000001001110110
1000001001110010
1000001001110011
1000001001110001
1000001001110000
1000001001010000
1000001001010001
1000001001010011
1000001001010010
1000001001010110
1000001001010111
1000001001010101
1000001001010100
1000001001011100
1000001001011101
1000001001011111
1000001001011110
1000001001011010
1000001001011011
1000001001011001
1000001001011000
1000001001001000
1000001001001001
1000001001001011
1000001001001010
1000001001001110
1000001001001111
1000001001001101
1000001001001100
1000001001000100
1000001001000101
1000001001000111
1000001001000110
1000001001000010
1000001001000011
1000001001000001
1000001001000000
1000001011000000
1000001011000001
1000001011000011
1000001011000010
1000001011000110
1000001011000111
1000001011000101
1000001011000100
1000001011001100
1000001011001101
1000001011001111
1000001011001110
1000001011001010
1000001011001011
1000001011001001
1000001011001000
1000001011011000
1000001011011001
1000001011011011
1000001011011010
1000001011011110
1000001011011111
1000001011011101
1000001011011100
1000001011010100
1000001011010101
1000001011010111
1000001011010110
1000001011010010
1000001011010011
1000001011010001
1000001011010000
1000001011110000
1000001011110001
1000001011110011
1000001011110010
1000001011110110
1000001011110111
1000001011110101
1000001011110100
1000001011111100
1000001011111101
1000001011111111
1000001011111110
1000001011111010
1000001011111011
1000001011111001
1000001011111000
1000001011101000
1000001011101001
1000001011101011
1000001011101010
1000001011101110
1000001011101111
1000001011101101
1000001011101100
1000001011100100
1000001011100101
1000001011100111
1000001011100110
1000001011100010
1000001011100011
1000001011100001
1000001011100000
1000001010100000
1000001010100001
1000001010100011
1000001010100010
1000001010100110
1000001010100111
1000001010100101
1000001010100100
1000001010101100
1000001010101101
1000001010101111
1000001010101110
1000001010101010
1000001010101011
1000001010101001
1000001010101000
1000001010111000
1000001010111001
1000001010111011
1000001010111010
1000001010111110
1000001010111111
1000001010111101
1000001010111100
1000001010110100
1000001010110101
1000001010110111
1000001010110110
1000001010110010
1000001010110011
1000001010110001
1000001010110000
1000001010010000
1000001010010001
1000001010010011
1000001010010010
1000001010010110
1000001010010111
1000001010010101
1000001010010100
1000001010011100
1000001010011101
1000001010011111
1000001010011110
1000001010011010
1000001010011011
1000001010011001
1000001010011000
1000001010001000
1000001010001001
1000001010001011
1000001010001010
1000001010001110
1000001010001111
1000001010001101
1000001010001100
1000001010000100
1000001010000101
1000001010000111
1000001010000110
1000001010000010
1000001010000011
1000001010000001
1000001010000000
1000001110000000
1000001110000001
1000001110000011
1000001110000010
1000001110000110
1000001110000111
1000001110000101
1000001110000100
1000001110001100
1000001110001101
1000001110001111
1000001110001110
1000001110001010
1000001110001011
1000001110001001
1000001110001000
1000001110011000
1000001110011001
1000001110011011
1000001110011010
1000001110011110
1000001110011111
1000001110011101
1000001110011100
1000001110010100
1000001110010101
1000001110010111
1000001110010110
1000001110010010
1000001110010011
1000001110010001
1000001110010000
1000001110110000
1000001110110001
1000001110110011
1000001110110010
1000001110110110
1000001110110111
1000001110110101
1000001110110100
1000001110111100
1000001110111101
1000001110111111
1000001110111110
1000001110111010
1000001110111011
1000001110111001
1000001110111000
1000001110101000
1000001110101001
1000001110101011
1000001110101010
1000001110101110
1000001110101111
1000001110101101
1000001110101100
1000001110100100
1000001110100101
1000001110100111
1000001110100110
1000001110100010
1000001110100011
1000001110100001
1000001110100000
1000001111100000
1000001111100001
1000001111100011
1000001111100010
1000001111100110
1000001111100111
1000001111100101
1000001111100100
1000001111101100
1000001111101101
1000001111101111
1000001111101110
1000001111101010
1000001111101011
1000001111101001
1000001111101000
1000001111111000
1000001111111001
1000001111111011
1000001111111010
1000001111111110
1000001111111111
1000001111111101
1000001111111100
1000001111110100
1000001111110101
1000001111110111
1000001111110110
1000001111110010
1000001111110011
1000001111110001
1000001111110000
1000001111010000
1000001111010001
1000001111010011
1000001111010010
1000001111010110
1000001111010111
1000001111010101
1000001111010100
1000001111011100
1000001111011101
1000001111011111
1000001111011110
1000001111011010
1000001111011011
1000001111011001
1000001111011000
1000001111001000
1000001111001001
1000001111001011
1000001111001010
1000001111001110
1000001111001111
1000001111001101
1000001111001100
1000001111000100
1000001111000101
1000001111000111
1000001111000110
1000001111000010
1000001111000011
1000001111000001
1000001111000000
1000001101000000
1000001101000001
1000001101000011
1000001101000010
1000001101000110
1000001101000111
1000001101000101
1000001101000100
1000001101001100
1000001101001101
1000001101001111
1000001101001110
1000001101001010
1000001101001011
1000001101001001
1000001101001000
1000001101011000
1000001101011001
1000001101011011
1000001101011010
1000001101011110
1000001101011111
1000001101011101
1000001101011100
1000001101010100
1000001101010101
1000001101010111
1000001101010110
1000001101010010
1000001101010011
1000001101010001
1000001101010000
1000001101110000
1000001101110001
1000001101110011
1000001101110010
1000001101110110
1000001101110111
1000001101110101
1000001101110100
1000001101111100
1000001101111101
1000001101111111
1000001101111110
1000001101111010
1000001101111011
1000001101111001
1000001101111000
1000001101101000
1000001101101001
1000001101101011
1000001101101010
1000001101101110
1000001101101111
1000001101101101
1000001101101100
1000001101100100
1000001101100101
1000001101100111
1000001101100110
1000001101100010
1000001101100011
1000001101100001
1000001101100000
1000001100100000
1000001100100001
1000001100100011
1000001100100010
1000001100100110
1000001100100111
1000001100100101
1000001100100100
1000001100101100
1000001100101101
1000001100101111
1000001100101110
1000001100101010
1000001100101011
1000001100101001
1000001100101000
1000001100111000
1000001100111001
1000001100111011
1000001100111010
1000001100111110
1000001100111111
1000001100111101
1000001100111100
1000001100110100
1000001100110101
1000001100110111
1000001100110110
1000001100110010
1000001100110011
1000001100110001
1000001100110000
1000001100010000
1000001100010001
1000001100010011
1000001100010010
1000001100010110
1000001100010111
1000001100010101
1000001100010100
1000001100011100
1000001100011101
1000001100011111
1000001100011110
1000001100011010
1000001100011011
1000001100011001
1000001100011000
1000001100001000
1000001100001001
1000001100001011
1000001100001010
1000001100001110
1000001100001111
1000001100001101
1000001100001100
1000001100000100
1000001100000101
1000001100000111
1000001100000110
1000001100000010
1000001100000011
1000001100000001
1000001100000000
1000000100000000
1000000100000001
1000000100000011
1000000100000010
1000000100000110
1000000100000111
1000000100000101
1000000100000100
1000000100001100
1000000100001101
1000000100001111
1000000100001110
1000000100001010
1000000100001011
1000000100001001
1000000100001000
1000000100011000
1000000100011001
1000000100011011
1000000100011010
1000000100011110
1000000100011111
1000000100011101
1000000100011100
1000000100010100
1000000100010101
1000000100010111
1000000100010110
1000000100010010
1000000100010011
1000000100010001
1000000100010000
1000000100110000
1000000100110001
1000000100110011
1000000100110010
1000000100110110
1000000100110111
1000000100110101
1000000100110100
1000000100111100
1000000100111101
1000000100111111
1000000100111110
1000000100111010
1000000100111011
1000000100111001
1000000100111000
1000000100101000
1000000100101001
1000000100101011
1000000100101010
1000000100101110
1000000100101111
1000000100101101
1000000100101100
1000000100100100
1000000100100101
1000000100100111
1000000100100110
1000000100100010
1000000100100011
1000000100100001
1000000100100000
1000000101100000
1000000101100001
1000000101100011
1000000101100010
1000000101100110
1000000101100111
1000000101100101
1000000101100100
1000000101101100
1000000101101101
1000000101101111
1000000101101110
1000000101101010
1000000101101011
1000000101101001
1000000101101000
1000000101111000
1000000101111001
1000000101111011
1000000101111010
1000000101111110
1000000101111111
1000000101111101
1000000101111100
1000000101110100
1000000101110101
1000000101110111
1000000101110110
1000000101110010
1000000101110011
1000000101110001
1000000101110000
1000000101010000
1000000101010001
1000000101010011
1000000101010010
1000000101010110
1000000101010111
1000000101010101
1000000101010100
1000000101011100
1000000101011101
1000000101011111
1000000101011110
1000000101011010
1000000101011011
1000000101011001
1000000101011000
1000000101001000
1000000101001001
1000000101001011
1000000101001010
1000000101001110
1000000101001111
1000000101001101
1000000101001100
1000000101000100
1000000101000101
1000000101000111
1000000101000110
1000000101000010
1000000101000011
1000000101000001
1000000101000000
1000000111000000
1000000111000001
1000000111000011
1000000111000010
1000000111000110
1000000111000111
1000000111000101
1000000111000100
1000000111001100
1000000111001101
1000000111001111
1000000111001110
1000000111001010
1000000111001011
1000000111001001
1000000111001000
1000000111011000
1000000111011001
1000000111011011
1000000111011010
1000000111011110
1000000111011111
1000000111011101
1000000111011100
1000000111010100
1000000111010101
1000000111010111
1000000111010110
1000000111010010
1000000111010011
1000000111010001
1000000111010000
1000000111110000
1000000111110001
1000000111110011
1000000111110010
1000000111110110
1000000111110111
1000000111110101
1000000111110100
1000000111111100
1000000111111101
1000000111111111
1000000111111110
1000000111111010
1000000111111011
1000000111111001
1000000111111000
1000000111101000
1000000111101001
1000000111101011
1000000111101010
1000000111101110
1000000111101111
1000000111101101
1000000111101100
1000000111100100
1000000111100101
1000000111100111
1000000111100110
1000000111100010
1000000111100011
1000000111100001
1000000111100000
1000000110100000
1000000110100001
1000000110100011
1000000110100010
1000000110100110
1000000110100111
1000000110100101
1000000110100100
1000000110101100
1000000110101101
1000000110101111
1000000110101110
1000000110101010
1000000110101011
1000000110101001
1000000110101000
1000000110111000
1000000110111001
1000000110111011
1000000110111010
1000000110111110
1000000110111111
1000000110111101
1000000110111100
1000000110110100
1000000110110101
1000000110110111
1000000110110110
1000000110110010
1000000110110011
1000000110110001
1000000110110000
1000000110010000
1000000110010001
1000000110010011
1000000110010010
1000000110010110
1000000110010111
1000000110010101
1000000110010100
1000000110011100
1000000110011101
1000000110011111
1000000110011110
1000000110011010
1000000110011011
1000000110011001
1000000110011000
1000000110001000
1000000110001001
1000000110001011
1000000110001010
1000000110001110
1000000110001111
1000000110001101
1000000110001100
1000000110000100
1000000110000101
1000000110000111
1000000110000110
1000000110000010
1000000110000011
1000000110000001
1000000110000000
1000000010000000
1000000010000001
1000000010000011
1000000010000010
1000000010000110
1000000010000111
1000000010000101
1000000010000100
1000000010001100
1000000010001101
1000000010001111
1000000010001110
1000000010001010
1000000010001011
1000000010001001
1000000010001000
1000000010011000
1000000010011001
1000000010011011
1000000010011010
1000000010011110
1000000010011111
1000000010011101
1000000010011100
1000000010010100
1000000010010101
1000000010010111
1000000010010110
1000000010010010
1000000010010011
1000000010010001
1000000010010000
1000000010110000
1000000010110001
1000000010110011
1000000010110010
1000000010110110
1000000010110111
1000000010110101
1000000010110100
1000000010111100
1000000010111101
1000000010111111
1000000010111110
1000000010111010
1000000010111011
1000000010111001
1000000010111000
1000000010101000
1000000010101001
1000000010101011
1000000010101010
1000000010101110
1000000010101111
1000000010101101
1000000010101100
1000000010100100
1000000010100101
1000000010100111
1000000010100110
1000000010100010
1000000010100011
1000000010100001
1000000010100000
1000000011100000
1000000011100001
1000000011100011
1000000011100010
1000000011100110
1000000011100111
1000000011100101
1000000011100100
1000000011101100
1000000011101101
1000000011101111
1000000011101110
1000000011101010
1000000011101011
1000000011101001
1000000011101000
1000000011111000
1000000011111001
1000000011111011
1000000011111010
1000000011111110
1000000011111111
1000000011111101
1000000011111100
1000000011110100
1000000011110101
1000000011110111
1000000011110110
1000000011110010
1000000011110011
1000000011110001
1000000011110000
1000000011010000
1000000011010001
1000000011010011
1000000011010010
1000000011010110
1000000011010111
1000000011010101
1000000011010100
1000000011011100
1000000011011101
1000000011011111
1000000011011110
1000000011011010
1000000011011011
1000000011011001
1000000011011000
1000000011001000
1000000011001001
1000000011001011
1000000011001010
1000000011001110
1000000011001111
1000000011001101
1000000011001100
1000000011000100
1000000011000101
1000000011000111
1000000011000110
1000000011000010
1000000011000011
1000000011000001
1000000011000000
1000000001000000
1000000001000001
1000000001000011
1000000001000010
1000000001000110
1000000001000111
1000000001000101
1000000001000100
1000000001001100
1000000001001101
1000000001001111
1000000001001110
1000000001001010
1000000001001011
1000000001001001
1000000001001000
1000000001011000
1000000001011001
1000000001011011
1000000001011010
1000000001011110
1000000001011111
1000000001011101
1000000001011100
1000000001010100
1000000001010101
1000000001010111
1000000001010110
1000000001010010
1000000001010011
1000000001010001
1000000001010000
1000000001110000
1000000001110001
1000000001110011
1000000001110010
1000000001110110
1000000001110111
1000000001110101
1000000001110100
1000000001111100
1000000001111101
1000000001111111
1000000001111110
1000000001111010
1000000001111011
1000000001111001
1000000001111000
1000000001101000
1000000001101001
1000000001101011
1000000001101010
1000000001101110
1000000001101111
1000000001101101
1000000001101100
1000000001100100
1000000001100101
1000000001100111
1000000001100110
1000000001100010
1000000001100011
1000000001100001
1000000001100000
1000000000100000
1000000000100001
1000000000100011
1000000000100010
1000000000100110
1000000000100111
1000000000100101
1000000000100100
1000000000101100
1000000000101101
1000000000101111
1000000000101110
1000000000101010
1000000000101011
1000000000101001
1000000000101000
1000000000111000
1000000000111001
1000000000111011
1000000000111010
1000000000111110
1000000000111111
1000000000111101
1000000000111100
1000000000110100
1000000000110101
1000000000110111
1000000000110110
1000000000110010
1000000000110011
1000000000110001
1000000000110000
1000000000010000
1000000000010001
1000000000010011
1000000000010010
1000000000010110
1000000000010111
1000000000010101
1000000000010100
1000000000011100
1000000000011101
1000000000011111
1000000000011110
1000000000011010
1000000000011011
1000000000011001
1000000000011000
1000000000001000
1000000000001001
1000000000001011
1000000000001010
1000000000001110
1000000000001111
1000000000001101
1000000000001100
1000000000000100
1000000000000101
1000000000000111
1000000000000110
1000000000000010
1000000000000011
1000000000000001
1000000000000000
|
daytripper/VASTChallenge2017.ipynb | ###Markdown
VAST Challenge 2017 Mini Challenge 1 1. Introduction In this work we present our solution to the first challenge of the 2017 VAST Challenge, in which contestants are expected to use visual analytics tools to find patterns in the vehicle traffic data of the fictitious Boonsong Lekagul Nature Preserve and relate them to the decline of the park's population of the Rose-crested Blue Pipit bird species. The mini-challenge encourages participants to use visual analytics to identify repeating patterns of vehicles transiting the park and to flag the most suspicious ones with respect to threats to the native species. To support this task, the park provides traffic data containing an identification for each vehicle, the vehicle type and timestamps collected by the many sensors spread across the park's installations. A simplified map of the park with the sensor locations is also provided. Our approach was first to carry out an exploratory data analysis of the dataset to raise starting points for the investigation and then to dive into these hypotheses, testing them with visual analytics tools. To handle the data and the plotting, our team chose Python 3 and a few packages that made the work easier. In this document, created with Jupyter Notebook, we provide both the source code and its execution output, in the hope that the methodology is clear to the reader. All the visualizations created are also displayed inline.
###Code
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
In this article we present our step-by-step investigations, including the hypotheses raised, the visualization choices used to explore them, the code and the analysis, all along this notebook. We hope you enjoy it. All the project code is released under the MIT License and is free for use and redistribution. Attribution is appreciated but not required. More information can be found at the project repository. 2. Tools The project code was written in Python 3, due to the great productivity obtained when handling the data and the possibility of using the same language for data munging and for creating visualizations. Some non-native packages were also used: numpy NumPy is the fundamental package for scientific computing with Python. It contains, among other things, a powerful N-dimensional array object, sophisticated (broadcasting) functions, tools for integrating C/C++ and Fortran code, useful linear algebra, Fourier transform, and random number capabilities.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
pandas pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language.
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
matplotlib Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shell, the jupyter notebook, web application servers, and graphical user interface toolkits.
###Code
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
###Output
_____no_output_____
###Markdown
seaborn Seaborn is a Python visualization library based on matplotlib. It provides a high-level interface for drawing attractive statistical graphics.
###Code
import seaborn as sns
sns.set_style('whitegrid')
###Output
_____no_output_____
###Markdown
graphviz Graphviz is open source graph visualization software. Graph visualization is a way of representing structural information as diagrams of abstract graphs and networks. It has important applications in networking, bioinformatics, software engineering, database and web design, machine learning, and in visual interfaces for other technical domains.
###Code
import graphviz
###Output
_____no_output_____
###Markdown
3. Loading Data We start by loading the data using the pandas module. The timestamps are parsed into proper timestamp objects so that time operations are easier later on.
###Code
raw_dataset = pd.read_csv('../data/Lekagul Sensor Data.csv', parse_dates=["Timestamp"],
date_parser=lambda x: pd.datetime.strptime(x, "%Y-%m-%d %H:%M:%S"))
raw_dataset.head()
###Output
_____no_output_____
###Markdown
4. Traffic distribution analysis Before jumping to conclusions, our team decided that an overview of the dataset and its most relevant traits would be useful to direct our questioning towards the more evident trends in the data. Initially we visualize the occurrence of each car-type across all sensors. At this point we are not discriminating between the different sensor types. This gives a general perspective of what the car-flow distribution by car type looks like and provides our first general insight into the dataset.
###Code
counts = raw_dataset.groupby("car-type").count().sort_values(by='Timestamp', ascending=False)
fig = sns.barplot(data=counts, x='car-id', y=counts.index)
fig.set(xlabel='Traffic Volume', ylabel='Car Type')
sns.plt.title('Traffic Volume Distribution By Car Type')
plt.show()
###Output
_____no_output_____
###Markdown
To complement this visualization, we also show how many cars have crossed each sensor, this time without discriminating between car-types.
###Code
counts = raw_dataset.groupby("gate-name").count().sort_values(by='Timestamp', ascending=False)
fig = sns.barplot(data=counts, x='car-id', y=counts.index)
fig.set(xlabel='Traffic Volume', ylabel='Gate Name')
sns.plt.title('Number of Events Ocurrences By Sensor')
fig.figure.set_size_inches(18,30)
plt.show()
###Output
_____no_output_____
###Markdown
At this point we noticed that the sum of the vehicles counted by the ranger-stop 0 and 2 sensors exceeds the total number of events involving ranger vehicles (type 2P). Therefore, it is confirmed that other visitors reach these ranger areas (a quick check of this is sketched right after the map below). This is easily understood by looking at the park map: these two stops are not surrounded by gates (which delimit non-visitor areas), so visitors are allowed to reach them. If these areas are populated by the Rose-crested Blue Pipit, the park should consider isolating them properly.
###Code
img=mpimg.imread('../data/Lekagul Roadways labeled v2.jpg')
plt.imshow(img )
###Output
_____no_output_____
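###Markdown
Before moving on, here is a minimal sketch of how the claim above can be checked directly. It assumes that the gate names of those stops contain the substring 'ranger-stop' and that ranger vehicles are the ones of car-type '2P', as used later in this notebook:
###Code
# count non-ranger vehicles registered by the ranger-stop sensors
ranger_stop_events = raw_dataset[raw_dataset['gate-name'].str.contains('ranger-stop')]
ranger_stop_events[ranger_stop_events['car-type'] != '2P']['car-type'].value_counts()
###Output
_____no_output_____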
###Markdown
When confronted with this, what immediately came to mind is that an easier way of visualizing the park road configuration would help our analysis. So we manually derived a graph representation of these roads as a csv file (available in this project's 'data' folder) where the first column represents an origin point, the second column a destination point and the third a boolean value indicating whether this path is restricted by gates or not. We then load the data from the csv file:
###Code
roads = pd.read_csv('../data/roads.csv')
###Output
_____no_output_____
###Markdown
And visualize the resulting graph using the graphviz module:
###Code
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
graph = graphviz.Graph()
graph.attr(size='13,10')
graph.node_attr.update(color='lightblue2', style='filled', shape='circle')
locations = set()
for _, row in roads.iterrows():
locations.add(row[0])
locations.add(row[1])
for location in locations:
graph.node(location)
for _, row in roads.iterrows():
if row['restricted'] == 'True':
graph.edge(row[0],row[1], color='red', penwidth='10')
else:
graph.edge(row[0],row[1])
graph
###Output
_____no_output_____
###Markdown
Although a little cluttered, this visualization lets us quickly spot whether a road is restricted (can only be accessed through gates) or not, and might prove useful in our investigation. In fact, observing the graph it becomes clear that some sensors can only be reached by passing through a gate and therefore should only be activated by ranger cars. We then direct our attention to the traffic activity in these areas. The sensors we'll investigate correspond, in the visualization, to those that are only connected to the graph by red edges (restricted roads); they are: Ranger-base, ranger-stop 1, ranger-stop 3, ranger-stop 5, ranger-stop 6, ranger-stop 7
###Code
forbidden = set(['Ranger-base', 'ranger-stop 1', 'ranger-stop 3', 'ranger-stop 5', 'ranger-stop 6', 'ranger-stop 7'])
without_rangers = raw_dataset.reindex(columns=['car-type','gate-name'])
without_rangers = without_rangers[without_rangers['car-type'] != '2P']
trespassers = without_rangers['gate-name'].isin(forbidden).value_counts()
trespassers
###Output
_____no_output_____
###Markdown
And, to our sadness, we discover that there are no trespassing vehicles in the restricted areas. 5. Time Distributions Analysis A time distribution of our data entries could be useful for revealing inconsistent records and for giving a general picture of the traffic volume over the data collection period. We start by extracting the year, month and hour of each sensor entry:
###Code
time_data = raw_dataset
time_data['year'] = pd.DatetimeIndex(raw_dataset['Timestamp']).year
time_data['month'] = pd.DatetimeIndex(raw_dataset['Timestamp']).month
time_data['hour'] = pd.DatetimeIndex(raw_dataset['Timestamp']).hour
time_data.head()
###Output
_____no_output_____
###Markdown
Then we can immediately see the years of data collection:
###Code
fig, ax = plt.subplots()
ax.hist(time_data['year'], bins=2, ec='k', color='#0C77AD')
ax.set_title('Year distribution')
ax.set_xlabel('Year')
ax.set_ylabel('Events Count')
ax.set_xticks(range(2015, 2017, 1))
ax.set_xlim(range(2015, 2017, 1))
ax.get_children()[1].set_color('#54BEB4')
plt.show()
###Output
_____no_output_____
###Markdown
And we learn that the majority of the data was collected in 2015. This led us to a question: is there information available for an entire year, so we can see the distribution of the traffic inside the reserve over a twelve-month period?
###Code
first_year_data = time_data[time_data.year == 2015]
first_year_data = first_year_data.groupby('month').count().sort_index().reindex(columns=['Timestamp'])
first_year_data
second_year_data = time_data[time_data.year == 2016]
second_year_data = second_year_data.groupby('month').count().sort_index().reindex(columns=['Timestamp'])
second_year_data
###Output
_____no_output_____
###Markdown
Unfortunately no, but with a little trick we can still look at the traffic distribution along an entire year. Since the only month present in both years is May, and moreover both event counts for that month are really close, we can build a whole-year series in which May is replaced by the mean of its two counts.
###Code
may_mean = int(round((first_year_data.loc[5]['Timestamp'] + second_year_data.loc[5]['Timestamp']) / 2))
may_mean
whole_year = pd.concat([first_year_data.drop(5), second_year_data.drop(5)])
whole_year = whole_year.reindex(columns=['Timestamp'])
whole_year.loc[5] = may_mean
whole_year = whole_year.sort_index()
###Output
_____no_output_____
###Markdown
And then we can finally plot the whole-year distribution of sensor events!
###Code
fig = sns.barplot(data=whole_year, x=whole_year.index, y=whole_year['Timestamp'])
fig = sns.pointplot(x=whole_year.index, y=whole_year['Timestamp'])
fig.axhline(whole_year['Timestamp'].mean(), color='#947EE5', linestyle='dashed', linewidth=2)
fig.set(xlabel='Month', ylabel='Events detected')
sns.plt.title('Traffic Volume Distribution By Month')
plt.show()
###Output
_____no_output_____
###Markdown
This denotes a tendency of more visitors coming to the park around the middle of the year, between July and September. This raises another relevant question: could the increase in visitor volume in this period influence the reproductive habits of the birds, since this is the northern hemisphere summer? We then proceed to visualize the distribution of sensor events along the hours of the day. During which hours does the traffic in the park peak?
###Code
hours_data = time_data.groupby('hour').count().sort_index()
fig, ax = plt.subplots()
ax.hist(time_data['hour'], bins = 24, ec='k', color='#0C77AD')
ax.plot(hours_data.index, hours_data['Timestamp'], color='#58C994')
ax.axhline(hours_data['Timestamp'].mean(), color='orange', linestyle='dashed', linewidth=2)
ax.set_title('Traffic volume by hours')
ax.set_xlabel('Hour')
ax.set_ylabel('Events Count')
ax.set_xticks(range(0, 24))
plt.show()
###Output
_____no_output_____
###Markdown
And we see that the distribution of events by hour of the day behaves as expected, peaking in the interval from 6am to 6pm. But wait! Even if the rate of visitors passing through the sensors rises in a reasonable way, this doesn't mean we can ignore the fact that a large number of events has been happening at unusual hours.
###Code
strange_hours = hours_data.loc[0:5].append(hours_data.loc[23])
fig = sns.barplot(strange_hours.index, strange_hours['Timestamp'])
fig.set(xlabel='Hour', ylabel='Events count')
sns.plt.title('Events registered at strange hours')
plt.show()
###Output
_____no_output_____
###Markdown
If these were not made by rangers, it could mean illegal or habitat-damaging activities happening during the night. Let's investigate...
###Code
strange_time = time_data.query('hour <= 5').append(time_data.query('hour > 22'))
without_rangers = strange_time.reindex(columns=['car-type','hour','car-id', 'Timestamp', 'gate-name'])[strange_time['car-type'] != '2P']  # drop ranger (2P) vehicles
without_rangers.head()
strange_events = without_rangers.groupby('car-type').count()
fig = sns.barplot(data=strange_events, x=strange_events.index, y=strange_events['Timestamp'])
fig.axhline(strange_events['Timestamp'].mean(), color='#947EE5', linestyle='dashed', linewidth=2)
fig.set(xlabel='Car Type', ylabel='Events detected')
sns.plt.title('Strange Traffic Volume Distribution By Car Type')
plt.show()
###Output
_____no_output_____
###Markdown
And we find a four-axle truck wandering through the park during the night. I bet it was up to no good. Besides that, what can we learn about the time spent inside the park? Do visitors come for a quick visit, or do they spend a long time inside the park grounds?
###Code
without = raw_dataset[raw_dataset['car-type'] != '2P']
time_delta = without.groupby('car-id')['Timestamp'].max() - without.groupby('car-id')['Timestamp'].min()
fig, ax = plt.subplots()
ax.axes.get_xaxis().set_visible(False)
x = time_delta.values/ np.timedelta64(1,'D')
ax.plot(x)
###Output
_____no_output_____
###Markdown
And visually we detect a HUGE outlier. What can we learn about this guy?
###Code
outlier = raw_dataset[raw_dataset['car-id'] == '20155705025759-63'].sort_values(by='Timestamp')
outlier['Timestamp'].max() - outlier['Timestamp'].min()
img=mpimg.imread('../data/hippie.jpg')
plt.imshow(img)
###Output
_____no_output_____ |
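###Markdown
As a side note, the outlier's car-id used above was found by inspection; here is a minimal sketch of how it could be located programmatically from the time_delta series computed earlier:
###Code
# car-id with the longest span between its first and last sensor reading
longest_stay_id = time_delta.idxmax()
print(longest_stay_id, time_delta.max())
###Output
_____no_output_____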
s10071/STEP3-making-rl-pysc2-agent-with-sparse-reward.ipynb | ###Markdown
STEP 3 - Making RL PySC2 Agent with sparse reward
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
0. Running 'Agent code' on Jupyter Notebook
###Code
# unfortunately, PySC2 uses Abseil, which treats python code as if its run like an app
# This does not play well with jupyter notebook
# So we will need to monkeypatch sys.argv
import sys
#sys.argv = ["python", "--map", "AbyssalReef"]
sys.argv = ["python", "--map", "Simple64"]
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run an agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import threading
from absl import app
from absl import flags
from future.builtins import range # pylint: disable=redefined-builtin
from pysc2 import maps
from pysc2.env import available_actions_printer
from pysc2.env import run_loop
from pysc2.env import sc2_env
from pysc2.lib import point_flag
from pysc2.lib import stopwatch
FLAGS = flags.FLAGS
# because of Abseil's horrible design for running code underneath Colabs
# We have to pull out this ugly hack from the hat
if "flags_defined" not in globals():
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
point_flag.DEFINE_point("feature_screen_size", "84",
"Resolution for screen feature layers.")
point_flag.DEFINE_point("feature_minimap_size", "64",
"Resolution for minimap feature layers.")
point_flag.DEFINE_point("rgb_screen_size", None,
"Resolution for rendered screen.")
point_flag.DEFINE_point("rgb_minimap_size", None,
"Resolution for rendered minimap.")
flags.DEFINE_enum("action_space", None, sc2_env.ActionSpace._member_names_, # pylint: disable=protected-access
"Which action space to use. Needed if you take both feature "
"and rgb observations.")
flags.DEFINE_bool("use_feature_units", True,
"Whether to include feature units.")
flags.DEFINE_bool("disable_fog", False, "Whether to disable Fog of War.")
flags.DEFINE_integer("max_agent_steps", 0, "Total agent steps.")
flags.DEFINE_integer("game_steps_per_episode", None, "Game steps per episode.")
flags.DEFINE_integer("max_episodes", 0, "Total episodes.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_float("fps", 22.4, "Frames per second to run the game.")
#flags.DEFINE_string("agent", "sc2.agent.BasicAgent.ZergBasicAgent",
# "Which agent to run, as a python path to an Agent class.")
#flags.DEFINE_enum("agent_race", "zerg", sc2_env.Race._member_names_, # pylint: disable=protected-access
# "Agent 1's race.")
flags.DEFINE_string("agent", "TerranSparseRewardRLAgent",
"Which agent to run, as a python path to an Agent class.")
flags.DEFINE_enum("agent_race", "terran", sc2_env.Race._member_names_, # pylint: disable=protected-access
"Agent 1's race.")
flags.DEFINE_string("agent2", "Bot", "Second agent, either Bot or agent class.")
flags.DEFINE_enum("agent2_race", "terran", sc2_env.Race._member_names_, # pylint: disable=protected-access
"Agent 2's race.")
flags.DEFINE_enum("difficulty", "very_easy", sc2_env.Difficulty._member_names_, # pylint: disable=protected-access
"If agent2 is a built-in Bot, it's strength.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_integer("parallel", 1, "How many instances to run in parallel.")
flags.DEFINE_bool("save_replay", True, "Whether to save a replay at the end.")
flags.DEFINE_string("map", None, "Name of a map to use.")
flags.mark_flag_as_required("map")
flags_defined = True
def run_thread(agent_classes, players, map_name, visualize):
"""Run one thread worth of the environment with agents."""
with sc2_env.SC2Env(
map_name=map_name,
players=players,
agent_interface_format=sc2_env.parse_agent_interface_format(
feature_screen=FLAGS.feature_screen_size,
feature_minimap=FLAGS.feature_minimap_size,
rgb_screen=FLAGS.rgb_screen_size,
rgb_minimap=FLAGS.rgb_minimap_size,
action_space=FLAGS.action_space,
use_feature_units=FLAGS.use_feature_units),
step_mul=FLAGS.step_mul,
game_steps_per_episode=FLAGS.game_steps_per_episode,
disable_fog=FLAGS.disable_fog,
visualize=visualize) as env:
env = available_actions_printer.AvailableActionsPrinter(env)
agents = [agent_cls() for agent_cls in agent_classes]
run_loop.run_loop(agents, env, FLAGS.max_agent_steps, FLAGS.max_episodes)
if FLAGS.save_replay:
env.save_replay(agent_classes[0].__name__)
def main(unused_argv):
"""Run an agent."""
#stopwatch.sw.enabled = FLAGS.profile or FLAGS.trace
#stopwatch.sw.trace = FLAGS.trace
map_inst = maps.get(FLAGS.map)
agent_classes = []
players = []
#agent_module, agent_name = FLAGS.agent.rsplit(".", 1)
#agent_cls = getattr(importlib.import_module(agent_module), agent_name)
#agent_classes.append(agent_cls)
agent_classes.append(TerranSparseRewardRLAgent)
players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent_race]))
if map_inst.players >= 2:
if FLAGS.agent2 == "Bot":
players.append(sc2_env.Bot(sc2_env.Race[FLAGS.agent2_race],
sc2_env.Difficulty[FLAGS.difficulty]))
else:
agent_module, agent_name = FLAGS.agent2.rsplit(".", 1)
agent_cls = getattr(importlib.import_module(agent_module), agent_name)
agent_classes.append(agent_cls)
players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent2_race]))
threads = []
for _ in range(FLAGS.parallel - 1):
t = threading.Thread(target=run_thread,
args=(agent_classes, players, FLAGS.map, False))
threads.append(t)
t.start()
run_thread(agent_classes, players, FLAGS.map, FLAGS.render)
for t in threads:
t.join()
if FLAGS.profile:
pass
#print(stopwatch.sw)
###Output
_____no_output_____
###Markdown
1. Creating an RL PySC2 Agent with Sparse Reward
###Code
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units
from absl import app
DATA_FILE = 'rlagent_with_sparse_reward_learning_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# reference from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
# choose best action
#state_action = self.q_table.ix[observation, :]
state_action = self.q_table.loc[observation, self.q_table.columns[:]]
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
#q_predict = self.q_table.ix[s, a]
q_predict = self.q_table.loc[s, a]
if s_ != 'terminal':
#q_target = r + self.gamma * self.q_table.ix[s_, :].max()
q_target = r + self.gamma * self.q_table.loc[s_, self.q_table.columns[:]].max()
else:
q_target = r # next state is terminal
# update
#self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class TerranSparseRewardRLAgent(base_agent.BaseAgent):
def __init__(self):
super(TerranSparseRewardRLAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def getMeanLocation(self, unitList):
sum_x = 0
sum_y = 0
for unit in unitList:
sum_x += unit.x
sum_y += unit.y
mean_x = sum_x / len(unitList)
mean_y = sum_y / len(unitList)
return [mean_x, mean_y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.feature_units
if unit.unit_type == unit_type]
def can_do(self, obs, action):
return action in obs.observation.available_actions
def step(self, obs):
super(TerranSparseRewardRLAgent, self).step(obs)
#time.sleep(0.5)
if obs.first():
player_y, player_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
print("player_y: ", player_y)
print("player_y.mean(): ", player_y.mean())
print("base_top_left: ", self.base_top_left)
print("smart_actions: ", smart_actions)
ccs = self.get_units_by_type(obs, units.Terran.CommandCenter)
if len(ccs) > 0:
self.cc_x, self.cc_y = self.getMeanLocation(ccs)
cc_count = len(ccs)
supply_depot_count = len(self.get_units_by_type(obs, units.Terran.SupplyDepot))
barracks_count = len(self.get_units_by_type(obs, units.Terran.Barracks))
return actions.FUNCTIONS.no_op()
###Output
_____no_output_____
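###Markdown
For reference, the learn() method of the QLearningTable above implements the standard tabular Q-learning update, with learning rate $\alpha$ (learning_rate) and discount factor $\gamma$ (reward_decay): $Q(s,a) \leftarrow Q(s,a) + \alpha \big( r + \gamma \max_{a'} Q(s', a') - Q(s,a) \big)$. When the next state is 'terminal', the $\gamma \max_{a'} Q(s',a')$ term is dropped and the target is just the reward $r$.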
###Markdown
[run code]
###Code
if __name__ == "__main__":
app.run(main)
###Output
_____no_output_____
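###Markdown
The nested loop that extends smart_actions above only fires when mm_x and mm_y are 31 or 63, so it adds exactly four attack actions aimed at the centres of the minimap quadrants. A quick standalone sketch (not part of the agent) that makes this visible:
###Code
# reproduce the attack-target generation used to build smart_actions
attack_targets = [(mm_x - 16, mm_y - 16)
                  for mm_x in range(0, 64) for mm_y in range(0, 64)
                  if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0]
print(attack_targets)  # [(15, 15), (15, 47), (47, 15), (47, 47)]
###Output
_____no_output_____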
###Markdown
2. Adding 1st Step of Hierarchy Actions
###Code
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units
from absl import app
DATA_FILE = 'rlagent_with_sparse_reward_learning_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# reference from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
# choose best action
#state_action = self.q_table.ix[observation, :]
state_action = self.q_table.loc[observation, self.q_table.columns[:]]
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
#q_predict = self.q_table.ix[s, a]
q_predict = self.q_table.loc[s, a]
if s_ != 'terminal':
#q_target = r + self.gamma * self.q_table.ix[s_, :].max()
q_target = r + self.gamma * self.q_table.loc[s_, self.q_table.columns[:]].max()
else:
q_target = r # next state is terminal
# update
#self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class TerranSparseRewardRLAgent(base_agent.BaseAgent):
def __init__(self):
super(TerranSparseRewardRLAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def getMeanLocation(self, unitList):
sum_x = 0
sum_y = 0
for unit in unitList:
sum_x += unit.x
sum_y += unit.y
mean_x = sum_x / len(unitList)
mean_y = sum_y / len(unitList)
return [mean_x, mean_y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.feature_units
if unit.unit_type == unit_type]
def can_do(self, obs, action):
return action in obs.observation.available_actions
def step(self, obs):
super(TerranSparseRewardRLAgent, self).step(obs)
#time.sleep(0.5)
if obs.first():
player_y, player_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
ccs = self.get_units_by_type(obs, units.Terran.CommandCenter)
if len(ccs) > 0:
self.cc_x, self.cc_y = self.getMeanLocation(ccs)
cc_count = len(ccs)
supply_depot_count = len(self.get_units_by_type(obs, units.Terran.SupplyDepot))
barracks_count = len(self.get_units_by_type(obs, units.Terran.Barracks))
army_supply = obs.observation.player.food_used
if self.move_number == 0:
self.move_number += 1
current_state = np.zeros(8)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = army_supply
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.ENEMY).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
scvs = self.get_units_by_type(obs, units.Terran.SCV)
if len(scvs) > 0:
scv = random.choice(scvs)
if scv.x >= 0 and scv.y >= 0:
return actions.FUNCTIONS.select_point("select", (scv.x,
scv.y))
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
barracks = self.get_units_by_type(obs, units.Terran.Barracks)
if len(barracks) > 0:
barrack = random.choice(barracks)
if barrack.x >= 0 and barrack.y >= 0:
return actions.FUNCTIONS.select_point("select_all_type", (barrack.x,
barrack.y))
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.select_army.id):
return actions.FUNCTIONS.select_army("select")
return actions.FUNCTIONS.no_op()
###Output
_____no_output_____
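###Markdown
The hot_squares part of the state above splits the 64x64 minimap into four quadrants and flags the ones containing enemy units. Below is a small standalone sketch of the index computation for a few hypothetical minimap coordinates (one per quadrant):
###Code
import math
# hypothetical enemy minimap coordinates, one per quadrant
for (x, y) in [(5, 5), (40, 10), (10, 40), (50, 50)]:
    qy = int(math.ceil((y + 1) / 32))
    qx = int(math.ceil((x + 1) / 32))
    print((x, y), '-> hot_squares index', ((qy - 1) * 2) + (qx - 1))
###Output
_____no_output_____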
###Markdown
[run code]
###Code
if __name__ == "__main__":
app.run(main)
###Output
_____no_output_____
###Markdown
3. Adding 2nd Step of Hierarchy Actions
###Code
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units
from absl import app
DATA_FILE = 'rlagent_with_sparse_reward_learning_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# reference from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
# choose best action
#state_action = self.q_table.ix[observation, :]
state_action = self.q_table.loc[observation, self.q_table.columns[:]]
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
#q_predict = self.q_table.ix[s, a]
q_predict = self.q_table.loc[s, a]
if s_ != 'terminal':
#q_target = r + self.gamma * self.q_table.ix[s_, :].max()
q_target = r + self.gamma * self.q_table.loc[s_, self.q_table.columns[:]].max()
else:
q_target = r # next state is terminal
# update
#self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class TerranSparseRewardRLAgent(base_agent.BaseAgent):
def __init__(self):
super(TerranSparseRewardRLAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def getMeanLocation(self, unitList):
sum_x = 0
sum_y = 0
for unit in unitList:
sum_x += unit.x
sum_y += unit.y
mean_x = sum_x / len(unitList)
mean_y = sum_y / len(unitList)
return [mean_x, mean_y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.feature_units
if unit.unit_type == unit_type]
def can_do(self, obs, action):
return action in obs.observation.available_actions
def step(self, obs):
super(TerranSparseRewardRLAgent, self).step(obs)
#time.sleep(0.5)
if obs.first():
player_y, player_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
ccs = self.get_units_by_type(obs, units.Terran.CommandCenter)
if len(ccs) > 0:
self.cc_x, self.cc_y = self.getMeanLocation(ccs)
cc_count = len(ccs)
supply_depot_count = len(self.get_units_by_type(obs, units.Terran.SupplyDepot))
barracks_count = len(self.get_units_by_type(obs, units.Terran.Barracks))
army_supply = obs.observation.player.food_used
if self.move_number == 0:
self.move_number += 1
current_state = np.zeros(8)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = army_supply
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.ENEMY).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
scvs = self.get_units_by_type(obs, units.Terran.SCV)
if len(scvs) > 0:
scv = random.choice(scvs)
if scv.x >= 0 and scv.y >= 0:
return actions.FUNCTIONS.select_point("select", (scv.x,
scv.y))
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
barracks = self.get_units_by_type(obs, units.Terran.Barracks)
if len(barracks) > 0:
barrack = random.choice(barracks)
if barrack.x >= 0 and barrack.y >= 0:
return actions.FUNCTIONS.select_point("select_all_type", (barrack.x,
barrack.y))
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.select_army.id):
return actions.FUNCTIONS.select_army("select")
elif self.move_number == 1:
self.move_number += 1
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if supply_depot_count < 2 and self.can_do(obs, actions.FUNCTIONS.Build_SupplyDepot_screen.id):
if len(ccs) > 0:
if supply_depot_count == 0:
target = self.transformDistance(self.cc_x, -35, self.cc_y, 0)
elif supply_depot_count == 1:
target = self.transformDistance(self.cc_x, -25, self.cc_y, -25)
return actions.FUNCTIONS.Build_SupplyDepot_screen("now", target)
elif smart_action == ACTION_BUILD_BARRACKS:
if barracks_count < 2 and self.can_do(obs, actions.FUNCTIONS.Build_Barracks_screen.id):
if len(ccs) > 0:
if barracks_count == 0:
target = self.transformDistance(self.cc_x, 15, self.cc_y, -9)
elif barracks_count == 1:
target = self.transformDistance(self.cc_x, 15, self.cc_y, 12)
return actions.FUNCTIONS.Build_Barracks_screen("now", target)
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.Train_Marine_quick.id):
return actions.FUNCTIONS.Train_Marine_quick("queued")
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.Attack_minimap.id):
x_offset = random.randint(-1, 1)
y_offset = random.randint(-1, 1)
return actions.FUNCTIONS.Attack_minimap("now", self.transformLocation(int(x) + (x_offset * 8), int(y) + (y_offset * 8)))
return actions.FUNCTIONS.no_op()
###Output
_____no_output_____
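###Markdown
The build and attack targets in the cell above are expressed relative to the agent's own base, and transformDistance / transformLocation mirror them when the base happens to sit at the bottom-right of the map. A minimal standalone sketch of that mirroring (not part of the agent class):
###Code
# same logic as transformLocation: mirror minimap coordinates when the base is not top-left
def transform_location(base_top_left, x, y):
    if not base_top_left:
        return [64 - x, 64 - y]
    return [x, y]

print(transform_location(True, 15, 15))   # base top-left -> [15, 15]
print(transform_location(False, 15, 15))  # base bottom-right -> [49, 49]
###Output
_____no_output_____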
###Markdown
[run code]
###Code
if __name__ == "__main__":
app.run(main)
###Output
_____no_output_____
###Markdown
4. Adding 3rd Step of Hierarchy Actions
###Code
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units
from absl import app
DATA_FILE = 'rlagent_with_sparse_reward_learning_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# reference from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
# choose best action
#state_action = self.q_table.ix[observation, :]
state_action = self.q_table.loc[observation, self.q_table.columns[:]]
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
#q_predict = self.q_table.ix[s, a]
q_predict = self.q_table.loc[s, a]
if s_ != 'terminal':
#q_target = r + self.gamma * self.q_table.ix[s_, :].max()
q_target = r + self.gamma * self.q_table.loc[s_, self.q_table.columns[:]].max()
else:
q_target = r # next state is terminal
# update
#self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class TerranSparseRewardRLAgent(base_agent.BaseAgent):
def __init__(self):
super(TerranSparseRewardRLAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def getMeanLocation(self, unitList):
sum_x = 0
sum_y = 0
for unit in unitList:
sum_x += unit.x
sum_y += unit.y
mean_x = sum_x / len(unitList)
mean_y = sum_y / len(unitList)
return [mean_x, mean_y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.feature_units
if unit.unit_type == unit_type]
def can_do(self, obs, action):
return action in obs.observation.available_actions
def step(self, obs):
super(TerranSparseRewardRLAgent, self).step(obs)
#time.sleep(0.5)
if obs.first():
player_y, player_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
ccs = self.get_units_by_type(obs, units.Terran.CommandCenter)
if len(ccs) > 0:
self.cc_x, self.cc_y = self.getMeanLocation(ccs)
cc_count = len(ccs)
supply_depot_count = len(self.get_units_by_type(obs, units.Terran.SupplyDepot))
barracks_count = len(self.get_units_by_type(obs, units.Terran.Barracks))
army_supply = obs.observation.player.food_used
if self.move_number == 0:
self.move_number += 1
current_state = np.zeros(8)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = army_supply
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.ENEMY).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
scvs = self.get_units_by_type(obs, units.Terran.SCV)
if len(scvs) > 0:
scv = random.choice(scvs)
if scv.x >= 0 and scv.y >= 0:
return actions.FUNCTIONS.select_point("select", (scv.x,
scv.y))
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
barracks = self.get_units_by_type(obs, units.Terran.Barracks)
if len(barracks) > 0:
barrack = random.choice(barracks)
if barrack.x >= 0 and barrack.y >= 0:
return actions.FUNCTIONS.select_point("select_all_type", (barrack.x,
barrack.y))
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.select_army.id):
return actions.FUNCTIONS.select_army("select")
elif self.move_number == 1:
self.move_number += 1
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if supply_depot_count < 2 and self.can_do(obs, actions.FUNCTIONS.Build_SupplyDepot_screen.id):
if len(ccs) > 0:
if supply_depot_count == 0:
target = self.transformDistance(self.cc_x, -35, self.cc_y, 0)
elif supply_depot_count == 1:
target = self.transformDistance(self.cc_x, -25, self.cc_y, -25)
return actions.FUNCTIONS.Build_SupplyDepot_screen("now", target)
elif smart_action == ACTION_BUILD_BARRACKS:
if barracks_count < 2 and self.can_do(obs, actions.FUNCTIONS.Build_Barracks_screen.id):
if len(ccs) > 0:
if barracks_count == 0:
target = self.transformDistance(self.cc_x, 15, self.cc_y, -9)
elif barracks_count == 1:
target = self.transformDistance(self.cc_x, 15, self.cc_y, 12)
return actions.FUNCTIONS.Build_Barracks_screen("now", target)
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.Train_Marine_quick.id):
return actions.FUNCTIONS.Train_Marine_quick("queued")
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.Attack_minimap.id):
x_offset = random.randint(-1, 1)
y_offset = random.randint(-1, 1)
return actions.FUNCTIONS.Attack_minimap("now", self.transformLocation(int(x) + (x_offset * 8), int(y) + (y_offset * 8)))
elif self.move_number == 2:
self.move_number = 0
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.Harvest_Gather_screen.id):
mfs = self.get_units_by_type(obs, units.Neutral.MineralField)
if len(mfs) > 0:
mf = random.choice(mfs)
if mf.x >= 0 and mf.y >= 0:
return actions.FUNCTIONS.Harvest_Gather_screen("now", (mf.x,mf.y))
return actions.FUNCTIONS.no_op()
###Output
_____no_output_____
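###Markdown
With the third step added, each smart action is now spread over a cycle of three agent steps: move_number 0 chooses the action and selects the unit that will perform it, 1 issues the actual build/train/attack command, and 2 sends the worker back to harvesting minerals. A tiny standalone sketch of that cycle:
###Code
# illustrate the three-phase cycle driven by move_number in the agent above
phases = {0: 'choose smart action and select unit',
          1: 'issue build/train/attack command',
          2: 'send SCV back to mineral harvesting'}
move_number = 0
for step in range(6):
    print('agent step', step, '-> move_number', move_number, ':', phases[move_number])
    move_number = (move_number + 1) % 3
###Output
_____no_output_____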
###Markdown
[run code]
###Code
if __name__ == "__main__":
app.run(main)
###Output
_____no_output_____
###Markdown
5. Detecting End of Game
###Code
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units
from absl import app
DATA_FILE = 'rlagent_with_sparse_reward_learning_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# reference from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
# choose best action
#state_action = self.q_table.ix[observation, :]
state_action = self.q_table.loc[observation, self.q_table.columns[:]]
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
#q_predict = self.q_table.ix[s, a]
q_predict = self.q_table.loc[s, a]
if s_ != 'terminal':
#q_target = r + self.gamma * self.q_table.ix[s_, :].max()
q_target = r + self.gamma * self.q_table.loc[s_, self.q_table.columns[:]].max()
else:
q_target = r # next state is terminal
# update
#self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class TerranSparseRewardRLAgent(base_agent.BaseAgent):
def __init__(self):
super(TerranSparseRewardRLAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def getMeanLocation(self, unitList):
sum_x = 0
sum_y = 0
for unit in unitList:
sum_x += unit.x
sum_y += unit.y
mean_x = sum_x / len(unitList)
mean_y = sum_y / len(unitList)
return [mean_x, mean_y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.feature_units
if unit.unit_type == unit_type]
def can_do(self, obs, action):
return action in obs.observation.available_actions
def step(self, obs):
super(TerranSparseRewardRLAgent, self).step(obs)
#time.sleep(0.5)
if obs.last():
reward = obs.reward
self.qlearn.learn(str(self.previous_state), self.previous_action, reward, 'terminal')
self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')
self.previous_action = None
self.previous_state = None
self.move_number = 0
return actions.FUNCTIONS.no_op()
if obs.first():
player_y, player_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
ccs = self.get_units_by_type(obs, units.Terran.CommandCenter)
if len(ccs) > 0:
self.cc_x, self.cc_y = self.getMeanLocation(ccs)
cc_count = len(ccs)
supply_depot_count = len(self.get_units_by_type(obs, units.Terran.SupplyDepot))
barracks_count = len(self.get_units_by_type(obs, units.Terran.Barracks))
army_supply = obs.observation.player.food_used
if self.move_number == 0:
self.move_number += 1
current_state = np.zeros(8)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = army_supply
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.ENEMY).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
scvs = self.get_units_by_type(obs, units.Terran.SCV)
if len(scvs) > 0:
scv = random.choice(scvs)
if scv.x >= 0 and scv.y >= 0:
return actions.FUNCTIONS.select_point("select", (scv.x,
scv.y))
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
barracks = self.get_units_by_type(obs, units.Terran.Barracks)
if len(barracks) > 0:
barrack = random.choice(barracks)
if barrack.x >= 0 and barrack.y >= 0:
return actions.FUNCTIONS.select_point("select_all_type", (barrack.x,
barrack.y))
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.select_army.id):
return actions.FUNCTIONS.select_army("select")
elif self.move_number == 1:
self.move_number += 1
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if supply_depot_count < 2 and self.can_do(obs, actions.FUNCTIONS.Build_SupplyDepot_screen.id):
if len(ccs) > 0:
if supply_depot_count == 0:
target = self.transformDistance(self.cc_x, -35, self.cc_y, 0)
elif supply_depot_count == 1:
target = self.transformDistance(self.cc_x, -25, self.cc_y, -25)
return actions.FUNCTIONS.Build_SupplyDepot_screen("now", target)
elif smart_action == ACTION_BUILD_BARRACKS:
if barracks_count < 2 and self.can_do(obs, actions.FUNCTIONS.Build_Barracks_screen.id):
if len(ccs) > 0:
if barracks_count == 0:
target = self.transformDistance(self.cc_x, 15, self.cc_y, -9)
elif barracks_count == 1:
target = self.transformDistance(self.cc_x, 15, self.cc_y, 12)
return actions.FUNCTIONS.Build_Barracks_screen("now", target)
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.Train_Marine_quick.id):
return actions.FUNCTIONS.Train_Marine_quick("queued")
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.Attack_minimap.id):
x_offset = random.randint(-1, 1)
y_offset = random.randint(-1, 1)
return actions.FUNCTIONS.Attack_minimap("now", self.transformLocation(int(x) + (x_offset * 8), int(y) + (y_offset * 8)))
elif self.move_number == 2:
self.move_number = 0
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.Harvest_Gather_screen.id):
mfs = self.get_units_by_type(obs, units.Neutral.MineralField)
if len(mfs) > 0:
mf = random.choice(mfs)
if mf.x >= 0 and mf.y >= 0:
return actions.FUNCTIONS.Harvest_Gather_screen("queued", (mf.x,mf.y))
return actions.FUNCTIONS.no_op()
###Output
_____no_output_____
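###Markdown
Note that this is where the "sparse reward" of the title comes from: every intermediate learn() call above passes a reward of 0, and the only nonzero signal the agent receives is obs.reward at the final step of the match (the game outcome), applied through $Q(s_T,a_T) \leftarrow Q(s_T,a_T) + \alpha\,(r_T - Q(s_T,a_T))$. That outcome then has to propagate backwards through the Q-table via the $\gamma \max_{a'} Q(s',a')$ term over many episodes.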
###Markdown
[run code]
###Code
if __name__ == "__main__":
app.run(main)
###Output
_____no_output_____
###Markdown
6. Refining - Ignoring Learning When State Does Not Change - Preventing Invalid Actions - Add Our Unit Locations to the State
###Code
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units
from absl import app
DATA_FILE = 'rlagent_with_sparse_reward_learning_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# reference from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
self.disallowed_actions = {}
def choose_action(self, observation, excluded_actions=[]):
self.check_state_exist(observation)
self.disallowed_actions[observation] = excluded_actions
#state_action = self.q_table.ix[observation, :]
#state_action = self.q_table.loc[observation, self.q_table.columns[:]]
state_action = self.q_table.loc[observation, :]
for excluded_action in excluded_actions:
del state_action[excluded_action]
if np.random.uniform() < self.epsilon:
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(state_action.index)
return action
def learn(self, s, a, r, s_):
if s == s_:
return
self.check_state_exist(s_)
self.check_state_exist(s)
#q_predict = self.q_table.ix[s, a]
q_predict = self.q_table.loc[s, a]
#s_rewards = self.q_table.ix[s_, :]
#s_rewards = self.q_table.loc[s_, self.q_table.columns[:]]
s_rewards = self.q_table.loc[s_, :]
if s_ in self.disallowed_actions:
for excluded_action in self.disallowed_actions[s_]:
del s_rewards[excluded_action]
if s_ != 'terminal':
q_target = r + self.gamma * s_rewards.max()
else:
q_target = r # next state is terminal
# update
#self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class TerranSparseRewardRLAgent(base_agent.BaseAgent):
def __init__(self):
super(TerranSparseRewardRLAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def getMeanLocation(self, unitList):
sum_x = 0
sum_y = 0
for unit in unitList:
sum_x += unit.x
sum_y += unit.y
mean_x = sum_x / len(unitList)
mean_y = sum_y / len(unitList)
return [mean_x, mean_y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.feature_units
if unit.unit_type == unit_type]
def can_do(self, obs, action):
return action in obs.observation.available_actions
def step(self, obs):
super(TerranSparseRewardRLAgent, self).step(obs)
#time.sleep(0.5)
if obs.last():
reward = obs.reward
self.qlearn.learn(str(self.previous_state), self.previous_action, reward, 'terminal')
self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')
self.previous_action = None
self.previous_state = None
self.move_number = 0
return actions.FUNCTIONS.no_op()
if obs.first():
player_y, player_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
ccs = self.get_units_by_type(obs, units.Terran.CommandCenter)
if len(ccs) > 0:
self.cc_x, self.cc_y = self.getMeanLocation(ccs)
cc_count = len(ccs)
supply_depot_count = len(self.get_units_by_type(obs, units.Terran.SupplyDepot))
barracks_count = len(self.get_units_by_type(obs, units.Terran.Barracks))
supply_used = obs.observation.player.food_used
supply_limit = obs.observation.player.food_cap
army_supply = obs.observation.player.food_army
worker_supply = obs.observation.player.food_workers
supply_free = supply_limit - supply_used
if self.move_number == 0:
self.move_number += 1
current_state = np.zeros(12)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = army_supply
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.ENEMY).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
green_squares = np.zeros(4)
friendly_y, friendly_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()
for i in range(0, len(friendly_y)):
y = int(math.ceil((friendly_y[i] + 1) / 32))
x = int(math.ceil((friendly_x[i] + 1) / 32))
green_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
green_squares = green_squares[::-1]
for i in range(0, 4):
current_state[i + 8] = green_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
excluded_actions = []
if supply_depot_count == 2 or worker_supply == 0:
excluded_actions.append(1)
if supply_depot_count == 0 or barracks_count == 2 or worker_supply == 0:
excluded_actions.append(2)
if supply_free == 0 or barracks_count == 0:
excluded_actions.append(3)
if army_supply == 0:
excluded_actions.append(4)
excluded_actions.append(5)
excluded_actions.append(6)
excluded_actions.append(7)
rl_action = self.qlearn.choose_action(str(current_state), excluded_actions)
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
scvs = self.get_units_by_type(obs, units.Terran.SCV)
if len(scvs) > 0:
scv = random.choice(scvs)
if scv.x >= 0 and scv.y >= 0:
return actions.FUNCTIONS.select_point("select", (scv.x,
scv.y))
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
barracks = self.get_units_by_type(obs, units.Terran.Barracks)
if len(barracks) > 0:
barrack = random.choice(barracks)
if barrack.x >= 0 and barrack.y >= 0:
return actions.FUNCTIONS.select_point("select_all_type", (barrack.x,
barrack.y))
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.select_army.id):
return actions.FUNCTIONS.select_army("select")
elif self.move_number == 1:
self.move_number += 1
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if supply_depot_count < 2 and self.can_do(obs, actions.FUNCTIONS.Build_SupplyDepot_screen.id):
if len(ccs) > 0:
if supply_depot_count == 0:
target = self.transformDistance(self.cc_x, -35, self.cc_y, 0)
elif supply_depot_count == 1:
target = self.transformDistance(self.cc_x, -25, self.cc_y, -25)
return actions.FUNCTIONS.Build_SupplyDepot_screen("now", target)
elif smart_action == ACTION_BUILD_BARRACKS:
if barracks_count < 2 and self.can_do(obs, actions.FUNCTIONS.Build_Barracks_screen.id):
if len(ccs) > 0:
if barracks_count == 0:
target = self.transformDistance(self.cc_x, 15, self.cc_y, -9)
elif barracks_count == 1:
target = self.transformDistance(self.cc_x, 15, self.cc_y, 12)
return actions.FUNCTIONS.Build_Barracks_screen("now", target)
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.Train_Marine_quick.id):
return actions.FUNCTIONS.Train_Marine_quick("queued")
elif smart_action == ACTION_ATTACK:
if self.can_do(obs, actions.FUNCTIONS.Attack_minimap.id):
x_offset = random.randint(-1, 1)
y_offset = random.randint(-1, 1)
return actions.FUNCTIONS.Attack_minimap("now", self.transformLocation(int(x) + (x_offset * 8), int(y) + (y_offset * 8)))
elif self.move_number == 2:
self.move_number = 0
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.Harvest_Gather_screen.id):
mfs = self.get_units_by_type(obs, units.Neutral.MineralField)
if len(mfs) > 0:
mf = random.choice(mfs)
if mf.x >= 0 and mf.y >= 0:
return actions.FUNCTIONS.Harvest_Gather_screen("queued", (mf.x,mf.y))
return actions.FUNCTIONS.no_op()
###Output
_____no_output_____
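###Markdown
The "[run code]" cell below calls `app.run(main)` and therefore assumes a `main` function defined earlier in the notebook. As a hedged sketch only (not the author's exact code), a typical PySC2 entry point for this agent might look like the following; the map name, opponent race/difficulty, and screen/minimap sizes are assumptions.
###Code
# Hedged sketch of a possible main() for TerranSparseRewardRLAgent; assumed values are marked.
def main(unused_argv):
    agent = TerranSparseRewardRLAgent()
    try:
        while True:
            with sc2_env.SC2Env(
                    map_name="Simple64",  # assumed map
                    players=[sc2_env.Agent(sc2_env.Race.terran),
                             sc2_env.Bot(sc2_env.Race.terran,
                                         sc2_env.Difficulty.very_easy)],  # assumed opponent
                    agent_interface_format=features.AgentInterfaceFormat(
                        feature_dimensions=features.Dimensions(screen=84, minimap=64),  # assumed sizes
                        use_feature_units=True),
                    step_mul=8,
                    game_steps_per_episode=0,
                    visualize=False) as env:
                agent.setup(env.observation_spec(), env.action_spec())
                timesteps = env.reset()
                agent.reset()
                while True:
                    step_actions = [agent.step(timesteps[0])]
                    if timesteps[0].last():
                        break
                    timesteps = env.step(step_actions)
    except KeyboardInterrupt:
        pass
###Output
_____no_output_____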
###Markdown
[run code]
###Code
if __name__ == "__main__":
app.run(main)
###Output
_____no_output_____ |
notebooks/Firing Rate Models.ipynb | ###Markdown
Import Packages
###Code
import os
import numpy as np
import matplotlib.pyplot as plt
import quantities as pq
import neo
from neurotic._elephant_tools import CausalAlphaKernel, instantaneous_rate
pq.markup.config.use_unicode = True # allow symbols like mu for micro in output
pq.mN = pq.UnitQuantity('millinewton', pq.N/1e3, symbol = 'mN'); # define millinewton
# make figures interactive and open in a separate window
# %matplotlib qt
# make figures interactive and inline
%matplotlib notebook
# make figures non-interactive and inline
# %matplotlib inline
colors = {
'B38': '#EFBF46', # yellow
'I2': '#DC5151', # red
'B8a/b': '#DA8BC3', # pink
'B6/B9': '#64B5CD', # light blue
'B3/B6/B9': '#5A9BC5', # medium blue
'B3': '#4F80BD', # dark blue
'B4/B5': '#00A86B', # jade green
'Force': '0.7', # light gray
'Model': '0.2', # dark gray
}
###Output
_____no_output_____
###Markdown
Load Data
###Code
directory = 'spikes-firing-rates-and-forces'
# filename = 'JG07 Tape nori 0.mat'
# filename = 'JG08 Tape nori 0.mat'
filename = 'JG08 Tape nori 1.mat'
# filename = 'JG08 Tape nori 1 superset.mat' # this file is missing spikes for several swallows
# filename = 'JG08 Tape nori 2.mat'
# filename = 'JG11 Tape nori 0.mat'
# filename = 'JG12 Tape nori 0.mat'
# filename = 'JG12 Tape nori 1.mat'
# filename = 'JG14 Tape nori 0.mat'
file_basename = '.'.join(os.path.basename(filename).split('.')[:-1])
# read the data file containing force and spike trains
reader = neo.io.NeoMatlabIO(os.path.join(directory, filename))
blk = reader.read_block()
seg = blk.segments[0]
sigs = {sig.name:sig for sig in seg.analogsignals}
spiketrains = {st.name:st for st in seg.spiketrains}
###Output
_____no_output_____
###Markdown
Plot Empirical Force
###Code
# plot the swallowing force measured by the force transducer
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(8,4))
ax.plot(sigs['Force'].times.rescale('s'), sigs['Force'].rescale('mN'), c=colors['Force'])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Force (mN)')
ax.set_title(file_basename)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Model Parameters
###Code
# parameters for constructing the model
# - model force = sum of scaled (weighted) firing rates + offset
# - comment/uncomment an entry in firing_rate_params to exclude/include the unit (I2 muscle or motor neurons)
# - weights can be positive or negative
# - rate constants determine how quickly the effect of a unit builds and decays
# - the model will be plotted below against the empirical force, both normalized by their peak values
offset = 0
# firing_rate_params = {
# # 'I2': {'weight': -0.002, 'rate_constant': 1},
# # 'B8a/b': {'weight': 0.05, 'rate_constant': 1},
# 'B3': {'weight': 0.05, 'rate_constant': 1},
# 'B6/B9': {'weight': 0.05, 'rate_constant': 0.5},
# 'B38': {'weight': 0.025, 'rate_constant': 1},
# # 'B4/B5': {'weight': 0.05, 'rate_constant': 1},
# }
firing_rate_params = {
# 'I2': {'weight': -0.02, 'rate_constant': 1},
# 'B8a/b': {'weight': 0.05, 'rate_constant': 1},
'B3': {'weight': 0.05, 'rate_constant': 1},
'B6/B9': {'weight': 0.1, 'rate_constant': 0.5},
'B38': {'weight': 0.05, 'rate_constant': 1},
# 'B4/B5': {'weight': 0.05, 'rate_constant': 1},
}
###Output
_____no_output_____
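###Markdown
In equation form, the model described by the parameters above is a weighted sum of kernel-smoothed spike trains plus a constant offset: $F_{\mathrm{model}}(t) = \mathrm{offset} + \sum_i w_i \, (s_i * K_{\tau_i})(t)$, where $s_i$ is the spike train of unit $i$, $K_{\tau_i}$ is a causal alpha kernel with rate constant $\tau_i$, and $w_i$ is that unit's weight. The next cell computes exactly this with `instantaneous_rate` and `CausalAlphaKernel`.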
###Markdown
Generate Firing Rate Model
###Code
firing_rates = {}
for name, params in firing_rate_params.items():
weight = params['weight']
rate_constant = params['rate_constant']
# convolve the spike train with the kernel
firing_rates[name] = instantaneous_rate(
spiketrain=spiketrains[name],
sampling_period=0.0002*pq.s, # 5 kHz, same as data acquisition rate
kernel=CausalAlphaKernel(rate_constant*pq.s),
)
firing_rates[name].name = f'{name}\nweight: {weight}\nrate const: {rate_constant} sec'
# scale the firing rate by its weight
firing_rates[name] *= weight
# create the model by summing the firing rates and adding the offset
firing_rates['Model'] = None
for name, params in firing_rate_params.items():
if firing_rates['Model'] is None:
firing_rates['Model'] = firing_rates[name].copy()
else:
firing_rates['Model'] += firing_rates[name]
firing_rates['Model'] += offset*pq.Hz
firing_rates['Model'].name = f'Model = Sum of\nScaled Rates + {offset}'
###Output
_____no_output_____
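###Markdown
As a minimal, self-contained illustration of the smoothing step (not part of the original analysis), the cell below applies the same `CausalAlphaKernel` to an artificial spike train; the spike times and the 0.5 s rate constant are arbitrary.
###Code
# Hedged toy example: kernel-smooth an artificial spike train (all values arbitrary)
toy_spikes = neo.SpikeTrain([0.5, 1.0, 1.2, 3.0] * pq.s, t_start=0 * pq.s, t_stop=5 * pq.s)
toy_rate = instantaneous_rate(
    spiketrain=toy_spikes,
    sampling_period=0.001 * pq.s,
    kernel=CausalAlphaKernel(0.5 * pq.s),
)
plt.figure(figsize=(6, 2))
plt.plot(toy_rate.times.rescale('s'), toy_rate.rescale('Hz'), c='k')
plt.xlabel('Time (s)')
plt.ylabel('Rate (Hz)')
plt.tight_layout()
###Output
_____no_output_____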
###Markdown
Plot Model
###Code
# plot each spike train and the scaled (weighted) firing rate
fig, axes = plt.subplots(len(firing_rates)+1, 1, sharex=True, figsize=(8,2*len(firing_rates)))
for i, name in enumerate(firing_rates):
ax = axes[i]
if name in spiketrains:
ax.eventplot(positions=spiketrains[name], lineoffsets=-1, colors=colors[name])
ax.plot(firing_rates[name].times.rescale('s'), firing_rates[name].rescale('Hz'), c=colors[name])
ax.set_ylabel(firing_rates[name].name)
ax.set_ylim(-2, 3)
# plot force and the model, both normalized by their peaks
axes[-1].plot(sigs['Force'].times.rescale('s'), sigs['Force']/sigs['Force'].max(), c=colors['Force'])
axes[-1].plot(firing_rates['Model'].times.rescale('s'), firing_rates['Model']/firing_rates['Model'].max(), c=colors['Model'])
axes[-1].set_ylabel('Model vs. Force\n(both normalized)')
axes[-1].set_xlabel('Time (s)')
axes[0].set_title(file_basename)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Plot Model for Grant
###Code
# use with JG08 Tape nori 1
time_slices = {
'I2': [670.7, 680.83],
'B8a/b': [673.5, 679.59],
'B3': [675.645, 680.83],
'B6/B9': [674.25, 680.83],
'B38': [670.7, 680.83],
'Model': [672.26, 680.2],
'Force': [672.26, 680.2],
}
# plot each spike train and the scaled (weighted) firing rate
fig, axes = plt.subplots(2*len(firing_rate_params)+1, 1, sharex=True, figsize=(6,len(firing_rate_params)*(16/17)+1*(20/17)), gridspec_kw={'height_ratios': [3, 1]*len(firing_rate_params) + [5]})
for i, name in enumerate(firing_rate_params):
ax = axes[2*i]
fr = firing_rates[name]
st = spiketrains[name]
if name in time_slices:
fr = fr.copy().time_slice(time_slices[name][0]*pq.s, time_slices[name][1]*pq.s)
st = st.copy().time_slice(time_slices[name][0]*pq.s, time_slices[name][1]*pq.s)
ax.plot(fr.times.rescale('s'), fr.rescale('Hz'), c=colors[name])
ax.annotate(name, xy=(0, 0.5), xycoords='axes fraction',
ha='right', va='center', fontsize='large', color=colors[name], fontfamily='Serif',
)
# ax.set_ylim(0, 2.2)
ax.axis('off')
ax = axes[2*i+1]
ax.eventplot(positions=st, lineoffsets=-1, colors=colors[name])
ax.axis('off')
# plot force and the model, both normalized by their peaks
force = sigs['Force'].copy().time_slice(time_slices['Force'][0]*pq.s, time_slices['Force'][1]*pq.s)
model = firing_rates['Model'].time_slice(time_slices['Model'][0]*pq.s, time_slices['Model'][1]*pq.s)
axes[-1].plot(force.times.rescale('s'), force/force.max(), c=colors['Force'])
axes[-1].plot(model.times.rescale('s'), model/model.max(), c=colors['Model'])
axes[-1].annotate('Model\nvs.', xy=(-0.04, 0.6), xycoords='axes fraction',
ha='center', va='center', fontsize='large', color=colors['Model'], fontfamily='Serif',
)
axes[-1].annotate('Force', xy=(-0.04, 0.35), xycoords='axes fraction',
ha='center', va='center', fontsize='large', color=colors['Force'], fontfamily='Serif',
)
axes[-1].axis('off')
plt.tight_layout(0)
###Output
_____no_output_____ |
modeling-notebooks/CF03_Model_SVD_sparse_matrix_binary_ratings.ipynb | ###Markdown
Matrix Factorization with SVD - BINARY RATINGS https://www.kaggle.com/gspmoreira/recommender-systems-in-python-101
###Code
import import_ipynb
import pandas as pd
import scipy.sparse as sps
import numpy as np
from scipy.sparse.linalg import svds
from time import time
from evaluation import DCG
from evaluation import nDCG
from evaluation import R_Precision
import random
###Output
importing Jupyter notebook from evaluation.ipynb
DCG = 0.5
IDCG = 1.0
nDCG = 0.5
###Markdown
Define Functions for SVD and Predict SVD
###Code
#--------------------------------------
# RETURN DECOMPOSITION MATRICES
#--------------------------------------
def SVD(num_factors):
NUMBER_OF_FACTORS_MF = num_factors
MATRIX = M.asfptype()
U, sigma, Vt = svds(MATRIX, k = NUMBER_OF_FACTORS_MF)
sigma = np.diag(sigma)
return U, sigma, Vt
#--------------------------------------------------------------------
# PREDICT top_n TRACKS FOR A PID AND EVALUATE AGAINST GROUND TRUTH
#--------------------------------------------------------------------
def SVD_predict_and_evaluate_top_n(pid, U, sigma, Vt, top_n):
"""
input
pid : playlist id to predict for
U, sigma, Vt : decomposition matrices
top_n : number of tracks to recommend
return
L_pred : top_n predicted track_ids
ground_truth : track_ids in the hold_out
R_Prec, NDCG, res : R-Precision, nDCG and per-prediction hit indicators
"""
train_array_track_ids = track_id_array[M[pid].toarray()[0].astype(bool)]
predicted = np.dot(np.dot(U[pid,:], sigma), Vt)
pred = np.flipud(predicted.argsort())
L_pred = pred[:top_n+len(train_array_track_ids)]
L_pred = [el for el in L_pred if el not in train_array_track_ids]
L_pred = L_pred[:top_n]
ground_truth = ev_set_arr[ev_set_arr[:,0]==pid][:,1]
R_Prec = R_Precision(L_pred[:len(ground_truth)],ground_truth)
res = [int(el in ground_truth) for el in L_pred]
NDCG = nDCG(res)[1]
return L_pred, ground_truth, R_Prec, NDCG, res
#-------------------------------
# SAVE SVD EVALUATION RESULTS
#-------------------------------
def save_SVD_res_k_n(U, sigma, Vt, k = 15, n = 10):
"""
k = number of factors
n = number of random lists to predict
"""
time0=time()
RES={}
for i,pid in enumerate(random.sample(evaluation_pids,n)):
predictions=SVD_predict_and_evaluate_top_n(pid, U, sigma, Vt, 500)
RES[pid] = [predictions[2], predictions[3]]
if i%500==0:
print(i)
print(time()-time0)
df = pd.DataFrame(RES).transpose().reset_index()
df.columns=['pid','R-Precision','nDCG']
df['rating'] = 'binary'
df['model'] = f'SVD_{k}'
df.to_csv(f'../evaluation/SVD_binary{k}_{n}.csv', index = None)
return RES
###Output
_____no_output_____
###Markdown
Read Data
###Code
file_path = '../data-processed/full-data/pid-track-binary-rating-train-data.csv'
data = pd.read_csv(file_path)
data.dtypes
data.head()
tracks = list(data.track_uri.unique())
D_tracks = {}
n=0
for track in tracks:
D_tracks[track] = n
n+=1
D_tracks_reverse = {}
n=0
for k,i in D_tracks.items():
D_tracks_reverse[i] = k
data['track_id'] = data.track_uri.map(D_tracks)
data.head()
data.dtypes
evaluation_set = pd.read_csv('../data-processed/full-data/evaluation-pids-ground-truth.csv')
evaluation_set['track_id'] = evaluation_set['track_uri'].map(D_tracks)
ev_set = evaluation_set[evaluation_set['hold_out'] == 1][['pid','track_id','hold_out']]
ev_set = ev_set[ev_set.track_id.isnull()==False]
evaluation_pids = list(ev_set.pid.unique())
ev_set.track_id = ev_set.track_id.astype(int)
ev_set_arr = ev_set.to_numpy()
###Output
_____no_output_____
###Markdown
Define the sparse rating matrix
###Code
M = sps.csr_matrix((data.binary_rating, (data.pid, data.track_id)))
M.shape[1]
###Output
_____no_output_____
###Markdown
Train - Predict - Evaluate
###Code
track_id_array = np.arange(M.shape[1])
###Output
_____no_output_____
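###Markdown
For reference, a single factorization-and-evaluation round with the helpers defined above would look like the sketch below; k = 15 and the random playlist are arbitrary choices, and the full runs are in the (commented) cells that follow.
###Code
# Hedged usage sketch: factorize with k latent factors and score one evaluation playlist
U, sigma, Vt = SVD(15)
pid = random.choice(evaluation_pids)
L_pred, ground_truth, r_prec, ndcg, res = SVD_predict_and_evaluate_top_n(pid, U, sigma, Vt, 500)
print('pid', pid, 'R-Precision', round(r_prec, 4), 'nDCG', round(ndcg, 4))
###Output
_____no_output_____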
###Markdown
Save evaluation - needs to be uncommented for chosen k
###Code
n=1000
###Output
_____no_output_____
###Markdown
k=15
###Code
# k=15
# U, sigma, Vt = SVD(k)
# U.shape, sigma.shape, Vt.shape
# df = pd.DataFrame(save_SVD_res_k_n(U, sigma, Vt, k, n)).transpose()
# df.describe()
###Output
_____no_output_____
###Markdown
k=25
###Code
# k=25
# U, sigma, Vt = SVD(k)
# U.shape, sigma.shape, Vt.shape
# df = pd.DataFrame(save_SVD_res_k_n(U, sigma, Vt, k, n)).transpose()
# df.describe()
###Output
_____no_output_____
###Markdown
k=35
###Code
# k=35
# U, sigma, Vt = SVD(k)
# U.shape, sigma.shape, Vt.shape
# df = pd.DataFrame(save_SVD_res_k_n(U, sigma, Vt, k, n)).transpose()
# df.describe()
###Output
_____no_output_____
###Markdown
k=45
###Code
# k=45
# U, sigma, Vt = SVD(k)
# U.shape, sigma.shape, Vt.shape
# df = pd.DataFrame(save_SVD_res_k_n(U, sigma, Vt, k, n)).transpose()
# df.describe()
###Output
_____no_output_____
###Markdown
k=50
###Code
# k=50
# U, sigma, Vt = SVD(k)
# save=save_SVD_res_k_n(U, sigma, Vt, k, n)
# df = pd.DataFrame(save[0]).transpose()
# df.describe()
###Output
_____no_output_____
###Markdown
k=75
###Code
# k=75
# n=10000
# U, sigma, Vt = SVD(k)
# df = pd.DataFrame(save_SVD_res_k_n(U, sigma, Vt, k, n)).transpose()
# df.describe()
###Output
_____no_output_____ |
FSDKaggle2018_Complete-Private.ipynb | ###Markdown
Prepare Loaders
###Code
# Imports assumed by the cells below (they are not present in the captured notebook)
import os
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import librosa
import librosa.display
from IPython.display import Audio
#path_dataset = '../input/'
path_dataset = '/home/edoardobucheli/Datasets/FSDKaggle2018'
path_train = os.path.join(path_dataset,'audio_train_16k')
path_test = os.path.join(path_dataset,'audio_test_16k')
###Output
_____no_output_____
###Markdown
Load Label Data
###Code
train_data = pd.read_csv(os.path.join(path_dataset,'train_post_competition.csv'))
test_data = pd.read_csv(os.path.join(path_dataset,'test_post_competition_scoring_clips.csv'))
from utilities import get_all_classes_dict, get_classes_to_meta_dict, get_labels
num_to_label, label_to_num, n_classes = get_all_classes_dict(train_data)
label_to_meta, label_num_to_meta = get_classes_to_meta_dict(label_to_num)
data_cur = train_data[train_data['manually_verified']==1]
data_noi = train_data[train_data['manually_verified']==0]
meta_labels_all, labels_all = get_labels(train_data,label_to_meta, label_to_num)
meta_labels_cur, labels_cur = get_labels(data_cur,label_to_meta, label_to_num)
meta_labels_noi, labels_noi = get_labels(data_noi,label_to_meta, label_to_num)
meta_labels_test, labels_test = get_labels(test_data,label_to_meta, label_to_num)
n_meta_classes = len(np.unique(meta_labels_all))
###Output
_____no_output_____
###Markdown
Load Data
###Code
pickle_test = './preprocessed_test/PS-257-HL256-WF16k-64k'
with open(pickle_test, 'rb') as fp:
x_test = pickle.load(fp)
x_test_private = [f for i,f in enumerate(x_test) if test_data['usage'][i] == 'Private']
labels_test_private = [f for i,f in enumerate(labels_test) if test_data['usage'][i] == 'Private']
meta_labels_test_private = [f for i,f in enumerate(meta_labels_test) if test_data['usage'][i] == 'Private']
new_x_test = []
for this_x in x_test_private:
if this_x.shape[1] == 251:
this_x = this_x[:,:250]
new_x_test.append(this_x)
x_test_private = new_x_test
pickle_test_32 = './preprocessed_test/PS-257-HL256-WF32k-64k'
with open(pickle_test_32, 'rb') as fp:
x_test_32k = pickle.load(fp)
x_test_32k_private = [f for i,f in enumerate(x_test_32k) if test_data['usage'][i]=='Private']
temp_x_test = []
for this_x in x_test_32k_private:
if this_x.shape[1] == 251:
this_x = this_x[:,:250]
temp_x_test.append(this_x)
x_test_32k_private = temp_x_test
sr = 16000
file_length = 64000
hop_length = 256
freq_res = 257
frames = int(np.ceil(file_length/hop_length))
###Output
_____no_output_____
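###Markdown
With `file_length = 64000` samples and `hop_length = 256`, the spectrograms have $\lceil 64000 / 256 \rceil = 250$ frames, which is why the clips that come out with 251 frames are trimmed to 250 in the loading cells above.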
###Markdown
Load Network
###Code
from networks.CNNetworks2D import malley_cnn_120
from tensorflow.keras.optimizers import Adam
input_shape = ([freq_res,frames])
lr = 0.001
mc_model = malley_cnn_120(input_shape,n_meta_classes)
mc_model.compile(optimizer=Adam(lr),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
#mc_model.save_weights('./weights_mc_malley.h5')
mc_model.load_weights('./outputs_mc/best_weights/malley-MC-16k-PS-257-256[0].h5')
mc_model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 257, 250) 0
_________________________________________________________________
expand_dims (Lambda) (None, 257, 250, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 257, 250, 64) 1408
_________________________________________________________________
activation (Activation) (None, 257, 250, 64) 0
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 85, 250, 64) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 85, 250, 128) 57472
_________________________________________________________________
activation_1 (Activation) (None, 85, 250, 128) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 21, 250, 128) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 12, 250, 256) 327936
_________________________________________________________________
activation_2 (Activation) (None, 12, 250, 256) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 12, 250, 512) 918016
_________________________________________________________________
activation_3 (Activation) (None, 12, 250, 512) 0
_________________________________________________________________
cut_here (GlobalMaxPooling2D (None, 512) 0
_________________________________________________________________
dense (Dense) (None, 512) 262656
_________________________________________________________________
dropout (Dropout) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 6) 3078
=================================================================
Total params: 1,570,566
Trainable params: 1,570,566
Non-trainable params: 0
_________________________________________________________________
###Markdown
Make Inference
###Code
from utilities import create_quick_test_2d
from utilities import evaluate_complete_files
y_scores_MC,edo = evaluate_complete_files(x_test_private,meta_labels_test_private,mc_model,[257,250])
y_hat_MC = np.argmax(y_scores_MC,axis = 1)
np.mean(y_hat_MC==meta_labels_test_private)
x_test_2 = create_quick_test_2d(x_test_private,freq_res,frames)
mc_model.evaluate(x_test_2,meta_labels_test_private)
#x_test_2 = create_quick_test_2d(x_test,freq_res,frames)
#mc_model.evaluate(x_test_2,meta_labels_test)
#y_scores_MC = mc_model.predict(x_test_2)
#y_hat_MC = np.argmax(y_scores_MC, axis = 1)
###Output
_____no_output_____
###Markdown
Plot Confusion Matrix
###Code
from utilities import plot_cm
plot_cm(meta_labels_test_private,y_hat_MC,figsize = (7,7))
plt.savefig('./cm_MC-Private.eps')
del(mc_model)
###Output
_____no_output_____
###Markdown
Separate Given Inference
###Code
c0_indx = [i for i,f in enumerate(y_scores_MC) if f[0] >= 0.2]
c1_indx = [i for i,f in enumerate(y_scores_MC) if f[1] >= 0.2]
c2_indx = [i for i,f in enumerate(y_scores_MC) if f[2] >= 0.2]
c3_indx = [i for i,f in enumerate(y_scores_MC) if f[3] >= 0.2]
c4_indx = [i for i,f in enumerate(y_scores_MC) if f[4] >= 0.2]
c5_indx = [i for i,f in enumerate(y_scores_MC) if f[5] >= 0.2]
###Output
_____no_output_____
###Markdown
Create General Class Vector
###Code
all_scores =np.zeros((len(x_test_private),47))
###Output
_____no_output_____
###Markdown
Cluster 0
###Code
x0 = [x_test_private[f] for f in c0_indx]
y0 = [labels_test_private[f] for f in c0_indx]
with open('./Clustering_V1_mappings/c0_mapping', 'rb') as fp:
c0_label_mapping = pickle.load(fp)
c0_label_mapping_inv = dict([[v,k] for [k,v] in c0_label_mapping.items()])
c0_label_mapping_inv[len(c0_label_mapping_inv)] = 41
c0_labels = list(c0_label_mapping.keys())
y0_2 = []
for f in y0:
if f in c0_labels:
y0_2.append(c0_label_mapping[f])
else:
y0_2.append(len(c0_labels))
sr = 16000
file_length = 64000
hop_length = 256
freq_res = 257
frames = int(np.ceil(file_length/hop_length))
input_shape = [freq_res,frames]
modelc0 = malley_cnn_120(input_shape,len(c0_label_mapping)+1)
modelc0.load_weights('./outputs/best_weights/malley-C0-16k-PS-257-HL256[0].h5')
modelc0.compile(optimizer=Adam(lr),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
y_scores_c0,edo= evaluate_complete_files(x0,y0_2,modelc0,[freq_res,frames])
y_hat_c0 = np.argmax(y_scores_c0,axis = 1)
np.mean(y_hat_c0==y0_2)
x0_2 = create_quick_test_2d(x0,freq_res,frames)
modelc0.evaluate(x0_2,y0_2)
###Output
309/309 [==============================] - 1s 5ms/step
###Markdown
Save Scores in General Scores
###Code
y_scores_c0_1 = modelc0.predict(x0_2)
y_hat_c0_1 = np.argmax(y_scores_c0,axis = 1)
y_hat_c0_orig = [c0_label_mapping_inv[f] for f in y_hat_c0]
for i,this_scores in zip(c0_indx,y_scores_c0):
for j,f in enumerate(this_scores):
all_scores[i,c0_label_mapping_inv[j]] = f * (1-this_scores[-1])#*y_scores_MC[i][0]
labels = [num_to_label[f] for f in c0_labels]
labels.append('Unknown')
plot_cm(y0_2,y_hat_c0,figsize = (10,10), labels = labels)
plt.savefig('./cm_MC0-Private.eps')
del(modelc0)
###Output
_____no_output_____
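###Markdown
Note on the score combination used above (and repeated for the other clusters): each within-cluster softmax score is multiplied by `(1 - this_scores[-1])`, i.e. by the probability that the clip does *not* belong to that cluster's "Unknown" class, before being written into `all_scores`. The commented-out factor `y_scores_MC[i][0]` shows the alternative of additionally weighting by the meta-classifier score.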
###Markdown
Cluster 1
###Code
x1 = [x_test_32k_private[f] for f in c1_indx]
y1 = [labels_test_private[f] for f in c1_indx]
with open('./Clustering_V1_mappings/c1_mapping', 'rb') as fp:
c1_label_mapping = pickle.load(fp)
c1_label_mapping_inv = dict([[v,k] for [k,v] in c1_label_mapping.items()])
c1_label_mapping_inv[len(c1_label_mapping_inv)] = 42
c1_labels = list(c1_label_mapping.keys())
y1_2 = []
for f in y1:
if f in c1_labels:
y1_2.append(c1_label_mapping[f])
else:
y1_2.append(len(c1_labels))
sr = 32000
file_length = 64000
hop_length = 256
freq_res = 257
frames = int(np.ceil(file_length/hop_length))
input_shape = [freq_res,frames]
modelc1 = malley_cnn_120(input_shape,len(c1_labels)+1)
modelc1.load_weights('./outputs/best_weights/malley-C1-32k-PS-257-HL256[0].h5')
modelc1.compile(optimizer=Adam(lr),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
y_scores_c1,edo= evaluate_complete_files(x1,y1_2,modelc1,[freq_res,frames])
y_hat_c1 = np.argmax(y_scores_c1,axis = 1)
np.mean(y_hat_c1==y1_2)
x1_2 = create_quick_test_2d(x1,freq_res,frames)
modelc1.evaluate(x1_2,y1_2)
###Output
158/158 [==============================] - 1s 7ms/step
###Markdown
Save Scores in General Scores
###Code
#y_scores_c1 = modelc1.predict(x1_2)
#y_hat_c1 = np.argmax(y_scores_c1,axis = 1)
y_hat_c1_orig = [c1_label_mapping_inv[f] for f in y_hat_c1]
for i,this_scores in zip(c1_indx,y_scores_c1):
for j,f in enumerate(this_scores):
all_scores[i,c1_label_mapping_inv[j]] = f*(1-this_scores[-1])#*y_scores_MC[i][1]
labels = [num_to_label[f] for f in c1_labels]
labels.append('Unknown')
plot_cm(y1_2,y_hat_c1,figsize = (7,7), labels = labels)
plt.savefig('./cm_MC1-private.eps')
del(modelc1)
###Output
_____no_output_____
###Markdown
Cluster 2
###Code
x2 = [x_test_32k_private[f] for f in c2_indx]
y2 = [labels_test_private[f] for f in c2_indx]
with open('./Clustering_V1_mappings/c2_mapping', 'rb') as fp:
c2_label_mapping = pickle.load(fp)
c2_label_mapping_inv = dict([[v,k] for [k,v] in c2_label_mapping.items()])
c2_label_mapping_inv[len(c2_label_mapping_inv)] = 43
c2_labels = list(c2_label_mapping.keys())
y2_2 = []
for f in y2:
if f in c2_labels:
y2_2.append(c2_label_mapping[f])
else:
y2_2.append(len(c2_labels))
sr = 32000
file_length = 64000
hop_length = 256
freq_res = 257
frames = int(np.ceil(file_length/hop_length))
input_shape = [freq_res,frames]
modelc2 = malley_cnn_120(input_shape,len(c2_labels)+1)
modelc2.load_weights('./outputs/best_weights/malley-C2-32k-PS-257-HL256[0].h5')
modelc2.compile(optimizer=Adam(lr),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
y_scores_c2,edo= evaluate_complete_files(x2,y2_2,modelc2,[freq_res,frames])
y_hat_c2 = np.argmax(y_scores_c2,axis = 1)
np.mean(y_hat_c2==y2_2)
x2_2 = create_quick_test_2d(x2,freq_res,frames)
modelc2.evaluate(x2_2,y2_2)
###Output
586/586 [==============================] - 2s 4ms/step
###Markdown
Save Scores in General Scores
###Code
#y_scores_c2 = modelc2.predict(x2_2)
#y_hat_c2 = np.argmax(y_scores_c2,axis = 1)
y_hat_c2_orig = [c2_label_mapping_inv[f] for f in y_hat_c2]
for i,this_scores in zip(c2_indx,y_scores_c2):
for j,f in enumerate(this_scores):
all_scores[i,c2_label_mapping_inv[j]] = f*(1-this_scores[-1])#*y_scores_MC[i][2]
labels = [num_to_label[f] for f in c2_labels]
labels.append('Unknown')
plot_cm(y2_2,y_hat_c2,figsize = (15,15), labels = labels, xrotation = 90)
plt.savefig('./cm_MC2-private.eps')
del(modelc2)
###Output
_____no_output_____
###Markdown
Cluster 3
###Code
x3 = [x_test_32k_private[f] for f in c3_indx]
y3 = [labels_test_private[f] for f in c3_indx]
with open('./Clustering_V1_mappings/c3_mapping', 'rb') as fp:
c3_label_mapping = pickle.load(fp)
c3_label_mapping_inv = dict([[v,k] for [k,v] in c3_label_mapping.items()])
c3_label_mapping_inv[len(c3_label_mapping_inv)] = 44
c3_labels = list(c3_label_mapping.keys())
y3_2 = []
for f in y3:
if f in c3_labels:
y3_2.append(c3_label_mapping[f])
else:
y3_2.append(len(c3_labels))
sr = 32000
file_length = 64000
hop_length = 256
freq_res = 257
frames = int(np.ceil(file_length/hop_length))
input_shape = [freq_res,frames]
modelc3 = malley_cnn_120(input_shape,len(c3_labels)+1)
modelc3.load_weights('./outputs/best_weights/malley-C3-32k-PS-257-HL256[0].h5')
modelc3.compile(optimizer=Adam(lr),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
y_scores_c3,edo= evaluate_complete_files(x3,y3_2,modelc3,[freq_res,frames])
y_hat_c3 = np.argmax(y_scores_c3,axis = 1)
np.mean(y_hat_c3==y3_2)
x3_2 = create_quick_test_2d(x3,freq_res,frames)
modelc3.evaluate(x3_2,y3_2)
###Output
170/170 [==============================] - 1s 4ms/step
###Markdown
Save Scores in General Scores
###Code
#y_scores_c3 = modelc3.predict(x3_2)
#y_hat_c3 = np.argmax(y_scores_c3,axis = 1)
y_hat_c3_orig = [c3_label_mapping_inv[f] for f in y_hat_c3]
for i,this_scores in zip(c3_indx,y_scores_c3):
for j,f in enumerate(this_scores):
all_scores[i,c3_label_mapping_inv[j]] = f*(1-this_scores[-1])#*y_scores_MC[i][3]
labels = [num_to_label[f] for f in c3_labels]
labels.append('Unknown')
plot_cm(y3_2,y_hat_c3,figsize = (7,7), labels = labels, xrotation = 45)
plt.savefig('./cm_MC3-private.eps')
del(modelc3)
###Output
_____no_output_____
###Markdown
Cluster 4
###Code
x4 = [x_test_private[f] for f in c4_indx]
y4 = [labels_test_private[f] for f in c4_indx]
c4_scores = [f[4] for i,f in enumerate(y_scores_MC) if i in c4_indx]
for i, score in zip(c4_indx,c4_scores):
all_scores[i,label_to_num['Applause']] = score
###Output
_____no_output_____
###Markdown
Cluster 5
###Code
x5 = [x_test_private[f] for f in c5_indx]
y5 = [labels_test_private[f] for f in c5_indx]
with open('./Clustering_V1_mappings/c5_mapping', 'rb') as fp:
c5_label_mapping = pickle.load(fp)
c5_label_mapping_inv = dict([[v,k] for [k,v] in c5_label_mapping.items()])
c5_label_mapping_inv[len(c5_label_mapping_inv)] = 45
c5_labels = list(c5_label_mapping.keys())
c5_labels = list(c5_label_mapping.keys())
y5_2 = []
for f in y5:
if f in c5_labels:
y5_2.append(c5_label_mapping[f])
else:
y5_2.append(len(c5_labels))
sr = 16000
file_length = 64000
hop_length = 256
freq_res = 257
frames = int(np.ceil(file_length/hop_length))
input_shape = [freq_res,frames]
modelc5 = malley_cnn_120(input_shape,len(c5_labels)+1)
modelc5.load_weights('./outputs/best_weights/malley-C5-16k-PS-257-HL256[0].h5')
modelc5.compile(optimizer=Adam(lr),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
y_scores_c5,edo= evaluate_complete_files(x5,y5_2,modelc5,[freq_res,frames])
y_hat_c5 = np.argmax(y_scores_c5,axis = 1)
np.mean(y_hat_c5==y5_2)
x5_2 = create_quick_test_2d(x5,freq_res,frames)
modelc5.evaluate(x5_2,y5_2)
###Output
104/104 [==============================] - 0s 5ms/step
###Markdown
Save Scores in General Scores
###Code
#y_scores_c5 = modelc5.predict(x5_2)
#y_hat_c5 = np.argmax(y_scores_c5,axis = 1)
y_hat_c5_orig = [c5_label_mapping_inv[f] for f in y_hat_c5]
for i,this_scores in zip(c5_indx,y_scores_c5):
for j,f in enumerate(this_scores):
all_scores[i,c5_label_mapping_inv[j]] = f * (1-this_scores[-1])#y_scores_MC[i][5]
labels = [num_to_label[f] for f in c5_labels]
labels.append('Unknown')
plot_cm(y5_2,y_hat_c5,figsize = (7,7), labels = labels)
plt.savefig('./cm_MC5-private.eps')
del(modelc5)
###Output
_____no_output_____
###Markdown
Final Inference
###Code
from utilities import mapk
y_hat_final = np.argmax(all_scores[:,:41],axis = 1)
tha_best = []
for this_score in all_scores[:,:41]:
tha_best.append(sorted(zip(this_score, np.arange(len(this_score))), reverse=True)[:3])
y_top_3 = [[a,b,c] for [(_,a),(_,b),(_,c)] in tha_best]
mapk([[f] for f in labels_test_private],y_top_3,k=3)
np.mean(y_hat_final==labels_test_private)
num_to_label[41] = 'Unknown0'
num_to_label[42] = 'Unknown1'
num_to_label[43] = 'Unknown2'
num_to_label[44] = 'Unknown3'
num_to_label[45] = 'Unknown4'
num_to_label[46] = 'Unknown5'
with open('./preprocessed_test/WF-16k-64k','rb') as fp:
waves = pickle.load(fp)
waves_private = [f for i,f in enumerate(waves) if test_data['usage'][i] == 'Private']
n = np.random.randint(0, len(x_test_private))  # pick a random clip from the private test subset
x = np.arange(47)
plt.figure(figsize = (16,5))
plt.stem(all_scores[n])
ticks = plt.xticks(ticks = x,labels= [num_to_label[f] for f in x], rotation = 90)
plt.title('Real: {} {}'.format(num_to_label[labels_test_private[n]],n))
librosa.display.specshow(x_test_32k_private[n])
Audio(waves_private[n], rate=16000)
labels = list(num_to_label.values())
plot_cm(labels_test_private,y_hat_final,figsize = (20,20), labels = labels, xrotation=90)
plt.savefig('./cm_all_41classes-private.eps')
###Output
_____no_output_____ |
notebooks/dev/cross_correlation_simulatedata.ipynb | ###Markdown
Set up cosmology
###Code
# Imports assumed by this notebook (they are not present in the captured cells)
import numpy as np
import matplotlib.pyplot as plt
import pyccl as ccl
import sacc
from astropy import units
cosmo = ccl.Cosmology(Omega_c=0.25,
Omega_b=0.05,
h=0.7,
n_s=0.965,
A_s=2.11e-9,
Omega_k=0.0,
Neff=3.046,
matter_power_spectrum='linear')
b1_unwise = 1.0
s1_unwise = 0.4
cosmo
###Output
_____no_output_____
###Markdown
Set up binning
###Code
ell_max = 600
n_ell = 20
delta_ell = ell_max // n_ell
ells = (np.arange(n_ell) + 0.5) * delta_ell
ells_win = np.arange(ell_max + 1)
wins = np.zeros([n_ell, len(ells_win)])
for i in range(n_ell):
wins[i, i * delta_ell : (i + 1) * delta_ell] = 1.0
Well = sacc.BandpowerWindow(ells_win, wins.T)
###Output
_____no_output_____
###Markdown
Set up unWISE tracer
###Code
dndz_unwise = np.loadtxt('../../soliket/xcorr/data/dndz.txt')
ngal_unwise = 1.
fsky_unwise = 0.4
tracer_unwise_g = ccl.NumberCountsTracer(cosmo,
has_rsd=False,
dndz=dndz_unwise.T,
bias=(dndz_unwise[:,0], b1_unwise * np.ones(len(dndz_unwise[:,0]))),
mag_bias=(dndz_unwise[:,0], s1_unwise * np.ones(len(dndz_unwise[:,0])))
)
Nell_unwise_g = np.ones(n_ell) / (ngal_unwise * (60 * 180 / np.pi)**2)
###Output
_____no_output_____
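###Markdown
The galaxy shot noise added to the auto-spectrum is $N_\ell^{gg} = 1/\bar{n}$ with $\bar{n}$ the source density per steradian; the cell above converts 1 galaxy per arcmin$^2$ using $(60 \times 180 / \pi)^2$ arcmin$^2$ per steradian.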
###Markdown
Set up SO CMB lensing tracer
###Code
zstar = 1086
fsky_solensing = 0.4
tracer_so_k = ccl.CMBLensingTracer(cosmo, z_source=zstar)
# Approximation to SO LAT beam
fwhm_so_k = 1. * units.arcmin
sigma_so_k = (fwhm_so_k.to(units.rad).value / 2.355)
ell_beam = np.arange(3000)
beam_so_k = np.exp(-ell_beam * (ell_beam + 1) * sigma_so_k**2)
###Output
_____no_output_____
###Markdown
Calculate power spectra
###Code
n_maps = 2
cls = np.zeros([n_maps, n_maps, n_ell])
cls[0, 0, :] = ccl.angular_cl(cosmo, tracer_unwise_g, tracer_unwise_g, ells) + Nell_unwise_g
cls[0, 1, :] = ccl.angular_cl(cosmo, tracer_unwise_g, tracer_so_k, ells)
cls[1, 0, :] = cls[0, 1, :]
cls[1, 1, :] = ccl.angular_cl(cosmo, tracer_so_k, tracer_so_k, ells)
###Output
_____no_output_____
###Markdown
Set up covariance
###Code
n_cross = (n_maps * (n_maps + 1)) // 2
covar = np.zeros([n_cross, n_ell, n_cross, n_ell])
id_i = 0
for i1 in range(n_maps):
for i2 in range(i1, n_maps):
id_j = 0
for j1 in range(n_maps):
for j2 in range(j1, n_maps):
cl_i1j1 = cls[i1, j1, :]
cl_i1j2 = cls[i1, j2, :]
cl_i2j1 = cls[i2, j1, :]
cl_i2j2 = cls[i2, j2, :]
# Knox formula
cov = (cl_i1j1 * cl_i2j2 + cl_i1j2 * cl_i2j1) / (delta_ell * fsky_solensing * (2 * ells + 1))
covar[id_i, :, id_j, :] = np.diag(cov)
id_j += 1
id_i += 1
covar = covar.reshape([n_cross * n_ell, n_cross * n_ell])
###Output
_____no_output_____
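###Markdown
The Gaussian bandpower covariance assembled above is the Knox formula, $\mathrm{Cov}\left(C_\ell^{i_1 i_2}, C_\ell^{j_1 j_2}\right) = \frac{C_\ell^{i_1 j_1} C_\ell^{i_2 j_2} + C_\ell^{i_1 j_2} C_\ell^{i_2 j_1}}{(2\ell + 1)\, \Delta\ell\, f_{\mathrm{sky}}}$, taken to be diagonal in $\ell$; note that a single $f_{\mathrm{sky}} = 0.4$ is used for every block.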
###Markdown
Construct sacc file
###Code
s = sacc.Sacc()
s.add_tracer('NZ', 'gc_unwise',
quantity='galaxy_density',
spin=0,
z=dndz_unwise[:,0],
nz=dndz_unwise[:,1],
metadata={'ngal': ngal_unwise})
s.add_tracer('Map', 'ck_so',
quantity='cmb_convergence',
spin=0,
ell=ell_beam,
beam=beam_so_k)
s.add_ell_cl('cl_00',
'gc_unwise',
'gc_unwise',
ells, cls[0, 0, :],
window=Well)
s.add_ell_cl('cl_00',
'gc_unwise',
'ck_so',
ells, cls[0, 1, :],
window=Well)
s.add_ell_cl('cl_00',
'ck_so',
'ck_so',
ells, cls[1, 1, :],
window=Well)
s.add_covariance(covar)
s.save_fits('../../soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits', overwrite=True)
###Output
WARNING: VerifyWarning: Keyword name 'META_ngal' is greater than 8 characters or contains characters not allowed by the FITS standard; a HIERARCH card will be created. [astropy.io.fits.card]
###Markdown
Read sacc file and compare to 'theory'
###Code
s_load = sacc.Sacc.load_fits('../../soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits')
for n, t in s_load.tracers.items():
print(t.name, t.quantity, type(t))
# Type of power spectra
data_types = np.unique([d.data_type for d in s_load.data])
print("Data types: ", data_types)
# Tracer combinations
print("Tracer combinations: ", s_load.get_tracer_combinations())
# Data size
print("Nell: ", s_load.mean.size)
ell_theory = np.linspace(1,ell_max,ell_max)
z_unwise = s_load.tracers['gc_unwise'].z
nz_unwise = s_load.tracers['gc_unwise'].nz
tracer_gc_unwise = ccl.NumberCountsTracer(cosmo, has_rsd=False,
dndz=[z_unwise, nz_unwise],
bias=(z_unwise, b1_unwise * np.ones(len(z_unwise))),
mag_bias=(z_unwise, s1_unwise * np.ones(len(z_unwise)))
)
tracer_ck_so = ccl.CMBLensingTracer(cosmo, z_source=zstar)
cl_gg_theory = ccl.angular_cl(cosmo, tracer_gc_unwise, tracer_gc_unwise, ell_theory)
cl_gk_theory = ccl.angular_cl(cosmo, tracer_gc_unwise, tracer_ck_so, ell_theory)
cl_kk_theory = ccl.angular_cl(cosmo, tracer_ck_so, tracer_ck_so, ell_theory)
Nell_unwise_g = np.ones_like(ell_theory) / (s_load.tracers['gc_unwise'].metadata['ngal'] * (60 * 180 / np.pi)**2)
plt.figure(1, figsize=(3.*4.5, 3.75))
plt.suptitle(r'SO $\times$ unWISE')
plt.subplot(131)
plt.title(r'$gg$')
ell, cl, cov = s_load.get_ell_cl('cl_00', 'gc_unwise', 'gc_unwise', return_cov=True)
cl_err = np.sqrt(np.diag(cov))
plt.plot(ell_theory, 1.e5*(cl_gg_theory + Nell_unwise_g), '--', c='C0')
plt.plot(ell, 1.e5*cl, 'o', ms=3, c='C0')
plt.errorbar(ell, 1.e5*cl, yerr=1.e5*cl_err, fmt='none', c='C0')
plt.xlim([1,ell_max])
plt.xlabel(r'$\ell$')
plt.ylabel(r'$C_\ell \times 10^5$')
plt.subplot(132)
plt.title(r'$g\kappa$')
ell, cl, cov = s_load.get_ell_cl('cl_00', 'gc_unwise', 'ck_so', return_cov=True)
cl_err = np.sqrt(np.diag(cov))
plt.plot(ell_theory, 1.e5*ell_theory*cl_gk_theory , '--', c='C1')
plt.plot(ell, 1.e5*ell*cl, 'o', ms=3, c='C1')
plt.errorbar(ell, 1.e5*ell*cl, yerr=1.e5*ell*cl_err, fmt='none', c='C1')
plt.xlim([1,ell_max])
plt.xlabel(r'$\ell$')
plt.ylabel(r'$\ell C_\ell \times 10^5$')
plt.subplot(133)
plt.title(r'$\kappa\kappa$')
ell, cl, cov = s_load.get_ell_cl('cl_00', 'ck_so', 'ck_so', return_cov=True)
cl_err = np.sqrt(np.diag(cov))
plt.plot(ell_theory, 1.e5*ell_theory*cl_kk_theory , '--', c='C2')
plt.plot(ell, 1.e5*ell*cl, 'o', ms=3, c='C2')
plt.errorbar(ell, 1.e5*ell*cl, yerr=1.e5*ell*cl_err, fmt='none', c='C2')
plt.xlim([1,ell_max])
plt.xlabel(r'$\ell$')
plt.ylabel(r'$\ell C_\ell \times 10^5$')
plt.subplots_adjust(wspace=0.25);
###Output
_____no_output_____ |
_posts/.ipynb_checkpoints/Moving Averages Demo-checkpoint.ipynb | ###Markdown
An explanation of Moving Averages with Graphs
###Code
import numpy as np
import matplotlib.pyplot as plt
condition = lambda x: .25 * x + 2 if x > 10 else x
x_axis = [i for i in range(20)]
y_axis = [condition(i) for i in range(20)]
def calc_moving_average(x):
l = len(x)
arr = []
arr.extend(x[:2])  # keep the first two raw values; a full 3-point window only exists from index 2, and this keeps len(arr) == len(x)
for i in range(2, l):
a = x[i-2:i+1]
value = sum(a) / 3
arr.append(value)
return arr
y_axis=calc_moving_average(y_axis)
plt.plot(x_axis, y_axis)
plt.show()
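# Added aside (not in the original demo): the averaged part of the list is equivalent to a
# trailing 3-point window, e.g. np.convolve(values, np.ones(3) / 3, mode='valid'), while the
# first two entries are kept as raw values because a full window is not yet available there.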
###Output
_____no_output_____ |
array/rotateImg.ipynb | ###Markdown
Rotate the given matrix 90 degrees clockwise, in place. Example: [1, 2, 3, 4]\[5, 6, 7, 8]\[9, 10, 11, 12]\[13, 14, 15, 16] Result: [13, 9, 5, 1]\[14, 10, 6, 2]\[15, 11, 7, 3]\[16, 12, 8, 4]
###Code
from typing import List
matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
def printMatrix(matrix: List[List[int]]) -> None:
for row in matrix:
print(row)
printMatrix(matrix)
class RotateMatrix:
def rotate(self, matrix: List[List[int]]) -> None:
self._length = len(matrix)
for y in range(0,int(self._length/2)):
for x in range(0,int((self._length+1)/2)):
newX1,newY1 = self._newCoord(x,y)
newX2,newY2 = self._newCoord(newX1,newY1)
newX3,newY3 = self._newCoord(newX2,newY2)
tmpPixel = matrix[y][x]
matrix[y][x] = matrix[newY3][newX3]
matrix[newY3][newX3] = matrix[newY2][newX2]
matrix[newY2][newX2] = matrix[newY1][newX1]
matrix[newY1][newX1] = tmpPixel
def _newCoord(self,oldX,oldY):
newX = -oldY + self._length -1
newY = oldX
return newX,newY
rotateMatrix = RotateMatrix()
rotateMatrix.rotate(matrix)
printMatrix(matrix)
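# Added note (not part of the original solution): _newCoord implements the standard clockwise
# mapping (x, y) -> (n - 1 - y, x). A quick non-in-place cross-check on a copy of the original
# matrix would be [list(row) for row in zip(*original_copy[::-1])], where original_copy is
# a hypothetical copy taken before calling rotate().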
###Output
_____no_output_____ |
.ipynb_checkpoints/foo.039_new_done-checkpoint.ipynb | ###Markdown
foo.039 Crop Total Nutrient Consumption. Source: http://www.earthstat.org/data-download/ File type: GeoTIFF
###Code
# Libraries for downloading data from remote server (may be ftp)
import requests
from urllib.request import urlopen
from contextlib import closing
import shutil
# Library for uploading/downloading data to/from S3
import boto3
# Libraries for handling data
import rasterio as rio
import numpy as np
# from netCDF4 import Dataset
# import pandas as pd
# import scipy
# Libraries for various helper functions
# from datetime import datetime
import os
import threading
import sys
from glob import glob
###Output
_____no_output_____
###Markdown
s3
###Code
s3_upload = boto3.client("s3")
s3_download = boto3.resource("s3")
s3_bucket = "wri-public-data"
s3_folder = "resourcewatch/raster/foo_039_Crop_Total_Nutrient_Consumption/"
s3_file1 = "foo_039_Crop_Total_Nutrient_Consumption_Nitrogen.tif"
s3_file2 = "foo_039_Crop_Total_Nutrient_Consumption_Phosphorus.tif"
s3_file3 = "foo_039_Crop_Total_Nutrient_Consumption_Potassium.tif"
s3_key_orig1 = s3_folder + s3_file1
s3_key_edit1 = s3_key_orig1[0:-4] + "_edit.tif"
s3_key_orig2 = s3_folder + s3_file2
s3_key_edit2 = s3_key_orig2[0:-4] + "_edit.tif"
s3_key_orig3 = s3_folder + s3_file3
s3_key_edit3 = s3_key_orig3[0:-4] + "_edit.tif"
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write("\r%s %s / %s (%.2f%%)"%(
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
###Output
_____no_output_____
###Markdown
Define local file locations
###Code
local_folder = "/Users/Max81007/Desktop/Python/Resource_Watch/Raster/foo.039/FertilizerConsumption_Geotiff/"
file_name1 = "NitrogenFertilizer_TotalConsumption.tif"
file_name2 = "PhosphorusFertilizer_TotalConsumption.tif"
file_name3 = "PotassiumFertilizer_TotalConsumption.tif"
local_orig1 = local_folder + file_name1
local_orig2 = local_folder + file_name2
local_orig3 = local_folder + file_name3
orig_extension_length = 4 #4 for each char in .tif
local_edit1 = local_orig1[:-orig_extension_length] + "edit.tif"
local_edit2 = local_orig2[:-orig_extension_length] + "edit.tif"
local_edit3 = local_orig3[:-orig_extension_length] + "edit.tif"
###Output
_____no_output_____
###Markdown
Use rasterio to reproject and compress
###Code
files = [local_orig1, local_orig2, local_orig3]
for file in files:
with rio.open(file, 'r') as src:
profile = src.profile
print(profile)
# Note - this is the core of Vizz's netcdf2tif function
def convert_asc_to_tif(orig_name, edit_name):
with rio.open(orig_name, 'r') as src:
# This assumes data is readable by rasterio
# May need to open instead with netcdf4.Dataset, for example
data = src.read()[0]
rows = data.shape[0]
columns = data.shape[1]
print(rows)
print(columns)
# Latitude bounds
south_lat = -90
north_lat = 90
# Longitude bounds
west_lon = -180
east_lon = 180
transform = rio.transform.from_bounds(west_lon, south_lat, east_lon, north_lat, columns, rows)
# Profile
no_data_val = None
target_projection = 'EPSG:4326'
target_data_type = np.float64
profile = {
'driver':'GTiff',
'height':rows,
'width':columns,
'count':1,
'dtype':target_data_type,
'crs':target_projection,
'transform':transform,
'compress':'lzw',
'nodata': no_data_val
}
with rio.open(edit_name, "w", **profile) as dst:
dst.write(data.astype(profile["dtype"]), 1)
convert_asc_to_tif(local_orig1, local_edit1)
convert_asc_to_tif(local_orig2, local_edit2)
convert_asc_to_tif(local_orig3, local_edit3)
###Output
2160
4320
2160
4320
2160
4320
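###Markdown
As a hedged sanity check (not part of the original workflow), one of the edited GeoTIFFs can be reopened to confirm the CRS, transform, and compression were written as intended.
###Code
# Hedged verification sketch for one edited raster
with rio.open(local_edit1) as src:
    print(src.crs, src.transform)
    print(src.profile.get('compress'), src.nodata, src.shape)
###Output
_____no_output_____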
###Markdown
Upload orig and edit files to s3
###Code
# Original
s3_upload.upload_file(local_orig1, s3_bucket, s3_key_orig1,
Callback=ProgressPercentage(local_orig1))
s3_upload.upload_file(local_orig2, s3_bucket, s3_key_orig2,
Callback=ProgressPercentage(local_orig2))
s3_upload.upload_file(local_orig3, s3_bucket, s3_key_orig3,
Callback=ProgressPercentage(local_orig3))
# Edit
s3_upload.upload_file(local_edit1, s3_bucket, s3_key_edit1,
Callback=ProgressPercentage(local_edit1))
s3_upload.upload_file(local_edit2, s3_bucket, s3_key_edit2,
Callback=ProgressPercentage(local_edit2))
s3_upload.upload_file(local_edit3, s3_bucket, s3_key_edit3,
Callback=ProgressPercentage(local_edit3))
###Output
/Users/Max81007/Desktop/Python/Resource_Watch/Raster/foo.039/FertilizerConsumption_Geotiff/PotassiumFertilizer_TotalConsumptionedit.tif 11188656 / 11188656.0 (100.00%)
###Markdown
Define local file locations for merged files
###Code
local_tmp_folder = "/Users/Max81007/Desktop/Python/Resource_Watch/Raster/foo.039/FertilizerConsumption_Geotiff/"
tmp1 = local_tmp_folder + "NitrogenFertilizer_TotalConsumptionedit.tif"
tmp2 = local_tmp_folder + "PhosphorusFertilizer_TotalConsumptionedit.tif"
tmp3 = local_tmp_folder + "PotassiumFertilizer_TotalConsumptionedit.tif"
merge_files = [tmp1, tmp2, tmp3]
tmp_merge = local_tmp_folder + "Foo_039_Fertilizer_TotalConsumption_Merge.tif"
# S3 storage
s3_bucket = "wri-public-data"
s3_folder = "resourcewatch/raster/foo_039_Crop_Total_Nutrient_Consumption/"
s3_file1 = s3_folder + "NitrogenFertilizer_TotalConsumption.tif"
s3_file2 = s3_folder + "PhosphorusFertilizer_TotalConsumption.tif"
s3_file3 = s3_folder + "PotassiumFertilizer_TotalConsumption.tif"
# Make sure these match the order of the merge_files above
s3_files_to_merge = [s3_file1, s3_file2, s3_file3]
band_ids = ["Nitrogen","Phosphorus","Potassium" ]
s3_key_merge = s3_folder + "Foo_039_Fertilizer_TotalConsumption_Merge.tif"
# S3 services
s3_download = boto3.resource("s3")
s3_upload = boto3.client("s3")
# Helper function to view upload progress
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
with rio.open(merge_files[0]) as src:
kwargs = src.profile
kwargs.update(
count=len(merge_files)
)
with rio.open(tmp_merge, 'w', **kwargs) as dst:
for idx, file in enumerate(merge_files):
print(idx)
with rio.open(file) as src:
band = idx+1
windows = src.block_windows()
for win_id, window in windows:
src_data = src.read(1, window=window)
dst.write_band(band, src_data, window=window)
with rio.open(tmp_merge) as src:
num_bands = src.profile["count"] + 1
data = {}
for i in range(1, num_bands):
data[i] = src.read(i)
files = [tmp_merge]
for file in files:
with rio.open(file, 'r') as src:
profile = src.profile
print(profile)
s3_upload.upload_file(tmp_merge, s3_bucket, s3_key_merge,
Callback=ProgressPercentage(tmp_merge))
os.environ["Zs3_key"] = "s3://wri-public-data/" + s3_key_merge
os.environ["Zs3_key_inspect"] = "wri-public-data/" + s3_key_merge
os.environ["Zgs_key"] = "gs://resource-watch-public/" + s3_key_merge
!echo %Zs3_key_inspect%
!aws s3 ls %Zs3_key_inspect%
!gsutil cp %Zs3_key% %Zgs_key%
os.environ["asset_id"] = "users/resourcewatch/foo_039_crop_total_nutrient_consumption"
!earthengine upload image --asset_id=%asset_id% %Zgs_key%
os.environ["band_names"] = str(band_ids)
!earthengine asset set -p band_names="%band_names%" %asset_id%
###Output
_____no_output_____ |
house-prices/01-house-prices-only-numerical-features.ipynb | ###Markdown
*House Prices: Advanced Regression Techniques* 01 Only numerical features---**Without peeking at solutions 🙈🌁**
This is my first Kaggle competition. How exciting 🔥. I'm feeling pretty uncomfortable attempting this without looking at anyone else's solution or guidance. But this is going to be good practice.
My goal here is to follow a basic process of getting the data, inspecting it, preparing it, and training and evaluating a model. I'm aware I won't necessarily reach the best accuracy. But my goal here is to practice thinking for myself, and only looking at API docs for Python, the libraries, etc. — not at any guides.
Rough process
1. Get the data
2. Inspect the data
3. Prep the data if necessary
4. Choose the features to use in a model
5. Train a model
0. Prep 🔪🧅---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
1. Get the data---
###Code
train_X = pd.read_csv("train.csv", index_col="Id")
test_X = pd.read_csv("test.csv", index_col="Id")
###Output
_____no_output_____
###Markdown
2. Inspect the data---
###Code
train_X
###Output
_____no_output_____
###Markdown
Uncomment the line below to display all columns. Don't truncate the columns; let me see the whole thing:
###Code
# pd.set_option("display.max_columns", None)
###Output
_____no_output_____
###Markdown
3. Prep the features to use in a model---Let's focus exclusively on numerical features and see what accuracy we can get. Select numerical features and drop examples with missing values
###Code
train_X = train_X.select_dtypes(include=np.number).dropna()
###Output
_____no_output_____
###Markdown
Move `SalePrice` to the target variable. The last column of the training data seems to include our target labels.
###Code
train_X, train_y = train_X.iloc[:, :-1], train_X.iloc[:, -1]
###Output
_____no_output_____
###Markdown
4. Train a model---
###Code
from sklearn.ensemble import RandomForestRegressor
clf = RandomForestRegressor(random_state=0)
clf.fit(train_X, train_y)
###Output
_____no_output_____
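###Markdown
Before predicting on the test set, a quick sanity check (hedged, and not required for the submission) is to cross-validate the regressor on the training data; the 5 folds and MAE metric below are arbitrary choices.
###Code
# Hedged sketch: 5-fold cross-validated MAE for the same model class
from sklearn.model_selection import cross_val_score
cv_mae = -cross_val_score(RandomForestRegressor(random_state=0), train_X, train_y,
                          cv=5, scoring="neg_mean_absolute_error")
print("CV MAE: {:.0f} +/- {:.0f}".format(cv_mae.mean(), cv_mae.std()))
###Output
_____no_output_____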
###Markdown
5. Get predictions from the model--- First, select the numerical features out of the test set, and fill missing values with 0.
###Code
test_X = test_X.select_dtypes(include=np.number).fillna(0)
predictions = clf.predict(test_X)
print(predictions, predictions.dtype, len(predictions), sep=" ——— ")
###Output
[123689.5 155443.25 184799.8 ... 155892.62 125826.83 248012.7 ] ——— float64 ——— 1459
###Markdown
Save predictions as a CSV submission file. We'll need a `DataFrame` object with labeled indices, and right now we have a NumPy array.
###Code
submission = pd.DataFrame(predictions, index=test_X.index, columns=["SalePrice"])
submission
submission.to_csv("submission_01.csv")
###Output
_____no_output_____ |
MachineLearning/SRW_DataGeneration_ML_with_rotations.ipynb | ###Markdown
Setup alignment and rotation errors
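The mirror misalignment below is modelled by sampling horizontal and vertical mirror offsets plus three small rotation angles, building the composite rotation $R_{\mathrm{tot}} = R_x(\theta_x)\, R_y(\theta_y)\, R_z(\theta_z)$ from the standard axis rotation matrices, and applying it to the nominal normal vector $(v_x, v_y, v_z)$ before each SRW run.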
###Code
# Imports assumed by this cell (they are not present in the captured notebook). The beamline
# objects used below (varParam, set_optics, read_srw_file, srwl_bl, srwlib and the *_idx
# constants) are assumed to be defined by the SRW beamline script this notebook builds on.
import time
import numpy as np
import matplotlib.pyplot as plt
n_runs = 10
zx_min = -1e-4
zx_max = 1e-4
theta_min = 0
theta_max = 0.1
offz1_vals = np.random.uniform(zx_min, zx_max, n_runs)
offz2_vals = np.random.uniform(zx_min, zx_max, n_runs)
thetaxs = np.random.uniform(theta_min, theta_max, n_runs)
thetays = np.random.uniform(theta_min, theta_max, n_runs)
thetazs = np.random.uniform(theta_min, theta_max, n_runs)
vx = varParam[nvx_idx][2]
vy = varParam[nvy_idx][2]
vz = varParam[nvz_idx][2]
start = time.time()
beam_arrays = []
for i in range(n_runs):
if i % 10 == 0:
print('run number ' + str(i))
### apply mirror offsets
horizontalOffset = offz1_vals[i]
verticalOffset = offz2_vals[i]
varParam[hOffsetIdx][2] = horizontalOffset
varParam[vOffsetIdx][2] = verticalOffset
### apply rotation offset
thetax = thetaxs[i]
thetay = thetays[i]
thetaz = thetazs[i]
Rx = np.array([[1, 0, 0], [0, np.cos(thetax), -np.sin(thetax)], [0, np.sin(thetax), np.cos(thetax)]])
Ry = np.array([[np.cos(thetay), 0, np.sin(thetay)], [0, 1, 0], [-np.sin(thetay), 0, np.cos(thetay)]])
Rz = np.array([[np.cos(thetaz), -np.sin(thetaz), 0], [np.sin(thetaz), np.cos(thetaz), 0], [0, 0, 1]])
Rxy = np.dot(Rx,Ry)
R_tot = np.dot(Rxy,Rz)
#print(R_tot.shape)
v = np.array([vx, vy, vz]).reshape(3,1)
#print(v.shape)
rtot_v = np.dot(R_tot, v)
#print(rtot_v.shape)
    varParam[nvx_idx][2] = float(rtot_v[0])  # unwrap the (1,) arrays into plain scalars
    varParam[nvy_idx][2] = float(rtot_v[1])
    varParam[nvz_idx][2] = float(rtot_v[2])
### save file
save_dat = 'dat_files/res_int_se_' + str(i) + '.dat'
varParam[file_idx][2] = save_dat
v = srwl_bl.srwl_uti_parse_options(varParam, use_sys_argv=True)
op = set_optics(v)
v.si = True
v.si_pl = ''
v.ws = True
v.ws_pl = ''
mag = None
if v.rs_type == 'm':
mag = srwlib.SRWLMagFldC()
mag.arXc.append(0)
mag.arYc.append(0)
mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
mag.arZc.append(v.mp_zc)
srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
beam = read_srw_file(save_dat)
h = beam.shape[0]
w = beam.shape[1]
beam_arrays.append(beam.reshape(h, w, 1))
end = time.time()
print('time to run ' + str(n_runs) + ' simulations: ' + str(np.round((end - start)/60, 4)) + ' minutes')  # end - start, otherwise the elapsed time is negative
all_errors = np.concatenate([offz1_vals.reshape(n_runs, 1), offz2_vals.reshape(n_runs, 1), thetaxs.reshape(n_runs, 1), thetays.reshape(n_runs, 1), thetazs.reshape(n_runs,1)], axis=1)
np.save('errors_' + str(n_runs) + 'runs.npy', all_errors)
beams_all = np.concatenate(beam_arrays, axis=2)
print(beams_all.shape)
np.save('beam_intensities_' + str(n_runs) + 'runs.npy', beams_all)
for i in range(10):
plt.imshow(beams_all[:,:,i])
plt.show()
###Output
_____no_output_____ |
Week 5/Assignment 3.ipynb | ###Markdown
Assignment 3 Question 1 (5 Marks) Make a Tic Tac Toe game played by two human players . The board can have a structure similar to what's given below.Features to be present:1. Modular code, Use OOP paradigms.2. The program should exit after **n** games3. Maintain a scoreboard that is displayed after completion of every game.Draw gives each player a score of 1. A winner gets a score of 3Bonus Features (You will not be graded on these)1. Enable the users to select their customs nicks.2. Use simplegui to draw the board out.3. Make player two a computer player ( Make sure he never loses :P )----------------------------------------------------------------------------------------------------------------------------------------
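A minimal OOP skeleton one could start from is sketched below; every name in it is a placeholder, not a required design.
```python
# Rough starting skeleton for the game (placeholder names)
class Board:
    WIN_LINES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
                 (0, 3, 6), (1, 4, 7), (2, 5, 8),
                 (0, 4, 8), (2, 4, 6)]

    def __init__(self):
        self.cells = [" "] * 9

    def place(self, index, mark):
        if self.cells[index] != " ":
            raise ValueError("cell already taken")
        self.cells[index] = mark

    def winner(self):
        for a, b, c in self.WIN_LINES:
            if self.cells[a] != " " and self.cells[a] == self.cells[b] == self.cells[c]:
                return self.cells[a]
        return None

    def is_full(self):
        return " " not in self.cells


class ScoreBoard:
    def __init__(self, players):
        self.scores = {p: 0 for p in players}

    def record(self, winner, players):
        if winner is None:              # draw: each player gets 1 point
            for p in players:
                self.scores[p] += 1
        else:                           # winner gets 3 points
            self.scores[winner] += 3
```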
###Code
'''
----------- ---------- -----------
| | | O |
----------- ---------- -----------
| X | | |
----------- ---------- -----------
| | | |
----------- ---------- -----------
'''
###Output
_____no_output_____ |
python/project-s21-no-one/Project.ipynb | ###Markdown
Project Temirlan Karataev (tkaratae) - 031957685, Berkay Belly (bbelli) - 030537383 Path 1: Bike traffic Analysis:1 - You want to install sensors on the bridges to estimate overall traffic across all the bridges. But you only have enough budget toinstall sensors on three of the four bridges. Which bridges should you install the sensors on to get the best prediction of overalltraffic?Based on the data that was given, we calculated the percentage of traffic that goes through each bridge every single day, then we took the average of total sum of each bridge's percentage. In the end, our data indicated that the lowest percentage of traffic was flowing through Brooklyn Bridge, therefore, we recommend installing sensors to Manhattan, Williamsburg, Queensboro Bridges.2- The city administration is cracking down on helmet laws, and wants to deploy police officers on days with high traffic to handout citations. Can they use the next day's weather forecast to predict the number of bicyclists that day?Approaching this question with logic, and without any data analysis, our answer would be yes, the next day's weather could help us predict the number of bicyclists that day. In order to understand whether our logical approach correlates with our data analysis, we used linear regression with 5-degree polynomials based on the independent variables that we had mentioned in our introduction. We will also use linear regression to find the MSE and R squared values at best-fit lambda based on our predicted model.3- Can you use this data to predict whether it is raining based on the number of bicyclists on the bridges?Again, logically, the number of bicyclists is dependent on an enormous scale of factors, due to this it would be very hard to find a concrete relationship between whether it is raining or not and the number of bikers out that day. Regardless, we will still do our data analysis using two regression models. Our first method will be linear regression with a similar approach to what we had made in question 2. Our second model will be random forest regression with correlation of the MSE and R values. Our data in Path 1, NYC_Bicycle_Counts_2016_Corrected.csv, gives information on bike traffic across a number of bridges in New York City. The data consists of several dependent and independent variables such as the temperature, the precipitation, the data and of course the traffic amongst each bridge and the total traffic. Initially, we cleared out the data from any variables that might cause unexpected behaviors when we would convert the raw data to certain data types such as float or integers. Part 1
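The write-up above mentions linear regression with degree-5 polynomials; the Part 2 code below fits a plain linear model, so here is a sketch of how the polynomial variant could be set up (it assumes the same `X_train`/`Y_train`/`X_test`/`Y_test` split that Part 2 creates):
```python
# Sketch only: degree-5 polynomial expansion of the inputs followed by ordinary least squares
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

poly_model = make_pipeline(PolynomialFeatures(degree=5), LinearRegression())
poly_model.fit(X_train, Y_train)
print(poly_model.score(X_test, Y_test))  # R^2 on the held-out split
```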
###Code
import numpy as np
import csv
import pandas as pd
import re
def sensors():
# col_list = ["Date", "Day", "HighTemp", "LowTemp", "Precipitation", "Brooklyn", "Manhattan", "Williamsburg", "Queensboro", "Total"]
data = pd.read_csv("NYC_Bicycle_Counts_2016_Corrected.csv", thousands=',')
data.columns = data.columns.str.replace(' ', '')
# total = pd.read_csv("NYC_Bicycle_Counts_2016_Corrected.csv", converters={"Total":int})
total = data.Total.to_list()
brooklyn = data.BrooklynBridge.to_list()
manhattan = data.ManhattanBridge.to_list()
williamsburg = data.WilliamsburgBridge.to_list()
queensboro = data.QueensboroBridge.to_list()
total = [float(i) for i in total]
brooklyn = [float(i) for i in brooklyn]
manhattan = [float(i) for i in manhattan]
williamsburg = [float(i) for i in williamsburg]
queensboro = [float(i) for i in queensboro]
for i in range(len(total)):
brooklyn[i] = brooklyn[i] / total[i]
manhattan[i] = manhattan[i] / total[i]
williamsburg[i] = williamsburg[i] / total[i]
queensboro[i] = queensboro[i] / total[i]
avg_brooklyn = sum(brooklyn) / len(brooklyn)
avg_manhattan = sum(manhattan) / len(manhattan)
avg_williamsburg = sum(williamsburg) / len(williamsburg)
avg_queensboro = sum(queensboro) / len(queensboro)
t_t = sum(total)
t_b = sum(brooklyn) / sum(total)
t_m = sum(manhattan) / sum(total)
t_w = sum(williamsburg) / sum(total)
t_q = sum(queensboro) / sum(total)
print("Results:")
print("Brooklyn Bridge:",avg_brooklyn)
print("Manhattan Bridge:",avg_manhattan)
print("Queensboro Bridge:",avg_queensboro)
print("Williamsburg Bridge:",avg_williamsburg)
# total = data.Total.to_list()
# print(total)
if __name__ == '__main__':
sensors()
###Output
Results:
Brooklyn Bridge: 0.16131907987022118
Manhattan Bridge: 0.2701885878314604
Queensboro Bridge: 0.2350827915130222
Williamsburg Bridge: 0.33340954078529605
###Markdown
So, based on the average shares computed by the code above, the Brooklyn Bridge carries the smallest fraction of the traffic, so we decided to exclude it and install the sensors on the Manhattan, Williamsburg, and Queensboro Bridges. Part 2
###Code
import pandas as pd
import numpy as np
df = pd.read_csv("NYC_Bicycle_Counts_2016_Corrected.csv", thousands=',')
#print(df.head())
df.drop(['Date'], axis=1, inplace=True)
df.drop(['Day'], axis=1, inplace=True)
df.drop(['High Temp (°F)'], axis=1, inplace=True)
df.drop(['Low Temp (°F)'], axis=1, inplace=True)
df.drop(['Brooklyn Bridge'], axis=1, inplace=True)
df.drop(['Manhattan Bridge'], axis=1, inplace=True)
df.drop(['Williamsburg Bridge'], axis=1, inplace=True)
df.drop(['Queensboro Bridge'], axis=1, inplace=True)
df.loc[df.Precipitation == 'T', 'Precipitation'] = 0
df.loc[df.Precipitation == '0.47 (S)', 'Precipitation'] = 0.47
Y = df['Total'].values
Y = Y.astype('int')
X = df.drop(labels = ['Total'], axis = 1)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state = 0)
from sklearn.linear_model import LinearRegression
linear_regress = LinearRegression(fit_intercept=True)
linear_regress.fit(X_train,Y_train)
prediction = linear_regress.predict(X_test)
from sklearn.metrics import mean_squared_error, r2_score
print('Mean squared error: %.2f' % mean_squared_error(Y_test, prediction))
print('Coefficient of determination: %.2f' % r2_score(Y_test, prediction))
###Output
Mean squared error: 21655031.51
Coefficient of determination: 0.03
###Markdown
Based on the MSE and R-squared values (R-squared is only about 0.03), we can conclude that the administration cannot reliably predict the number of bicyclists from the precipitation forecast alone; the relationship is weak and the data contain many outliers. Part 3
###Code
Y = df['Precipitation'].values
#Y = Y.astype('int')
X = df.drop(labels = ['Precipitation'], axis = 1)
X = X.astype('float')
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state = 0)
from sklearn.preprocessing import StandardScaler
standart = StandardScaler()
X_train = standart.fit_transform(X_train)
X_test = standart.transform(X_test)
from sklearn.ensemble import RandomForestRegressor
regr = RandomForestRegressor(n_estimators=20, random_state=0)
regr.fit(X_train, Y_train)
pred = regr.predict(X_test)
print('Mean squared error: %.2f'% mean_squared_error(Y_test, pred))
print('Coefficient of determination: %.2f'% r2_score(Y_test, pred))
###Output
Mean squared error: 0.06
Coefficient of determination: -1.94
|
2-Automating Tasks/Image Manipulation/Image_Manipulation.ipynb | ###Markdown
Image Manipulation Using PillowDate: June.10.2020 Let's start by importing the modules needed for image manipulation.
###Code
from PIL import Image, ImageFilter
import os
###Output
_____no_output_____
###Markdown
Creating Image ObjectsLet's begin by opening an image.
###Code
image1 = Image.open('bear1.jpg') # Create image obj
image1
###Output
_____no_output_____
###Markdown
IterationSoooo cute! Let's iterate to collect all bear images and convert them to png.
###Code
for f in os.listdir('.'):
if f.endswith('.jpg'):
i = Image.open(f)
fn, fext = os.path.splitext(f)
print(fn)
i.save('pngs/{}.png'.format(fn)) # save as png in pngs folder
###Output
bear1_mod
bear4
bear3
bear2
bear1
###Markdown
Resizing filesLet's resize these cute bears so there's more of them to love.
###Code
size_300 = (300,300) # size must be a tuple
size_700 = (700,700)
for f in os.listdir('.'):
if f.endswith('.jpg'):
i = Image.open(f)
fn, fext = os.path.splitext(f)
i.thumbnail(size_700)
        i.save('700/{}_700{}'.format(fn, fext)) # save the 700x700 thumbnail in the 700 folder
        i.thumbnail(size_300)
        i.save('300/{}_300{}'.format(fn, fext)) # save the 300x300 thumbnail in the 300 folder
###Output
_____no_output_____
###Markdown
More Manipulation RotationFirst, let's rotate a bear. To rotate images, use `.rotate`
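Note that `rotate` keeps the original canvas size by default, so corners can get clipped at angles that aren't multiples of 90; passing `expand=True` grows the canvas to fit the rotated image:
```python
# expand=True resizes the output canvas so the whole rotated bear fits
rotated = Image.open('bear1.jpg').rotate(45, expand=True)
```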
###Code
image1 = Image.open('bear1.jpg')
image1 = image1.rotate(90)
image1.save('bear1_mod.jpg') # save the sideways bear
image1
###Output
_____no_output_____
###Markdown
Black & WhiteGorgeous. Now, let's make the bear black and white. To convert from RGB to monochrome, use `.convert`
###Code
image1 = Image.open('bear1_mod.jpg')
image1 = image1.convert('L')
image1.save('bear1_mod.jpg')
image1
###Output
_____no_output_____
###Markdown
Image BlurGrizzly is looking a little spooky, so let's blur him. To blur images, use `.ImageFilter`
###Code
image1 = Image.open('bear1_mod.jpg')
image1 = image1.filter(ImageFilter.GaussianBlur(2)) # blur values are radius set to 2
image1.save('bear1_mod.jpg')
image1
###Output
_____no_output_____ |
DL_PyTorch/.ipynb_checkpoints/Part 8 - Transfer Learning Solution-checkpoint.ipynb | ###Markdown
Transfer LearningIn this notebook, you'll learn how to use pre-trained networks to solved challenging problems in computer vision. Specifically, you'll use networks trained on [ImageNet](http://www.image-net.org/) [available from torchvision](http://pytorch.org/docs/0.3.0/torchvision/models.html). ImageNet is a massive dataset with over 1 million labeled images in 1000 categories. It's used to train deep neural networks using an architecture called convolutional layers. I'm not going to get into the details of convolutional networks here, but if you want to learn more about them, please [watch this](https://www.youtube.com/watch?v=2-Ol7ZB0MmU).Once trained, these models work astonishingly well as feature detectors for images they weren't trained on. Using a pre-trained network on images not in the training set is called transfer learning. Here we'll use transfer learning to train a network that can classify our cat and dog photos with near perfect accuracy.With `torchvision.models` you can download these pre-trained networks and use them in your applications. We'll include `models` in our imports now.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
###Output
_____no_output_____
###Markdown
Most of the pretrained models require the input to be 224x224 images. Also, we'll need to match the normalization used when the models were trained. Each color channel was normalized separately, the means are `[0.485, 0.456, 0.406]` and the standard deviations are `[0.229, 0.224, 0.225]`.
###Code
data_dir = 'Cat_Dog_data'
# TODO: Define transforms for the training data and testing data
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32)
###Output
_____no_output_____
###Markdown
We can load in a model such as [DenseNet](http://pytorch.org/docs/0.3.0/torchvision/models.htmlid5). Let's print out the model architecture so we can see what's going on.
###Code
model = models.densenet121(pretrained=True)
model
###Output
/opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/densenet.py:212: UserWarning: nn.init.kaiming_normal is now deprecated in favor of nn.init.kaiming_normal_.
###Markdown
This model is built out of two main parts, the features and the classifier. The features part is a stack of convolutional layers and overall works as a feature detector that can be fed into a classifier. The classifier part is a single fully-connected layer `(classifier): Linear(in_features=1024, out_features=1000)`. This layer was trained on the ImageNet dataset, so it won't work for our specific problem. That means we need to replace the classifier, but the features will work perfectly on their own. In general, I think about pre-trained networks as amazingly good feature detectors that can be used as the input for simple feed-forward classifiers.
###Code
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
from collections import OrderedDict
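# 1024 matches densenet121's feature output size noted above; 2 outputs for the cat/dog classes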
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(1024, 500)),
('relu', nn.ReLU()),
('fc2', nn.Linear(500, 2)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
###Output
_____no_output_____
###Markdown
With our model built, we need to train the classifier. However, now we're using a **really deep** neural network. If you try to train this on a CPU like normal, it will take a long, long time. Instead, we're going to use the GPU to do the calculations. The linear algebra computations are done in parallel on the GPU leading to 100x increased training speeds. It's also possible to train on multiple GPUs, further decreasing training time.PyTorch, along with pretty much every other deep learning framework, uses [CUDA](https://developer.nvidia.com/cuda-zone) to efficiently compute the forward and backwards passes on the GPU. In PyTorch, you move your model parameters and other tensors to the GPU memory using `model.to('cuda')`. You can move them back from the GPU with `model.to('cpu')` which you'll commonly do when you need to operate on the network output outside of PyTorch. As a demonstration of the increased speed, I'll compare how long it takes to perform a forward and backward pass with and without a GPU.
###Code
import time
for device in ['cpu', 'cuda']:
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
model.to(device)
for ii, (inputs, labels) in enumerate(trainloader):
# Move input and label tensors to the GPU
inputs, labels = inputs.to(device), labels.to(device)
start = time.time()
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if ii==3:
break
print(f"CUDA = {cuda}; Time per batch: {(time.time() - start)/3:.3f} seconds")
###Output
_____no_output_____
###Markdown
You can write device agnostic code which will automatically use CUDA if it's enabled like so:```python at beginning of the scriptdevice = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")... then whenever you get a new Tensor or Module this won't copy if they are already on the desired deviceinput = data.to(device)model = MyModule(...).to(device)```From here, I'll let you finish training the model. The process is the same as before except now your model is much more powerful. You should get better than 95% accuracy easily.>**Exercise:** Train a pretrained models to classify the cat and dog images. Continue with the DenseNet model, or try ResNet, it's also a good model to try out first. Make sure you are only training the classifier and the parameters for the features part are frozen.
###Code
# TODO: Train a model with a pre-trained network
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
epochs = 3
print_every = 40
steps = 0
# change to cuda
model.to('cuda')
for e in range(epochs):
running_loss = 0
for ii, (inputs, labels) in enumerate(trainloader):
steps += 1
inputs, labels = inputs.to('cuda'), labels.to('cuda')
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
running_loss = 0
model.eval()  # switch batchnorm/dropout layers to evaluation mode
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        # move the batch to the same device as the model
        images, labels = images.to('cuda'), labels.to('cuda')
        outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
# Putting the above into functions, so they can be used later
def do_deep_learning(model, trainloader, epochs, print_every, criterion, optimizer, device='cpu'):
epochs = epochs
print_every = print_every
steps = 0
    # move the model to the requested device
    model.to(device)
for e in range(epochs):
running_loss = 0
for ii, (inputs, labels) in enumerate(trainloader):
steps += 1
            inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
running_loss = 0
def check_accuracy_on_test(testloader):
    model.eval()  # switch batchnorm/dropout layers to evaluation mode
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            # move the batch to the same device as the model
            images, labels = images.to('cuda'), labels.to('cuda')
            outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
do_deep_learning(model, trainloader, 3, 40, criterion, optimizer, 'cuda')
check_accuracy_on_test(testloader)
###Output
_____no_output_____ |
notebooks/nlp/text_cleaning-spanish.ipynb | ###Markdown
Text Cleaning: Spanish References:- https://towardsdatascience.com/cleaning-text-data-with-python-b69b47b97b76- https://www.geeksforgeeks.org/python-efficient-text-data-cleaning/
###Code
import nltk
from nltk.corpus import stopwords
import re
import itertools
import string
###Output
_____no_output_____
###Markdown
data
###Code
text = """
Últimamente, puede que hayas oído hablar del famoso científico de datos o data scientist por su nombre en inglés, una ocupación que está teniendo gran éxito y que se considera un trabajo de ensueño. ¿Por qué se solicita? ¿Y por qué las empresas están dispuestas a pagar sumas vertiginosas por este tipo de trabajo? Trataré de responder a esta y otras preguntas en este artículo.
El papel del científico de datos es ahora una carrera muy exitosa, ya que tiene un poder enorme y constante en casi todos los mercados principalmente por dos razones:
Un número cada vez mayor de empresas de nueva creación se dedican a la inteligencia artificial y al aprendizaje automático (machine learning)
La correcta gestión y análisis de los datos de la empresa y del mercado garantiza una ventaja competitiva clave.
¿Ha pensado también en comenzar una carrera como data scientist? Estas son algunas de las razones que te convencerán de actuar.
AndroidPIT 16 9 shutterstock 680929729
La recopilación y el análisis de datos es una de las primeras cosas que hay que hacer / / © NextPit
Reducir la tasa de mortalidad empresarial
De hecho, gran parte de la mortalidad empresarial se debe a que los que entran en el mercado no tienen ni idea de lo que están haciendo. Piensa que, en promedio, por cada empresa que nace, mueren tres. Esto se debe a una serie de razones, entre las que se incluye el hecho de que nadie recopila, cataloga, analiza o interpreta los datos de mercado. Pero no sólo eso: además de no hacerlo antes de entrar en el mercado, las empresas ignoran estas operaciones fundamentales incluso en el curso de la actividad empresarial.
Las compañías tienen la oportunidad de recopilar datos desde cualquier lugar, principalmente de sus clientes, una fuente inagotable de información en constante cambio. Sin embargo, según un informe de seguridad de datos de 2018 de Gemalto, hasta un 65% de las compañías encuestadas dijeron que no podían analizar o categorizar los datos. Peor aún, el 89% conoce los beneficios potenciales del análisis de datos, pero no tiene idea de cómo hacerlo.
Fuga de datos masiva: 2.200 millones de nombres de usuarios y contraseñas en la red
data scientist analyst
El 65% de las empresas no tienen idea de cómo utilizar los datos. / © Gemalto
Sólo esta primera razón debería ser lo suficientemente ética como para convencerte de que te conviertas en un científico de datos.
Aprovecha las nuevas normativas de gestión de datos
Si vives en la Unión Europea, es posible que hayas oído hablar del nuevo reglamento de protección de datos que entró en vigor en mayo de 2018, el llamado GDPR. Este reglamento establece que las empresas que operan en Europa (incluidas las de fuera de Europa) están obligadas a gestionar los datos de los usuarios de forma diferente, informándoles de qué datos se están registrando y cómo. Además, la UE obliga a las empresas a eliminar dichos datos cuando los usuarios los soliciten.
La UE aprueba la controvertida Directiva de derechos de autor
GDPR 2018
Los científicos de datos son útiles a la hora de implementar nuevas regulaciones de uso de datos. / © Vector Plus Image/Shutterstock
Como he dicho antes, esta legislación no se aplica, por ejemplo, a las empresas que operan en los Estados Unidos, pero yo esperaría a cantar victoria, porque California emitirá un reglamento similar para 2020, el llamado ACFA. Como resultado, estas regulaciones aumentan la dependencia de las empresas de los expertos en datos debido a la necesidad de análisis en tiempo real y almacenamiento responsable de datos. Además, en la sociedad actual, es comprensible que la gente sea más cautelosa a la hora de renunciar a los datos que en el pasado, por lo que se necesita la ayuda de personas con más experiencia.
Una carrera en continua evolución
Las carreras sin potencial de crecimiento permanecen estancadas, apenas evolucionan, hasta que se vuelven irrelevantes para la sociedad. Por el contrario, este trabajo ofrece amplias oportunidades de evolución ya en la próxima década. Dado que la solicitud de empleo no muestra signos de ralentización, ésta es sin duda otra buena noticia para aquellos que desean entrar en el campo de la ciencia de la información.
Con el 5G nuestra privacidad puede estar en riesgo (más de lo que ya lo está)
AI robot 08
El trabajo del científico de datos evoluciona de año en año. / © metamorworks/Shutterstock
Un cambio que probablemente surgirá pronto es que el título de científico de datos será más específico. De hecho, en la actualidad es posible encontrar científicos de datos en diferentes sociedades, pero no es necesariamente el caso que hagan lo mismo. Por lo tanto, hay que especializarse, ya que sus cualificaciones y sus carreras serán cada vez más específicas.
Habilidades requeridas en todas partes
Según los últimos datos de Almalaurea, el 94% de los licenciados en informática han obtenido un empleo con un salario neto de 1.489 euros. Este es ciertamente otro factor que indica que la carrera de científico de datos es capaz de que consigas un trabajo antes que estudiando otras áreas. Más específicamente, su demanda ha aumentado significativamente en un 256% desde 2013, por lo que es obvio cómo las empresas tienden a reconocer el valor de los científicos de datos y necesariamente lo necesitan.
deal 04
La oferta de trabajo como científico de datos registró un aumento del 256% / © Shutterstock
El aumento del número de datos diarios
La gente genera datos todos los días y lo más probable es que lo haga sin pensarlo ni por un segundo. Analizando los datos actuales y futuros, hay que tener claro que 5.000 millones de consumidores interactúan con los datos diariamente, una cifra destinada a alcanzar los 6.000 millones para 2025, tres cuartas partes de la población mundial. Además, la cantidad de datos en el mundo en 2018 ascendía a 33 zettabytes, pero las proyecciones muestran un aumento a 133 zettabytes para 2025.
¡Pillados! Facebook comparte datos con Microsoft, Amazon, Spotify y Co.
Inside IBM Cloud Dallas
Se espera que el número de datos en circulación aumente exponencialmente en unos pocos años. / © IBM
En resumen, la producción de datos está en aumento y los científicos de datos estarán a la vanguardia para ayudar a las empresas a utilizarlos eficazmente.
Salario más alto y alta probabilidad de ascenso profesional
LinkedIn nombró recientemente al científico de datos como la carrera más prometedora de 2019. Una de las razones por las que obtuvo el primer lugar es que el salario promedio es de $1,300,000 al año. El estudio de LinkedIn también examinó la probabilidad de que la gente pudiera obtener ascensos y la puntuación fue de nueve de cada diez. Por supuesto, como en cualquier industria, el científico de datos también debe mostrar iniciativa y aprovechar las oportunidades para sobresalir, pero las conclusiones de LinkedIn sugieren que las empresas tienen la intención de mantener al científico de datos a largo plazo.
deal 10
El científico de datos es uno de los trabajos más rentables del momento. / © NextPit
Por otra parte, si las empresas no vieran a los científicos de datos como recursos aplicables a su competitividad y prosperidad futuras, probablemente nunca les ofrecerían promoción."""
###Output
_____no_output_____
###Markdown
CLEANING Removing URLs, Hashtags and Styles
###Code
# remove hyperlinks
text = re.sub(r'https?:\/\/.\S+', "", text)
# remove hashtags
# only removing the hash # sign from the word
text = re.sub(r'#', '', text)
# remove old style retweet text "RT"
text = re.sub(r'^RT[\s]+', '', text)
###Output
_____no_output_____
###Markdown
Split attached words
###Code
#separate the words
text = " ".join([s for s in re.split("([A-Z][a-z]+[^A-Z]*)",text) if s])
###Output
_____no_output_____
###Markdown
Convert to lower case
###Code
#convert to lower case
text=text.lower()
###Output
_____no_output_____
###Markdown
Standardizing
###Code
#One letter in a word should not be present more than twice in continuation
text = ''.join(''.join(s)[:2] for _, s in itertools.groupby(text))
###Output
_____no_output_____
###Markdown
Remove Stopwords
###Code
#download the stopwords from nltk using
nltk.download('stopwords')
#import spanish stopwords list from nltk
stopwords_es = stopwords.words('spanish')
text_tokens=text.split()
text_list=[]
#remove stopwords
for word in text_tokens:
    if word not in stopwords_es:
text_list.append(word)
extra_stopwords = ['©','¿']   # extra symbols to strip; renamed so it doesn't shadow nltk.corpus.stopwords
extra_stopwords += []
def removeStopwords(wordlist, stopwords):
    return [w for w in wordlist if w not in stopwords]
text_list = removeStopwords(text_list, extra_stopwords)
###Output
_____no_output_____
###Markdown
Remove Punctuations
###Code
#for string operations
clean_text=[]
#remove punctuations
for word in text_list:
if word not in string.punctuation:
clean_text.append(word)
###Output
_____no_output_____
###Markdown
COUNTING WORDS> Note: `Counter` is imported from the standard-library `collections` module, so no extra package installation is needed.
###Code
from collections import Counter
count_each_word = Counter(clean_text)
count_each_word.most_common(30)
###Output
_____no_output_____ |
Code/SMD-Frames-GT-Generation/build_GT_images.ipynb | ###Markdown
Building Ground Truth imagesThis notebook presents how to generate the horizon line using two technologies: Matplotlib and PILLOW. For both cases it shows how to generate the GT for a single frame and how to generate it for all frames.For use in training and testing, use the PILLOW approach, since it can deliver the resulting images at the desired dimensions.
###Code
from scipy.io import loadmat
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
from PIL import Image, ImageDraw
import PIL
import math
import numpy as np
# Read GT data
data = loadmat("../../Dataset/VIS_Onboard/HorizonGT/MVI_0790_VIS_OB_HorizonGT")
# dimensions to save the image in pixels
x_size = 400
y_size = 225
# Get the number of frames
#frames_number = len(data['structXML'][0])
###Output
_____no_output_____
###Markdown
------------------------------ Matplotlib Displaying the horizon line on the imageThis code shows how to draw the horizon line on an image from the GT file data.The GT horizon line is drawn as a straight line with its central point at (x, y). To determine its slope, find the angle corresponding to cos alpha or sin alpha.To apply the rotation transform to the horizon line, convert the coordinates, apply the transform and add it to the image's current transform.
###Code
# ==== Draw horizon line for the image ====
# Read the base image
frame_number = 0
im = np.array(Image.open("../../Dataset/VIS_Onboard/VIS_Onboard_frames/MVI_0790_VIS_OB_frame" + str(frame_number) + ".jpg"), dtype=np.uint8)
# horizon = data frame <frame_number> -> (x, y, cos alpha, sen alpha) - See Explanation of GT files
horizon = data['structXML'][0][frame_number]
print(horizon)
# Create figure and axes
fig,ax = plt.subplots(1)
# Display the image
ax.imshow(im)
# Create horizon line
line = plt.axhline(y=horizon[1], color='r', linestyle='-')
# Convert coordinates
ts = ax.transData
coords = ts.transform((horizon[0][0][0], horizon[1][0][0]))
# -- Rotate line --
# Rotate the line around x, y in alpha degrees
t = mtransforms.Affine2D().rotate_deg_around(coords[0], coords[1], math.degrees(math.asin(horizon[3]))) # rotate_deg_around expects degrees; asin returns radians
line.set_transform(line.get_transform() + t)
plt.show()
###Output
(array([[960.5]]), array([[466.36252683]]), array([[0.02444696]]), array([[0.99970113]]))
###Markdown
Generating GT imagesThis section gives two examples: how to generate the GT for a single image and how to generate it for all images in the GT file. The GT file still has to be specified in both cases (in the first code cell). Generate GT for frame 0Generates the GT image for a single frame. The frame must be specified.A base image is used to find the dimensions with which the GT image should be created.A matrix with the same dimensions as the base image is created to represent the GT image, where 1 marks the pixels that form the horizon line and 0 the pixels that do not.When creating the figure to be plotted, its dimensions must be passed in inches and it must be displayed in gray scale.
###Code
# Read the base image
frame_number = 0
im = np.array(Image.open("../../Dataset/VIS_Onboard/VIS_Onboard_frames/MVI_0790_VIS_OB_frame" + str(frame_number) + ".jpg"), dtype=np.uint8)
# Get image dimension
len_x, len_y = len(im[0]), len(im)
print("dimensões da matriz: " + str(len_x) + ' x ' + str(len_y))
# Create Gt matrix image
gt = np.zeros((len_x, len_y), dtype=np.uint8)
# Dimension in inches
lenp_x, lenp_y = len_x * 0.0104166667, len_y * 0.0104166667
print("dimensões da matriz em polegada: " + str(lenp_x) + ' x ' + str(lenp_y))
# Create figure and axes
fig,ax = plt.subplots(figsize=(lenp_x, lenp_y))
# Display the image with gray scale
ax.imshow(gt, cmap='gray')
# horizon = data frame <frame_number> -> (x, y, cos alpha, sen alpha) - See Explanation of GT files
horizon = data['structXML'][0][frame_number]
# Create the horizon line
line = plt.axhline(y=horizon[1], color='1', linestyle='-')
# Convert coordinates
ts = ax.transData
coords = ts.transform((horizon[0][0][0], horizon[1][0][0]))
# Rotate line
# Rotate the line around x, y in alpha degrees
t = mtransforms.Affine2D().rotate_deg_around(coords[0], coords[1], math.degrees(math.asin(horizon[3]))) # rotate_deg_around expects degrees; asin returns radians
line.set_transform(line.get_transform() + t)
# Disable axis print
plt.axis("off")
plt.show()
# Save the image without padding
fig.savefig('GTimage.jpeg', format='jpeg', bbox_inches='tight', pad_inches=0)
###Output
dimensões da matriz: 1080 x 1920
dimensões da matriz em polegada: 11.250000036000001 x 20.000000064
###Markdown
Generate GT for all framesGenerates the GT images for all frames in the GT file. It follows the same parameters as the section above.
###Code
# Read the base image
im = np.array(Image.open("../../Dataset/VIS_Onboard/VIS_Onboard_frames/MVI_0790_VIS_OB_frame0.jpg"), dtype=np.uint8)
# Get image dimension
len_x, len_y = len(im[0]), len(im)
# frame_number is used to control the frame number
frame_number = 0
# horizon = data frame <count> -> (x, y, cos alpha, sen alpha) - See Explanation of GT files
for horizon in data['structXML'][0]:
print(horizon)
# Create Gt matrix image
gt = np.zeros((len_x, len_y), dtype=np.uint8)
# Dimension in inches
lenp_x, lenp_y = len_x * 0.0104166667, len_y * 0.0104166667
# Create figure and axes
fig,ax = plt.subplots(figsize=(lenp_x, lenp_y))
# Display the image with gray scale
ax.imshow(gt, cmap='gray')
# Create horizon line
line = plt.axhline(y=horizon[1], color='1', linestyle='-')
# Convert coordinates
ts = ax.transData
coords = ts.transform((horizon[0][0][0], horizon[1][0][0]))
# Rotate line
# Rotate the line around x, y in alpha degrees
    t = mtransforms.Affine2D().rotate_deg_around(coords[0], coords[1], math.degrees(math.asin(horizon[3]))) # rotate_deg_around expects degrees; asin returns radians
line.set_transform(line.get_transform() + t)
# Disable axis print
plt.axis("off")
plt.show()
    # Save the image without padding
fig.savefig('GT' + str(frame_number) + '.jpeg', format='jpeg', bbox_inches='tight', pad_inches=0)
frame_number = frame_number + 1
###Output
(array([[960.5]]), array([[466.36252683]]), array([[0.02444696]]), array([[0.99970113]]))
###Markdown
PILLOW Generate GT for frame 0
###Code
# Apply the rotated line to the image
# Read GT data
data = loadmat("../../Dataset/VIS_Onboard/HorizonGT/MVI_0788_VIS_OB_HorizonGT")
# Read the base image
frame_number = 0
#base = np.array(Image.open("../../Dataset/VIS_Onboard/VIS_Onboard_frames/MVI_0788_VIS_OB_frame" + str(frame_number) + ".jpg"), dtype=np.uint8)
# Get image dimension
len_x, len_y = 1920, 1080
print("Dimensões do frame: " + str(len_x) + ' x ' + str(len_y))
# Create GT image
# use the line below to see the horizon line in binary image ----
# PIL.Image.new(binary chanel, (x dimension, y dimension))
gt = PIL.Image.new('1', (len_x, len_y))
# use the line below to see the horizon line in the sea image ----
# Image.open(image_path)
#gt = Image.open("../../Dataset/VIS_Onboard/VIS_Onboard_frames/MVI_0788_VIS_OB_frame" + str(frame_number) + ".jpg")
# Create a draw with the image
draw = ImageDraw.Draw(gt)
# horizon = data frame <frame_number> -> (x, y, cos alpha, sen alpha) - See Explanation of GT files
horizon = data['structXML'][0][frame_number]
print(horizon)
# ------- Create the horizon line -------
# cosine and sine from GT file horizon line
c, s = horizon[2], horizon[3]
# horizon line angle
rad = math.asin(horizon[3]) - math.radians(90)
# cosine and sine to plot horizon line
c, s = math.cos(rad), math.sin(rad)
# central point
cx = horizon[0]
cy = horizon[1]
# start point and end point
x1 = 0 # start point
y1 = cy
x2 = len_x # end point
y2 = cy
# rotated points
xr1 = c*(x1-cx) - s*(y1-cy) + cx
yr1 = s*(x1-cx) + c*(y1-cy) + cy
xr2 = c*(x2-cx) - s*(y2-cy) + cx
yr2 = s*(x2-cx) + c*(y2-cy) + cy
# ---------------------------------------
# Draw the horizon line
# draw.line((x start point, y start point, x end point, y end point), white color, 1 pixel of width)
draw.line((xr1, yr1, xr2, yr2), fill=1, width=6)
# Show the image
#gt.show()
gt = gt.resize((x_size, y_size))
# Save the image
gt.save("GTs/MVI_0788_VIS_OB_gt" + str(frame_number) + ".jpeg", "JPEG")
###Output
Dimensões do frame: 1920 x 1080
(array([[960.5]]), array([[421.85573106]]), array([[0.04375699]]), array([[0.9990422]]))
###Markdown
The image was coming out 90 degrees off (Fixed. Why?) Generate GT for all frames
###Code
# Base image dimension
len_x, len_y = 1920, 1080
# Read GT data
data = loadmat("../../Dataset/VIS_Onboard/HorizonGT/MVI_0788_VIS_OB_HorizonGT")
# Control de frame number
frame_number = 0
# Read line per line of GT file
# horizon = data frame <frame_number> -> (x, y, cos alpha, sen alpha) - See Explanation of GT files
for horizon in data['structXML'][0]:
# Create GT image
# PIL.Image.new(binary chanel, (x dimension, y dimension))
gt = PIL.Image.new('1', (len_x, len_y))
# Create a draw with the image
draw = ImageDraw.Draw(gt)
# ------- Create the horizon line -------
# cosine and sine from GT file horizon line
c, s = horizon[2], horizon[3]
# horizon line angle
rad = math.asin(horizon[3]) - math.radians(90)
# cosine and sine to plot horizon line
c, s = math.cos(rad), math.sin(rad)
# central point
cx = float(horizon[0][0])
cy = float(horizon[1][0])
# start point and end point
x1 = 0 # start point
y1 = cy
x2 = len_x # end point
y2 = cy
# rotated points
xr1 = c*(x1-cx) - s*(y1-cy) + cx
yr1 = s*(x1-cx) + c*(y1-cy) + cy
xr2 = c*(x2-cx) - s*(y2-cy) + cx
yr2 = s*(x2-cx) + c*(y2-cy) + cy
# ---------------------------------------
# Draw the horizon line
# draw.line((x start point, y start point, x end point, y end point), white color, 1 pixel of width)
draw.line((xr1, yr1, xr2, yr2), fill=1, width=6)
# Show the image
#gt.show()
gt = gt.resize((x_size, y_size))
# Save the image
gt.save("GTs/MVI_0788_VIS_OB_GT" + str(frame_number) + ".jpg")
frame_number = frame_number + 1
###Output
_____no_output_____ |
re.ipynb | ###Markdown
Regexp Python
###Code
import re
text = """
Hello this is a quick walk through on regular expression in python.
123.456.7897
123*456*7897
123-456-7897
900-567-8909
800-568-1567
+27 652 305 879
+263 568 189 1899
+263-568-189-1899
+263_568_189_1899
+188 789 089 7816
*188 789 089 7816
https://www.google.com
https://whatsapp.com
http://localhost.edu
https://zero-5.li
[email protected]
[email protected]
[email protected]
[email protected]
Mr John
Mr. Petter
Mrs T
Ms Gonorio
Mrs Makosi
"""
sent = "Start working with regular expression you will enjoy these."
###Output
_____no_output_____
###Markdown
> We are going to use the above text to match regexp patterns
###Code
pattern = re.compile(r'^start', re.I)
matches = re.finditer(pattern, sent)
for match in matches:
print(match)
pattern = re.compile(r'these.$', re.I)
matches = re.finditer(pattern, sent)
for match in matches:
print(match)
pattern = re.compile(r'^[start|Start]')
matches = re.finditer(pattern, sent)
for match in matches:
print(match)
pattern = re.compile(r't{2,3}')
matches = re.finditer(pattern, text)
for match in matches:
print(match)
pattern = re.compile(r't{2,3}?')
matches = re.finditer(pattern, text)
for match in matches:
print(match)
pattern = re.compile(r'\bMr\b')
matches = re.finditer(pattern, text)
for match in matches:
print(match)
pattern = re.compile(r'\bMr\B')
matches = re.finditer(pattern, text)
for match in matches:
print(match)
pattern = re.compile(r'\d{3}-[1-8]{3}.\d{4}')
matches = re.finditer(pattern, text)
for match in matches:
print(match) # Matches phone number of form ddd-ddd-dddd
###Output
<re.Match object; span=(95, 107), match='123-456-7897'>
<re.Match object; span=(108, 120), match='900-567-8909'>
<re.Match object; span=(121, 133), match='800-568-1567'>
###Markdown
Names > Let's create a pattert that matches names such as Mr ...
###Code
matches = re.finditer(r'^M(r)?s?.?\s\w+', text, re.MULTILINE | re.I)
for match in matches:
start, stop = match.span()
print(match, ", ",text[start: stop] )
###Output
<re.Match object; span=(399, 406), match='Mr John'> , Mr John
<re.Match object; span=(407, 417), match='Mr. Petter'> , Mr. Petter
<re.Match object; span=(418, 423), match='Mrs T'> , Mrs T
<re.Match object; span=(424, 434), match='Ms Gonorio'> , Ms Gonorio
<re.Match object; span=(435, 445), match='Mrs Makosi'> , Mrs Makosi
###Markdown
Phone numbers > Let's create a pattert that matches phone numbers
###Code
pattern = re.compile(r'[+*.]?\d{2,3}.[0-9]{3}.[0-9]{3,4}')
matches = re.finditer(pattern, text)
for match in matches:
start, stop = match.span()
print(match, ", ",text[start: stop] )
###Output
<re.Match object; span=(69, 81), match='123.456.7897'> , 123.456.7897
<re.Match object; span=(82, 94), match='123*456*7897'> , 123*456*7897
<re.Match object; span=(95, 107), match='123-456-7897'> , 123-456-7897
<re.Match object; span=(108, 120), match='900-567-8909'> , 900-567-8909
<re.Match object; span=(121, 133), match='800-568-1567'> , 800-568-1567
<re.Match object; span=(134, 145), match='+27 652 305'> , +27 652 305
<re.Match object; span=(150, 162), match='+263 568 189'> , +263 568 189
<re.Match object; span=(168, 180), match='+263-568-189'> , +263-568-189
<re.Match object; span=(186, 198), match='+263_568_189'> , +263_568_189
<re.Match object; span=(204, 216), match='+188 789 089'> , +188 789 089
<re.Match object; span=(222, 234), match='*188 789 089'> , *188 789 089
###Markdown
Emails> Let's create a pattert that matches emails
###Code
matches = re.finditer(r'([a-zA-Z0-9_])+@[a-zA-Z]+\.\w+', text)
for match in matches:
start, stop = match.span()
print(match, ", ",text[start: stop] )
###Output
<re.Match object; span=(323, 342), match='[email protected]'> , [email protected]
<re.Match object; span=(343, 363), match='[email protected]'> , [email protected]
<re.Match object; span=(364, 381), match='[email protected]'> , [email protected]
<re.Match object; span=(382, 398), match='[email protected]'> , [email protected]
###Markdown
Names > Let's create a pattert that matches urls
###Code
matches = re.finditer(r'https?://([a-zA-Z0-9-]?)+(.[a-zA-Z]+)', text)
for match in matches:
start, stop = match.span()
print(match, ", ",text[start: stop] )
###Output
<re.Match object; span=(240, 258), match='https://www.google'> , https://www.google
<re.Match object; span=(263, 283), match='https://whatsapp.com'> , https://whatsapp.com
<re.Match object; span=(284, 304), match='http://localhost.edu'> , http://localhost.edu
<re.Match object; span=(305, 322), match='https://zero-5.li'> , https://zero-5.li
###Markdown
findall(A, B)> Matches all instances of an expression A in a string B and returns them in a list
###Code
matches = re.findall(r'\w+[is]+', "this is a regex this")
", ".join(matches)
###Output
_____no_output_____
###Markdown
search(A, B)> Matches the first instance of an expression A in a string B, and returns it as a re match object.
###Code
# search for the first email address in the text sting
matches = re.search(r'([a-zA-Z0-9_])+@[a-zA-Z]+\.\w+', text, re.MULTILINE)
matches
###Output
_____no_output_____
###Markdown
split(A, B)> Split a string B into a list using the delimiter A
###Code
matches = re.split(r'is?', "This is a work of magic.")
matches
###Output
_____no_output_____
###Markdown
re.sub(A, B, C)> Replace A with B in the string C.
###Code
matches = re.sub(r'is', 'at', "This is a work of magic.")
matches
###Output
_____no_output_____
###Markdown
re.match(A, B)>Matches A only at the beginning of string B and returns a match object, or None if the start of B does not match
###Code
matches = re.match(r'This', "This is a work of magic.")
matches
###Output
_____no_output_____ |
ANN Concatenated Data.ipynb | ###Markdown
Necessary modules
###Code
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Get the data
###Code
background = np.load("data/background_rf_LH_normalized.npy")
drone = np.load("data/drone_rf_LH_normalized.npy")
print(background.shape)
print(drone.shape)
num = random.randint(0, len(background)-1)
channel = 1
plt.plot(background[num][channel], label="background")
plt.plot(drone[num][channel],label="drone")
plt.legend(loc='upper right')
###Output
_____no_output_____
###Markdown
Train/ test split and data formatting
###Code
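# label 0 = background recordings, label 1 = drone recordings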
Y = np.array([0 for i in enumerate(background)] + [1 for i in enumerate(drone)])
X = np.append(background,drone,axis=0)
Y = Y.reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
def join_rf(x_data):
low_high = []
for x in x_data:
low_high.append(x.flatten().astype(np.float16))
low_high = np.array(low_high)
return low_high
x_train = join_rf(x_train)
x_test = join_rf(x_test)
# num = 11
# plt.plot(x_train[num])
# print(y_train[num])
x_train.shape
###Output
_____no_output_____
###Markdown
Model Specification
###Code
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
model = Sequential()
model.add(Dense(100,activation='relu', input_shape=(20000000,)))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='sigmoid'))  # a single output unit needs sigmoid; softmax over one unit always returns 1
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size =1
epochs = 10
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
###Output
WARNING:tensorflow:From C:\Users\nihad\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
Train on 56 samples, validate on 24 samples
|
Predicting the Survival of Titanic Passengers/Project3_SecA.ipynb | ###Markdown
Ques 1.) Find out the overall chance of survival for a Titanic passenger.
###Code
print("Total number of passengers survived are : ",titanic_data['survived'].value_counts()[1])
print("Percentage passengers survived are : ",titanic_data['survived'].value_counts(normalize=True)[1]*100)
###Output
Total number of passengers survived are : 342
Percentage passengers survived are : 38.38383838383838
###Markdown
Ques 2.) Find out the chance of survival for a Titanic passenger based on their sex and plot it.
###Code
sns.barplot(x="sex", y="survived", data=titanic_data)
print("The percentage of females who survived are : ", titanic_data["survived"][titanic_data["sex"] == 'female'].value_counts(normalize = True)[1]*100)
print("The percentage of males who survived are : ", titanic_data["survived"][titanic_data["sex"] == 'male'].value_counts(normalize = True)[1]*100)
###Output
The percentage of females who survived are : 74.20382165605095
The percentage of males who survived are : 18.890814558058924
###Markdown
Ques 3.) Find out the chance of survival for a Titanic passenger by traveling class wise and plot it.
###Code
sns.barplot(x="pclass", y="survived", data=titanic_data)
print("The percentage of Pclass 1 who survived are : ", titanic_data["survived"][titanic_data["pclass"] == 1].value_counts(normalize = True)[1]*100)
print("The percentage of Pclass 2 who survived are : ", titanic_data["survived"][titanic_data["pclass"] == 2].value_counts(normalize = True)[1]*100)
print("The percentage of Pclass 3 who survived are : ", titanic_data["survived"][titanic_data["pclass"] == 3].value_counts(normalize = True)[1]*100)
###Output
The percentage of Pclass 1 who survived are : 62.96296296296296
The percentage of Pclass 2 who survived are : 47.28260869565217
The percentage of Pclass 3 who survived are : 24.236252545824847
###Markdown
Ques 4.) Find out the average age for a Titanic passenger who survived by passenger class and sex.
###Code
fig = plt.figure(figsize=(12,5))
fig.add_subplot(121)
plt.title('Survivors Age/Sex per Passenger Class')
sns.barplot(data=titanic_data, x='pclass',y='age',hue='sex')
meanAgeTrnMale = round(titanic_data[(titanic_data['sex'] == "male")]['age'].groupby(titanic_data['pclass']).mean(),2)
meanAgeTrnFeMale = round(titanic_data[(titanic_data['sex'] == "female")]['age'].groupby(titanic_data['pclass']).mean(),2)
print('Mean age per sex per pclass')
print(pd.concat([meanAgeTrnMale, meanAgeTrnFeMale], axis = 1,keys= ['Male','Female']))
###Output
Mean age per sex per pclass
Male Female
pclass
1 41.28 34.61
2 30.74 28.72
3 26.51 21.75
###Markdown
Ques 5.) Find out the chance of survival for a Titanic passenger based on number of siblings the passenger had on the ship and plot it.
###Code
sns.barplot(x="sibsp", y="survived", data=titanic_data)
plt.title('Passengers survival chance based on the number of siblings the passenger had')
print("The percentage of SibSp 0 who survived are : ", titanic_data["survived"][titanic_data["sibsp"] == 0].value_counts(normalize = True)[1]*100)
print("The percentage of SibSp 1 who survived are : ", titanic_data["survived"][titanic_data["sibsp"] == 1].value_counts(normalize = True)[1]*100)
print("The percentage of SibSp 2 who survived are : ", titanic_data["survived"][titanic_data["sibsp"] == 2].value_counts(normalize = True)[1]*100)
###Output
The percentage of SibSp 0 who survived are : 34.53947368421053
The percentage of SibSp 1 who survived are : 53.588516746411486
The percentage of SibSp 2 who survived are : 46.42857142857143
###Markdown
Ques 6.) Find out the chance of survival for a Titanic passenger based on number of parents/children the passenger had on the ship and plot it.
###Code
sns.barplot(x="parch", y="survived", data=titanic_data)
plt.title('Passengers survival chance based on the number of parents/children')
plt.show()
print("The percentage of parch 0 who survived are : ", titanic_data["survived"][titanic_data["parch"] == 0].value_counts(normalize = True)[1]*100)
print("The Percentage of parch 1 who survived are : ", titanic_data["survived"][titanic_data["parch"] == 1].value_counts(normalize = True)[1]*100)
print("The Percentage of parch 2 who survived are : ", titanic_data["survived"][titanic_data["parch"] == 2].value_counts(normalize = True)[1]*100)
print("The Percentage of parch 3 who survived are : ", titanic_data["survived"][titanic_data["parch"] == 3].value_counts(normalize = True)[1]*100)
###Output
_____no_output_____
###Markdown
Ques 7.) Plot out the variation of survival and death amongst passengers of different age.
###Code
titanic_data["age"] = titanic_data["age"].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
titanic_data['agegroup'] = pd.cut(titanic_data['age'], bins, labels = labels)
sns.barplot(x="agegroup", y="survived", data=titanic_data)
plt.title('variation of survival and death amongst passengers of different age')
plt.show()
g = sns.FacetGrid(titanic_data, col='survived')
g.map(plt.hist, 'age', bins=20)
###Output
_____no_output_____
###Markdown
Ques 8.) Plot out the variation of survival and death with age amongst passengers of different passenger classes.
###Code
print("variation of survival and death with age and class")
grid = sns.FacetGrid(titanic_data, col='survived', row='pclass', size=3, aspect=2)
grid.map(plt.hist, 'age', alpha=.5, bins=20)
grid.add_legend();
###Output
variation of survival and death with age and class
###Markdown
Ques 9.) Find out the survival probability for a Titanic passenger based on title from the name of passenger.
###Code
combine = [titanic_data, test_data]
for dataset in combine:
dataset['Title'] = dataset.name.str.extract(' ([A-Za-z]+)\.', expand=False)
pd.crosstab(titanic_data['Title'],titanic_data['sex'])
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col',
'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
titanic_data[['Title', 'survived']].groupby(['Title'], as_index=False).mean()
###Output
_____no_output_____ |
Data Science Fundamentals for Data Analysts/ipynb/5.3.2 Lab - Logistic Regression 2.ipynb | ###Markdown
d-sandbox Logistic Regression Lab 2**Objectives**:1. Perform a train-test split on data.1. Evaluate four multi-variable logistic regression models using accuracy and a confusion matrix.Additionally, you will be asked to consider overfitting and underfitting of the models based upon these results.
###Code
%run ../../Includes/Classroom-Setup
###Output
_____no_output_____
###Markdown
Setup Load the DataThe `Includes/Classroom-Setup` notebook has made an aggregate table of data available to us via the Metastore associated with our workspace. We can load the data as a pandas dataframe using the cell below.This command loads the table using the Metastore reference. The `.toPandas()` method converts the Spark DataFrame to a Pandas DataFrame. We will use the Pandas DataFrame with Scikit-Learn throughout this Module.
###Code
ht_agg_spark_df = spark.read.table("ht_agg")
ht_agg_pandas_df = ht_agg_spark_df.toPandas()
###Output
_____no_output_____
###Markdown
Prepare Four Datasets and TargetNext, we will prepare four subsets of our data, as in the previous lab, to build four different logistic regression models.We also prepare our target vector, `y`.
###Code
from sklearn.preprocessing import LabelEncoder
X_1 = ht_agg_pandas_df[['mean_active_heartrate', 'mean_resting_heartrate']]
X_2 = ht_agg_pandas_df[['mean_active_heartrate', 'mean_vo2']]
X_3 = ht_agg_pandas_df[['mean_active_heartrate', 'mean_bmi', 'mean_vo2']]
X_4 = ht_agg_pandas_df[['mean_active_heartrate', 'mean_bmi', 'mean_vo2', 'mean_resting_heartrate']]
le = LabelEncoder()
lifestyle = ht_agg_pandas_df['lifestyle']
le.fit(lifestyle)
y = le.transform(lifestyle)
###Output
_____no_output_____
###Markdown
Framing a Business ProblemOver the next few labs, we will use supervised machine learning to answer a new business question:> Given a user's fitness profile, can we predict the lifestyle of a user?Like the regression problem we previously solved, our **inputs** will be fitness profile information. This is, however, a classification problem and will have a different **output**, lifestyle. Perform the Train-Test SplitNext, we will split one of our four subsets of feature data and our target data into training and testing data.
###Code
from sklearn.model_selection import train_test_split
X_1_train, X_1_test, y_train, y_test = train_test_split(X_1, y, random_state=42)  # fix the seed so every subset uses the same row split
###Output
_____no_output_____
###Markdown
Your Turn Exercise 1: Perform the Train-Test SplitPerform the train-test split on the remaining data subsets:1. use the helper function `train_test_split`1. split the following subsets: - `X_2`, `X_3`, `X_4`
###Code
# TODO
# use the same random_state as above so y_train/y_test stay aligned with every X_i split
X_2_train, X_2_test, y_train, y_test = train_test_split(X_2, y, random_state=42)
X_3_train, X_3_test, y_train, y_test = train_test_split(X_3, y, random_state=42)
X_4_train, X_4_test, y_train, y_test = train_test_split(X_4, y, random_state=42)
###Output
_____no_output_____
###Markdown
Exercise 2: Multi-Variable Logistic RegressionFit four multiple-variable logistic models, one for each datasubset.
###Code
# TODO
from sklearn.linear_model import LogisticRegression
lr_1 = LogisticRegression(max_iter=10000)
lr_2 = LogisticRegression(max_iter=10000)
lr_3 = LogisticRegression(max_iter=10000)
lr_4 = LogisticRegression(max_iter=10000)
lr_1.fit(X_1_train, y_train)
lr_2.fit(X_2_train, y_train)
lr_3.fit(X_3_train, y_train)
lr_4.fit(X_4_train, y_train)
###Output
_____no_output_____
###Markdown
Demonstration Evaluate a Multi-variable Model using accuracy and a confusion matrix Finally, we evaluate our models. We do so using the accuracy metric and a confusion matrix. To use these metrics, we need to:1. generate a vector of predictions using `estimator.predict()`1. pass actual and predicted values to the metric as `metric(actual, predicted)`1. do this for both the training and testing data
###Code
from sklearn.metrics import accuracy_score, confusion_matrix
y_train_1_predicted = lr_1.predict(X_1_train)
y_test_1_predicted = lr_1.predict(X_1_test)
print("training accuracy: ", accuracy_score(y_train, y_train_1_predicted))
print("test accuracy: ", accuracy_score(y_test, y_test_1_predicted))
print("training confusion matrix")
print(confusion_matrix(y_train, y_train_1_predicted))
print("")
print("test confusion matrix")
print(confusion_matrix(y_test, y_test_1_predicted))
###Output
_____no_output_____
###Markdown
**Question**: What do you notice about the results? Your Turn Exercise 3: Generate Predictions 1. use the following subset splits: - `X_1_test`, `X_2_test`, `X_3_test`, `X_4_test` - `X_1_train`, `X_2_train`, `X_3_train`, `X_4_train`
###Code
# TODO
y_train_1_predicted = lr_1.predict(X_1_train)
y_test_1_predicted = lr_1.predict(X_1_test)
y_train_2_predicted = lr_2.predict(X_2_train)
y_test_2_predicted = lr_2.predict(X_2_test)
y_train_3_predicted = lr_3.predict(X_3_train)
y_test_3_predicted = lr_3.predict(X_3_test)
y_train_4_predicted = lr_4.predict(X_4_train)
y_test_4_predicted = lr_4.predict(X_4_test)
###Output
_____no_output_____
###Markdown
Exercise 4: Evaluate Our Models 1. Use the `accuracy_score` and `confusion_matrix` metrics 1. report results on both the training and the test split for each model 1. use the following subset splits: - `X_2_test`, `X_3_test`, `X_4_test` - `X_2_train`, `X_3_train`, `X_4_train`
###Code
# TODO
train_1_accuracy = accuracy_score(y_train, y_train_1_predicted)
train_1_conf_mat = confusion_matrix(y_train, y_train_1_predicted)
test_1_accuracy = accuracy_score(y_test, y_test_1_predicted)
test_1_conf_mat = confusion_matrix(y_test, y_test_1_predicted)
train_2_accuracy = accuracy_score(y_train, y_train_2_predicted)
train_2_conf_mat = confusion_matrix(y_train, y_train_2_predicted)
test_2_accuracy = accuracy_score(y_test, y_test_2_predicted)
test_2_conf_mat = confusion_matrix(y_test, y_test_2_predicted)
train_3_accuracy = accuracy_score(y_train, y_train_3_predicted)
train_3_conf_mat = confusion_matrix(y_train, y_train_3_predicted)
test_3_accuracy = accuracy_score(y_test, y_test_3_predicted)
test_3_conf_mat = confusion_matrix(y_test, y_test_3_predicted)
train_4_accuracy = accuracy_score(y_train, y_train_4_predicted)
train_4_conf_mat = confusion_matrix(y_train, y_train_4_predicted)
test_4_accuracy = accuracy_score(y_test, y_test_4_predicted)
test_4_conf_mat = confusion_matrix(y_test, y_test_4_predicted)
print("model 1: training accuracy: ", train_1_accuracy)
print("model 1: training confusion matrix: ")
print(train_1_conf_mat)
print(" ")
print("model 1: test accuracy: ", test_1_accuracy)
print("model 1: test confusion matrix: ")
print(test_1_conf_mat)
print(" ")
print("model 2: training accuracy: ", train_2_accuracy)
print("model 2: training confusion matrix: ")
print(train_2_conf_mat)
print(" ")
print("model 2: test accuracy: ", test_2_accuracy)
print("model 2: test confusion matrix: ")
print(test_2_conf_mat)
print(" ")
print("model 3: training accuracy: ", train_3_accuracy)
print("model 3: training confusion matrix: ")
print(train_3_conf_mat)
print(" ")
print("model 3: test accuracy: ", test_3_accuracy)
print("model 3: test confusion matrix: ")
print(test_3_conf_mat)
print(" ")
print("model 4: training accuracy: ", train_4_accuracy)
print("model 4: training confusion matrix: ")
print(train_4_conf_mat)
print(" ")
print("model 4: test accuracy: ", test_4_accuracy)
print("model 4: test confusion matrix: ")
print(test_4_conf_mat)
print(" ")
###Output
_____no_output_____ |
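###Markdown
To tie these numbers back to the overfitting/underfitting question in the objectives, it can help to collect the accuracies into a single table. This is an added sketch, not part of the original lab; it reuses the `train_*_accuracy` / `test_*_accuracy` variables computed above and assumes pandas is available in this environment.
###Code
# Sketch: tabulate train vs. test accuracy per model to eyeball over/underfitting.
import pandas as pd

accuracy_table = pd.DataFrame({
    "train_accuracy": [train_1_accuracy, train_2_accuracy, train_3_accuracy, train_4_accuracy],
    "test_accuracy": [test_1_accuracy, test_2_accuracy, test_3_accuracy, test_4_accuracy],
}, index=["model 1", "model 2", "model 3", "model 4"])

# A large train-test gap hints at overfitting; low accuracy on both hints at underfitting.
print(accuracy_table)
###Output
_____no_output_____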
python/download-data-from-a-collection.ipynb | ###Markdown
**Title**: Download all data from a collection **Date**: 25 Oct 2021 **Description**: * Download all data from a collection Install and import dependencies
###Code
# Install specific packages required for this notebook
!pip install flywheel-sdk tqdm pandas fw-meta backoff
# Import packages
import logging
import os
import re
from getpass import getpass
from functools import lru_cache
from pathlib import Path
import pandas as pd
import backoff
import flywheel
from tqdm.notebook import tqdm
from permission import check_user_permission
# Instantiate a logger
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
log = logging.getLogger('root')
###Output
_____no_output_____
###Markdown
Flywheel API Key and Client Get an API_KEY. More on this in the Flywheel SDK doc [here](https://flywheel-io.gitlab.io/product/backend/sdk/branches/master/python/getting_started.html#api-key).
###Code
API_KEY = getpass('Enter API_KEY here: ')
###Output
_____no_output_____
###Markdown
Instantiate the Flywheel API client
###Code
fw = flywheel.Client(API_KEY if 'API_KEY' in locals() else os.environ.get('FW_KEY'))
###Output
_____no_output_____
###Markdown
Show Flywheel logging information
###Code
log.info('You are now logged in as %s to %s', fw.get_current_user()['email'], fw.get_config()['site']['api_url'])
###Output
_____no_output_____
###Markdown
Constants
###Code
# Collection ID
COLLECTION_ID = '<collection-id>'
# Local root path where to download data
ROOT_DATA = Path('/tmp')
# File type of filter on
FILE_TYPE = 'nifti'
###Output
_____no_output_____
###Markdown
Helper functions
###Code
# wrapper around `get_project` caching result. Help to reduce repeated calls.
@lru_cache()
def get_project(fw, project_id):
return fw.get_project(project_id)
def is_not_500_502_504(exc):
if hasattr(exc, "status"):
if exc.status in [504, 502, 500]:
# 500: Internal Server Error
# 502: Bad Gateway
# 504: Gateway Timeout
return False
return True
@backoff.on_exception(
backoff.expo, flywheel.rest.ApiException, max_time=60, giveup=is_not_500_502_504
)
# will retry for 60s, waiting an exponentially increasing delay between retries
# e.g. 1s, 2s, 4s, 8s, etc, giving up if exception is in 500, 502, 504.
def robust_download(file, dst_path):
file.download(dst_path)
###Output
_____no_output_____
###Markdown
Main script Get the collection
###Code
collection = fw.get_collection(COLLECTION_ID)
if not collection:
    log.error(f'Collection {COLLECTION_ID} not found.')
###Output
_____no_output_____
###Markdown
Download all files in the collection matching FILE_TYPE
###Code
for session in tqdm(collection.sessions.iter()):
project = get_project(fw, session.project)
for acq in session.acquisitions.iter():
for file in acq.files:
if file.type == FILE_TYPE:
# assuming labels are POSIX compliant
dst_path = ROOT_DATA / project.label / session.subject.label / session.label / acq.label / file.name
dst_path.parent.mkdir(parents=True, exist_ok=True)
robust_download(file, str(dst_path))
###Output
_____no_output_____ |
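###Markdown
As an optional sanity check (an addition, not part of the original notebook), one can count how many files actually landed under `ROOT_DATA` once the loop above has finished.
###Code
# Sketch: count the files written below the local root path.
n_files = sum(1 for p in ROOT_DATA.rglob('*') if p.is_file())
log.info('Found %d files under %s after download', n_files, ROOT_DATA)
###Output
_____no_output_____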
notebooks/Machine Learning/Classification.ipynb | ###Markdown
Classification
###Code
from sklearn.datasets import load_breast_cancer
dataset = load_breast_cancer()
print(dataset.DESCR)
X = dataset.data
Y = dataset.target
from sklearn.model_selection import train_test_split
X_train, x_test, Y_train, y_test = train_test_split(X, Y, test_size=0.2)
print(len(X_train), len(x_test))
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
LR = LogisticRegression()
SVM = SVC()
DT = DecisionTreeClassifier()
RF = RandomForestClassifier()
MLP = MLPClassifier()
print("LogisticRegression")
LR.fit(X_train, Y_train)
print("SupportVectorMachines")
SVM.fit(X_train, Y_train)
print("DecisionTreeClassifier")
DT.fit(X_train, Y_train)
print("RandomForestClassifier")
RF.fit(X_train, Y_train)
print("MultLayerPerceptron")
MLP.fit(X_train, Y_train)
from sklearn.metrics import accuracy_score
print("LogisticRegression")
print(accuracy_score(y_test, LR.predict(x_test)))
print("SupportVectorMachines")
print(accuracy_score(y_test, SVM.predict(x_test)))
print("DecisionTreeClassifier")
print(accuracy_score(y_test, DT.predict(x_test)))
print("RandomForestClassifier")
print(accuracy_score(y_test, RF.predict(x_test)))
print("MultLayerPerceptron")
print(accuracy_score(y_test, MLP.predict(x_test)))
###Output
LogisticRegression
0.9649122807017544
SupportVectorMachines
0.6140350877192983
DecisionTreeClassifier
0.9473684210526315
RandomForestClassifier
0.9649122807017544
MultLayerPerceptron
0.6140350877192983
|
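###Markdown
The low SVM and MLP accuracies above are a common symptom of unscaled features. As a hedged follow-up sketch (not part of the original notebook), wrapping those models in a `Pipeline` with `StandardScaler` and re-checking accuracy is one way to test that hypothesis.
###Code
# Sketch: feature scaling usually helps SVC and MLPClassifier on this kind of data.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

scaled_svm = make_pipeline(StandardScaler(), SVC())
scaled_mlp = make_pipeline(StandardScaler(), MLPClassifier(max_iter=1000))

scaled_svm.fit(X_train, Y_train)
scaled_mlp.fit(X_train, Y_train)

print("Scaled SVM accuracy:", accuracy_score(y_test, scaled_svm.predict(x_test)))
print("Scaled MLP accuracy:", accuracy_score(y_test, scaled_mlp.predict(x_test)))
###Output
_____no_output_____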
notebooks/10-steps-to-ML/10_Recurrent_Neural_Networks.ipynb | ###Markdown
Recurrent Neural Networks When working with sequential data (time-series, sentences, etc.) the order of the inputs is crucial for the task at hand. Recurrent neural networks (RNNs) process sequential data by accounting for the current input and also what has been learned from previous inputs. In this notebook, we'll learn how to create and train RNNs on sequential data. Overview * **Objective:** Process sequential data by accounting for the current input and also what has been learned from previous inputs.* **Advantages:** * Account for order and previous inputs in a meaningful way. * Conditioned generation for generating sequences.* **Disadvantages:** * Each time step's prediction depends on the previous prediction so it's difficult to parallelize RNN operations. * Processing long sequences can yield memory and computation issues. * Interpretability is difficult but there are a few [techniques](https://arxiv.org/abs/1506.02078) that use the activations from RNNs to see what parts of the inputs are processed. * **Miscellaneous:** * Architectural tweaks to make RNNs faster and interpretable are an ongoing area of research. RNN forward pass for a single time step $X_t$: $h_t = \tanh(W_{hh}h_{t-1} + W_{xh}X_t + b_h)$, $y_t = W_{hy}h_t + b_y$, $P(y) = softmax(y_t) = \frac{e^y}{\sum e^y}$ *where*:* $X_t$ = input at time step t | $\in \mathbb{R}^{N \times E}$ ($N$ is the batch size, $E$ is the embedding dim)* $W_{hh}$ = hidden units weights | $\in \mathbb{R}^{H \times H}$ ($H$ is the hidden dim)* $h_{t-1}$ = previous time step's hidden state $\in \mathbb{R}^{N \times H}$* $W_{xh}$ = input weights | $\in \mathbb{R}^{E \times H}$* $b_h$ = hidden units bias $\in \mathbb{R}^{H \times 1}$* $W_{hy}$ = output weights | $\in \mathbb{R}^{H \times C}$ ($C$ is the number of classes)* $b_y$ = output bias $\in \mathbb{R}^{C \times 1}$ You repeat this for every time step's input ($X_{t+1}, X_{t+2}, ..., X_{N}$) to get the predicted outputs at each time step.**Note**: At the first time step, the previous hidden state $h_{t-1}$ can either be a zero vector (unconditioned) or initialized (conditioned). If we are conditioning the RNN, the first hidden state $h_0$ can belong to a specific condition or we can concat the specific condition to the randomly initialized hidden vectors at each time step. More on this in the subsequent notebooks on RNNs. Let's see what the forward pass looks like with an RNN for a synthetic task such as processing reviews (a sequence of words) to predict the sentiment at the end of processing the review.
###Code
# Let's make sure the libraries are installed
#!pip install numpy
#!pip install torch
#!pip install matplotlib
#!pip install pandas
# Now import the libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
warnings.filterwarnings('ignore')
batch_size = 5
seq_size = 10 # max length per input (masking will be used for sequences that aren't this max length)
x_lengths = [8, 5, 4, 10, 5] # lengths of each input sequence
embedding_dim = 100
rnn_hidden_dim = 256
output_dim = 4
# Initialize synthetic inputs
x_in = torch.randn(batch_size, seq_size, embedding_dim)
x_lengths = torch.tensor(x_lengths)
print (x_in.size())
# Initialize hidden state
hidden_t = torch.zeros((batch_size, rnn_hidden_dim))
print (hidden_t.size())
# Initialize RNN cell
rnn_cell = nn.RNNCell(embedding_dim, rnn_hidden_dim)
print (rnn_cell)
# Forward pass through RNN
x_in = x_in.permute(1, 0, 2) # RNN needs batch_size to be at dim 1
# Loop through the inputs time steps
hiddens = []
for t in range(seq_size):
hidden_t = rnn_cell(x_in[t], hidden_t)
hiddens.append(hidden_t)
hiddens = torch.stack(hiddens)
hiddens = hiddens.permute(1, 0, 2) # bring batch_size back to dim 0
print (hiddens.size())
# We also could've used a more abstracted layer
x_in = torch.randn(batch_size, seq_size, embedding_dim)
rnn = nn.RNN(embedding_dim, rnn_hidden_dim, batch_first=True)
out, h_n = rnn(x_in) #h_n is the last hidden state
print ("out: ", out.size())
print ("h_n: ", h_n.size())
def gather_last_relevant_hidden(hiddens, x_lengths):
x_lengths = x_lengths.long().detach().cpu().numpy() - 1
out = []
for batch_index, column_index in enumerate(x_lengths):
out.append(hiddens[batch_index, column_index])
return torch.stack(out)
# Gather the last relevant hidden state
z = gather_last_relevant_hidden(hiddens, x_lengths)
print (z.size())
# Forward pass through FC layer
fc1 = nn.Linear(rnn_hidden_dim, output_dim)
y_pred = fc1(z)
y_pred = F.softmax(y_pred, dim=1)
print (y_pred.size())
print (y_pred)
###Output
torch.Size([5, 4])
tensor([[0.2711, 0.2797, 0.2093, 0.2399],
[0.2363, 0.2453, 0.2494, 0.2691],
[0.2087, 0.3047, 0.2203, 0.2663],
[0.3028, 0.2633, 0.2366, 0.1973],
[0.2812, 0.2432, 0.2440, 0.2316]], grad_fn=<SoftmaxBackward>)
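###Markdown
As a side note (an addition to the demo above): instead of looping over time steps and gathering the last relevant hidden state by hand, PyTorch can skip the padded positions with packed sequences. A minimal sketch, assuming a reasonably recent PyTorch (for `enforce_sorted`) and the `x_in`, `x_lengths`, and `rnn` defined above.
###Code
# Sketch: packed sequences let the RNN ignore padded time steps entirely.
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

packed = pack_padded_sequence(x_in, x_lengths, batch_first=True, enforce_sorted=False)
packed_out, h_n = rnn(packed)  # h_n already holds each sequence's last *valid* hidden state
out, out_lengths = pad_packed_sequence(packed_out, batch_first=True)
print (out.size(), h_n.size())
###Output
_____no_output_____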
###Markdown
Sequential data There are a variety of different sequential tasks that RNNs can help with.1. **One to one**: one input produces one output. * Ex. Given a word predict its class (verb, noun, etc.).2. **One to many**: one input generates many outputs. * Ex. Given a sentiment (positive, negative, etc.) generate a review.3. **Many to one**: Many inputs are sequentially processed to generate one output. * Ex. Process the words in a review to predict the sentiment.4. **Many to many**: Many inputs are sequentially processed to generate many outputs. * Ex. Given a sentence in French, process the entire sentence and then generate the English translation. * Ex. Given a sequence of time-series data, predict the probability of an event (risk of disease) at each time step. Issues with vanilla RNNs There are several issues with the vanilla RNN that we've seen so far. 1. When we have an input sequence with many time steps, it becomes difficult for the model to retain information seen earlier as we process more and more of the downstream time steps. The goal of the model is to retain the useful components of the previously seen time steps, but this becomes cumbersome when we have so many time steps to process. 2. During backpropagation, the gradient from the loss has to travel all the way back towards the first time step. If our gradient is larger than 1 (${1.01}^{1000} = 20959$) or less than 1 (${0.99}^{1000} = 4.31e-5$) and we have lots of time steps, this can quickly spiral out of control. To address both of these issues, the concept of gating was introduced to RNNs. Gating allows RNNs to control the information flow between time steps to optimize on the task. Selectively allowing information to pass through allows the model to process inputs with many time steps. The most common gated RNN variants are the long short-term memory ([LSTM](https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM)) units and gated recurrent units ([GRUs](https://pytorch.org/docs/stable/nn.html#torch.nn.GRU)). You can read more about how these units work [here](http://colah.github.io/posts/2015-08-Understanding-LSTMs/).
###Code
# GRU in PyTorch
gru = nn.GRU(input_size=embedding_dim, hidden_size=rnn_hidden_dim,
batch_first=True)
# Initialize synthetic input
x_in = torch.randn(batch_size, seq_size, embedding_dim)
print (x_in.size())
# Forward pass
out, h_n = gru(x_in)
print ("out:", out.size())
print ("h_n:", h_n.size())
###Output
out: torch.Size([5, 10, 256])
h_n: torch.Size([1, 5, 256])
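###Markdown
The cell above uses a GRU; for completeness, here is a minimal LSTM sketch (an addition, not from the original notebook). An LSTM returns a cell state alongside the hidden state.
###Code
# Sketch: LSTM usage mirrors the GRU, but returns (hidden state, cell state).
lstm = nn.LSTM(input_size=embedding_dim, hidden_size=rnn_hidden_dim,
               batch_first=True)
out, (h_n, c_n) = lstm(x_in)
print ("out:", out.size())  # (batch_size, seq_size, rnn_hidden_dim)
print ("h_n:", h_n.size())  # (1, batch_size, rnn_hidden_dim)
print ("c_n:", c_n.size())  # (1, batch_size, rnn_hidden_dim)
###Output
_____no_output_____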
###Markdown
**Note**: Choosing whether to use GRU or LSTM really depends on the data and empirical performance. GRUs offer comparable performance with reduce number of parameters while LSTMs are more efficient and may make the difference in performance for your particular task. Bidirectional RNNs There have been many advancements with RNNs ([attention](https://www.oreilly.com/ideas/interpretability-via-attentional-and-memory-based-interfaces-using-tensorflow), Quasi RNNs, etc.) that we will cover in later lessons but one of the basic and widely used ones are bidirectional RNNs (Bi-RNNs). The motivation behind bidirectional RNNs is to process an input sequence by both directions. Accounting for context from both sides can aid in performance when the entire input sequence is known at time of inference. A common application of Bi-RNNs is in translation where it's advantageous to look at an entire sentence from both sides when translating to another language (ie. Japanese → English).
###Code
# BiGRU in PyTorch
bi_gru = nn.GRU(input_size=embedding_dim, hidden_size=rnn_hidden_dim,
batch_first=True, bidirectional=True)
# Forward pass
out, h_n = bi_gru(x_in)
print ("out:", out.size()) # collection of all hidden states from the RNN for each time step
print ("h_n:", h_n.size()) # last hidden state from the RNN
###Output
out: torch.Size([5, 10, 512])
h_n: torch.Size([2, 5, 256])
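###Markdown
A small illustrative addition (not in the original notebook): the two directions of the BiGRU above can be pulled apart from `out`, or their final states concatenated into one summary vector per sequence.
###Code
# Sketch: splitting / combining the two directions of the bidirectional GRU.
forward_out = out[:, :, :rnn_hidden_dim]   # forward-direction outputs per time step
backward_out = out[:, :, rnn_hidden_dim:]  # backward-direction outputs per time step

# h_n is (num_directions, batch, hidden); concatenate both final states per sequence
summary = torch.cat([h_n[0], h_n[1]], dim=1)
print (forward_out.size(), backward_out.size(), summary.size())
###Output
_____no_output_____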
###Markdown
Notice that the output for each sample at each time step has size 512 (double the hidden dim). This is because it includes both the forward and backward directions from the BiRNN. Document classification with RNNs Let's apply RNNs to the document classification task from the embeddings notebook (12_Embeddings.ipynb) where we want to predict an article's category given its title. Set up
###Code
import os
from argparse import Namespace
import collections
import copy
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import torch
# Set Numpy and PyTorch seeds
def set_seeds(seed, cuda):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed_all(seed)
# Creating directories
def create_dirs(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
# Arguments
args = Namespace(
seed=1234,
cuda=True,
shuffle=True,
data_file="data/news.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="news",
train_size=0.7,
val_size=0.15,
test_size=0.15,
pretrained_embeddings=None,
cutoff=25, # token must appear at least <cutoff> times to be in SequenceVocabulary
num_epochs=5,
early_stopping_criteria=5,
learning_rate=1e-3,
batch_size=64,
embedding_dim=100,
rnn_hidden_dim=128,
hidden_dim=100,
num_layers=1,
bidirectional=False,
dropout_p=0.1,
)
# Set seeds
set_seeds(seed=args.seed, cuda=args.cuda)
# Create save dir
create_dirs(args.save_dir)
# Expand filepaths
args.vectorizer_file = os.path.join(args.save_dir, args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir, args.model_state_file)
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
###Output
Using CUDA: False
###Markdown
Data
###Code
import re
import urllib
# Raw data
df = pd.read_csv(args.data_file, header=0)
df.head()
# Split by category
by_category = collections.defaultdict(list)
for _, row in df.iterrows():
by_category[row.category].append(row.to_dict())
for category in by_category:
print ("{0}: {1}".format(category, len(by_category[category])))
# Create split data
final_list = []
for _, item_list in sorted(by_category.items()):
if args.shuffle:
np.random.shuffle(item_list)
n = len(item_list)
n_train = int(args.train_size*n)
n_val = int(args.val_size*n)
n_test = int(args.test_size*n)
# Give data point a split attribute
for item in item_list[:n_train]:
item['split'] = 'train'
for item in item_list[n_train:n_train+n_val]:
item['split'] = 'val'
for item in item_list[n_train+n_val:]:
item['split'] = 'test'
# Add to final list
final_list.extend(item_list)
# df with split datasets
split_df = pd.DataFrame(final_list)
split_df["split"].value_counts()
# Preprocessing
def preprocess_text(text):
text = ' '.join(word.lower() for word in text.split(" "))
text = re.sub(r"([.,!?])", r" \1 ", text)
text = re.sub(r"[^a-zA-Z.,!?]+", r" ", text)
text = text.strip()
return text
split_df.title = split_df.title.apply(preprocess_text)
split_df.head()
###Output
_____no_output_____
###Markdown
Vocabulary
###Code
class Vocabulary(object):
def __init__(self, token_to_idx=None):
# Token to index
if token_to_idx is None:
token_to_idx = {}
self.token_to_idx = token_to_idx
# Index to token
self.idx_to_token = {idx: token \
for token, idx in self.token_to_idx.items()}
def to_serializable(self):
return {'token_to_idx': self.token_to_idx}
@classmethod
def from_serializable(cls, contents):
return cls(**contents)
def add_token(self, token):
if token in self.token_to_idx:
index = self.token_to_idx[token]
else:
index = len(self.token_to_idx)
self.token_to_idx[token] = index
self.idx_to_token[index] = token
return index
def add_tokens(self, tokens):
        return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
return self.token_to_idx[token]
def lookup_index(self, index):
if index not in self.idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self.idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self.token_to_idx)
# Vocabulary instance
category_vocab = Vocabulary()
for index, row in df.iterrows():
category_vocab.add_token(row.category)
print (category_vocab) # __str__
print (len(category_vocab)) # __len__
index = category_vocab.lookup_token("Business")
print (index)
print (category_vocab.lookup_index(index))
###Output
<Vocabulary(size=4)>
4
0
Business
###Markdown
Sequence vocabulary Next, we're going to create our Vocabulary classes for the article's title, which is a sequence of tokens.
###Code
from collections import Counter
import string
class SequenceVocabulary(Vocabulary):
def __init__(self, token_to_idx=None, unk_token="<UNK>",
mask_token="<MASK>", begin_seq_token="<BEGIN>",
end_seq_token="<END>"):
super(SequenceVocabulary, self).__init__(token_to_idx)
self.mask_token = mask_token
self.unk_token = unk_token
self.begin_seq_token = begin_seq_token
self.end_seq_token = end_seq_token
self.mask_index = self.add_token(self.mask_token)
self.unk_index = self.add_token(self.unk_token)
self.begin_seq_index = self.add_token(self.begin_seq_token)
self.end_seq_index = self.add_token(self.end_seq_token)
# Index to token
self.idx_to_token = {idx: token \
for token, idx in self.token_to_idx.items()}
def to_serializable(self):
contents = super(SequenceVocabulary, self).to_serializable()
contents.update({'unk_token': self.unk_token,
'mask_token': self.mask_token,
'begin_seq_token': self.begin_seq_token,
'end_seq_token': self.end_seq_token})
return contents
def lookup_token(self, token):
return self.token_to_idx.get(token, self.unk_index)
def lookup_index(self, index):
if index not in self.idx_to_token:
raise KeyError("the index (%d) is not in the SequenceVocabulary" % index)
return self.idx_to_token[index]
def __str__(self):
return "<SequenceVocabulary(size=%d)>" % len(self.token_to_idx)
def __len__(self):
return len(self.token_to_idx)
# Get word counts
word_counts = Counter()
for title in split_df.title:
for token in title.split(" "):
if token not in string.punctuation:
word_counts[token] += 1
# Create SequenceVocabulary instance
title_vocab = SequenceVocabulary()
for word, word_count in word_counts.items():
if word_count >= args.cutoff:
title_vocab.add_token(word)
print (title_vocab) # __str__
print (len(title_vocab)) # __len__
index = title_vocab.lookup_token("general")
print (index)
print (title_vocab.lookup_index(index))
###Output
<SequenceVocabulary(size=4400)>
4400
4
general
###Markdown
Vectorizer Something new that we introduce in this Vectorizer is calculating the length of our input sequence. We will use this later on to extract the last relevant hidden state for each input sequence.
###Code
class NewsVectorizer(object):
def __init__(self, title_vocab, category_vocab):
self.title_vocab = title_vocab
self.category_vocab = category_vocab
def vectorize(self, title):
indices = [self.title_vocab.lookup_token(token) for token in title.split(" ")]
indices = [self.title_vocab.begin_seq_index] + indices + \
[self.title_vocab.end_seq_index]
# Create vector
title_length = len(indices)
vector = np.zeros(title_length, dtype=np.int64)
vector[:len(indices)] = indices
return vector, title_length
def unvectorize(self, vector):
tokens = [self.title_vocab.lookup_index(index) for index in vector]
title = " ".join(token for token in tokens)
return title
@classmethod
def from_dataframe(cls, df, cutoff):
# Create class vocab
category_vocab = Vocabulary()
for category in sorted(set(df.category)):
category_vocab.add_token(category)
# Get word counts
word_counts = Counter()
for title in df.title:
for token in title.split(" "):
word_counts[token] += 1
# Create title vocab
title_vocab = SequenceVocabulary()
for word, word_count in word_counts.items():
if word_count >= cutoff:
title_vocab.add_token(word)
return cls(title_vocab, category_vocab)
@classmethod
def from_serializable(cls, contents):
title_vocab = SequenceVocabulary.from_serializable(contents['title_vocab'])
category_vocab = Vocabulary.from_serializable(contents['category_vocab'])
return cls(title_vocab=title_vocab, category_vocab=category_vocab)
def to_serializable(self):
return {'title_vocab': self.title_vocab.to_serializable(),
'category_vocab': self.category_vocab.to_serializable()}
# Vectorizer instance
vectorizer = NewsVectorizer.from_dataframe(split_df, cutoff=args.cutoff)
print (vectorizer.title_vocab)
print (vectorizer.category_vocab)
vectorized_title, title_length = vectorizer.vectorize(preprocess_text(
"Roger Federer wins the Wimbledon tennis tournament."))
print (np.shape(vectorized_title))
print ("title_length:", title_length)
print (vectorized_title)
print (vectorizer.unvectorize(vectorized_title))
###Output
<SequenceVocabulary(size=4404)>
<Vocabulary(size=4)>
(10,)
title_length: 10
[ 2 1 4151 1231 25 1 2392 4076 38 3]
<BEGIN> <UNK> federer wins the <UNK> tennis tournament . <END>
###Markdown
Dataset
###Code
from torch.utils.data import Dataset, DataLoader
class NewsDataset(Dataset):
def __init__(self, df, vectorizer):
self.df = df
self.vectorizer = vectorizer
# Data splits
self.train_df = self.df[self.df.split=='train']
self.train_size = len(self.train_df)
self.val_df = self.df[self.df.split=='val']
self.val_size = len(self.val_df)
self.test_df = self.df[self.df.split=='test']
self.test_size = len(self.test_df)
self.lookup_dict = {'train': (self.train_df, self.train_size),
'val': (self.val_df, self.val_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
# Class weights (for imbalances)
class_counts = df.category.value_counts().to_dict()
def sort_key(item):
return self.vectorizer.category_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequencies = [count for _, count in sorted_counts]
self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
@classmethod
def load_dataset_and_make_vectorizer(cls, df, cutoff):
train_df = df[df.split=='train']
return cls(df, NewsVectorizer.from_dataframe(train_df, cutoff))
@classmethod
def load_dataset_and_load_vectorizer(cls, df, vectorizer_filepath):
vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
return cls(df, vectorizer)
def load_vectorizer_only(vectorizer_filepath):
with open(vectorizer_filepath) as fp:
return NewsVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
with open(vectorizer_filepath, "w") as fp:
json.dump(self.vectorizer.to_serializable(), fp)
def set_split(self, split="train"):
self.target_split = split
self.target_df, self.target_size = self.lookup_dict[split]
def __str__(self):
return "<Dataset(split={0}, size={1})".format(
self.target_split, self.target_size)
def __len__(self):
return self.target_size
def __getitem__(self, index):
row = self.target_df.iloc[index]
title_vector, title_length = self.vectorizer.vectorize(row.title)
category_index = self.vectorizer.category_vocab.lookup_token(row.category)
return {'title': title_vector, 'title_length': title_length,
'category': category_index}
def get_num_batches(self, batch_size):
return len(self) // batch_size
def generate_batches(self, batch_size, collate_fn, shuffle=True,
drop_last=False, device="cpu"):
dataloader = DataLoader(dataset=self, batch_size=batch_size,
collate_fn=collate_fn, shuffle=shuffle,
drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
# Dataset instance
dataset = NewsDataset.load_dataset_and_make_vectorizer(df=split_df,
cutoff=args.cutoff)
print (dataset) # __str__
input_ = dataset[5] # __getitem__
print (input_['title'], input_['title_length'], input_['category'])
print (dataset.vectorizer.unvectorize(input_['title']))
print (dataset.class_weights)
###Output
<Dataset(split=train, size=84000)
[ 2 31 32 10 33 13 3] 7 0
<BEGIN> software firm to cut jobs <END>
tensor([3.3333e-05, 3.3333e-05, 3.3333e-05, 3.3333e-05])
###Markdown
Model input → embedding → RNN → FC
###Code
import torch.nn as nn
import torch.nn.functional as F
def gather_last_relevant_hidden(hiddens, x_lengths):
x_lengths = x_lengths.long().detach().cpu().numpy() - 1
out = []
for batch_index, column_index in enumerate(x_lengths):
out.append(hiddens[batch_index, column_index])
return torch.stack(out)
class NewsModel(nn.Module):
def __init__(self, embedding_dim, num_embeddings, rnn_hidden_dim,
hidden_dim, output_dim, num_layers, bidirectional, dropout_p,
pretrained_embeddings=None, freeze_embeddings=False,
padding_idx=0):
super(NewsModel, self).__init__()
if pretrained_embeddings is None:
self.embeddings = nn.Embedding(embedding_dim=embedding_dim,
num_embeddings=num_embeddings,
padding_idx=padding_idx)
else:
pretrained_embeddings = torch.from_numpy(pretrained_embeddings).float()
self.embeddings = nn.Embedding(embedding_dim=embedding_dim,
num_embeddings=num_embeddings,
padding_idx=padding_idx,
_weight=pretrained_embeddings)
# Conv weights
self.gru = nn.GRU(input_size=embedding_dim, hidden_size=rnn_hidden_dim,
num_layers=num_layers, batch_first=True,
bidirectional=bidirectional)
# FC weights
self.dropout = nn.Dropout(dropout_p)
self.fc1 = nn.Linear(rnn_hidden_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, output_dim)
if freeze_embeddings:
self.embeddings.weight.requires_grad = False
def forward(self, x_in, x_lengths, apply_softmax=False):
# Embed
x_in = self.embeddings(x_in)
# Feed into RNN
out, h_n = self.gru(x_in)
# Gather the last relevant hidden state
out = gather_last_relevant_hidden(out, x_lengths)
# FC layers
z = self.dropout(out)
z = self.fc1(z)
z = self.dropout(z)
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
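###Markdown
As a quick sanity check with made-up, hypothetical dimensions (an addition to the original notebook): instantiate a tiny `NewsModel` and push a dummy batch through it to confirm the forward pass wires up. Note that `fc1` assumes a unidirectional RNN; with `bidirectional=True` the gathered hidden state would have size `2 * rnn_hidden_dim`.
###Code
# Sketch: dummy forward pass with small, hypothetical dimensions.
tmp_model = NewsModel(embedding_dim=50, num_embeddings=100, rnn_hidden_dim=32,
                      hidden_dim=16, output_dim=4, num_layers=1,
                      bidirectional=False, dropout_p=0.1)
dummy_titles = torch.randint(1, 100, (8, 12))  # batch of 8 "titles", 12 token ids each
dummy_lengths = torch.randint(5, 13, (8,))     # pretend lengths, all <= 12
y_dummy = tmp_model(dummy_titles, dummy_lengths, apply_softmax=True)
print (y_dummy.size())  # expected: torch.Size([8, 4])
###Output
_____no_output_____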
###Markdown
Training
###Code
import torch.optim as optim
class Trainer(object):
def __init__(self, dataset, model, model_state_file, save_dir, device, shuffle,
num_epochs, batch_size, learning_rate, early_stopping_criteria):
self.dataset = dataset
self.class_weights = dataset.class_weights.to(device)
self.model = model.to(device)
self.save_dir = save_dir
self.device = device
self.shuffle = shuffle
self.num_epochs = num_epochs
self.batch_size = batch_size
self.loss_func = nn.CrossEntropyLoss(self.class_weights)
self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer=self.optimizer, mode='min', factor=0.5, patience=1)
self.train_state = {
'done_training': False,
'stop_early': False,
'early_stopping_step': 0,
'early_stopping_best_val': 1e8,
'early_stopping_criteria': early_stopping_criteria,
'learning_rate': learning_rate,
'epoch_index': 0,
'train_loss': [],
'train_acc': [],
'val_loss': [],
'val_acc': [],
'test_loss': -1,
'test_acc': -1,
'model_filename': model_state_file}
def update_train_state(self):
# Verbose
print ("[EPOCH]: {0} | [LR]: {1} | [TRAIN LOSS]: {2:.2f} | [TRAIN ACC]: {3:.1f}% | [VAL LOSS]: {4:.2f} | [VAL ACC]: {5:.1f}%".format(
self.train_state['epoch_index'], self.train_state['learning_rate'],
self.train_state['train_loss'][-1], self.train_state['train_acc'][-1],
self.train_state['val_loss'][-1], self.train_state['val_acc'][-1]))
# Save one model at least
if self.train_state['epoch_index'] == 0:
torch.save(self.model.state_dict(), self.train_state['model_filename'])
self.train_state['stop_early'] = False
# Save model if performance improved
elif self.train_state['epoch_index'] >= 1:
loss_tm1, loss_t = self.train_state['val_loss'][-2:]
# If loss worsened
if loss_t >= self.train_state['early_stopping_best_val']:
# Update step
self.train_state['early_stopping_step'] += 1
# Loss decreased
else:
# Save the best model
if loss_t < self.train_state['early_stopping_best_val']:
torch.save(self.model.state_dict(), self.train_state['model_filename'])
# Reset early stopping step
self.train_state['early_stopping_step'] = 0
# Stop early ?
self.train_state['stop_early'] = self.train_state['early_stopping_step'] \
>= self.train_state['early_stopping_criteria']
return self.train_state
def compute_accuracy(self, y_pred, y_target):
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
def pad_seq(self, seq, length):
vector = np.zeros(length, dtype=np.int64)
vector[:len(seq)] = seq
vector[len(seq):] = self.dataset.vectorizer.title_vocab.mask_index
return vector
def collate_fn(self, batch):
# Make a deep copy
batch_copy = copy.deepcopy(batch)
processed_batch = {"title": [], "title_length": [], "category": []}
# Get max sequence length
get_length = lambda sample: len(sample["title"])
max_seq_length = max(map(get_length, batch))
# Pad
for i, sample in enumerate(batch_copy):
padded_seq = self.pad_seq(sample["title"], max_seq_length)
processed_batch["title"].append(padded_seq)
processed_batch["title_length"].append(sample["title_length"])
processed_batch["category"].append(sample["category"])
# Convert to appropriate tensor types
processed_batch["title"] = torch.LongTensor(
processed_batch["title"])
processed_batch["title_length"] = torch.LongTensor(
processed_batch["title_length"])
processed_batch["category"] = torch.LongTensor(
processed_batch["category"])
return processed_batch
def run_train_loop(self):
for epoch_index in range(self.num_epochs):
self.train_state['epoch_index'] = epoch_index
# Iterate over train dataset
# initialize batch generator, set loss and acc to 0, set train mode on
self.dataset.set_split('train')
batch_generator = self.dataset.generate_batches(
batch_size=self.batch_size, collate_fn=self.collate_fn,
shuffle=self.shuffle, device=self.device)
running_loss = 0.0
running_acc = 0.0
self.model.train()
for batch_index, batch_dict in enumerate(batch_generator):
# zero the gradients
self.optimizer.zero_grad()
# compute the output
y_pred = self.model(batch_dict['title'], batch_dict['title_length'])
# compute the loss
loss = self.loss_func(y_pred, batch_dict['category'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute gradients using loss
loss.backward()
# use optimizer to take a gradient step
self.optimizer.step()
# compute the accuracy
acc_t = self.compute_accuracy(y_pred, batch_dict['category'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
self.train_state['train_loss'].append(running_loss)
self.train_state['train_acc'].append(running_acc)
# Iterate over val dataset
# # initialize batch generator, set loss and acc to 0; set eval mode on
self.dataset.set_split('val')
batch_generator = self.dataset.generate_batches(
batch_size=self.batch_size, collate_fn=self.collate_fn,
shuffle=self.shuffle, device=self.device)
running_loss = 0.
running_acc = 0.
self.model.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = self.model(batch_dict['title'], batch_dict['title_length'])
# compute the loss
loss = self.loss_func(y_pred, batch_dict['category'])
loss_t = loss.to("cpu").item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = self.compute_accuracy(y_pred, batch_dict['category'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
self.train_state['val_loss'].append(running_loss)
self.train_state['val_acc'].append(running_acc)
self.train_state = self.update_train_state()
self.scheduler.step(self.train_state['val_loss'][-1])
if self.train_state['stop_early']:
break
def run_test_loop(self):
# initialize batch generator, set loss and acc to 0; set eval mode on
self.dataset.set_split('test')
batch_generator = self.dataset.generate_batches(
batch_size=self.batch_size, collate_fn=self.collate_fn,
shuffle=self.shuffle, device=self.device)
running_loss = 0.0
running_acc = 0.0
self.model.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = self.model(batch_dict['title'], batch_dict['title_length'])
# compute the loss
loss = self.loss_func(y_pred, batch_dict['category'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = self.compute_accuracy(y_pred, batch_dict['category'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
self.train_state['test_loss'] = running_loss
self.train_state['test_acc'] = running_acc
def plot_performance(self):
# Figure size
plt.figure(figsize=(15,5))
# Plot Loss
plt.subplot(1, 2, 1)
plt.title("Loss")
        plt.plot(self.train_state["train_loss"], label="train")
        plt.plot(self.train_state["val_loss"], label="val")
plt.legend(loc='upper right')
# Plot Accuracy
plt.subplot(1, 2, 2)
plt.title("Accuracy")
        plt.plot(self.train_state["train_acc"], label="train")
        plt.plot(self.train_state["val_acc"], label="val")
plt.legend(loc='lower right')
# Save figure
plt.savefig(os.path.join(self.save_dir, "performance.png"))
# Show plots
plt.show()
def save_train_state(self):
self.train_state["done_training"] = True
with open(os.path.join(self.save_dir, "train_state.json"), "w") as fp:
json.dump(self.train_state, fp)
# Initialization
dataset = NewsDataset.load_dataset_and_make_vectorizer(df=split_df,
cutoff=args.cutoff)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.vectorizer
model = NewsModel(embedding_dim=args.embedding_dim,
num_embeddings=len(vectorizer.title_vocab),
rnn_hidden_dim=args.rnn_hidden_dim,
hidden_dim=args.hidden_dim,
output_dim=len(vectorizer.category_vocab),
num_layers=args.num_layers,
bidirectional=args.bidirectional,
dropout_p=args.dropout_p,
pretrained_embeddings=None,
padding_idx=vectorizer.title_vocab.mask_index)
print (model.named_modules)
# Train
trainer = Trainer(dataset=dataset, model=model,
model_state_file=args.model_state_file,
save_dir=args.save_dir, device=args.device,
shuffle=args.shuffle, num_epochs=args.num_epochs,
batch_size=args.batch_size, learning_rate=args.learning_rate,
early_stopping_criteria=args.early_stopping_criteria)
trainer.run_train_loop()
# Plot performance
trainer.plot_performance()
# Test performance
trainer.run_test_loop()
print("Test loss: {0:.2f}".format(trainer.train_state['test_loss']))
print("Test Accuracy: {0:.1f}%".format(trainer.train_state['test_acc']))
# Save all results
trainer.save_train_state()
###Output
_____no_output_____
###Markdown
Inference
###Code
class Inference(object):
def __init__(self, model, vectorizer, device="cpu"):
self.model = model.to(device)
self.vectorizer = vectorizer
self.device = device
def predict_category(self, dataset):
# Batch generator
batch_generator = dataset.generate_batches(
batch_size=len(dataset), shuffle=False, device=self.device)
self.model.eval()
# Predict
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = self.model(batch_dict['title'], batch_dict["title_length"],
apply_softmax=True)
# Top k nationalities
y_prob, indices = torch.topk(y_pred, k=len(self.vectorizer.category_vocab))
probabilities = y_prob.detach().to('cpu').numpy()[0]
indices = indices.detach().to('cpu').numpy()[0]
results = []
for probability, index in zip(probabilities, indices):
category = self.vectorizer.category_vocab.lookup_index(index)
results.append({'category': category, 'probability': probability})
return results
# Load vectorizer
with open(args.vectorizer_file) as fp:
vectorizer = NewsVectorizer.from_serializable(json.load(fp))
# Load the model
model = NewsModel(embedding_dim=args.embedding_dim,
num_embeddings=len(vectorizer.title_vocab),
rnn_hidden_dim=args.rnn_hidden_dim,
hidden_dim=args.hidden_dim,
output_dim=len(vectorizer.category_vocab),
num_layers=args.num_layers,
bidirectional=args.bidirectional,
dropout_p=args.dropout_p,
pretrained_embeddings=None,
padding_idx=vectorizer.title_vocab.mask_index)
model.load_state_dict(torch.load(args.model_state_file))
print (model.named_modules)
# Initialize
inference = Inference(model=model, vectorizer=vectorizer, device=args.device)
class InferenceDataset(Dataset):
def __init__(self, df, vectorizer):
self.df = df
self.vectorizer = vectorizer
self.target_size = len(self.df)
def __str__(self):
        return "<Dataset(size={0})>".format(self.target_size)
def __len__(self):
return self.target_size
def __getitem__(self, index):
row = self.df.iloc[index]
title_vector, title_length = self.vectorizer.vectorize(row.title)
return {'title': title_vector, 'title_length': title_length}
def get_num_batches(self, batch_size):
return len(self) // batch_size
def generate_batches(self, batch_size, shuffle=True, drop_last=False, device="cpu"):
dataloader = DataLoader(dataset=self, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
# Inference
title = input("Enter a title to classify: ")
infer_df = pd.DataFrame([title], columns=['title'])
infer_df.title = infer_df.title.apply(preprocess_text)
infer_dataset = InferenceDataset(infer_df, vectorizer)
results = inference.predict_category(dataset=infer_dataset)
results
###Output
Enter a title to classify: baseball
|
docs/tutorials/single_cell_transcriptomics.ipynb | ###Markdown
Analysis of single-cell transcriptomics This tutorial demonstrates how to analyze single-cell transcriptomics data using LANTSA, including:* Clustering & visualization* Cell type marker genes
###Code
import numpy as np
import pandas as pd
import scanpy as sc
import matplotlib.pyplot as plt
import lantsa
###Output
_____no_output_____
###Markdown
Read the data We will use an annotated single-cell transcriptomics dataset from [Alex Pollen et al.](http://dx.doi.org/10.1038/nbt.2967), which can be downloaded [here](https://s3.amazonaws.com/scrnaseq-public-datasets/manual-data/pollen/NBT_hiseq_linear_tpm_values.txt). First, we read the data table and convert it into an [AnnData](https://anndata.readthedocs.io/en/latest/anndata.AnnData.html) object.
###Code
X = pd.read_table('./data/NBT_hiseq_linear_tpm_values.txt', index_col=0).T
celltype = X.index.str.split('_', expand=True).to_frame().to_numpy()[:, 1]
adata = sc.AnnData(X)
adata.obs['celltype'] = pd.Categorical(celltype)
adata
###Output
_____no_output_____
###Markdown
Preprocessing Then, we perform basic preprocessing including log transformation and finding highly variable genes.
###Code
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata, n_top_genes=2000, flavor='seurat')
###Output
_____no_output_____
###Markdown
Subspace analysis Since this is a small dataset, we do not need to select landmarks. Now, we perform subspace analysis to learn a representation for clustering.
###Code
lantsa.subspace_analysis(adata, Lambda=0.1, n_neighbors=40)
###Output
19%|█████▋ | 95/500 [00:00<00:00, 405.98it/s, relChg: 2.642e-05, recErr: 9.174e-06, converged!]
###Markdown
Clustering and visualization The resulting `adata` is compatible with [scanpy.tl.leiden()](https://scanpy.readthedocs.io/en/stable/generated/scanpy.tl.leiden.html) for clustering and [scanpy.tl.umap()](https://scanpy.readthedocs.io/en/stable/generated/scanpy.tl.umap.html) for visualization.
###Code
sc.tl.leiden(adata, resolution=2.5, neighbors_key='subspace_analysis')
sc.pp.pca(adata)
sc.tl.umap(adata, init_pos='random', neighbors_key='subspace_analysis')
###Output
_____no_output_____
###Markdown
We visualize the inferred clusters in UMAP space.
###Code
fig, axs = plt.subplots(figsize=(8, 7))
sc.pl.umap(
adata,
color="leiden",
size=200,
palette=sc.pl.palettes.default_102,
legend_loc='right margin',
show=False,
ax=axs,
)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
We also visualize the annotated cell types in UMAP space.
###Code
fig, axs = plt.subplots(figsize=(8, 7))
sc.pl.umap(
adata,
color="celltype",
size=200,
palette=sc.pl.palettes.default_102,
legend_loc='right margin',
show=False,
ax=axs,
)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Cell type marker genes Lastly, we compute the differentially expressed (DE) genes of each cell type for visualization.
###Code
sc.tl.rank_genes_groups(adata, groupby='celltype', method='t-test')
sc.pl.rank_genes_groups(adata, n_genes=20, ncols=3, sharey=False)
marker_genes = pd.DataFrame(adata.uns['rank_genes_groups']['names']).iloc[:10,:]
marker_genes
###Output
_____no_output_____
###Markdown
Now, we focus on a specific cell type, here BJ for demonstration. We visualize the expression levels of the first-ranked DE gene of BJ in UMAP space.
###Code
fig, axs = plt.subplots(figsize=(8, 7))
sc.pl.umap(
adata,
color=marker_genes.iloc[0,2],
size=200,
palette=sc.pl.palettes.default_20,
legend_loc='right margin',
show=False,
ax=axs,
)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Then, we visualize the expression pattern of top 3 DE genes of each cell type.
###Code
fig, axs = plt.subplots(figsize=(9, 10))
sc.pl.dotplot(
adata,
var_names=marker_genes.iloc[0:3, :].to_numpy().T.reshape(-1),
groupby='celltype',
expression_cutoff=5,
dot_min=0.8,
swap_axes=True,
show=False,
ax=axs,
)
plt.tight_layout()
###Output
_____no_output_____ |
notebooks/4. Model Evaluation.ipynb | ###Markdown
choose an appropriate sampler and sampling ratio based on the submission score
###Code
def get_X_y(dataset):
X = dataset.drop(columns=["user_id", "label"]).fillna(-1).values
y = dataset.label.values
return X, y
def sampler(name, ratio, random_state=0, return_indices=True, **kwargs):
if name == "rus":
sampler = RandomUnderSampler(
ratio=ratio,
return_indices=return_indices,
random_state=random_state,
**kwargs,
)
elif name == "nm":
sampler = NearMiss(
ratio=ratio,
return_indices=return_indices,
random_state=random_state,
**kwargs,
)
elif name == "enn":
sampler = EditedNearestNeighbours(
return_indices=return_indices, random_state=random_state, **kwargs
)
elif name == "renn":
sampler = RepeatedEditedNearestNeighbours(
return_indices=return_indices, random_state=random_state, **kwargs
)
elif name == "allknn":
sampler = AllKNN(
return_indices=return_indices, random_state=random_state, **kwargs
)
elif name == "tl":
sampler = TomekLinks(
return_indices=return_indices, random_state=random_state, **kwargs
)
else:
raise ValueError
return sampler
def merge_scoring_metrics(scores, scorer):
df = pd.DataFrame(scores)
custom_metrics = scorer.get(filter=None)
for metric, scores in custom_metrics.items():
df["test_{0}".format(metric)] = scores[::2]
df["train_{0}".format(metric)] = scores[1::2]
return df
def score_whole_dataset(clf, dataset, pre_train=True):
if not ("label" in dataset):
raise ValueError("dataset must include the label column")
X, y = get_X_y(dataset)
if not pre_train:
clf.fit(X, y)
scoring, scorer = get_jdata_scoring(dataset)
scoring["custom_index"](clf, X, y, np.arange(X.shape[0]))
metrics = {}
for k, v in scorer.get(filter=None).items():
metrics["test_{}".format(k)] = v
return pd.DataFrame(metrics)
# load training dataset
frw = FeatherReadWriter()
train = frw.read(dir="processed", name=frw.extend_name("all_merged_1.0"), nthreads=4)
label = frw.read(
dir="processed", name=frw.extend_name("all_merged_1.0.label"), nthreads=4
)
train[label.columns] = label
X, y = get_X_y(train)
# load online dataset for submission
online_train = frw.read(dir="processed", name=frw.extend_name("all_merged_online"), nthreads=4)
online_label = frw.read(
dir="processed", name=frw.extend_name("all_merged_online.label"), nthreads=4
)
online_train[online_label.columns] = online_label
sampling_paras = [
("rus", 0.1),
("rus", 0.01),
("rus", 0.001),
("nm", 0.1),
("nm", 0.01),
("nm", 0.001),
("tl", None),
("enn", None),
("renn", None),
("allknn", None),
]
fpath = str(PROJECT_DIR.joinpath("reports/metrics_by_samplers.csv"))
whole_dataset_metrics = pd.DataFrame()
for method, ratio in sampling_paras:
with timer(f"method: {method}, ratio: {ratio}"):
sampler_setting = {"name": method, "ratio": ratio, "n_jobs": 4}
s = sampler(**sampler_setting)
res_X, res_y, indices = s.fit_sample(X, y)
print("Distribution of class labels after resampling {}".format(Counter(res_y)))
clf = XGBClassifier(nthread=-1)
with timer("start training"):
clf.fit(res_X, res_y, verbose=3)
score_df = score_whole_dataset(clf, online_train)
score_df = score_df.set_index([["{0}-{1}".format(method, ratio)]])
whole_dataset_metrics = pd.concat([whole_dataset_metrics, score_df])
whole_dataset_metrics.to_csv(fpath)
frw.write(
pd.DataFrame({"index": indices}),
"processed",
frw.extend_name(f"all_merged_1.0_{method}_{ratio}_indices"),
)
###Output
_____no_output_____
###Markdown
```Distribution of class labels after resampling Counter({0: 21240, 1: 2124})method: rus, ratio: 0.1: 18.471 secDistribution of class labels after resampling Counter({0: 212400, 1: 2124})method: rus, ratio: 0.01: 39.256 secDistribution of class labels after resampling Counter({0: 2124000, 1: 2124})method: rus, ratio: 0.001: 654.605 secDistribution of class labels after resampling Counter({0: 21240, 1: 2124})method: nm, ratio: 0.1: 866.883 secDistribution of class labels after resampling Counter({0: 212400, 1: 2124})method: nm, ratio: 0.01: 798.771 secDistribution of class labels after resampling Counter({0: 2124000, 1: 2124})method: nm, ratio: 0.001: 1384.717 sec``` We could not get results for ("tl", None), ("renn", None), ("allknn", None), ("enn", None). TODO: maybe we should standardize our datasets to speed up these processes (a sketch follows the next cell). Finally, we choose random under-sampling with ratio 0.01.
###Code
indices = frw.read("processed", frw.extend_name(f"all_merged_1.0_rus_0.01_indices"))
res_train = train.iloc[indices['index'], :]
res_X, res_y = get_X_y(res_train)
Counter(res_y)
###Output
_____no_output_____
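###Markdown
The TODO above suggests standardizing the features before the neighbour-based samplers (NearMiss, ENN, and friends). A minimal sketch of how that could look; whether it actually shortens their runtime was not tested here.
###Code
# Sketch only: scale features before distance-based under-sampling.
from sklearn.preprocessing import StandardScaler

X_scaled = StandardScaler().fit_transform(X)
# s = sampler(name="nm", ratio=0.01, n_jobs=4)
# res_X_nm, res_y_nm, nm_indices = s.fit_sample(X_scaled, y)
###Output
_____no_output_____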
###Markdown
use cross validation to compare metrics
###Code
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=41)
###Output
_____no_output_____
###Markdown
sklearn default metrics
###Code
clf = XGBClassifier(nthread=-1)
scoring = {
"precision": "precision",
"recall": "recall",
"f1": "f1",
"neg_log_loss": "neg_log_loss",
"roc_auc": "roc_auc",
}
scores = cross_validate(clf, res_X, res_y, cv=kfold, scoring=scoring, return_train_score=True, verbose=1)
pd.DataFrame(scores)
###Output
_____no_output_____
###Markdown
The difference between JData Fscore and sklearn metrics
###Code
clf = XGBClassifier(nthread=-1)
scoring, scorer = get_jdata_scoring(res_train)
%time scores = cross_validate(clf, res_X, res_y, cv=kfold, scoring=scoring, return_train_score=True, verbose=1)
pd.DataFrame(scores)[['test_custom_index', 'train_custom_index']]
# test metrics
pd.DataFrame(scorer.get())
merge_scoring_metrics(scores, scorer)
###Output
_____no_output_____
###Markdown
find best estimator by gridsearchcv and use custom jdata score function
###Code
scoring, scorer = get_jdata_scoring(res_train)
scoring = {'custom': scoring["custom_index"]}
refit = 'custom'
clf = XGBClassifier(nthread=-1)
n_estimators = range(50, 400, 50)
param_grid = dict(n_estimators=n_estimators)
grid_search = GridSearchCV(clf, param_grid, scoring=scoring, cv=kfold, refit=refit, return_train_score=True)
%time grid_result = grid_search.fit(res_X, res_y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_["mean_test_custom"]
stds = grid_result.cv_results_["std_test_custom"]
params = grid_result.cv_results_["params"]
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# Plot
pyplot.errorbar(n_estimators, means, yerr=stds)
pyplot.title("XGBoost n_estimators vs JData score")
pyplot.xlabel('n_estimators')
pyplot.ylabel('JData score')
# pd.DataFrame(grid_result.cv_results_).to_csv('../reports/search_n_estimators_all_merged_1.0_rus_0.01.csv', index=False)
pd.DataFrame(grid_result.cv_results_)
scoring, scorer = get_jdata_scoring(res_train)
scoring = {'custom': scoring["custom_index"]}
refit = 'custom'
clf = XGBClassifier(nthread=-1)
max_depth = range(1, 11, 2)
print(max_depth)
param_grid = dict(max_depth=max_depth)
grid_search = GridSearchCV(clf, param_grid, scoring=scoring, cv=kfold, refit=refit, return_train_score=True)
%time grid_result = grid_search.fit(res_X, res_y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_["mean_test_custom"]
stds = grid_result.cv_results_["std_test_custom"]
params = grid_result.cv_results_["params"]
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# plot
pyplot.errorbar(max_depth, means, yerr=stds)
pyplot.title("XGBoost max_depth vs JData score")
pyplot.xlabel('max_depth')
pyplot.ylabel('JData score')
# pd.DataFrame(grid_result.cv_results_).to_csv('../reports/search_max_depth_all_merged_1.0_rus_0.01.csv', index=False)
pd.DataFrame(grid_result.cv_results_)
scoring, scorer = get_jdata_scoring(res_train)
scoring = {'custom': scoring["custom_index"]}
refit = 'custom'
clf = XGBClassifier(nthread=-1)
n_estimators = range(150, 400, 50)
max_depth = range(3, 9, 2)
param_grid = dict(max_depth=max_depth, n_estimators=n_estimators)
grid_search = GridSearchCV(clf, param_grid, scoring=scoring, cv=kfold, refit=refit, verbose=1, return_train_score=True)
%time grid_result = grid_search.fit(res_X, res_y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_["mean_test_custom"]
stds = grid_result.cv_results_["std_test_custom"]
params = grid_result.cv_results_["params"]
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# plot results
scores = np.array(means).reshape(len(max_depth), len(n_estimators))
for i, value in enumerate(max_depth):
pyplot.plot(n_estimators, scores[i], label='depth: ' + str(value))
pyplot.legend()
pyplot.xlabel('n_estimators')
pyplot.ylabel('JData score')
# pd.DataFrame(grid_result.cv_results_).to_csv('../reports/search_estimators_and_max_depth_all_merged_1.0_rus_0.01.csv', index=False)
pd.DataFrame(grid_result.cv_results_).sort_values('rank_test_custom')
###Output
_____no_output_____
###Markdown
training on the whole dataset -> worse
###Code
param = {'max_depth': 3, 'n_estimators': 350}
clf = XGBClassifier(nthread=-1, **param)
X, y = get_X_y(train)
%time clf.fit(X, y)
clf
# use our best model to evalute result of the whole local data
scoring, scorer = get_jdata_scoring(train)
scores = scoring['custom_index'](clf, X, y, np.arange(X.shape[0]))
print(f'whole local result: {scores}')
###Output
whole local result: 0.04065112543626942
###Markdown
use the model trained on the sampled dataset to submit -> not good enough
###Code
param = {'max_depth': 3, 'n_estimators': 350}
clf = XGBClassifier(nthread=-1, **param)
%time clf.fit(res_X, res_y)
scoring, scorer = get_jdata_scoring(train)
scores = scoring['custom_index'](clf, X, y, np.arange(X.shape[0]))
print(f'whole local result: {scores}')
score_df = score_whole_dataset(clf, online_train)
score_df
# trytry place
# rank3
param = {'max_depth': 5, 'n_estimators': 350}
clf = XGBClassifier(nthread=-1, **param)
%time clf.fit(res_X, res_y)
scoring, scorer = get_jdata_scoring(train)
scores = scoring['custom_index'](clf, X, y, np.arange(X.shape[0]))
print(f'whole local result: {scores}')
score_df = score_whole_dataset(clf, online_train)
score_df
## Increasing the depth raises the local score but the online score does not improve -- does this mean the features cannot provide enough information for the model to predict?
## overfitting?
## ensemble to reduce overfitting
## sampling 3 sample datasets to ensemble results
import random
rslt = []
for i in range(3):
rs = {}
method = 'rus'
ratio = 0.01
random_state = random.randint(0, 10000)
sampler_setting = {"name": method, "ratio": ratio, "random_state": random_state}
print(sampler_setting)
s = sampler(**sampler_setting)
res_X, res_y, indices = s.fit_sample(X, y)
rs['method'] = method
rs['ratio'] = ratio
rs['random_state'] = random_state
rs['indices'] = indices
param = {'max_depth': 3, 'n_estimators': 350}
clf = XGBClassifier(nthread=-1, **param)
%time clf.fit(res_X, res_y)
rs['param'] = param
rs['model'] = clf
scoring, scorer = get_jdata_scoring(train)
scores = scoring['custom_index'](clf, X, y, np.arange(X.shape[0]))
print(f'whole local result: {scores}')
rs['scoring'] = scoring
rs['scorer'] = scorer
rslt.append(rs)
from sklearn.ensemble import VotingClassifier
eclf = VotingClassifier(estimators=[('xgb1', rslt[0]['model']), ('xgb2', rslt[1]['model']), ('xgb3', rslt[2]['model'])], voting='soft')
%time eclf = eclf.fit(res_X, res_y)
score_whole_dataset(eclf, online_train)
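# Caveat (added note): VotingClassifier.fit() re-trains its base estimators, so the ensemble above
# is effectively three XGB models re-fit on the same final (res_X, res_y) sample rather than the
# three models trained on different random undersamples. A minimal manual soft vote that keeps the
# pre-trained models (a sketch; assumes rslt and X from the cells above):
avg_proba = np.mean([rs['model'].predict_proba(X)[:, 1] for rs in rslt], axis=0)
manual_pred = (avg_proba >= 0.5).astype(int)
print('manual soft-vote positive predictions:', manual_pred.sum())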
# TODO
# remove unseen sku?
###Output
_____no_output_____
###Markdown
plot importance
###Code
feature_names = train.drop(columns=["user_id", "label"]).columns
feature_mapping = dict([('f{}'.format(i), feature_names[i]) for i in range(len(feature_names))])
from sklearn import preprocessing
def plot_importance(model, feature_mapping, n=30):
# Get xgBoost importances
importance_dict = {}
for import_type in ['weight', 'gain', 'cover']:
importance_dict['xgBoost-{}'.format(import_type)] = model.get_booster().get_score(importance_type=import_type)
# MinMax scale all importances
importance_df = pd.DataFrame(importance_dict).fillna(0)
importance_df = pd.DataFrame(
preprocessing.MinMaxScaler().fit_transform(importance_df),
columns=importance_df.columns,
index=importance_df.index
)
# replace index
importance_df.index = importance_df.index.map(mapper=feature_mapping)
# Create mean column
importance_df['mean'] = importance_df.mean(axis=1)
# Plot the feature importances
    # plot the n most important features (sorted by the mean of the scaled importances)
    importance_df.sort_values('mean', ascending=False).head(n).plot(kind='bar', figsize=(20, 10))
plot_importance(clf, feature_mapping)
###Output
/home/makalon/.pyenv/versions/3.7.0/lib/python3.7/site-packages/sklearn/preprocessing/data.py:323: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by MinMaxScaler.
return self.partial_fit(X, y)
|
Applied Data Science with Python Specialization/Introduction to Data Science in Python/WEEK 3/Assignment+3 (1).ipynb | ###Markdown
---_You are currently looking at **version 1.5** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._--- Assignment 3 - More PandasThis assignment requires more individual learning then the last one did - you are encouraged to check out the [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to find functions or methods you might not have used yet, or ask questions on [Stack Overflow](http://stackoverflow.com/) and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff. Question 1 (20%)Load the energy data from the file `Energy Indicators.xls`, which is a list of indicators of [energy supply and renewable electricity production](Energy%20Indicators.xls) from the [United Nations](http://unstats.un.org/unsd/environment/excel_file_tables/2013/Energy%20Indicators.xls) for the year 2013, and should be put into a DataFrame with the variable name of **energy**.Keep in mind that this is an Excel file, and not a comma separated values file. Also, make sure to exclude the footer and header information from the datafile. The first two columns are unneccessary, so you should get rid of them, and you should change the column labels so that the columns are:`['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable']`Convert `Energy Supply` to gigajoules (there are 1,000,000 gigajoules in a petajoule). For all countries which have missing data (e.g. data with "...") make sure this is reflected as `np.NaN` values.Rename the following list of countries (for use in later questions):```"Republic of Korea": "South Korea","United States of America": "United States","United Kingdom of Great Britain and Northern Ireland": "United Kingdom","China, Hong Kong Special Administrative Region": "Hong Kong"```There are also several countries with numbers and/or parenthesis in their name. Be sure to remove these, e.g. `'Bolivia (Plurinational State of)'` should be `'Bolivia'`, `'Switzerland17'` should be `'Switzerland'`.Next, load the GDP data from the file `world_bank.csv`, which is a csv containing countries' GDP from 1960 to 2015 from [World Bank](http://data.worldbank.org/indicator/NY.GDP.MKTP.CD). Call this DataFrame **GDP**. Make sure to skip the header, and rename the following list of countries:```"Korea, Rep.": "South Korea", "Iran, Islamic Rep.": "Iran","Hong Kong SAR, China": "Hong Kong"```Finally, load the [Sciamgo Journal and Country Rank data for Energy Engineering and Power Technology](http://www.scimagojr.com/countryrank.php?category=2102) from the file `scimagojr-3.xlsx`, which ranks countries based on their journal contributions in the aforementioned area. Call this DataFrame **ScimEn**.Join the three datasets: GDP, Energy, and ScimEn into a new dataset (using the intersection of country names). Use only the last 10 years (2006-2015) of GDP data and only the top 15 countries by Scimagojr 'Rank' (Rank 1 through 15). 
The index of this DataFrame should be the name of the country, and the columns should be ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', 'Citations per document', 'H index', 'Energy Supply', 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015'].*This function should return a DataFrame with 20 columns and 15 entries.*
###Code
def answer_one():
import pandas as pd
import numpy as np
x = pd.ExcelFile('Energy Indicators.xls')
energy = x.parse(skiprows=17, skip_footer=(38)) # SKip the rows & footer
energy = energy[['Unnamed: 1','Petajoules','Gigajoules','%']]
# Set the column names
energy.columns = ['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable']
energy[['Energy Supply', 'Energy Supply per Capita', '% Renewable']] = energy[['Energy Supply', 'Energy Supply per Capita', '% Renewable']].replace('...',np.NaN).apply(pd.to_numeric)
# For converting Energy Supply to gigajoules
energy['Energy Supply'] = energy['Energy Supply'] * 1000000
# Rename the following list of countries
energy['Country'] = energy['Country'].replace({'China, Hong Kong Special Administrative Region':'Hong Kong','United Kingdom of Great Britain and Northern Ireland':'United Kingdom','Republic of Korea':'South Korea','United States of America':'United States','Iran (Islamic Republic of)':'Iran'})
energy['Country'] = energy['Country'].str.replace(r" \(.*\)","")
GDP = pd.read_csv('world_bank.csv',skiprows=4)
GDP['Country Name'] = GDP['Country Name'].replace('Korea, Rep.', 'South Korea')
GDP['Country Name'] = GDP['Country Name'].replace('Iran, Islamic Rep.', 'Iran')
GDP['Country Name'] = GDP['Country Name'].replace('Hong Kong SAR, China', 'Hong Kong')
GDP = GDP[['Country Name','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015']]
GDP.columns = ['Country','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015']
ScimEn = pd.read_excel(io='scimagojr-3.xlsx')
ScimEn_m = ScimEn[:15] # For 15 entries
# Merge sci & energy
df = pd.merge(ScimEn_m, energy, how='inner', left_on='Country', right_on='Country')
# Merge sci energy & GDP
final_df = pd.merge(df, GDP, how='inner', left_on='Country', right_on='Country')
final_df = final_df.set_index('Country')
#print(len(final_df))
return final_df
answer_one()
###Output
_____no_output_____
###Markdown
Question 2 (6.6%)The previous question joined three datasets then reduced this to just the top 15 entries. When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose?*This function should return a single number.*
###Code
%%HTML
<svg width="800" height="300">
<circle cx="150" cy="180" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="blue" />
<circle cx="200" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="red" />
<circle cx="100" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="green" />
<line x1="150" y1="125" x2="300" y2="150" stroke="black" stroke-width="2" fill="black" stroke-dasharray="5,3"/>
<text x="300" y="165" font-family="Verdana" font-size="35">Everything but this!</text>
</svg>
def answer_two():
import pandas as pd
import numpy as np
x = pd.ExcelFile('Energy Indicators.xls')
energy = x.parse(skiprows=17, skip_footer=(38)) # SKip the rows & footer
energy = energy[['Unnamed: 1','Petajoules','Gigajoules','%']]
# Set the column names
energy.columns = ['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable']
energy[['Energy Supply', 'Energy Supply per Capita', '% Renewable']] = energy[['Energy Supply', 'Energy Supply per Capita', '% Renewable']].replace('...',np.NaN).apply(pd.to_numeric)
# For converting Energy Supply to gigajoules
energy['Energy Supply'] = energy['Energy Supply'] * 1000000
# Rename the following list of countries
energy['Country'] = energy['Country'].replace({'China, Hong Kong Special Administrative Region':'Hong Kong','United Kingdom of Great Britain and Northern Ireland':'United Kingdom','Republic of Korea':'South Korea','United States of America':'United States','Iran (Islamic Republic of)':'Iran'})
energy['Country'] = energy['Country'].str.replace(r" \(.*\)","")
GDP = pd.read_csv('world_bank.csv',skiprows=4)
GDP['Country Name'] = GDP['Country Name'].replace('Korea, Rep.', 'South Korea')
GDP['Country Name'] = GDP['Country Name'].replace('Iran, Islamic Rep.', 'Iran')
GDP['Country Name'] = GDP['Country Name'].replace('Hong Kong SAR, China', 'Hong Kong')
GDP = GDP[['Country Name','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015']]
GDP.columns = ['Country','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015']
ScimEn = pd.read_excel(io='scimagojr-3.xlsx')
ScimEn_m = ScimEn[:15] # For 15 entries
# Merge sci & energy
df = pd.merge(ScimEn_m, energy, how='inner', left_on='Country', right_on='Country')
# Merge sci energy & GDP
final_df = pd.merge(df, GDP, how='inner', left_on='Country', right_on='Country')
final_df = final_df.set_index('Country')
# Merge sci & energy
df2 = pd.merge(ScimEn_m, energy, how='outer', left_on='Country', right_on='Country')
# Merge sci energy & GDP
final_df2 = pd.merge(df, GDP, how='outer', left_on='Country', right_on='Country')
final_df2 = final_df2.set_index('Country')
print(len(final_df))
print(len(final_df2))
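    # Note (added): the hard-coded 156 returned below corresponds to comparing the *full* datasets,
    # not the top-15 frames above (also, df2 is computed but unused -- final_df2 merges df, not df2).
    # A sketch of that calculation:
    # union = pd.merge(pd.merge(ScimEn, energy, on='Country', how='outer'), GDP, on='Country', how='outer')
    # intersection = pd.merge(pd.merge(ScimEn, energy, on='Country', how='inner'), GDP, on='Country', how='inner')
    # entries_lost = len(union) - len(intersection)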
return 156
answer_two()
###Output
15
264
###Markdown
Answer the following questions in the context of only the top 15 countries by Scimagojr Rank (aka the DataFrame returned by `answer_one()`) Question 3 (6.6%)What is the average GDP over the last 10 years for each country? (exclude missing values from this calculation.)*This function should return a Series named `avgGDP` with 15 countries and their average GDP sorted in descending order.*
###Code
def answer_three():
Top15 = answer_one()
avgGDP = Top15[['2006','2007','2008','2009','2010','2011','2012','2013','2014','2015']].mean(axis=1).rename('avgGDP').sort_values(ascending=False)
return avgGDP
answer_three()
###Output
_____no_output_____
###Markdown
Question 4 (6.6%)By how much had the GDP changed over the 10 year span for the country with the 6th largest average GDP?*This function should return a single number.*
###Code
def answer_four():
import pandas as pd
Top15 = answer_one()
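    # Note (added): Rank 4 corresponds to the United Kingdom, which has the 6th-largest average GDP
    # in this Top15 table, so the hard-coded filter below matches what the question asks for this
    # dataset; a more general approach would be to look up answer_three().index[5].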
ans = Top15[Top15['Rank'] == 4]['2015'] - Top15[Top15['Rank'] == 4]['2006']
return pd.to_numeric(ans)[0]
answer_four()
###Output
_____no_output_____
###Markdown
Question 5 (6.6%)What is the mean `Energy Supply per Capita`?*This function should return a single number.*
###Code
def answer_five():
Top15 = answer_one()
ans = Top15['Energy Supply per Capita'].mean()
return ans
answer_five()
###Output
_____no_output_____
###Markdown
Question 6 (6.6%)What country has the maximum % Renewable and what is the percentage?*This function should return a tuple with the name of the country and the percentage.*
###Code
def answer_six():
Top15 = answer_one()
ans = Top15[ Top15['% Renewable'] == max(Top15['% Renewable']) ]
return (ans.index.tolist()[0], ans['% Renewable'].tolist()[0])
answer_six()
###Output
_____no_output_____
###Markdown
Question 7 (6.6%)Create a new column that is the ratio of Self-Citations to Total Citations. What is the maximum value for this new column, and what country has the highest ratio?*This function should return a tuple with the name of the country and the ratio.*
###Code
def answer_seven():
Top15 = answer_one()
# Created col of citation ratio
Top15['Citation Ratio'] = Top15['Self-citations'] / Top15['Citations']
# Same as the above query
ans = Top15[Top15['Citation Ratio'] == max(Top15['Citation Ratio'])]
return (ans.index.tolist()[0], ans['Citation Ratio'].tolist()[0])
answer_seven()
###Output
_____no_output_____
###Markdown
Question 8 (6.6%)Create a column that estimates the population using Energy Supply and Energy Supply per capita. What is the third most populous country according to this estimate?*This function should return a single string value.*
###Code
def answer_eight():
Top15 = answer_one()
Top15['Population'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
Top15['Population'] = Top15['Population'].sort_values(ascending=False)
#print(Top15['Population'])
return Top15.sort_values(by = 'Population', ascending = False).iloc[2].name
answer_eight()
###Output
_____no_output_____
###Markdown
Question 9 (6.6%)Create a column that estimates the number of citable documents per person. What is the correlation between the number of citable documents per capita and the energy supply per capita? Use the `.corr()` method, (Pearson's correlation).*This function should return a single number.**(Optional: Use the built-in function `plot9()` to visualize the relationship between Energy Supply per Capita vs. Citable docs per Capita)*
###Code
def answer_nine():
Top15 = answer_one()
Top15['PopEst'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
Top15['Citable docs per Capita'] = Top15['Citable documents'] / Top15['PopEst']
return Top15['Citable docs per Capita'].corr(Top15['Energy Supply per Capita'])
answer_nine()
def plot9():
    import matplotlib.pyplot as plt
%matplotlib inline
Top15 = answer_one()
Top15['PopEst'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
Top15['Citable docs per Capita'] = Top15['Citable documents'] / Top15['PopEst']
Top15.plot(x='Citable docs per Capita', y='Energy Supply per Capita', kind='scatter', xlim=[0, 0.0006])
#plot9() # Be sure to comment out plot9() before submitting the assignment!
###Output
_____no_output_____
###Markdown
Question 10 (6.6%)Create a new column with a 1 if the country's % Renewable value is at or above the median for all countries in the top 15, and a 0 if the country's % Renewable value is below the median.*This function should return a series named `HighRenew` whose index is the country name sorted in ascending order of rank.*
###Code
def answer_ten():
Top15 = answer_one()
Top15['HighRenew'] = [1 if x >= Top15['% Renewable'].median() else 0 for x in Top15['% Renewable']]
return Top15['HighRenew']
answer_ten()
###Output
_____no_output_____
###Markdown
Question 11 (6.6%)Use the following dictionary to group the Countries by Continent, then create a dateframe that displays the sample size (the number of countries in each continent bin), and the sum, mean, and std deviation for the estimated population of each country.```pythonContinentDict = {'China':'Asia', 'United States':'North America', 'Japan':'Asia', 'United Kingdom':'Europe', 'Russian Federation':'Europe', 'Canada':'North America', 'Germany':'Europe', 'India':'Asia', 'France':'Europe', 'South Korea':'Asia', 'Italy':'Europe', 'Spain':'Europe', 'Iran':'Asia', 'Australia':'Australia', 'Brazil':'South America'}```*This function should return a DataFrame with index named Continent `['Asia', 'Australia', 'Europe', 'North America', 'South America']` and columns `['size', 'sum', 'mean', 'std']`*
###Code
def answer_eleven():
import pandas as pd
import numpy as np
ContinentDict = {'China':'Asia',
'United States':'North America',
'Japan':'Asia',
'United Kingdom':'Europe',
'Russian Federation':'Europe',
'Canada':'North America',
'Germany':'Europe',
'India':'Asia',
'France':'Europe',
'South Korea':'Asia',
'Italy':'Europe',
'Spain':'Europe',
'Iran':'Asia',
'Australia':'Australia',
'Brazil':'South America'}
Top15 = answer_one()
Top15['PopEst'] = (Top15['Energy Supply'] / Top15['Energy Supply per Capita']).astype(float)
Top15 = Top15.reset_index()
# Get the top continents
Top15['Continent'] = [ContinentDict[country] for country in Top15['Country']]
# Now set Index as Continent & Group By Population Estimate and apply the aggregate funs
    # aggregate per continent; the list form avoids the deprecated dict-renaming .agg() syntax
    ans = Top15.set_index('Continent').groupby(level=0)['PopEst'].agg(['size', 'sum', 'mean', 'std'])
ans = ans[['size', 'sum', 'mean', 'std']]
return ans
answer_eleven()
###Output
_____no_output_____
###Markdown
Question 12 (6.6%)Cut % Renewable into 5 bins. Group Top15 by the Continent, as well as these new % Renewable bins. How many countries are in each of these groups?*This function should return a __Series__ with a MultiIndex of `Continent`, then the bins for `% Renewable`. Do not include groups with no countries.*
###Code
def answer_twelve():
import pandas as pd
import numpy as np
Top15 = answer_one()
ContinentDict = {'China':'Asia',
'United States':'North America',
'Japan':'Asia',
'United Kingdom':'Europe',
'Russian Federation':'Europe',
'Canada':'North America',
'Germany':'Europe',
'India':'Asia',
'France':'Europe',
'South Korea':'Asia',
'Italy':'Europe',
'Spain':'Europe',
'Iran':'Asia',
'Australia':'Australia',
'Brazil':'South America'}
Top15 = Top15.reset_index()
Top15['Continent'] = [ContinentDict[country] for country in Top15['Country']]
# For bin we use pd.cut and 5 bins
Top15['bins'] = pd.cut(Top15['% Renewable'],5)
return Top15.groupby(['Continent','bins']).size()
answer_twelve()
###Output
_____no_output_____
###Markdown
Question 13 (6.6%)Convert the Population Estimate series to a string with thousands separator (using commas). Do not round the results.e.g. 317615384.61538464 -> 317,615,384.61538464*This function should return a Series `PopEst` whose index is the country name and whose values are the population estimate string.*
###Code
def answer_thirteen():
import pandas as pd
import numpy as np
ContinentDict = {'China':'Asia',
'United States':'North America',
'Japan':'Asia',
'United Kingdom':'Europe',
'Russian Federation':'Europe',
'Canada':'North America',
'Germany':'Europe',
'India':'Asia',
'France':'Europe',
'South Korea':'Asia',
'Italy':'Europe',
'Spain':'Europe',
'Iran':'Asia',
'Australia':'Australia',
'Brazil':'South America'}
Top15 = answer_one()
    Top15['PopEst'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
    # format with a thousands separator without rounding
    ans = Top15['PopEst'].apply(lambda x: "{:,}".format(x))
return ans
answer_thirteen()
###Output
_____no_output_____
###Markdown
OptionalUse the built in function `plot_optional()` to see an example visualization.
###Code
def plot_optional():
    import matplotlib.pyplot as plt
%matplotlib inline
Top15 = answer_one()
ax = Top15.plot(x='Rank', y='% Renewable', kind='scatter',
c=['#e41a1c','#377eb8','#e41a1c','#4daf4a','#4daf4a','#377eb8','#4daf4a','#e41a1c',
'#4daf4a','#e41a1c','#4daf4a','#4daf4a','#e41a1c','#dede00','#ff7f00'],
xticks=range(1,16), s=6*Top15['2014']/10**10, alpha=.75, figsize=[16,6]);
for i, txt in enumerate(Top15.index):
ax.annotate(txt, [Top15['Rank'][i], Top15['% Renewable'][i]], ha='center')
print("This is an example of a visualization that can be created to help understand the data. \
This is a bubble chart showing % Renewable vs. Rank. The size of the bubble corresponds to the countries' \
2014 GDP, and the color corresponds to the continent.")
#plot_optional() # Be sure to comment out plot_optional() before submitting the assignment!
###Output
This is an example of a visualization that can be created to help understand the data. This is a bubble chart showing % Renewable vs. Rank. The size of the bubble corresponds to the countries' 2014 GDP, and the color corresponds to the continent.
|
Fase 4 - Temas avanzados/Tema 15 - Funcionalidades avanzadas/Leccion 01 - Operadores encadenados.ipynb | ###Markdown
Operadores encadenados
###Code
# Traditional method
1 < 2 and 2 < 3
# Chained operator method
1 < 2 < 3
# Traditional method
numero = 25
if numero >= 0 and numero <= 100:
print("Okay")
# Chained operator method
numero = 25
if 0 <= numero <= 100:
print("Okay")
###Output
Okay
|
entry_exploration.ipynb | ###Markdown
Import data into the notebook
###Code
import pandas as pd #package for reading data
import numpy as np
import matplotlib.pyplot as plt #package for creating plots
import statsmodels.api as sm
data_folder = "data/"
entry = pd.read_csv(data_folder + "entry.csv")
print(entry.describe())
###Output
HD state LO state.1 time \
count 1584.000000 1584.000000 633.000000 633.000000 1.584000e+03
mean 1.254419 27.139520 1.273302 28.050553 1.597122e+09
std 0.994765 16.299669 0.816870 16.530868 3.313120e+04
min 1.000000 1.000000 1.000000 1.000000 1.597100e+09
25% 1.000000 12.000000 1.000000 12.000000 1.597100e+09
50% 1.000000 26.000000 1.000000 29.000000 1.597101e+09
75% 1.000000 41.000000 1.000000 45.000000 1.597172e+09
max 19.000000 78.000000 10.000000 56.000000 1.597174e+09
STATE STATENS population under44_1 under44_2 \
count 1584.000000 1.584000e+03 1.449000e+03 1443.000000 1443.000000
mean 27.139520 1.503874e+06 6.947162e+04 7389.017325 10960.044352
std 16.299669 4.924067e+05 1.463462e+05 15760.371513 26923.922447
min 1.000000 6.808500e+04 0.000000e+00 0.000000 0.000000
25% 12.000000 1.423460e+06 1.290900e+04 1053.500000 1680.500000
50% 26.000000 1.779779e+06 3.084000e+04 2869.000000 4164.000000
75% 41.000000 1.779796e+06 7.339200e+04 7011.000000 10665.500000
max 78.000000 1.802710e+06 2.718555e+06 276678.000000 536817.000000
... older65_1 older_65_2 income_per_capita \
count ... 1443.000000 1443.000000 1.448000e+03
mean ... 5274.621622 3868.282744 -8.887592e+05
std ... 10172.375471 7216.404521 2.476910e+07
min ... 0.000000 0.000000 -6.666667e+08
25% ... 1089.000000 848.500000 2.434625e+04
50% ... 2502.000000 1925.000000 2.967400e+04
75% ... 5736.000000 4398.000000 3.708300e+04
max ... 189225.000000 137757.000000 2.164160e+05
industrial_managers construction_managers farmers realestate \
count 0.0 0.0 0.0 0.0
mean NaN NaN NaN NaN
std NaN NaN NaN NaN
min NaN NaN NaN NaN
25% NaN NaN NaN NaN
50% NaN NaN NaN NaN
75% NaN NaN NaN NaN
max NaN NaN NaN NaN
construction_workers state:1 place
count 0.0 1449.000000 1449.000000
mean NaN 26.833678 42784.766736
std NaN 16.377558 24245.217783
min NaN 1.000000 100.000000
25% NaN 12.000000 21796.000000
50% NaN 26.000000 43930.000000
75% NaN 41.000000 62546.000000
max NaN 72.000000 89140.000000
[8 rows x 21 columns]
###Markdown
How many Home Depots/Lowe's are there in total
###Code
# number of Home Depot stores
entry['HD'].sum()
# number of Lowe's stores
entry['LO'].sum()
###Output
_____no_output_____
###Markdown
Which State had the most new openings in this time period? For each store, and then both?
###Code
# state with most new openings for both HD and LOW
entry['sum_column'] = entry.fillna(0)['HD'] + entry.fillna(0)['LO']
most_openings_all = pd.DataFrame(entry.groupby('STUSAB')['sum_column'].sum().sort_values(ascending = False))
most_openings_all.head(1)
# state with most new openings for HD
most_openings_HD = pd.DataFrame(entry.groupby('STUSAB')['HD'].sum())
most_openings_HD.sort_values('HD', ascending = False).drop_duplicates().head(1)
# state with most new openings for LO
most_openings_LO = pd.DataFrame(entry.groupby('STUSAB')['LO'].sum())
most_openings_LO.sort_values('LO', ascending = False).drop_duplicates().head(1)
###Output
_____no_output_____
###Markdown
Are the location decisions of Lowe's and Home Depot Correlated? Create a scatter plot with Lowe's and Home Depot's entry decisions. Also report the correlation. Fill NAs with 0s.
###Code
# scatterplot (NAs filled with 0s, as the prompt asks)
plt.scatter(entry['HD'].fillna(0), entry['LO'].fillna(0), alpha=.3)
# correlation (NAs filled with 0s)
round(entry['HD'].fillna(0).corr(entry['LO'].fillna(0)), 4)
###Output
_____no_output_____
###Markdown
What happens if you control for population? Create a variance covariance matrix with the following variables. * Lowe's entry* Home Depot entry* Population* Per Capita Income
###Code
entry[['LO', 'HD', 'population', 'income_per_capita']].cov()
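# The covariance matrix above still mixes the population effect into every entry. A rough sketch
# (added here, not part of the original analysis) of actually controlling for population: regress
# each store count on population using the statsmodels import from the top, then correlate residuals.
ctrl = entry[['HD', 'LO', 'population']].fillna(0)
resid_hd = sm.OLS(ctrl['HD'], sm.add_constant(ctrl['population'])).fit().resid
resid_lo = sm.OLS(ctrl['LO'], sm.add_constant(ctrl['population'])).fit().resid
print('HD-LO correlation controlling for population:', round(resid_hd.corr(resid_lo), 4))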
###Output
_____no_output_____
###Markdown
Also create scatter plots with number of stores and population.
###Code
# scatterplot
plt.scatter(entry['sum_column'], entry['population'],alpha =.3)
###Output
_____no_output_____ |
optstat_tutorial.ipynb | ###Markdown
Computing the optimal statistic with enterprise. In this notebook you will learn how to compute the optimal statistic. The optimal statistic is a frequentist detection statistic for the stochastic background. It assesses the significance of the cross-correlations, and compares them to the Hellings-Downs curve. For more information, see Anholm et al. 2009, Demorest et al. 2013, Chamberlin et al. 2015, Vigeland et al. 2018. This notebook shows you how to compute the optimal statistic for the 12.5yr data set. You can download a pickle of the pulsars and the noisefiles here: https://paper.dropbox.com/doc/NG-12.5yr_v3-GWB-Analysis--A2vs2wHh5gR4VTgm2DeODR2zAg-DICJei6NxsPjxnO90mGMo. You will need the following files: * Channelized Pickled Pulsars (DE438) - Made in Py3 * Noisefiles (make sure you get the one that says it contains all the pulsar parameters)
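Schematically (an added orientation note following Chamberlin et al. 2015; the notation below is illustrative and not taken from this notebook's code), the optimal statistic is an estimator of the squared GWB amplitude built from the cross-correlated residuals of all pulsar pairs,$$\hat{A}^2 = \frac{\sum_{ab} \delta t_a^T P_a^{-1} \tilde{S}_{ab} P_b^{-1} \delta t_b}{\sum_{ab} \mathrm{tr}\left(P_a^{-1} \tilde{S}_{ab} P_b^{-1} \tilde{S}_{ba}\right)}, \qquad \sigma_{\hat{A}^2} = \left(\sum_{ab} \mathrm{tr}\left(P_a^{-1} \tilde{S}_{ab} P_b^{-1} \tilde{S}_{ba}\right)\right)^{-1/2},$$where $\delta t_a$ are the residuals of pulsar $a$, $P_a$ is its noise covariance, and $\tilde{S}_{ab}$ is the amplitude-independent cross-covariance, proportional to the Hellings-Downs factor $\Gamma_{ab}$. The signal-to-noise ratio quoted below is $\hat{A}^2/\sigma_{\hat{A}^2}$.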
###Code
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import pickle
import json
import matplotlib.pyplot as plt
%matplotlib inline
from enterprise.signals import signal_base
from enterprise.signals import gp_signals
from enterprise_extensions import model_utils, blocks
from enterprise_extensions.frequentist import optimal_statistic as opt_stat
# Load up the pulsars from the pickle file
# Change the picklefile to point to where you have saved the pickle of the pulsars that you downloaded
picklefile = '/Users/vigeland/Documents/Research/NANOGrav/nanograv_data/12p5yr/channelized_v3_DE438_45psrs.pkl'
with open(picklefile, 'rb') as f:
psrs = pickle.load(f)
len(psrs)
# Load up the noise dictionary to get values for the white noise parameters
# Change the noisefile to point to where you have saved the noisefile
noisefile = '/Users/vigeland/Documents/Research/NANOGrav/nanograv_data/12p5yr/channelized_12p5yr_v3_full_noisedict.json'
with open(noisefile, 'r') as f:
noisedict = json.load(f)
# Initialize the optimal statistic object
# You can give it a list of pulsars and the noise dictionary, and it will create the pta object for you
# Alternatively, you can make the pta object yourself and give it to the OptimalStatistic object as an argument
# find the maximum time span to set GW frequency sampling
Tspan = model_utils.get_tspan(psrs)
# Here we build the signal model
# First we add the timing model
s = gp_signals.TimingModel()
# Then we add the white noise
# There are three types of white noise: EFAC, EQUAD, and ECORR
# We use different white noise parameters for every backend/receiver combination
# The white noise parameters are held constant
s += blocks.white_noise_block(vary=False, inc_ecorr=True, select='backend')
# Next comes the individual pulsar red noise
# We model the red noise as a Fourier series with 30 frequency components,
# with a power-law PSD
s += blocks.red_noise_block(prior='log-uniform', Tspan=Tspan, components=30)
# Finally, we add the common red noise, which is modeled as a Fourier series with 5 frequency components
# The common red noise has a power-law PSD with spectral index of 4.33
s += blocks.common_red_noise_block(psd='powerlaw', prior='log-uniform', Tspan=Tspan,
components=5, gamma_val=4.33, name='gw')
# We set up the PTA object using the signal we defined above and the pulsars
pta = signal_base.PTA([s(p) for p in psrs])
# We need to set the white noise parameters to the values in the noise dictionary
pta.set_default_params(noisedict)
os = opt_stat.OptimalStatistic(psrs, pta=pta)
# Load up the maximum-likelihood values for the pulsars' red noise parameters and the common red process
# These values come from the results of a Bayesian search (model 2A)
# Once you have done your own Bayesian search,
# you can make your own parameter dictionary of maximum-likelihood values
with open('data/12p5yr_maxlike.json', 'r') as f:
ml_params = json.load(f)
# Compute the optimal statistic
# The optimal statistic returns five quantities:
# - xi: an array of the angular separations between the pulsar pairs (in radians)
# - rho: an array of the cross-correlations between the pulsar pairs
# - sig: an array of the uncertainty in the cross-correlations
# - OS: the value of the optimal statistic
# - OS_sig: the uncertainty in the optimal statistic
xi, rho, sig, OS, OS_sig = os.compute_os(params=ml_params)
print(OS, OS_sig, OS/OS_sig)
# Plot the cross-correlations and compare to the Hellings-Downs curve
# Before plotting, we need to bin the cross-correlations
def weightedavg(rho, sig):
weights, avg = 0., 0.
for r,s in zip(rho,sig):
weights += 1./(s*s)
avg += r/(s*s)
return avg/weights, np.sqrt(1./weights)
def bin_crosscorr(zeta, xi, rho, sig):
rho_avg, sig_avg = np.zeros(len(zeta)), np.zeros(len(zeta))
for i,z in enumerate(zeta[:-1]):
myrhos, mysigs = [], []
for x,r,s in zip(xi,rho,sig):
if x >= z and x < (z+10.):
myrhos.append(r)
mysigs.append(s)
rho_avg[i], sig_avg[i] = weightedavg(myrhos, mysigs)
return rho_avg, sig_avg
# sort the cross-correlations by xi
idx = np.argsort(xi)
xi_sorted = xi[idx]
rho_sorted = rho[idx]
sig_sorted = sig[idx]
# bin the cross-correlations so that there are the same number of pairs per bin
npairs = 66
xi_mean = []
xi_err = []
rho_avg = []
sig_avg = []
i = 0
while i < len(xi_sorted):
xi_mean.append(np.mean(xi_sorted[i:npairs+i]))
xi_err.append(np.std(xi_sorted[i:npairs+i]))
r, s = weightedavg(rho_sorted[i:npairs+i], sig_sorted[i:npairs+i])
rho_avg.append(r)
sig_avg.append(s)
i += npairs
xi_mean = np.array(xi_mean)
xi_err = np.array(xi_err)
def get_HD_curve(zeta):
coszeta = np.cos(zeta*np.pi/180.)
xip = (1.-coszeta) / 2.
HD = 3.*( 1./3. + xip * ( np.log(xip) -1./6.) )
return HD/2
# now make the plot
(_, caps, _) = plt.errorbar(xi_mean*180/np.pi, rho_avg, xerr=xi_err*180/np.pi, yerr=sig_avg, marker='o', ls='',
color='0.1', fmt='o', capsize=4, elinewidth=1.2)
zeta = np.linspace(0.01,180,100)
HD = get_HD_curve(zeta+1)
plt.plot(zeta, OS*HD, ls='--', label='Hellings-Downs', color='C0', lw=1.5)
plt.xlim(0, 180);
#plt.ylim(-4e-30, 5e-30);
plt.ylabel(r'$\hat{A}^2 \Gamma_{ab}(\zeta)$')
plt.xlabel(r'$\zeta$ (deg)');
plt.legend(loc=4);
plt.tight_layout();
plt.show();
###Output
_____no_output_____
###Markdown
To compute the noise-marginalized optimal statistic (Vigeland et al. 2018), you will need the chain from a Bayesian search for a common red process without spatial correlations (model 2A).
###Code
# Change chaindir to point to where you have the chain from your Bayesian search
chaindir = 'chains/model_2a/'
params = list(np.loadtxt(chaindir + '/params.txt', dtype='str'))
chain = np.loadtxt(chaindir + '/chain_1.txt')
N = 1000 # number of times to compute the optimal statistic
burn = int(0.25*chain.shape[0]) # estimate of when the chain has burned in
noisemarg_OS, noisemarg_OS_err = np.zeros(N), np.zeros(N)
for i in range(N):
# choose a set of noise values from the chain
# make sure that you pull values from after the chain has burned in
idx = np.random.randint(burn, chain.shape[0])
# construct a dictionary with these parameter values
param_dict = {}
for p in params:
param_dict.update({p: chain[idx, params.index(p)]})
# compute the optimal statistic at this set of noise values and save in an array
_, _, _, noisemarg_OS[i], noisemarg_OS_err[i] = os.compute_os(params=param_dict)
plt.hist(noisemarg_OS)
plt.figure();
plt.hist(noisemarg_OS/noisemarg_OS_err)
###Output
_____no_output_____ |
iQubeLabs/Regression_Analysis_on_BTC_data.ipynb | ###Markdown
###Code
# Initial Importations
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
palette=sns.color_palette('mako')
sns.set(palette=palette)
data = '/content/drive/MyDrive/Colab Notebooks/iQube Labs Research/BTCUSDT_1h.csv'
df = pd.read_csv(data)
df_copy = df.copy()
df.head()
###Output
_____no_output_____
###Markdown
The dataset contains hourly Bitcoin price data and consists of the following columns: open, high, low, close, volume, close_time, quote, takers_buy_base, and takers_buy_quote. The purpose of analysing this dataset is to visualize the price ranges over the years and to make predictions based on the previous price information.
###Code
df.info()
df.describe()
###Output
_____no_output_____
###Markdown
Cleaning and EDA
###Code
# Checking for the number of empty values
df.isna().sum()
###Output
_____no_output_____
###Markdown
###Code
# Check the close time range
df['close_time'].min(), df['close_time'].max()
# Converting close_time column to datetime and remove localization
df['close_time'] = pd.to_datetime(df['close_time'])
df['close_time'] = df['close_time'].dt.tz_localize(None)
df.info()
# Getting the months across the years
import calendar
df['month'] = df['close_time'].dt.month
df['month'] = df['month'].apply(lambda x: calendar.month_abbr[x])
df['month'].value_counts()
sns.catplot(data=df, x='month', y='volume')
plt.show()
###Output
_____no_output_____
###Markdown
From the visualization, we can see that for over 5 years (2017 - 2022), the month of May has had the highest BTC volume circulation. August has been the month with the lowest volume circulation.
###Code
x = df['month']
y = df['high']
# sns.scatterplot(data=df, x='month', y='high')
plt.plot(x,y, 'o')
plt.show()
###Output
_____no_output_____
###Markdown
From the plot, we can see that the ATH (All Time High) price broke the 60-thousand-dollar milestone in 5 different months: October, November, March, April, and June. The ATH price over these 5 years is about $70k.
###Code
# Visualizing btc volume across the years (2017 -2022)
y = df['volume']
x = df['close_time_months'] = df['close_time'].dt.strftime('%Y')
plt.xlabel('Year (2017 - 2022)')
plt.ylabel('BTC Volume')
plt.title('BTC Volume against Year')
plt.plot(x, y,'o')
plt.show()
###Output
_____no_output_____
###Markdown
The plot above depicts BTC volume over the years, from 2017 to 2022. From the plot, we can see that 2020 had the highest volume of BTC circulated. Note that the 2022 data in this dataset only runs through January. Supervised Learning: Regression
###Code
# Setup target and features
from sklearn.model_selection import train_test_split
target = df['high']
features = df[['open', 'close', 'low', 'volume']]
X_train, X_test, y_train, y_test = train_test_split(features, target, shuffle=True, test_size=0.25)
# Build model
from sklearn.ensemble import RandomForestRegressor
btc_model = RandomForestRegressor(random_state=3)
btc_model = btc_model.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Now, let's predict and validate our prediction with mean squared error.
###Code
from sklearn.metrics import mean_squared_error
y_pred = btc_model.predict(X_test)
print(mean_squared_error(y_test, y_pred))
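# Quick extra check (added): MSE is in squared price units; the RMSE is easier to read in dollars.
print(np.sqrt(mean_squared_error(y_test, y_pred)))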
###Output
5985.708686804046
###Markdown
Fine-tuning
###Code
target = df['high']
features = df[['open', 'close', 'low', 'volume', 'takers_buy_base', 'takers_buy_quote', 'quote']]
X_train, X_test, y_train, y_test = train_test_split(features, target, shuffle=True, test_size=0.30)
btc_model = RandomForestRegressor(random_state=0, max_samples=20000, max_depth=2000, max_leaf_nodes=50000)
btc_model = btc_model.fit(X_train, y_train)
y_pred = btc_model.predict(X_test)
print(mean_squared_error(y_test, y_pred))
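# The "fine-tuning" above tries a single hand-picked parameter set. A more systematic sketch
# (added here as an illustration) using scikit-learn's GridSearchCV over a small grid:
from sklearn.model_selection import GridSearchCV
param_grid = {'n_estimators': [100, 200], 'max_depth': [10, 20, None]}
search = GridSearchCV(RandomForestRegressor(random_state=0), param_grid,
                      scoring='neg_mean_squared_error', cv=3, n_jobs=-1)
search.fit(X_train, y_train)
print(search.best_params_, -search.best_score_)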
###Output
5980.860959109816
|
geomodeling/Generate_models.ipynb | ###Markdown
Generate example models for modeling class. Here are just a couple of functions (and simple data conversions from gempy models) to create some models:
###Code
import numpy as np
from scipy.interpolate import Rbf
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from numpy.linalg import lstsq
###Output
_____no_output_____
###Markdown
Layer stack. The first model we will consider is a simple layer stack of (completely) parallel layers, e.g. something we would expect to observe in a sedimentary system:
###Code
l1 = lambda x : 0.25*x + 10
l2 = lambda x : 0.25*x + 20
l3 = lambda x : 0.25*x + 30
###Output
_____no_output_____
###Markdown
Randomly sample points. We now randomly extract a set of interface points from these lines:
###Code
n_pts = 4 # Points per layer
# set seed for reproducibility
np.random.seed(123)
l1_pts_x = np.random.uniform(0,100,n_pts)
l1_pts_y = l1(l1_pts_x)
l2_pts_x = np.random.uniform(0,100,n_pts)
l2_pts_y = l2(l2_pts_x)
l3_pts_x = np.random.uniform(0,100,n_pts)
l3_pts_y = l3(l3_pts_x)
# plt.plot(xvals, l1(xvals))
# plt.plot(xvals, l2(xvals))
# plt.plot(xvals, l3(xvals))
plt.plot(l1_pts_x, l1_pts_y, 'o')
plt.plot(l2_pts_x, l2_pts_y, 'o')
plt.plot(l3_pts_x, l3_pts_y, 'o')
plt.axis('equal');
# combine data in arrays
x = np.hstack([l1_pts_x, l2_pts_x, l3_pts_x])
y = np.hstack([l1_pts_y, l2_pts_y, l3_pts_y])
# give points values
z = np.hstack([np.ones(n_pts)*10, np.ones(n_pts)*20, np.ones(n_pts)*30])
###Output
_____no_output_____
###Markdown
Save points for further use
###Code
np.save("pts_line_model_x", x)
np.save("pts_line_model_y", y)
np.save("pts_line_model_z", z)
###Output
_____no_output_____
###Markdown
Simple fold model
###Code
l1 = lambda x : 10*np.sin(0.1*x) + 10
l2 = lambda x : 10*np.sin(0.1*x) + 20
l3 = lambda x : 10*np.sin(0.1*x) + 30
n_pts = 10 # Points per layer
l1_pts_x = np.random.uniform(0,100,n_pts)
l1_pts_y = l1(l1_pts_x)
l2_pts_x = np.random.uniform(0,100,n_pts)
l2_pts_y = l2(l2_pts_x)
l3_pts_x = np.random.uniform(0,100,n_pts)
l3_pts_y = l3(l3_pts_x)
xvals = np.linspace(0,100,1000)
plt.plot(xvals, l1(xvals))
plt.plot(xvals, l2(xvals))
plt.plot(xvals, l3(xvals))
plt.plot(l1_pts_x, l1_pts_y, 'o')
plt.plot(l2_pts_x, l2_pts_y, 'o')
plt.plot(l3_pts_x, l3_pts_y, 'o')
plt.axis('equal')
# combine data in arrays
x = np.hstack([l1_pts_x, l2_pts_x, l3_pts_x])
y = np.hstack([l1_pts_y, l2_pts_y, l3_pts_y])
# give points values
z = np.hstack([np.ones(n_pts)*10, np.ones(n_pts)*20, np.ones(n_pts)*30])
np.save("pts_fold_model_x", x)
np.save("pts_fold_model_y", y)
np.save("pts_fold_model_z", z)
###Output
_____no_output_____
###Markdown
Recumbent fold, aka "Jan's model" - for more examples see: https://github.com/cgre-aachen/gempy/tree/master/notebooks/examples Note: we don't generate this model from scratch, but load the csv files and extract the relevant information
###Code
rock1 = np.loadtxt('jan_model3_rock1.csv', delimiter=',', skiprows=1, usecols=[0,1,2])
rock2 = np.loadtxt('jan_model3_rock2.csv', delimiter=',', skiprows=0, usecols=[0,1,2])
# select only points for y = 500
rock1 = rock1[np.where(rock1[:,1]==500)]
rock2 = rock2[np.where(rock2[:,1]==500)]
plt.plot(rock1[:,0], rock1[:,2], 'o')
plt.plot(rock2[:,0], rock2[:,2], 'o')
# combine data:
x = np.hstack([rock1[:,0], rock2[:,0]])
y = np.hstack([rock1[:,2], rock2[:,2]])
z = np.hstack([np.ones_like(rock1[:,0])*10, np.ones_like(rock2[:,0])*20])
np.save("pts_jans_fold_model_x", x)
np.save("pts_jans_fold_model_y", y)
np.save("pts_jans_fold_model_z", z)
###Output
_____no_output_____
###Markdown
Fault model. Here is also an example of a fault model, e.g. to be used to show the influence of multiple interacting scalar fields:
###Code
n_pts = 10 # Points per layer
# Linear functions for line data
l1 = lambda x : 0.25*x + 30
l2 = lambda x : 0.25*x + 40
l3 = lambda x : 0.25*x + 50
# set seed for reproducibility
np.random.seed(123)
# sampling points
l1_pts_x = np.random.uniform(0,90,n_pts)
l1_pts_y = l1(l1_pts_x)
l2_pts_x = np.random.uniform(0,90,n_pts)
l2_pts_y = l2(l2_pts_x)
l3_pts_x = np.random.uniform(0,90,n_pts)
l3_pts_y = l3(l3_pts_x)
# define fault
fault_point_1 = (40,60)
fault_point_2 = (60,20)
# interpolate fault - to obtain offset for data set:
x_coords, y_coords = [40,60], [60,20] # zip(*points)
A = np.vstack([x_coords, np.ones(len(x_coords))]).T
m, c = lstsq(A, y_coords, rcond=None)[0]
offset = 10 # offset of block on right side of fault
f = lambda x : m*x + c
# Create filters to determine points on each side of fault
filter_l1 = f(l1_pts_x) < l1_pts_y
filter_l2 = f(l2_pts_x) < l2_pts_y
filter_l3 = f(l3_pts_x) < l3_pts_y
# create copies of arrays to avoid confusion...
l1_pts_x_fault = l1_pts_x.copy()
l1_pts_y_fault = l1_pts_y.copy()
l2_pts_x_fault = l2_pts_x.copy()
l2_pts_y_fault = l2_pts_y.copy()
l3_pts_x_fault = l3_pts_x.copy()
l3_pts_y_fault = l3_pts_y.copy()
# Adjust y-values
l1_pts_y_fault[filter_l1] -= offset
l2_pts_y_fault[filter_l2] -= offset
l3_pts_y_fault[filter_l3] -= offset
# Adjust x-values
l1_pts_x_fault[filter_l1] -= 1/m*offset
l2_pts_x_fault[filter_l2] -= 1/m*offset
l3_pts_x_fault[filter_l3] -= 1/m*offset
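# Quick visual check and save, mirroring the earlier models (the plot and the file names below are
# my own additions and were not part of the original notebook):
plt.plot(l1_pts_x_fault, l1_pts_y_fault, 'o')
plt.plot(l2_pts_x_fault, l2_pts_y_fault, 'o')
plt.plot(l3_pts_x_fault, l3_pts_y_fault, 'o')
plt.plot([fault_point_1[0], fault_point_2[0]], [fault_point_1[1], fault_point_2[1]], 'k--')
plt.axis('equal')
# combine data in arrays; interface values follow the layer offsets, as in the models above
x = np.hstack([l1_pts_x_fault, l2_pts_x_fault, l3_pts_x_fault])
y = np.hstack([l1_pts_y_fault, l2_pts_y_fault, l3_pts_y_fault])
z = np.hstack([np.ones(n_pts)*30, np.ones(n_pts)*40, np.ones(n_pts)*50])
np.save("pts_fault_model_x", x)
np.save("pts_fault_model_y", y)
np.save("pts_fault_model_z", z)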
###Output
_____no_output_____
###Markdown
Adding noise. Of course, all of the previous examples are just too perfect to be realistic geological observations - let's add some noise to test the sensitivity of the algorithms: *Note: we only add noise to the y-component*
###Code
y = np.load("pts_line_model_y.npy")
y += np.random.normal(0, 2, len(y))
np.save("pts_line_model_y_noise", y)
###Output
_____no_output_____ |
notebooks/losses_evaluation/Dstripes/basic/ell/dense/VAE/DstripesVAE_Dense_reconst_1ell_1psnr.ipynb | ###Markdown
Settings
###Code
%load_ext autoreload
%autoreload 2
%env TF_KERAS = 1
import os
sep_local = os.path.sep
import sys
sys.path.append('..'+sep_local+'..')
print(sep_local)
os.chdir('..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..')
print(os.getcwd())
import tensorflow as tf
print(tf.__version__)
###Output
2.1.0
###Markdown
Dataset loading
###Code
dataset_name='Dstripes'
images_dir = 'C:\\Users\\Khalid\\Documents\projects\\Dstripes\DS06\\'
validation_percentage = 20
valid_format = 'png'
from training.generators.file_image_generator import create_image_lists, get_generators
imgs_list = create_image_lists(
image_dir=images_dir,
validation_pct=validation_percentage,
valid_imgae_formats=valid_format
)
inputs_shape= image_size=(200, 200, 3)
batch_size = 32//2
latents_dim = 32
intermediate_dim = 50
training_generator, testing_generator = get_generators(
images_list=imgs_list,
image_dir=images_dir,
image_size=image_size,
batch_size=batch_size,
class_mode=None
)
import tensorflow as tf
train_ds = tf.data.Dataset.from_generator(
lambda: training_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
test_ds = tf.data.Dataset.from_generator(
lambda: testing_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
_instance_scale=1.0
for data in train_ds:
_instance_scale = float(data[0].numpy().max())
break
_instance_scale
import numpy as np
from collections.abc import Iterable
if isinstance(inputs_shape, Iterable):
_outputs_shape = np.prod(inputs_shape)
_outputs_shape
###Output
_____no_output_____
###Markdown
Model's Layers definition
###Code
menc_lays = [tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=latents_dim)]
venc_lays = [tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=latents_dim)]
dec_lays = [tf.keras.layers.Dense(units=latents_dim, activation='relu'),
tf.keras.layers.Dense(units=intermediate_dim, activation='relu'),
tf.keras.layers.Dense(units=_outputs_shape),
tf.keras.layers.Reshape(inputs_shape)]
###Output
_____no_output_____
###Markdown
Model definition
###Code
model_name = dataset_name+'VAE_Dense_reconst_1ell_1psnr'
experiments_dir='experiments'+sep_local+model_name
from training.autoencoding_basic.autoencoders.VAE import VAE as AE
inputs_shape=image_size
variables_params = \
[
{
'name': 'inference_mean',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': menc_lays
},
{
'name': 'inference_logvariance',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': venc_lays
},
{
'name': 'generative',
'inputs_shape':latents_dim,
'outputs_shape':inputs_shape,
'layers':dec_lays
}
]
from utils.data_and_files.file_utils import create_if_not_exist
_restore = os.path.join(experiments_dir, 'var_save_dir')
create_if_not_exist(_restore)
_restore
#to restore trained model, set filepath=_restore
ae = AE(
name=model_name,
latents_dim=latents_dim,
batch_size=batch_size,
variables_params=variables_params,
filepath=None
)
from evaluation.quantitive_metrics.peak_signal_to_noise_ratio import prepare_psnr
from statistical.losses_utilities import similarty_to_distance as similarity_to_distance  # aliased so the name used in compile() below resolves
from statistical.ae_losses import expected_loglikelihood as ell
ae.compile(loss={'x_logits': lambda x_true, x_logits: ell(x_true, x_logits)+similarity_to_distance(prepare_psnr([ae.batch_size]+ae.get_inputs_shape()))(x_true, x_logits)})
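# Note (added): PSNR is a similarity measure (PSNR = 10*log10(MAX^2 / MSE), higher is better),
# so it is passed through the similarity-to-distance transform above before being combined with
# the reconstruction term of the loss.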
###Output
Model: "pokemonAE_Dense_reconst_1ell_1ssmi"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
inference_inputs (InputLayer [(None, 200, 200, 3)] 0
_________________________________________________________________
inference (Model) (None, 32) 40961344
_________________________________________________________________
generative (Model) (None, 200, 200, 3) 3962124
_________________________________________________________________
tf_op_layer_x_logits (Tensor [(None, 200, 200, 3)] 0
=================================================================
Total params: 44,923,468
Trainable params: 44,923,398
Non-trainable params: 70
_________________________________________________________________
None
###Markdown
Callbacks
###Code
from training.callbacks.sample_generation import SampleGeneration
from training.callbacks.save_model import ModelSaver
es = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=1e-12,
patience=12,
verbose=1,
restore_best_weights=False
)
ms = ModelSaver(filepath=_restore)
csv_dir = os.path.join(experiments_dir, 'csv_dir')
create_if_not_exist(csv_dir)
csv_dir = os.path.join(csv_dir, ae.name+'.csv')
csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True)
csv_dir
image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir')
create_if_not_exist(image_gen_dir)
sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
###Output
_____no_output_____
###Markdown
Model Training
###Code
ae.fit(
x=train_ds,
input_kw=None,
steps_per_epoch=int(1e4),
epochs=int(1e6),
verbose=2,
    callbacks=[es, ms, csv_log, sg],  # gts_mertics/gtu_mertics removed: they are not defined in this notebook
workers=-1,
use_multiprocessing=True,
validation_data=test_ds,
validation_steps=int(1e4)
)
###Output
_____no_output_____
###Markdown
Model Evaluation: inception_score
###Code
from evaluation.generativity_metrics.inception_metrics import inception_score
is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200)
print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
###Output
_____no_output_____
###Markdown
Frechet_inception_distance
###Code
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance
fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32)
print(f'frechet inception distance: {fis_score}')
###Output
_____no_output_____
###Markdown
perceptual_path_length_score
###Code
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score
ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32)
print(f'perceptual path length score: {ppl_mean_score}')
###Output
_____no_output_____
###Markdown
precision score
###Code
from evaluation.generativity_metrics.precision_recall import precision_score
_precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'precision score: {_precision_score}')
###Output
_____no_output_____
###Markdown
recall score
###Code
from evaluation.generativity_metrics.precision_recall import recall_score
_recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'recall score: {_recall_score}')
###Output
_____no_output_____
###Markdown
Image Generation: image reconstruction - Training dataset
###Code
%load_ext autoreload
%autoreload 2
from training.generators.image_generation_testing import reconstruct_from_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
with Randomness
###Code
from training.generators.image_generation_testing import generate_images_like_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
Complete Randomness
###Code
from training.generators.image_generation_testing import generate_images_randomly
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'random_synthetic_dir')
create_if_not_exist(save_dir)
generate_images_randomly(ae, save_dir)
from training.generators.image_generation_testing import interpolate_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'interpolate_dir')
create_if_not_exist(save_dir)
interpolate_a_batch(ae, testing_generator, save_dir)
###Output
100%|██████████| 15/15 [00:00<00:00, 19.90it/s]
|
Pandas/03.03-Operations-in-Pandas.ipynb | ###Markdown
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).**The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* Operating on Data in Pandas One of the essential pieces of NumPy is the ability to perform quick element-wise operations, both with basic arithmetic (addition, subtraction, multiplication, etc.) and with more sophisticated operations (trigonometric functions, exponential and logarithmic functions, etc.).Pandas inherits much of this functionality from NumPy, and the ufuncs that we introduced in [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb) are key to this.Pandas includes a couple useful twists, however: for unary operations like negation and trigonometric functions, these ufuncs will *preserve index and column labels* in the output, and for binary operations such as addition and multiplication, Pandas will automatically *align indices* when passing the objects to the ufunc.This means that keeping the context of data and combining data from different sources–both potentially error-prone tasks with raw NumPy arrays–become essentially foolproof ones with Pandas.We will additionally see that there are well-defined operations between one-dimensional ``Series`` structures and two-dimensional ``DataFrame`` structures. Ufuncs: Index PreservationBecause Pandas is designed to work with NumPy, any NumPy ufunc will work on Pandas ``Series`` and ``DataFrame`` objects.Let's start by defining a simple ``Series`` and ``DataFrame`` on which to demonstrate this:
###Code
import pandas as pd
import numpy as np
rng = np.random.RandomState(42)
ser = pd.Series(rng.randint(0, 10, 4))
ser
df = pd.DataFrame(rng.randint(0, 10, (3, 4)),
columns=['A', 'B', 'C', 'D'])
df
###Output
_____no_output_____
###Markdown
If we apply a NumPy ufunc on either of these objects, the result will be another Pandas object *with the indices preserved:*
###Code
np.exp(ser)
###Output
_____no_output_____
###Markdown
Or, for a slightly more complex calculation:
###Code
np.sin(df * np.pi / 4)
###Output
_____no_output_____
###Markdown
Any of the ufuncs discussed in [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb) can be used in a similar manner. UFuncs: Index AlignmentFor binary operations on two ``Series`` or ``DataFrame`` objects, Pandas will align indices in the process of performing the operation.This is very convenient when working with incomplete data, as we'll see in some of the examples that follow. Index alignment in SeriesAs an example, suppose we are combining two different data sources, and find only the top three US states by *area* and the top three US states by *population*:
###Code
area = pd.Series({'Alaska': 1723337, 'Texas': 695662,
'California': 423967}, name='area')
population = pd.Series({'California': 38332521, 'Texas': 26448193,
'New York': 19651127}, name='population')
###Output
_____no_output_____
###Markdown
Let's see what happens when we divide these to compute the population density:
###Code
population / area
###Output
_____no_output_____
###Markdown
The resulting array contains the *union* of indices of the two input arrays, which could be determined using standard Python set arithmetic on these indices:
###Code
area.index | population.index
###Output
_____no_output_____
###Markdown
Any item for which one or the other does not have an entry is marked with ``NaN``, or "Not a Number," which is how Pandas marks missing data (see further discussion of missing data in [Handling Missing Data](03.04-Missing-Values.ipynb)).This index matching is implemented this way for any of Python's built-in arithmetic expressions; any missing values are filled in with NaN by default:
###Code
A = pd.Series([2, 4, 6], index=[0, 1, 2])
B = pd.Series([1, 3, 5], index=[1, 2, 3])
A + B
###Output
_____no_output_____
###Markdown
If using NaN values is not the desired behavior, the fill value can be modified using appropriate object methods in place of the operators.For example, calling ``A.add(B)`` is equivalent to calling ``A + B``, but allows optional explicit specification of the fill value for any elements in ``A`` or ``B`` that might be missing:
###Code
A.add(B, fill_value=0)
###Output
_____no_output_____
###Markdown
Index alignment in DataFrameA similar type of alignment takes place for *both* columns and indices when performing operations on ``DataFrame``s:
###Code
A = pd.DataFrame(rng.randint(0, 20, (2, 2)),
columns=list('AB'))
A
B = pd.DataFrame(rng.randint(0, 10, (3, 3)),
columns=list('BAC'))
B
A + B
###Output
_____no_output_____
###Markdown
Notice that indices are aligned correctly irrespective of their order in the two objects, and indices in the result are sorted.As was the case with ``Series``, we can use the associated object's arithmetic method and pass any desired ``fill_value`` to be used in place of missing entries.Here we'll fill with the mean of all values in ``A`` (computed by first stacking the rows of ``A``):
###Code
fill = A.stack().mean()
A.add(B, fill_value=fill)
###Output
_____no_output_____
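###Markdown
Before tabulating the remaining operators, here is a small added check (an editorial illustration, not part of the original text) that the operator form and the method form of an operation really do agree, using the ``A`` and ``B`` DataFrames defined above:
###Code
# Added illustration: '*' and mul() should produce the same index-aligned result;
# DataFrame.equals() treats NaNs in matching positions as equal, so this evaluates to True.
(A * B).equals(A.mul(B))
###Output
_____no_output_____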
###Markdown
The following table lists Python operators and their equivalent Pandas object methods:
| Python Operator | Pandas Method(s) |
|-----------------|---------------------------------------|
| ``+`` | ``add()`` |
| ``-`` | ``sub()``, ``subtract()`` |
| ``*`` | ``mul()``, ``multiply()`` |
| ``/`` | ``truediv()``, ``div()``, ``divide()``|
| ``//`` | ``floordiv()`` |
| ``%`` | ``mod()`` |
| ``**`` | ``pow()`` |
Ufuncs: Operations Between DataFrame and Series When performing operations between a ``DataFrame`` and a ``Series``, the index and column alignment is similarly maintained. Operations between a ``DataFrame`` and a ``Series`` are similar to operations between a two-dimensional and one-dimensional NumPy array. Consider one common operation, where we find the difference of a two-dimensional array and one of its rows:
###Code
A = rng.randint(10, size=(3, 4))
A
A - A[0]
###Output
_____no_output_____
###Markdown
According to NumPy's broadcasting rules (see [Computation on Arrays: Broadcasting](02.05-Computation-on-arrays-broadcasting.ipynb)), subtraction between a two-dimensional array and one of its rows is applied row-wise.In Pandas, the convention similarly operates row-wise by default:
###Code
df = pd.DataFrame(A, columns=list('QRST'))
df - df.iloc[0]
###Output
_____no_output_____
###Markdown
If you would instead like to operate column-wise, you can use the object methods mentioned earlier, while specifying the ``axis`` keyword:
###Code
df.subtract(df['R'], axis=0)
###Output
_____no_output_____
###Markdown
Note that these ``DataFrame``/``Series`` operations, like the operations discussed above, will automatically align indices between the two elements:
###Code
halfrow = df.iloc[0, ::2]
halfrow
df - halfrow
###Output
_____no_output_____ |
190124_p3_inspect_shared_cf_prefs_alt_exp_design.ipynb | ###Markdown
Load p3 connectivity matrix and the corresponding cf (row) and pc (column) labels
###Code
p3fc = './data/connectivity_matrices/P3_Observed_PC_Connectivity_Synapse_Numbers_gteq_5_syns_gteq_40pc_PC_targets.mat' # connectivity matrix
p3fa = './data/connectivity_matrices/P3_axon_IDs_for_Obs_PC_Conn_Syn_Nums_gteq_5_syns_gteq_40pc_PC_syns.mat' # axon ids
p3fp = './data/connectivity_matrices/P3_PC_IDs_for_Obs_PC_Conn_Syn_Nums_gteq_5_syns_gteq_40pc_PC_syns.mat' # pc ids
p3cdict = scio.loadmat(p3fc)
p3adict = scio.loadmat(p3fa)
p3pdict = scio.loadmat(p3fp)
p3c = p3cdict['P3_PCconnectivity']
p3a = p3adict['P3_PCconn_axon_IDs']
p3p = p3pdict['P3_PCconn_PC_IDs']
###Output
_____no_output_____
###Markdown
Start by inspecting the full p3 connectivity matrix and the connectivity matrix for just the inputs that have established preferences with at least one Purkinje cell.
###Code
allp3cflabels = [str(q[0]) for q in p3a]
allp3pclabels = [str(q[0]) for q in p3p]
fig = plt.figure(figsize=(20,25))
ax = fig.add_subplot(111)
cax = ax.matshow(p3c)
fig.colorbar(cax)
ax.set_xticks([q for q in range(p3c.shape[1])])
ax.set_yticks([q for q in range(p3c.shape[0])])
ax.set_xticklabels(allp3pclabels,rotation=70)
ax.set_yticklabels(allp3cflabels,rotation=0)
ax.set_xlabel('PC ID')
ax.xaxis.set_label_position('top')
ax.set_ylabel('CF seg ID')
ax.set_title('P3\n')
fname = 'data/figures/conn_matrices/190307_p3_conn_mat.png'
# plt.show()
plt.savefig(fname)
###Output
_____no_output_____
###Markdown
View each row as a feature vector for an observed cf. Create a distance matrix for these observations and see whether you can group them into clusters. Also, just reorder the actual connectivity matrix so it is in block-diagonal form
###Code
p3cnz = p3c.flatten()
p3cnz = [q for q in p3cnz if q != 0]
p3_bin_edges = np.arange(-5,155,10)*0.1
plt.figure()
plt.hist(p3cnz,bins=p3_bin_edges)
plt.xlabel('number of synapses per cf-pc pair')
plt.ylabel('number of occurrences')
plt.title('p3, distribution of n syns per cf-pc pair')
plt.show()
###Output
_____no_output_____
###Markdown
In this iteration, include all cfs, regardless of whether they form a lot of synapses
###Code
# this gives the tail of the p7 distribution
# minnsyns = 0 # case 1: no restrictions on the cf segs being analyzed
minnsyns = int(np.ceil(np.percentile(p3cnz,[90]).tolist()[0])) # case 4: more than the 90th percentile of p3 values
print(minnsyns) # inspecting values
rs,cs = np.where(p3c > minnsyns)
p3rowsl = list(set(rs))
# len(p3rowsl) # debugging
rsdel = [q for q in range(p3c.shape[0]) if not np.isin(q,p3rowsl)]
p3clnn = np.delete(p3c,rsdel,axis=0) # non-normalized
p3al = np.delete(p3a,rsdel,axis=0)
p3clnn.shape # debugging
###Output
_____no_output_____
###Markdown
IMPORTANT: Normalize the feature vectors for the block diagonal analysis here. Otherwise the distances will be large for vectors that point along the same line--i.e. that have the same relative connectivity properties but form different numbers of synapses onto their targets. We want pairs of this type to have a distance of 0 and be grouped together.
###Code
normfacts = np.expand_dims(np.sum(p3clnn,axis=1),axis=1)
normfactmatrix = np.tile(normfacts,(1,p3clnn.shape[1]))
p3cl = np.divide(p3clnn,normfactmatrix)
# print(np.sum(p3cl,axis=1)) # debugging
###Output
_____no_output_____
###Markdown
Inspect the reduced connectivity matrix (containing cfs that form large numbers of synapses only)
###Code
p3cflabels = [str(q[0]) for q in p3al]
p3pclabels = [str(q[0]) for q in p3p]
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111)
cax = ax.matshow(p3clnn)
fig.colorbar(cax)
ax.set_xticks([q for q in range(p3clnn.shape[1])])
ax.set_yticks([q for q in range(p3clnn.shape[0])])
ax.set_xticklabels(p3pclabels,rotation=70)
ax.set_yticklabels(p3cflabels,rotation=0)
ax.set_xlabel('PC ID')
ax.xaxis.set_label_position('top')
ax.set_ylabel('CF seg ID')
plt.show()
###Output
_____no_output_____
###Markdown
Compute the Euclidean distance matrix for the normalized p7 large-synapse connectivity matrix
###Code
p3cld = pdist(p3cl,'euclidean')
###Output
_____no_output_____
###Markdown
Inspect the distance matrix
###Code
p3cldsq = squareform(p3cld)
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
cax = ax.matshow(p3cldsq)
fig.colorbar(cax)
ax.set_xticks([q for q in range(p3cldsq.shape[0])])
ax.set_yticks([q for q in range(p3cldsq.shape[0])])
ax.set_xticklabels(p3cflabels,rotation=70)
ax.set_yticklabels(p3cflabels,rotation=0)
plt.show()
###Output
_____no_output_____
###Markdown
Re-order distance matrix rows and columns to look for block diagonal form One way to do this re-ordering is to perform hierarchical clustering on the elements based on their distance matrix, and then to arrange them in the ordering given by hierarchical clustering.
###Code
# Source: https://gmarti.gitlab.io/ml/2017/09/07/how-to-sort-distance-matrix.html
def seriation(Z,N,cur_index):
'''
input:
- Z is a hierarchical tree (dendrogram)
- N is the number of points given to the clustering process
- cur_index is the position in the tree for the recursive traversal
output:
- order implied by the hierarchical tree Z
seriation computes the order implied by a hierarchical tree (dendrogram)
'''
if cur_index < N:
return [cur_index]
else:
left = int(Z[cur_index-N,0])
right = int(Z[cur_index-N,1])
return (seriation(Z,N,left) + seriation(Z,N,right))
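# Note (added): for a linkage Z built from N observations, scipy numbers the merged clusters
# N..2N-2, so the root has index 2N-2. Calling seriation(Z, N, N + N - 2) below therefore
# starts at the root and returns all N leaf indices in left-to-right dendrogram order.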
###Output
_____no_output_____
###Markdown
Order large-synapse cfs according to their hierarchical clustering position.
###Code
# Code modified from the following source
# https://gmarti.gitlab.io/ml/2017/09/07/how-to-sort-distance-matrix.html
def compute_serial_matrix(dist_mat,method="ward",metric="euclidean"):
'''
input:
- flat_dist_mat is a distance matrix
- method = ["ward","single","average","complete"]
output:
- seriated_dist is the input dist_mat,
but with re-ordered rows and columns
according to the seriation, i.e. the
order implied by the hierarchical tree
- res_order is the order implied by
the hierarhical tree
- res_linkage is the hierarhical tree (dendrogram)
compute_serial_matrix transforms a distance matrix into
a sorted distance matrix according to the order implied
by the hierarchical tree (dendrogram)
'''
N = len(dist_mat)
flat_dist_mat = squareform(dist_mat)
res_linkage = linkage(flat_dist_mat, method=method, metric=metric)
res_order = seriation(res_linkage, N, N + N-2)
seriated_dist = np.zeros([N,N])
a,b = np.triu_indices(N,k=1)
seriated_dist[a,b] = dist_mat[ [res_order[i] for i in a], [res_order[j] for j in b]]
seriated_dist[b,a] = seriated_dist[a,b]
return seriated_dist, res_order, res_linkage
p3lno,p3lneworder,p3link = compute_serial_matrix(p3cldsq,method='ward',metric='euclidean')
# Re-order the climbing fiber segment labels
p3lcfnolabels = [str(p3al[q][0]) for q in p3lneworder]
# test code for understanding how the function above works
# N = len(p3cldsq)
# p3lneworder = seriation(p3link,N, N + N-2)
# p3lno = np.zeros([N,N])
# a,b = np.triu_indices(N,k=1)
# Re-order the distance matrix elements using this order
# p3lno[a,b] = p3cldsq[ [p3lneworder[i] for i in a], [p3lneworder[j] for j in b]]
# p3lno[b,a] = p3lno[a,b]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
dn = dendrogram(p3link)
cflabelsforfig = [p3al[q][0] for q in dn['leaves']]
ax.set_xticklabels(cflabelsforfig,rotation=70)
ax.set_xlabel('Climbing fiber segment IDs')
ax.set_ylabel('Inter-group Euclidean distances')
# Compute distribution metrics
dg_dists_obs = [q[1]-q[0] for q in dn['dcoord']] # the dists between clusters
dgd_med_obs = np.median(dg_dists_obs)
dgd_skew_obs = st.skew(dg_dists_obs)
dgd_std_obs = np.std(dg_dists_obs)
dgd_90pc_obs = np.percentile(dg_dists_obs,90)
dgd_95pc_obs = np.percentile(dg_dists_obs,95)
dgd_99pc_obs = np.percentile(dg_dists_obs,99)
plt.show()
cflabelsforfig = [p3al[q][0] for q in dn['leaves']]
print([q for q in dn['leaves']])
print(cflabelsforfig)
# # inspect these bar heights to make sure they are reasonable
# # these are the distances between the linked groups (after normalizing)
# check = [q[1] - q[0] for q in dn['dcoord']]
# print(check)
# plt.figure()
# plt.hist(check)
# plt.show()
###Output
_____no_output_____
###Markdown
Inspect newly ordered matrix
###Code
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
cax = ax.matshow(p3lno)
fig.colorbar(cax)
ax.set_xticks([q for q in range(p3lno.shape[0])])
ax.set_yticks([q for q in range(p3lno.shape[0])])
ax.set_xticklabels(p3lcfnolabels,rotation=70)
ax.set_yticklabels(p3lcfnolabels,rotation=0)
plt.show()
###Output
_____no_output_____
###Markdown
Check what the rows and columns of this actual matrix look like when they are in this order in the adjacency matrix (so with feature vectors showing)
###Code
p3clno = p3cl[p3lneworder,:]
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111)
cax = ax.matshow(p3clno)
fig.colorbar(cax)
ax.set_xticks([q for q in range(p3clno.shape[1])])
ax.set_yticks([q for q in range(p3clno.shape[0])])
ax.set_xticklabels(p3pclabels,rotation=70)
ax.set_yticklabels(p3lcfnolabels,rotation=0)
ax.set_xlabel('PC ID')
ax.xaxis.set_label_position('top')
ax.set_ylabel('CF seg ID')
plt.show()
###Output
_____no_output_____
###Markdown
Randomize the numbers of synapses formed by each of the large-synapse cf segs onto each of their pc targets and reorder again to see whether the block diagonal structure comes up
###Code
nperm = 10000
wrs_p_perm = []
dgd_med_perm = []
dgd_skew_perm = []
dgd_std_perm = []
dgd_90pc_perm = []
dgd_95pc_perm = []
dgd_99pc_perm = []
for i in range(nperm):
p3cperm = np.zeros([1,p3cl.shape[1]]) # start with a dummy row so the shape is right for appending rows
# print(p3cperm.shape)
# Rearrange the targets of each cf separately
for cf in range(p3cl.shape[0]):
resample = True # create a sample row and keep doing it until there is >=1 connection with >minnsyns
while resample == True:
# Find nonzero elements in the current row
rowcurr = np.expand_dims(p3cl[cf,:],axis=0)
nzrows,nzcols = np.where(rowcurr != 0)
# Rather than permuting the observed values (which are few in number) for a cf,
# assign new values from the p3 distribution by random sampling with replacement
# colpermid = np.random.permutation(len(nzcols))
newrow = np.zeros((rowcurr.shape[0],rowcurr.shape[1]))
for rid in range(len(nzcols)):
nsynidcurr = np.random.randint(len(p3cnz))
nsynscurr = p3cnz[nsynidcurr]
newrow[nzrows[rid],nzcols[rid]] = nsynscurr
# check whether there is at least one large connection by our criterion in the generated row
# if not, generate a new row
if len(np.where(newrow > minnsyns)[0]) > 0:
resample = False
# normalize the newly sampled row
normfactcurr = np.sum(newrow,axis=1)
newrow = np.divide(newrow,normfactcurr)
p3cperm = np.append(p3cperm,newrow,axis=0)
# remove the dummy row
p3cperm = np.delete(p3cperm,0,axis=0)
# print(p3cl) # debugging
# print('\n') # debugging
# print(p3cperm) # debugging
# generate square, Euclidean distance matrix for the randomized connectivity matrix
p3cpermdsq = squareform(pdist(p3cperm,'euclidean'))
# compute block diagonal ordering
p3lnoperm,p3lneworderperm,p3linkperm = compute_serial_matrix(p3cpermdsq,method='ward',metric='euclidean')
# measure degree of block diagonal-ness
p3cfnopermlabels = [str(p3al[q][0]) for q in p3lneworderperm]
# compute distances that link cfs in this permuted connectivity matrix
dnpermcurr = dendrogram(p3linkperm,no_plot=True)
dg_dists_perm = [q[1]-q[0] for q in dnpermcurr['dcoord']]
wrs_p_perm.append(st.ranksums(dg_dists_obs,dg_dists_perm)[1]) # p-value from the wilcoxon rank sum test
dgd_med_perm.append(np.median(dg_dists_perm))
dgd_skew_perm.append(st.skew(dg_dists_perm))
dgd_std_perm.append(np.std(dg_dists_perm))
dgd_90pc_perm.append(np.percentile(dg_dists_perm,90))
dgd_95pc_perm.append(np.percentile(dg_dists_perm,95))
dgd_99pc_perm.append(np.percentile(dg_dists_perm,99))
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
dnpermcurr = dendrogram(p3linkperm)
cflabelsforfig = [p3al[q][0] for q in dnpermcurr['leaves']]
ax.set_xticklabels(cflabelsforfig,rotation=70)
ax.set_xlabel('Climbing fiber segment IDs')
ax.set_ylabel('Inter-group Euclidean distances')
plt.show()
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
cax = ax.matshow(p3lnoperm)
fig.colorbar(cax)
ax.set_xticks([q for q in range(p3lnoperm.shape[0])])
ax.set_yticks([q for q in range(p3lnoperm.shape[0])])
ax.set_xticklabels(p3cfnopermlabels,rotation=70)
ax.set_yticklabels(p3cfnopermlabels,rotation=0)
plt.show()
###Output
_____no_output_____
###Markdown
Inspect distribution parameters for hierarchical clustering heights for permuted and observed connectivity matrices
###Code
# # Wilcoxon rank-sum p-values
# alpha = 0.05
# plt.figure()
# plt.hist(wrs_p_perm)
# plt.plot([alpha,alpha],[0,nperm/2],'o-',marker='None')
# plt.show()
# medians
plt.figure(figsize=(10,10))
plt.hist(dgd_med_perm,label='permuted height dist medians')
plt.plot([dgd_med_obs,dgd_med_obs],[0,nperm/2],'o-',marker='None',label='observed height dist median')
plt.xlabel('connectivity matrix linkage height dist medians')
plt.ylabel('number of occurrences')
plt.legend()
plt.show()
# skews
plt.figure(figsize=(10,10))
plt.hist(dgd_skew_perm,label='permuted height dist skews')
plt.plot([dgd_skew_obs,dgd_skew_obs],[0,nperm/2],'o-',marker='None',label='observed height dist skew')
plt.xlabel('connectivity matrix linkage height dist skews')
plt.ylabel('number of occurrences')
plt.legend()
plt.show()
# standard deviations
plt.figure(figsize=(15,15))
plt.hist(dgd_std_perm,label='Randomized feature vectors')
plt.plot([dgd_std_obs,dgd_std_obs],[0,nperm/2],'o-',marker='None',label='Observed feature vectors')
plt.xlabel('Standard deviation, hierarchical clustering grouping distance distribution')
plt.ylabel('Number of occurrences')
plt.legend(loc='best')
plt.show()
# 90th percentile of distribution (we want to know what the largest distances are)
plt.figure(figsize=(10,10))
plt.hist(dgd_90pc_perm,label='permuted height dist 90th percentile')
plt.plot([dgd_90pc_obs,dgd_90pc_obs],[0,nperm/2],'o-',marker='None',label='observed height dist 90th percentile')
plt.xlabel('connectivity matrix linkage height dist 90th percentile')
plt.ylabel('number of occurrences')
plt.legend()
plt.show()
# 95th percentile of distribution (we want to know what the largest distances are)
plt.figure(figsize=(15,15))
plt.hist(dgd_95pc_perm,label='Randomized feature vectors')
plt.plot([dgd_95pc_obs,dgd_95pc_obs],[0,nperm/2],'o-',marker='None',label='Observed feature vectors')
plt.xlabel('95th percentile, hierarchical clustering grouping distance distribution')
plt.ylabel('Number of occurrences')
plt.legend(loc='best')
plt.show()
# 99th percentile of distribution (we want to know what the largest distances are)
plt.figure(figsize=(15,15))
plt.hist(dgd_99pc_perm,label='Randomized feature vectors')
plt.plot([dgd_99pc_obs,dgd_99pc_obs],[0,nperm/2],'o-',marker='None',label='Observed feature vectors')
plt.xlabel('99th percentile, hierarchical clustering grouping distance distribution')
plt.ylabel('Number of occurrences')
plt.legend(loc='best')
plt.show()
bin_edges = np.arange(0,210,10)*0.01
plt.figure(figsize = (10,10))
plt.hist(dg_dists_perm,bins=bin_edges,alpha = 0.7,label='Randomized feature vectors')
plt.hist(dg_dists_obs,bins=bin_edges,alpha = 0.7,label='Observed feature vectors')
plt.xlabel('Distance between linked groups')
plt.ylabel('Number of occurrences')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Determine the proportion of values that are more extreme than the observed in both cases
###Code
# medians
print('medians:')
avg_med_perm = np.mean(dgd_med_perm)
print(avg_med_perm)
dists_avg_m_perm = [q - avg_med_perm for q in dgd_med_perm]
dist_avg_m_obs = dgd_med_obs - avg_med_perm
if dist_avg_m_obs < 0:
frac_ex_med = len([q for q in dists_avg_m_perm if q < dist_avg_m_obs])/len(dists_avg_m_perm)
else:
frac_ex_med = len([q for q in dists_avg_m_perm if q > dist_avg_m_obs])/len(dists_avg_m_perm)
print(frac_ex_med)
# skews
print('skews:')
avg_skew_perm = np.mean(dgd_skew_perm)
print(avg_skew_perm)
dists_avg_sk_perm = [q - avg_skew_perm for q in dgd_skew_perm]
dist_avg_sk_obs = dgd_skew_obs - avg_skew_perm
if dist_avg_sk_obs < 0:
frac_ex_sk = len([q for q in dists_avg_sk_perm if q < dist_avg_sk_obs])/len(dists_avg_sk_perm)
else:
frac_ex_sk = len([q for q in dists_avg_sk_perm if q > dist_avg_sk_obs])/len(dists_avg_sk_perm)
print(frac_ex_sk)
# standard deviations
print('standard deviations:')
avg_std_perm = np.mean(dgd_std_perm)
print(avg_std_perm)
dists_avg_s_perm = [q - avg_std_perm for q in dgd_std_perm]
dist_avg_s_obs = dgd_std_obs - avg_std_perm
if dist_avg_s_obs < 0:
frac_ex_std = len([q for q in dists_avg_s_perm if q < dist_avg_s_obs])/len(dists_avg_s_perm)
else:
frac_ex_std = len([q for q in dists_avg_s_perm if q > dist_avg_s_obs])/len(dists_avg_s_perm)
print(frac_ex_std)
# 90th percentiles
print('90th percentiles:')
avg_90pc_perm = np.mean(dgd_90pc_perm)
print(avg_90pc_perm)
dists_avg_90pc_perm = [q - avg_90pc_perm for q in dgd_90pc_perm]
dist_avg_90pc_obs = dgd_90pc_obs - avg_90pc_perm
if dist_avg_90pc_obs < 0:
frac_ex_90pc = len([q for q in dists_avg_90pc_perm if q < dist_avg_90pc_obs])/len(dists_avg_90pc_perm)
else:
frac_ex_90pc = len([q for q in dists_avg_90pc_perm if q > dist_avg_90pc_obs])/len(dists_avg_90pc_perm)
print(frac_ex_90pc)
# 95th percentiles
print('95th percentiles:')
avg_95pc_perm = np.mean(dgd_95pc_perm)
print(avg_95pc_perm)
dists_avg_95pc_perm = [q - avg_95pc_perm for q in dgd_95pc_perm]
dist_avg_95pc_obs = dgd_95pc_obs - avg_95pc_perm
if dist_avg_95pc_obs < 0:
frac_ex_95pc = len([q for q in dists_avg_95pc_perm if q < dist_avg_95pc_obs])/len(dists_avg_95pc_perm)
else:
frac_ex_95pc = len([q for q in dists_avg_95pc_perm if q > dist_avg_95pc_obs])/len(dists_avg_95pc_perm)
print(frac_ex_95pc)
# 99th percentiles
print('99th percentiles:')
avg_99pc_perm = np.mean(dgd_99pc_perm)
print(avg_99pc_perm)
dists_avg_99pc_perm = [q - avg_99pc_perm for q in dgd_99pc_perm]
dist_avg_99pc_obs = dgd_99pc_obs - avg_99pc_perm
if dist_avg_99pc_obs < 0:
frac_ex_99pc = len([q for q in dists_avg_99pc_perm if q < dist_avg_99pc_obs])/len(dists_avg_99pc_perm)
else:
frac_ex_99pc = len([q for q in dists_avg_99pc_perm if q > dist_avg_99pc_obs])/len(dists_avg_99pc_perm)
print(frac_ex_99pc)
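# Added sketch (an editorial suggestion, not part of the original analysis): the repeated
# comparisons above could be factored into one helper that returns the fraction of permuted
# statistics lying at least as far from the permuted mean as the observed value, on the same side.
def frac_more_extreme(perm_vals, obs_val):
    center = np.mean(perm_vals)
    d_obs = obs_val - center
    diffs = [q - center for q in perm_vals]
    if d_obs < 0:
        return len([d for d in diffs if d < d_obs]) / len(diffs)
    return len([d for d in diffs if d > d_obs]) / len(diffs)
# For example, frac_more_extreme(dgd_std_perm, dgd_std_obs) should reproduce frac_ex_std above.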
###Output
_____no_output_____
###Markdown
Inspect distances between climbing fibers when this reordering is done and compare with the observed distribution
###Code
# plt.figure()
# plt.hist(p3cld,alpha=0.7,label='observed connectivity')
# plt.hist(squareform(p3cpermdsq),alpha=0.7,label='permuted connectivity') # flattening
# plt.legend()
# plt.show()
###Output
_____no_output_____ |
scipy-2016-sklearn/notebooks/22 Unsupervised learning - Non-linear dimensionality reduction.ipynb | ###Markdown
SciPy 2016 Scikit-learn Tutorial Manifold LearningOne weakness of PCA is that it cannot detect non-linear features. A setof algorithms known as *Manifold Learning* have been developed to addressthis deficiency. A canonical dataset used in Manifold learning is the*S-curve*:
###Code
from sklearn.datasets import make_s_curve
X, y = make_s_curve(n_samples=1000)
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], X[:, 2], c=y)
ax.view_init(10, -60);
###Output
_____no_output_____
###Markdown
This is a 2-dimensional dataset embedded in three dimensions, but it is embeddedin such a way that PCA cannot discover the underlying data orientation:
###Code
from sklearn.decomposition import PCA
X_pca = PCA(n_components=2).fit_transform(X)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y);
###Output
_____no_output_____
###Markdown
Manifold learning algorithms, however, available in the ``sklearn.manifold``submodule, are able to recover the underlying 2-dimensional manifold:
###Code
from sklearn.manifold import Isomap
iso = Isomap(n_neighbors=15, n_components=2)
X_iso = iso.fit_transform(X)
plt.scatter(X_iso[:, 0], X_iso[:, 1], c=y);
###Output
_____no_output_____
###Markdown
Manifold learning on the digits data We can apply manifold learning techniques to much higher dimensional datasets, for example the digits data that we saw before:
###Code
from sklearn.datasets import load_digits
digits = load_digits()
fig, axes = plt.subplots(2, 5, figsize=(10, 5),
subplot_kw={'xticks':(), 'yticks': ()})
for ax, img in zip(axes.ravel(), digits.images):
ax.imshow(img, interpolation="none", cmap="gray")
###Output
_____no_output_____
###Markdown
We can visualize the dataset using a linear technique, such as PCA. We saw this already provides some intuition about the data:
###Code
# build a PCA model
pca = PCA(n_components=2)
pca.fit(digits.data)
# transform the digits data onto the first two principal components
digits_pca = pca.transform(digits.data)
colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525",
"#A83683", "#4E655E", "#853541", "#3A3120","#535D8E"]
plt.figure(figsize=(10, 10))
plt.xlim(digits_pca[:, 0].min(), digits_pca[:, 0].max() + 1)
plt.ylim(digits_pca[:, 1].min(), digits_pca[:, 1].max() + 1)
for i in range(len(digits.data)):
# actually plot the digits as text instead of using scatter
plt.text(digits_pca[i, 0], digits_pca[i, 1], str(digits.target[i]),
color = colors[digits.target[i]],
fontdict={'weight': 'bold', 'size': 9})
plt.xlabel("first principal component")
plt.ylabel("second principal component");
###Output
_____no_output_____
###Markdown
Using a more powerful, nonlinear techinque can provide much better visualizations, though.Here, we are using the t-SNE manifold learning method:
###Code
from sklearn.manifold import TSNE
tsne = TSNE(random_state=42)
# use fit_transform instead of fit, as TSNE has no transform method:
digits_tsne = tsne.fit_transform(digits.data)
plt.figure(figsize=(10, 10))
plt.xlim(digits_tsne[:, 0].min(), digits_tsne[:, 0].max() + 1)
plt.ylim(digits_tsne[:, 1].min(), digits_tsne[:, 1].max() + 1)
for i in range(len(digits.data)):
# actually plot the digits as text instead of using scatter
plt.text(digits_tsne[i, 0], digits_tsne[i, 1], str(digits.target[i]),
color = colors[digits.target[i]],
fontdict={'weight': 'bold', 'size': 9})
###Output
_____no_output_____
###Markdown
t-SNE has a somewhat longer runtime than other manifold learning algorithms, but the result is quite striking. Keep in mind that this algorithm is purely unsupervised, and does not know about the class labels. Still it is able to separate the classes very well (though the classes four, one and nine have been split into multiple groups). Exercises: Compare the results of applying isomap to the digits dataset to the results of PCA and t-SNE. Which result do you think looks best?
###Code
# %load solutions/22A_isomap_digits.py
from sklearn.manifold import Isomap
iso = Isomap(n_components=2)
digits_isomap = iso.fit_transform(digits.data)
plt.figure(figsize=(10, 10))
plt.xlim(digits_isomap[:, 0].min(), digits_isomap[:, 0].max() + 1)
plt.ylim(digits_isomap[:, 1].min(), digits_isomap[:, 1].max() + 1)
for i in range(len(digits.data)):
# actually plot the digits as text instead of using scatter
plt.text(digits_isomap[i, 0], digits_isomap[i, 1], str(digits.target[i]),
color = colors[digits.target[i]],
fontdict={'weight': 'bold', 'size': 9})
###Output
_____no_output_____
###Markdown
Given how well t-SNE separated the classes, one might be tempted to use this processing for classification. Try training a K-nearest neighbor classifier on digits data transformed with t-SNE, and compare to the accuracy on using the dataset without any transformation.
###Code
# %load solutions/22B_tsne_classification.py
from sklearn.manifold import TSNE
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, random_state=1)
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
print('KNeighborsClassifier accuracy without t-SNE: {}'.format(clf.score(X_test, y_test)))
tsne = TSNE(random_state=42)
digits_tsne_train = tsne.fit_transform(X_train)
digits_tsne_test = tsne.fit_transform(X_test)
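# Note (added): TSNE has no separate transform() method, so the training and test sets are
# embedded by two independent fit_transform calls. The two embeddings live in unrelated
# coordinate systems, which is why the nearest-neighbour accuracy on the t-SNE features
# printed below collapses to near chance level.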
clf = KNeighborsClassifier()
clf.fit(digits_tsne_train, y_train)
print('KNeighborsClassifier accuracy with t-SNE: {}'.format(clf.score(digits_tsne_test, y_test)))
###Output
KNeighborsClassifier accuracy without t-SNE: 0.9933333333333333
KNeighborsClassifier accuracy with t-SNE: 0.14444444444444443
|
09-vgg-class-weights.ipynb | ###Markdown
Table of Contents: 1. Load Libraries 2. Load data/Create data Generators 3. AUC callback function 4. Load the model & weights 5. Training 6. Prediction. Training after specifying class weights. Also, calculating AUC after every epoch. Load Libraries
###Code
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Sequential, load_model, Model
from keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.applications.vgg16 import VGG16
from keras_tqdm import TQDMNotebookCallback
from datetime import datetime
import os
import numpy as np
import pandas as pd
import math
pd.options.display.max_rows = 40
###Output
Using TensorFlow backend.
###Markdown
Load data/Create data Generators
###Code
validgen = ImageDataGenerator()
# 600/450 _ 500/375 _ 400/300 _ 300/225
img_width = 600
img_height = 450
train_data_dir = "data/train"
validation_data_dir = "data/valid"
test_data_dir = "data/test"
batch_size_train = 16
batch_size_val = 32
val_data = validgen.flow_from_directory(
directory = validation_data_dir,
target_size = (img_height, img_width),
batch_size = 568,
class_mode = "binary",
shuffle = False).next()
train_data = validgen.flow_from_directory(
directory = train_data_dir,
target_size = (img_height, img_width),
batch_size = 1727,
class_mode = "binary",
shuffle = False).next()
datagen = ImageDataGenerator(
rotation_range = 20,
width_shift_range = 0.2,
height_shift_range = 0.2,
horizontal_flip = True)
train_gen = datagen.flow_from_directory(
directory = train_data_dir,
target_size = (img_height, img_width),
batch_size = batch_size_train,
class_mode = "binary",
shuffle = True)
train_samples = len(train_gen.filenames)
###Output
Found 1727 images belonging to 2 classes.
###Markdown
AUC callback function
###Code
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
class auc_callback(keras.callbacks.Callback):
def __init__(self, val_data, init_epoch):
self.val_x = val_data[0]
self.val_y = val_data[1]
self.init_epoch = init_epoch
def on_train_begin(self, logs={}):
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
self.model.save_weights('vgg-class-weights-epoch-' + str(self.init_epoch + epoch) + '.hdf5')
val_pred = self.model.predict(self.val_x, batch_size=32, verbose=0)
val_roc = roc_auc_score(self.val_y, val_pred[:,0])
val_loss = log_loss(self.val_y, np.append(1 - val_pred, val_pred, axis=1))
val_acc = accuracy_score(self.val_y, val_pred >= 0.5)
print('\nVal AUC: ' + str(val_roc))
print('\nVal Los: ' + str(val_loss))
print('\nVal Acc: ' + str(val_acc) + '\n')
return
def on_batch_begin(self, batch, logs={}):
return
def on_batch_end(self, batch, logs={}):
return
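# Note (added): on_epoch_end above both checkpoints the weights every epoch
# ('vgg-class-weights-epoch-<n>.hdf5') and reports AUC, log loss and accuracy on the held-out
# validation arrays, since the compile() call below only tracks accuracy as a built-in metric.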
###Output
_____no_output_____
###Markdown
Load the model & weights
###Code
vgg16 = VGG16(weights = 'imagenet',include_top=False)
x = vgg16.get_layer('block5_conv3').output
x = GlobalAveragePooling2D()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model_final = Model(inputs=vgg16.input, outputs=x)
model_final.compile(loss = 'binary_crossentropy',
optimizer = SGD(lr = 0.0001, momentum = 0.9, decay = 1e-5),
metrics = ['accuracy'])
model_final.load_weights('./weights/weights-iter-6-epoch-05.hdf5')
val_pred = model_final.predict(val_data[0], batch_size=32)
log_loss(val_data[1], np.append(1 - val_pred, val_pred, axis=1))
accuracy_score(val_data[1], val_pred >= 0.5)
roc_auc_score(val_data[1], val_pred[:,0])
###Output
_____no_output_____
###Markdown
Training
###Code
model_final.compile(loss = 'binary_crossentropy',
optimizer = SGD(lr = 0.0001, momentum = 0.9, decay = 1e-5, nesterov = True),
metrics = ['accuracy'])
model_final.fit_generator(generator = train_gen,
epochs = 10,
steps_per_epoch = math.ceil(1727 / batch_size_train),
validation_data = None,
verbose = 2,
callbacks = [auc_callback(val_data, 0), TQDMNotebookCallback()],
class_weight = {0: 1090/1727, 1: 637/1727})
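# Note (added): the class_weight values are the two class counts divided by the total number
# of training images (1090 + 637 = 1727), so the binary cross-entropy loss weights the two
# classes unequally instead of treating every sample the same.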
model_final.load_weights('./vgg-class-weights-epoch-1.hdf5')
val_pred = model_final.predict(val_data[0], batch_size=32)
log_loss(val_data[1], np.append(1 - val_pred, val_pred, axis=1))
model_final.compile(loss = 'binary_crossentropy',
optimizer = SGD(lr = 0.00001, momentum = 0.9, decay = 1e-5, nesterov = True),
metrics = ['accuracy'])
model_final.fit_generator(generator = train_gen,
                          epochs = 10,
                          steps_per_epoch = math.ceil(1727 / batch_size_train),
                          validation_data = None,
                          verbose = 2,
                          callbacks = [auc_callback(val_data, 5), TQDMNotebookCallback()],
                          class_weight = {0: 1090/1727, 1: 637/1727})
model_final.load_weights('./vgg-class-weights-epoch-6.hdf5')
val_pred = model_final.predict(val_data[0], batch_size=32)
log_loss(val_data[1], np.append(1 - val_pred, val_pred, axis=1))
accuracy_score(val_data[1], val_pred >= 0.5)
roc_auc_score(val_data[1], val_pred[:,0])
model_final.compile(loss = 'binary_crossentropy',
optimizer = SGD(lr = 0.00001, momentum = 0.9, decay = 1e-5, nesterov = True),
metrics = ['accuracy'])
model_final.fit_generator(generator = train_gen,
epochs = 10,
steps_per_epoch = math.ceil(1727 / batch_size_train),
validation_data = None,
verbose = 2,
callbacks = [auc_callback(val_data, 7), TQDMNotebookCallback()],
class_weight = {0: 1090/1727, 1: 637/1727})
###Output
_____no_output_____
###Markdown
Prediction
###Code
model_final.load_weights('./vgg-class-weights-epoch-5.hdf5')
val_pred = model_final.predict(val_data[0], batch_size=32)
log_loss(val_data[1], np.append(1 - val_pred, val_pred, axis=1))
batch_size_test = 32
test_gen = validgen.flow_from_directory(
directory = test_data_dir,
target_size = (img_height, img_width),
batch_size = batch_size_test,
class_mode = "binary",
shuffle = False)
test_samples = len(test_gen.filenames)
preds = model_final.predict_generator(test_gen, math.ceil(test_samples / batch_size_test))
preds_filenames = test_gen.filenames
preds_filenames = [int(x.replace("unknown/", "").replace(".jpg", "")) for x in preds_filenames]
df_result = pd.DataFrame({'name': preds_filenames, 'invasive': preds[:,0]})
df_result = df_result.sort_values("name")
df_result.index = df_result["name"]
df_result = df_result.drop(["name"], axis=1)
df_result.to_csv("submission_10.csv", encoding="utf8", index=True)
from IPython.display import FileLink
FileLink('submission_10.csv')
# Got 0.99179 on LB
###Output
_____no_output_____ |
Week 1/Lecture_1_Intro to Programming.ipynb | ###Markdown
Lecture 1: Introduction to Programming. Agenda for the Class: 1. Python in-built datatypes 2. Basic mathematical operators and Precedence order 3. Python Interpreter vs Python for Scripting. Firstly we'll focus on the **datatypes**: 1. **Numeric** 2. **Strings** 3. **Lists**. General format for **Assigning** a variable a value: Variable_name = Variable_Value. 1. We **Do not** mention the datatype while assigning a variable a value in Python (i.e. Dynamically Typed). 2. **=** is used to assign a variable a value (L-value and R-value). 3. A variable name must follow certain naming conventions. Example: 'len' can be used as a variable name (though it shadows a built-in), but '23' cannot, since a name may not start with a digit. 4. There is no such thing as "variable declaration" or "variable initialization" in Python. It's only variable assignment. Numeric data
###Code
a=1
b=3.14
# Assigning value 1 to variable a and 3.14 to variable b
###Output
_____no_output_____
###Markdown
Mathematical Operations on Variables: 1. Add ('+') 2. Multiply ('*') 3. Subtract ('-') 4. Divide ('/') 5. Modulo ('%') 6. Exponentiation (\*\*). **Order of Precedence**: Exponent > (Multiply, Divide, Modulo) > (Add, Subtract)
###Code
a = 20
b = 10
c = 15
d = 5
e = 0
e = (a + b) * c / d #( 30 * 15 ) / 5
print ("Value of (a + b) * c / d is ", e)
e = ((a + b) * c) / d # (30 * 15 ) / 5
print ("Value of ((a + b) * c) / d is ", e)
e = (a + b) * (c / d); # (30) * (15/5)
print ("Value of (a + b) * (c / d) is ", e)
e = a + (b * c) / d; # 20 + (150/5)
print ("Value of a + (b * c) / d is ", e)
###Output
_____no_output_____
###Markdown
In case you are using Python 2 and want floating point division (e.g: 4/3 --> 1.33333333333 and not 4/3 --> 1) : For Python shell type in : from __future__ import print_function, division For a ".py" file : Use that import statement in the beginning of your Python file. Strings1. **Immutable** datatype2. String enclosed within **" String"** or **'String'**
###Code
course_name = "Introduction to Programming"
question = "Having a good time ? ;)"
print(course_name)
print(question)
###Output
_____no_output_____
###Markdown
Operations on Strings: 1. Since strings are immutable, we can't change the value stored in a string. 2. We can concatenate ('join') multiple strings. 3. Slice/substring operations
###Code
string_1 = "Hello World!"
n = len(string_1) # "len" gives us the number of characters in the string
print(string_1 + " has", n , "characters")
###Output
_____no_output_____
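###Markdown
A small added illustration (not part of the original notes): concatenation builds a brand-new string, while trying to modify a character of an existing string raises an error because strings are immutable.
###Code
greeting = "Hello" + " " + "World" # '+' creates a new string object
print(greeting)
# greeting[0] = "J" # uncommenting this raises TypeError: 'str' object does not support item assignment
###Output
_____no_output_____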
###Markdown
1. **Indexing**: Every character of the string can be accessed by its position in the string. 2. Indexing starts from zero. 3. Syntax: string_name[index_number]. Example:
###Code
print(string_1[0])
print(string_1[1])
print(string_1[-2])
###Output
_____no_output_____
###Markdown
Negative Indexing: string[-1] gives us the last character, string[-2] gives us the second last character, and so on... Slicing operations. Syntax: string_name[start_index:end_index]
###Code
print(string_1[0:2])
print(string_1[5 : len(string_1)])
print(string_1[0:4]+string_1[4:len(string_1)])
###Output
_____no_output_____
###Markdown
Lists: 1. Initializing syntax: list_name = [value_1,value_2,...,value_n] 2. Behaviour similar to strings 3. Mutable 4. Can contain multiple data types.
###Code
primes = [2,3,5,8,11]
print(primes)
print(primes[0])
print(len(primes))
classroom = ['L','T', 1]
print(classroom)
print((classroom[2]+ 4))
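# Added illustration: unlike strings, lists are mutable, so an element can be replaced in place
classroom[2] = 2
print(classroom) # ['L', 'T', 2]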
###Output
_____no_output_____ |
Training Notebook - LSTM.ipynb | ###Markdown
Data preparation and scrubbing
###Code
"""
Scrub non-viable data
A lot of this data is legacy stuff that was being tested for importance. In future none of this data will be
included in the dataset so scrubbing won't be needed
"""
def scrub(data):
# Drop incomplete data
try:
data.drop('Ltc_search_US', axis=1, inplace=True)
data.drop('Ltc_search_GB', axis=1, inplace=True)
data.drop('Ltc_search_FR', axis=1, inplace=True)
data.drop('Ltc_search_DE', axis=1, inplace=True)
data.drop('Ltc_search_RU', axis=1, inplace=True)
data.drop('Ltc_search_KR', axis=1, inplace=True)
data.drop('Eth_search_US', axis=1, inplace=True)
data.drop('Eth_search_GB', axis=1, inplace=True)
data.drop('Eth_search_FR', axis=1, inplace=True)
data.drop('Eth_search_DE', axis=1, inplace=True)
data.drop('Eth_search_RU', axis=1, inplace=True)
data.drop('Eth_search_KR', axis=1, inplace=True)
data.drop('Btc_search_US', axis=1, inplace=True)
data.drop('Btc_search_GB', axis=1, inplace=True)
data.drop('Btc_search_FR', axis=1, inplace=True)
data.drop('Btc_search_DE', axis=1, inplace=True)
data.drop('Btc_search_RU', axis=1, inplace=True)
data.drop('Btc_search_KR', axis=1, inplace=True)
data.drop('Etheur_gdax_low', axis=1, inplace=True)
data.drop('Etheur_gdax_high', axis=1, inplace=True)
data.drop('Etheur_gdax_open', axis=1, inplace=True)
data.drop('Etheur_gdax_close', axis=1, inplace=True)
data.drop('Etheur_gdax_vol', axis=1, inplace=True)
data.drop('Ltcusd_gdax_low', axis=1, inplace=True)
data.drop('Ltcusd_gdax_high', axis=1, inplace=True)
data.drop('Ltcusd_gdax_open', axis=1, inplace=True)
data.drop('Ltcusd_gdax_close', axis=1, inplace=True)
data.drop('Ltcusd_gdax_vol', axis=1, inplace=True)
data.drop('Ltceur_gdax_low', axis=1, inplace=True)
data.drop('Ltceur_gdax_high', axis=1, inplace=True)
data.drop('Ltceur_gdax_open', axis=1, inplace=True)
data.drop('Ltceur_gdax_close', axis=1, inplace=True)
data.drop('Ltceur_gdax_vol', axis=1, inplace=True)
except:
pass
# Testing only: drop google search trend data
try:
data.drop('Eth_search_worldwide', axis=1, inplace=True)
data.drop('Ltc_search_worldwide', axis=1, inplace=True)
data.drop('Btc_search_worldwide', axis=1, inplace=True)
except:
pass
# Testing only: drop LTC blockchain network data
try:
data.drop('Ltc_hashrate', axis=1, inplace=True)
data.drop('Ltc_addresses', axis=1, inplace=True)
data.drop('Ltc_supply', axis=1, inplace=True)
data.drop('Ltc_daily_trx', axis=1, inplace=True)
data.drop('Ltc_fee_per_trx', axis=1, inplace=True)
except:
pass
# Testing only: drop BTC blockchain network data
try:
data.drop('Btc_hashrate', axis=1, inplace=True)
data.drop('Btc_addresses', axis=1, inplace=True)
data.drop('Btc_supply', axis=1, inplace=True)
data.drop('Btc_daily_trx', axis=1, inplace=True)
data.drop('Btc_fee_per_trx', axis=1, inplace=True)
except:
pass
# Testing only: drop ETH blockchain network data
try:
data.drop('Eth_hashrate', axis=1, inplace=True)
data.drop('Eth_addresses', axis=1, inplace=True)
data.drop('Eth_supply', axis=1, inplace=True)
data.drop('Eth_daily_trx', axis=1, inplace=True)
data.drop('Eth_fee_per_trx', axis=1, inplace=True)
except:
pass
# Testing only: drop LTC-USD kraken market data
try:
data.drop('Ltcusd_kraken_open', axis=1, inplace=True)
data.drop('Ltcusd_kraken_high', axis=1, inplace=True)
data.drop('Ltcusd_kraken_low', axis=1, inplace=True)
data.drop('Ltcusd_kraken_close', axis=1, inplace=True)
data.drop('Ltcusd_kraken_vol', axis=1, inplace=True)
except:
pass
# Testing only: drop BTC-USD gdax market data
#try:
#data.drop('Btcusd_gdax_open', axis=1, inplace=True)
#data.drop('Btcusd_gdax_high', axis=1, inplace=True)
#data.drop('Btcusd_gdax_low', axis=1, inplace=True)
#data.drop('Btcusd_gdax_close', axis=1, inplace=True)
#data.drop('Btcusd_gdax_vol', axis=1, inplace=True)
#except:
# pass
# Testing only: drop LTC-EUR kraken market data
try:
data.drop('Ltceur_kraken_open', axis=1, inplace=True)
data.drop('Ltceur_kraken_high', axis=1, inplace=True)
data.drop('Ltceur_kraken_low', axis=1, inplace=True)
data.drop('Ltceur_kraken_close', axis=1, inplace=True)
data.drop('Ltceur_kraken_vol', axis=1, inplace=True)
except:
pass
# Testing only: drop BTC-EUR gdax market data
#try:
#data.drop('Btceur_gdax_open', axis=1, inplace=True)
#data.drop('Btceur_gdax_high', axis=1, inplace=True)
#data.drop('Btceur_gdax_low', axis=1, inplace=True)
#data.drop('Btceur_gdax_close', axis=1, inplace=True)
#data.drop('Btceur_gdax_vol', axis=1, inplace=True)
#except:
# pass
# Testing only: drop ETH-USD kraken market data
#try:
#data.drop('Ethusd_kraken_open', axis=1, inplace=True)
#data.drop('Ethusd_kraken_high', axis=1, inplace=True)
#data.drop('Ethusd_kraken_low', axis=1, inplace=True)
#data.drop('Ethusd_kraken_close', axis=1, inplace=True)
#data.drop('Ethusd_kraken_vol', axis=1, inplace=True)
#except:
# pass
# Testing only: drop ETH-USD gdax market data
#try:
#data.drop('Ethusd_gdax_open', axis=1, inplace=True)
#data.drop('Ethusd_gdax_high', axis=1, inplace=True)
#data.drop('Ethusd_gdax_low', axis=1, inplace=True)
#data.drop('Ethusd_gdax_close', axis=1, inplace=True)
#data.drop('Ethusd_gdax_vol', axis=1, inplace=True)
#except:
#pass
# Testing only: drop ETH-EUR kraken market data
#try:
#data.drop('Etheur_kraken_open', axis=1, inplace=True)
#data.drop('Etheur_kraken_high', axis=1, inplace=True)
#data.drop('Etheur_kraken_low', axis=1, inplace=True)
#data.drop('Etheur_kraken_close', axis=1, inplace=True)
#data.drop('Etheur_kraken_vol', axis=1, inplace=True)
#except:
#pass
#data.drop('Ethusd_kraken_close', axis=1, inplace=True)
#data.drop('Btceur_kraken_open', axis=1, inplace=True)
data = data.astype('float64')
data = data.interpolate()
return data
data = scrub(data)
"""
Data visualisation to understand where missing data and null values are. Commented out as all validation on this
dataset has been done many times already
"""
#%matplotlib inline
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(data.head(1))
#msno.matrix(data)
#data.isnull().sum()
"""
Split data into training set and targets. Target variable selects the price we're looking to predict.
Forecast_range sets how many time periods / intervals out we're looking at. Currently set to 1 period of 1440 minutes i.e. 1 day
"""
target = "Btcusd_kraken_close"
forecast_range = 1
x, y, actuals = processor.generate_x_y(data, target=target, forecast_range=1)
"""
Normalise data
"""
x_mean = x.mean(axis=0)
x_std = x.std(axis=0)
x = (x - x_mean) / x_std
y_mean = y.mean()
y_std = y.std()
y = (y - y_mean) / y_std
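# Note (added): x_mean/x_std and y_mean/y_std are training-set statistics; they are reused
# further down to normalise the separately downloaded test data and to convert model outputs
# back into growth rates via model_output * y_std + y_mean.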
"""
Split data into training vs validation sets
"""
train_x, valid_x = x[:-30], x[-30:]
train_y, valid_y = y[:-30], y[-30:]
actuals = actuals[-30:] # These are raw prices to use when calculating actual returns from growth rates
###Output
_____no_output_____
###Markdown
Prediction using LSTM
###Code
# Reshape data from (num_samples, features) to (num_samples, sequence_length, features)
sequence_length = 4
def seq_data(data_x, data_y, seq_length):
seq_data_x = []
seq_data_y = []
for ii in range(len(data_x) - seq_length + 1):
seq_data_x.append(data_x[ii : ii + seq_length])
seq_data_y.append(data_y[ii + seq_length-1])
return np.array(seq_data_x), np.array(seq_data_y)
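# Note (added): with this windowing, an input of shape (num_samples, num_features) becomes
# (num_samples - seq_length + 1, seq_length, num_features), and each target is the y value
# aligned with the last timestep of its window.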
# Add the last x time periods from before the validation set starts so that the first datapoint for the validation data
# also has the relevant price history for predictions
valid_x_2 = np.concatenate((train_x[-sequence_length + 1:], valid_x)) # Give full sequence length to first validation datapoint
valid_y_2 = np.concatenate((train_y[-sequence_length + 1:], valid_y)) # Give full sequence length to first validation datapoint
# Convert to sequential data feed for LSTM
train_x_seq, train_y_seq = seq_data(train_x, train_y, sequence_length)
valid_x_seq, valid_y_seq = seq_data(valid_x_2, valid_y_2, sequence_length)
class LSTM_net:
"""
RNN using LSTM
"""
def __init__(self, input_size, learning_rate):
self.input_size = input_size
self.learning_rate = learning_rate
self.build_model()
def build_model(self):
self.model = Sequential()
self.model.add(LSTM(256, return_sequences=True,
input_shape=self.input_size))
#self.model.add(Dropout(0.2))
self.model.add(LSTM(256))
#self.model.add(Dropout(0.2))
self.model.add(Dense(1, activation='linear'))
# Define optimiser and compile
optimizer = optimizers.Adam(self.learning_rate)
self.model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])
# Initialise weight saving callback
LSTM_checkpointer = ModelCheckpoint(filepath='./saved_models/LSTM_weights.hdf5',
verbose=1, save_best_only=True)
# Initialise training hyper parameters
learning_rate = 0.00001
input_size = (train_x_seq.shape[1], train_x_seq.shape[2])
epochs = 1500
batch_size = 64
# Initialise neural network
LSTM_network = LSTM_net(input_size, learning_rate)
# Start training
LSTM_network.model.fit(train_x_seq, train_y_seq,
batch_size=batch_size, epochs=epochs,
callbacks=[LSTM_checkpointer],
validation_data=(valid_x_seq, valid_y_seq))
# Load the model weights with the best validation loss.
LSTM_network.model.load_weights('saved_models/LSTM_weights.hdf5')
prediction = []
for ii in range(len(valid_x_seq)):
input_data = np.reshape(valid_x_seq[ii], (-1, valid_x_seq.shape[1], valid_x_seq.shape[2]))
model_output = LSTM_network.model.predict(input_data)
prediction.append(model_output.item() * y_std + y_mean)
predicted_price = ([x + 1 for x in prediction]) * actuals
%matplotlib inline
plt.plot(range(1, len(predicted_price)+1), predicted_price, label='Predicted price')
plt.plot(range(len(predicted_price)), actuals, label='Actual price')
plt.legend()
_ = plt.ylim()
# Simulate returns for validation period
position = 0
cash = 1000
for ii in range(len(predicted_price)):
action = ""
if prediction[ii] > 0:
position += cash / actuals[ii] * 0.997 # 0.997 to account for fees
cash = 0
action = "BUY"
if prediction[ii] < 0:
cash += position * actuals[ii] * 0.997
position = 0
action = "SELL"
print("Day {}: {}. Price expected to change from {} to {}. Portfolio value of {}".format(ii, action, actuals[ii], predicted_price[ii], position*actuals[ii] + cash))
###Output
Day 0: BUY. Price expected to change from 10889.0 to 11089.177956500593. Portfolio value of 997.0
Day 1: BUY. Price expected to change from 10699.7 to 11495.111988314078. Portfolio value of 979.667637064928
Day 2: SELL. Price expected to change from 11303.0 to 11185.93113113797. Portfolio value of 1031.8012422628342
Day 3: SELL. Price expected to change from 11156.1 to 10942.29527413471. Portfolio value of 1031.8012422628342
Day 4: BUY. Price expected to change from 11206.8 to 11560.044999244956. Portfolio value of 1028.7058385360458
Day 5: SELL. Price expected to change from 11600.0 to 11524.36489784756. Portfolio value of 1061.6044512115036
Day 6: SELL. Price expected to change from 11309.7 to 11248.214704333112. Portfolio value of 1061.6044512115036
Day 7: SELL. Price expected to change from 10939.1 to 10230.489292186157. Portfolio value of 1061.6044512115036
Day 8: BUY. Price expected to change from 9750.1 to 9866.305487972206. Portfolio value of 1058.419637857869
Day 9: SELL. Price expected to change from 10020.3 to 8755.075562543509. Portfolio value of 1084.487877081827
Day 10: BUY. Price expected to change from 8522.7 to 8663.736664816735. Portfolio value of 1081.2344134505818
Day 11: BUY. Price expected to change from 8150.6 to 8883.351323356006. Portfolio value of 1034.027856227523
Day 12: SELL. Price expected to change from 8975.0 to 7727.444690654935. Portfolio value of 1135.1997165378123
Day 13: SELL. Price expected to change from 8012.0 to 7109.329679875536. Portfolio value of 1135.1997165378123
Day 14: BUY. Price expected to change from 6869.5 to 7791.067087301349. Portfolio value of 1131.7941173881989
Day 15: BUY. Price expected to change from 7262.2 to 7332.8283146195445. Portfolio value of 1196.4939572452984
Day 16: SELL. Price expected to change from 8132.4 to 8115.4873711466325. Portfolio value of 1335.8453850799976
Day 17: BUY. Price expected to change from 7861.0 to 8314.422103115441. Portfolio value of 1331.8378489247577
Day 18: SELL. Price expected to change from 8854.6 to 8223.100668779622. Portfolio value of 1495.676471547881
Day 19: BUY. Price expected to change from 8066.4 to 8269.760365694858. Portfolio value of 1491.1894421332374
Day 20: BUY. Price expected to change from 8506.4 to 8697.066802691457. Portfolio value of 1572.5297370031453
Day 21: SELL. Price expected to change from 8680.0 to 8374.004818291438. Portfolio value of 1599.808314073608
Day 22: BUY. Price expected to change from 8661.8 to 9279.512478220397. Portfolio value of 1595.0088891313871
Day 23: BUY. Price expected to change from 9490.1 to 9998.513478193378. Portfolio value of 1747.5344453515181
Day 24: BUY. Price expected to change from 10131.1 to 10142.9256355393. Portfolio value of 1865.5700381767067
Day 25: BUY. Price expected to change from 10149.1 to 11121.190595522681. Portfolio value of 1868.884610206119
Day 26: SELL. Price expected to change from 11100.0 to 10292.923364050599. Portfolio value of 2037.8541265499462
Day 27: BUY. Price expected to change from 10569.9 to 11158.451179599031. Portfolio value of 2031.7405641702962
Day 28: BUY. Price expected to change from 11446.1 to 11477.775001441747. Portfolio value of 2200.16326280756
Day 29: SELL. Price expected to change from 10880.0 to 10580.839636463794. Portfolio value of 2085.0737780072004
###Markdown
Some results from playing with hyperparameters:
Model 1: LSTM layers = 2, Hidden nodes = 128, learning rate = 0.00001, batch size = 64, sequence_length = 3, epochs = 2000, val_loss = 0.75104, Portfolio value = $1908
Model 2: LSTM layers = 2, Hidden nodes = 256, learning rate = 0.00001, batch size = 64, sequence_length = 4, epochs = 2000, val_loss = 0.71283, Portfolio value = 1908
Model 3: Dropout 20%, LSTM layers = 2, Hidden nodes = 256, learning rate = 0.00001, batch size = 64, sequence_length = 4, epochs = 2000, val_loss = 0.82500, Portfolio value = 1908
Model 4 - Market data only: LSTM layers = 2, Hidden nodes = 256, learning rate = 0.00001, batch size = 64, sequence_length = 4, epochs = 1250, val_loss = 0.77, Portfolio value = 2068
Model 5 - BTC Kraken market data only: LSTM layers = 2, Hidden nodes = 256, learning rate = 0.00001, batch size = 64, sequence_length = 4, epochs = 1500, val_loss = 0.67853, Portfolio value = 2017
Model 6 - All BTC market data only: LSTM layers = 2, Hidden nodes = 256, learning rate = 0.00001, batch size = 64, sequence_length = 4, epochs = 1500, val_loss = 0.83381, Portfolio value = 1961
Model 6 - All BTC and ETH market data only: LSTM layers = 2, Hidden nodes = 256, learning rate = 0.00001, batch size = 64, sequence_length = 4, epochs = 1500, val_loss = 0.68054, Portfolio value = 2011
Downloading and evaluating against new test data
###Code
start_time_n = datetime(2018,2,21)
end_time_n = datetime(2018,3,17)
interval = 1440
load_test = True
if load_test:
with open('pickles/test_data.pickle', 'rb') as f:
test_data = pickle.load(f)
else:
test_data = processor.historical_download(start_time_n, end_time_n, interval)
with open('pickles/test_data.pickle', 'wb') as f:
pickle.dump(test_data, f)
test_data = scrub(test_data)
test_data.drop('Etheur_gdax_low', axis=1, inplace=True)
test_data.drop('Etheur_gdax_high', axis=1, inplace=True)
test_data.drop('Etheur_gdax_open', axis=1, inplace=True)
test_data.drop('Etheur_gdax_close', axis=1, inplace=True)
test_data.drop('Etheur_gdax_vol', axis=1, inplace=True)
#%matplotlib inline
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(data.head(1))
#msno.matrix(test_data)
test_x, test_y, test_actuals = processor.generate_x_y(test_data, target=target, forecast_range=1)
test_x = (test_x - x_mean) / x_std
test_y = (test_y - y_mean) / y_std
test_x_seq, test_y_seq = seq_data(test_x, test_y, sequence_length)
test_actuals_for_seq = test_actuals[sequence_length-1:]
# Load the model weights with the best validation loss.
LSTM_network.model.load_weights('saved_models/LSTM_weights.hdf5')
prediction = []
for ii in range(len(test_x_seq)):
input_data = np.reshape(test_x_seq[ii], (-1, test_x_seq.shape[1], test_x_seq.shape[2]))
model_output = LSTM_network.model.predict(input_data)
prediction.append(model_output.item() * y_std + y_mean)
predicted_price = ([x + 1 for x in prediction]) * test_actuals_for_seq
%matplotlib inline
plt.plot(range(1, len(predicted_price)+1), predicted_price, label='Predicted price')
plt.plot(range(len(predicted_price)), test_actuals_for_seq, label='Actual price')
plt.legend()
_ = plt.ylim()
# Simulate returns for validation period
position = 0
cash = 1000
for ii in range(len(predicted_price)):
action = ""
if prediction[ii] > 0:
position += cash / test_actuals_for_seq[ii] * 0.997 # 0.997 to account for fees
cash = 0
action = "BUY"
if prediction[ii] < 0:
cash += position * test_actuals_for_seq[ii] * 0.997
position = 0
action = "SELL"
print("Day {}: {}. Price expected to change from {} to {}. Portfolio value of {}".format(ii, action, test_actuals_for_seq[ii], predicted_price[ii], position*actuals[ii] + cash))
###Output
Day 0: BUY. Price expected to change from 9610.6 to 10225.412957730716. Portfolio value of 1129.620731275883
Day 1: BUY. Price expected to change from 10287.4 to 10646.260353774569. Portfolio value of 1109.9828210517555
Day 2: SELL. Price expected to change from 10914.0 to 10580.018480399887. Portfolio value of 1128.8175791313756
Day 3: BUY. Price expected to change from 10420.9 to 10927.263153353115. Portfolio value of 1204.8308868873032
Day 4: SELL. Price expected to change from 11114.9 to 10937.735615950454. Portfolio value of 1196.780245801821
Day 5: BUY. Price expected to change from 11326.6 to 11378.652402358062. Portfolio value of 1221.990968052833
Day 6: BUY. Price expected to change from 11150.8 to 11506.42655087152. Portfolio value of 1191.4095906368213
Day 7: SELL. Price expected to change from 11539.7 to 11535.15340305478. Portfolio value of 1211.9918057342381
Day 8: SELL. Price expected to change from 11333.3 to 10643.063981050189. Portfolio value of 1211.9918057342381
Day 9: SELL. Price expected to change from 10718.4 to 9641.927547883493. Portfolio value of 1211.9918057342381
Day 10: SELL. Price expected to change from 9903.0 to 9162.008435793656. Portfolio value of 1211.9918057342381
Day 11: SELL. Price expected to change from 9290.1 to 9210.496167970654. Portfolio value of 1211.9918057342381
Day 12: SELL. Price expected to change from 9365.4 to 9184.877446253227. Portfolio value of 1211.9918057342381
Day 13: BUY. Price expected to change from 8577.7 to 9537.312688061927. Portfolio value of 1128.6646668104604
Day 14: SELL. Price expected to change from 9613.5 to 9370.444406674462. Portfolio value of 1350.2080031277103
Day 15: BUY. Price expected to change from 9200.9 to 9233.754877304431. Portfolio value of 1062.5117237045413
Day 16: SELL. Price expected to change from 9141.2 to 8353.966115990892. Portfolio value of 1333.4105742367012
Day 17: BUY. Price expected to change from 8175.9 to 8461.00290756633. Portfolio value of 1278.2072557764266
|
arch/2-ARCH-All-Annotations-In-One-File.ipynb | ###Markdown
Make one json file with all annotations
###Code
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import imageio, skimage
from collections import Counter
DATA_ROOT = "datasets/ARCH"
os.listdir(f'../{DATA_ROOT}')
###Output
_____no_output_____
###Markdown
Books set
###Code
book_set_dir = f'../{DATA_ROOT}/books_set'
os.listdir(book_set_dir)
###Output
_____no_output_____
###Markdown
Readme
###Code
!cat ../datasets/ARCH/books_set/README.md
###Output
_____no_output_____
###Markdown
Images
###Code
bookset_image_dir = f'{book_set_dir}/images'
len(os.listdir(bookset_image_dir))
bookset_uuids_to_extensions = {
file_name.split('.')[0]: file_name.split('.')[1]
for file_name in os.listdir(f'{book_set_dir}/images')
}
len(bookset_uuids_to_extensions)
###Output
_____no_output_____
###Markdown
Captions
###Code
with open(f'{book_set_dir}/captions.json', 'r') as f:
bookset_captions = json.load(f)
len(bookset_captions)
bookset_captions
bookset_captions_all_images_present = {idx: ann for (idx, ann) in bookset_captions.items()
if ann['uuid'] in bookset_uuids_to_extensions}
len(bookset_captions_all_images_present)
bookset_captions_all_images_present
###Output
_____no_output_____
###Markdown
PubMed Set
###Code
pubmed_set_dir = f'../{DATA_ROOT}/pubmed_set'
os.listdir(pubmed_set_dir)
###Output
_____no_output_____
###Markdown
Readme
###Code
!cat ../datasets/ARCH/pubmed_set/README.md
###Output
_____no_output_____
###Markdown
Images
###Code
pubmed_image_dir = f'{pubmed_set_dir}/images'
len(os.listdir(pubmed_image_dir))
pubmed_uuids_to_extensions = {
file_name.split('.')[0]: file_name.split('.')[1]
for file_name in os.listdir(f'{pubmed_set_dir}/images')
}
len(pubmed_uuids_to_extensions)
###Output
_____no_output_____
###Markdown
Captions
###Code
with open(f'{pubmed_set_dir}/captions.json', 'r') as f:
pubmed_captions = json.load(f)
pubmed_captions
pubmed_captions_all_images_present = {idx: ann for (idx, ann) in pubmed_captions.items()
if ann['uuid'] in pubmed_uuids_to_extensions}
len(pubmed_captions_all_images_present)
###Output
_____no_output_____
###Markdown
Unified Set Make unified set
###Code
arch_captions_all_images_present = {}
i = 0
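# Merge both caption sets into one dict keyed by consecutive integers; every entry gets a
# 'source' tag and a relative image 'path', and the asserts verify that each image file exists.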
for idx, ann in bookset_captions_all_images_present.items():
arch_captions_all_images_present[str(i)] = ann
source = 'books'
arch_captions_all_images_present[str(i)]['source'] = source
path = f"{source}_set/images/{ann['uuid']}.{bookset_uuids_to_extensions[ann['uuid']]}"
path_with_root = f"../{DATA_ROOT}/{path}"
assert os.path.exists(path_with_root), f"{path_with_root}"
arch_captions_all_images_present[str(i)]['path'] = path
i += 1
for idx, ann in pubmed_captions_all_images_present.items():
arch_captions_all_images_present[str(i)] = ann
arch_captions_all_images_present[str(i)]['letter'] = None
arch_captions_all_images_present[str(i)]['figure_id'] = None
source = 'pubmed'
arch_captions_all_images_present[str(i)]['source'] = source
path = f"{source}_set/images/{ann['uuid']}.{pubmed_uuids_to_extensions[ann['uuid']]}"
path_with_root = f"../{DATA_ROOT}/{path}"
assert os.path.exists(path_with_root), f"{path_with_root}"
arch_captions_all_images_present[str(i)]['path'] = path
i += 1
arch_captions_all_images_present
arch_captions_all_images_present['0']
arch_captions_all_images_present['4270']
###Output
_____no_output_____
###Markdown
Save the unified set
###Code
%ls ../datasets/ARCH
annotations_dir = f'../{DATA_ROOT}/annotations'
if not os.path.exists(annotations_dir):
    os.mkdir(annotations_dir)
with open(f'../{DATA_ROOT}/annotations/captions_all.json', 'w') as f:
json.dump(arch_captions_all_images_present, f)
###Output
_____no_output_____
###Markdown
Check the unified dataset
###Code
import json
with open(f'../{DATA_ROOT}/annotations/captions_all.json', 'r') as f:
arch_captions_all_images_present = json.load(f)
import pandas as pd
arch_captions_df = pd.DataFrame(arch_captions_all_images_present).T
# check that the 'uuid'-s are unique and fine
assert len(arch_captions_df.uuid) == arch_captions_df.uuid.nunique()
arch_captions_df
arch_captions_df.nunique()
###Output
_____no_output_____
###Markdown
Save a mapping of UUIDs to integersNot sure if it's better to do here or in the dataset class on the fly
###Code
# # create the mappings
# uuids_to_ints = {}
# ints_to_uuids = {}
# # fill in the mappings
# for idx, uuid in enumerate(arch_captions_df.uuid):
# #print(idx, uuid)
# uuids_to_ints[uuid] = idx
# ints_to_uuids[idx] = uuid
# # save the mappings
# with open('../datasets/ARCH/annotations/uuids_to_ints.json', 'w') as f:
# json.dump(uuids_to_ints, f)
# with open('../datasets/ARCH/annotations/ints_to_uuids.json', 'w') as f:
# json.dump(ints_to_uuids, f)
# print("Saved the mappings.")
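# A possible alternative (sketch only, not part of the original project): build the mapping
# on the fly inside a dataset class. The class and attribute names below are illustrative.
# class ArchCaptions:
#     def __init__(self, annotations):
#         uuids = sorted(ann['uuid'] for ann in annotations.values())
#         self.uuid_to_int = {u: i for i, u in enumerate(uuids)}
#         self.int_to_uuid = {i: u for i, u in enumerate(uuids)}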
import os
os.listdir(f'../{DATA_ROOT}/annotations/')
###Output
_____no_output_____ |
examples/strategies/real-time-render.ipynb | ###Markdown
Param search Imports
###Code
import logging
from tinkoff.invest.mock_services import MockedSandboxClient
from decimal import Decimal
from tinkoff.invest.strategies.moving_average.strategy_settings import (
MovingAverageStrategySettings,
)
from tinkoff.invest import CandleInterval, MoneyValue
from tinkoff.invest.strategies.moving_average.signal_executor import (
MovingAverageSignalExecutor,
)
from tinkoff.invest.strategies.moving_average.supervisor import (
MovingAverageStrategySupervisor,
)
from tinkoff.invest.strategies.moving_average.strategy_state import (
MovingAverageStrategyState,
)
from tinkoff.invest.strategies.moving_average.strategy import MovingAverageStrategy
from tinkoff.invest.strategies.moving_average.trader import MovingAverageStrategyTrader
from datetime import timedelta, datetime, timezone
from tinkoff.invest.typedefs import ShareId, AccountId
from tinkoff.invest.strategies.base.account_manager import AccountManager
from tinkoff.invest.strategies.moving_average.plotter import (
MovingAverageStrategyPlotter,
)
logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
###Output
_____no_output_____
###Markdown
Setup
###Code
token = ""  # TODO: fill in your Tinkoff Invest API token
###Output
_____no_output_____
###Markdown
Settings
###Code
figi = ShareId("BBG0013HGFT4")
account_id = AccountId("1337007228")
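# Strategy configuration: the instrument and account above, a cap on the size of a single
# transaction, the candle interval, and the long/short moving-average windows plus the window
# used for the standard deviation estimate.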
settings = MovingAverageStrategySettings(
share_id=figi,
account_id=account_id,
max_transaction_price=Decimal(10000),
candle_interval=CandleInterval.CANDLE_INTERVAL_1_MIN,
long_period=timedelta(minutes=100),
short_period=timedelta(minutes=50),
std_period=timedelta(minutes=30),
)
###Output
_____no_output_____
###Markdown
Stocks for date
###Code
def start_datetime() -> datetime:
return datetime(year=2022, month=2, day=16, hour=17, tzinfo=timezone.utc)
real_market_data_test_from = start_datetime() - timedelta(days=1)
real_market_data_test_start = start_datetime()
real_market_data_test_end = start_datetime() + timedelta(days=3)
###Output
_____no_output_____
###Markdown
Initial balance
###Code
balance = MoneyValue(currency="rub", units=20050, nano=690000000)
###Output
_____no_output_____
###Markdown
Trader
###Code
with MockedSandboxClient(
token=token,
balance=balance,
) as mocked_services:
account_manager = AccountManager(
services=mocked_services, strategy_settings=settings
)
state = MovingAverageStrategyState()
strategy = MovingAverageStrategy(
settings=settings,
account_manager=account_manager,
state=state,
)
supervisor = MovingAverageStrategySupervisor()
signal_executor = MovingAverageSignalExecutor(
services=mocked_services,
state=state,
settings=settings,
)
moving_average_strategy_trader = MovingAverageStrategyTrader(
strategy=strategy,
settings=settings,
services=mocked_services,
state=state,
signal_executor=signal_executor,
account_manager=account_manager,
supervisor=supervisor,
)
plotter = MovingAverageStrategyPlotter(settings=settings)
initial_balance = account_manager.get_current_balance()
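    # Run 50 iterations: each one re-plots the events gathered so far and attempts a single
    # trade; note that any exception raised by trade() is silently swallowed below.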
for i in range(50):
logger.info("Trade %s", i)
events = list(supervisor.get_events())
plotter.plot(events)
try:
moving_average_strategy_trader.trade()
except Exception:
pass
current_balance = account_manager.get_current_balance()
assert initial_balance != current_balance
logger.info("Initial balance %s", initial_balance)
logger.info("Current balance %s", current_balance)
###Output
_____no_output_____ |
Diffusie-Week4-Group32.ipynb | ###Markdown
*edit this block (double-click to modify)* 2020 Diffusion tutorial assignment*** Date: 27-Feb-2020 Chapter: 4 Group number: 32*** Student 1 name: Julian van Doorn Student no.: s2518074 Student 2 name: Douwe Remmelts Student no.: s2586592
###Code
# Dit blok moet altijd als eerste worden uitgevoerd. Verwijder het dus niet!
# voer blokken uit met shift-enter, of met de ▶-knop in de knoppenbalk
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Opgave 4.3 a
n_molecules = 10000
n_t = 100
min_step = -5
max_step = 7
step_diff = max_step - min_step
richtingen = np.random.random((n_molecules, n_t)) * step_diff + min_step
plt.scatter(np.linspace(1, 101, 100), richtingen[:100,0])
plt.title('Richtingen voor t=1 van 100 moleculen')
plt.ylabel('Richting')
plt.xlabel('Molecuul')
plt.show()
# Opgave 4.3 b
bin_size = 0.2
# Bereken de bins en de centra van de bins
bins = np.arange(1 * min_step - bin_size / 2, 1 * max_step + bin_size / 2, bin_size)
centra = np.arange(1 * min_step, 1 * max_step, bin_size)
frequencies, grenzen = np.histogram(richtingen[:,0], bins)
kansdichtheid = frequencies / (bin_size * n_molecules)
plt.bar(centra, kansdichtheid, width=bin_size)
plt.title(f'Kansdichtheid met binsize = {bin_size}')
plt.xlabel('Positie')
plt.ylabel(r'$\rho(x)$')
plt.show()
###Output
_____no_output_____
###Markdown
We compute $p(x)$ by dividing the frequencies by the number of molecules, which gives the probability. We then divide by the bin size to normalize the probability, which gives $\rho(x)$. Opgave 4.3 c$N_{bins} = step\_diff / bin\_size$; the probability is the same everywhere, so $p(x) = 1 / N_{bins} = bin\_size / step\_diff$.Normalizing this gives $\rho(x) = p(x) / bin\_size = 1 / step\_diff$. With the step_diff of 12 used here this gives $\rho(x) = 1/12 \approx 0.083$, which matches what we see in our plot.
###Code
# Opgave 4.3 d
plt.bar(centra, kansdichtheid, width=bin_size, label='Experimentele PDF')
plt.hlines(1/step_diff, 1 * min_step, 1 * max_step, label='Theoretische PDF')
plt.title(f'Kansdichtheid met binsize = {bin_size}')
plt.xlabel('Positie')
plt.ylabel(r'$\rho(x)$')
plt.legend(loc='lower left')
plt.show()
# Opgave 4.3 e
bin_size = 0.7
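# The position of each molecule after t steps is the cumulative sum of its first t displacements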
posities = np.cumsum(richtingen, axis=1)
bins = np.arange(n_t * min_step - bin_size / 2, n_t * max_step + bin_size / 2, bin_size)
centra = np.arange(n_t * min_step, n_t * max_step, bin_size)
frequencies, grenzen = np.histogram(posities[:,n_t - 1], bins)
kansdichtheid = frequencies / (bin_size * n_molecules)
plt.bar(centra, kansdichtheid, width=bin_size)
plt.title(f'Kansdichtheid met binsize = {bin_size}')
plt.xlabel('Positie')
plt.ylabel(r'$\rho(x)$')
plt.show()
###Output
_____no_output_____
###Markdown
Opgave 4.3fWe widen the view so that every molecule is visible. We also make the bin size a bit larger to get a nicer figure: with a small bin size there are many empty bins, which makes the figure look "jumpy". Opgave 4.3g$\langle x\rangle=\int_{-5}^7x\rho(x)dx=\int_{-5}^7x\cdot\frac{1}{12}dx=\left[\frac{1}{24}x^2\right]_{-5}^7=1$
###Code
# Opgave 4.3h
average_speed = np.mean(posities[:, n_t - 1]) / n_t
print(f'Gemiddelde snelheid op t={n_t} is {average_speed:.3f}')
# Opgave 4.3i
average_speed_theorethical = (max_step + min_step) / 2
average_position = np.mean(posities, axis=0)
plt.scatter(np.linspace(0, n_t + 1, n_t), average_position, marker='x', label='Gesimuleerde gemiddelde positie', c='orange')
plt.plot(np.linspace(0, n_t + 1, n_t), np.linspace(0, n_t + 1, n_t) * average_speed_theorethical, label='Gemiddelde theoretische positie')
plt.title('Gemiddelde positie over tijd')
plt.xlabel('Tijd')
plt.ylabel('Positie')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Opgave 4.3j$\sigma^2(1)=\int_{-5}^{7}(x - \langle x\rangle)^2\rho(x) dx$. We know that $\langle x\rangle = 1$, so this gives: $\sigma^2=\int_{-5}^{7}(x-1)^2\rho(x) dx=\int_{-5}^{7}\frac{1}{12}(x^2-2x+1) dx = \frac{1}{12}\left[\frac{1}{3}x^3-x^2+x\right]_{-5}^7=12$
###Code
# Opgave 4.3k
D = 6
M1 = average_position
M2 = np.mean(np.square(posities), axis=0)
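# Variance from the first two moments: Var[x] = E[x^2] - (E[x])^2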
variantie = M2 - np.square(M1)
plt.scatter(np.linspace(0, n_t + 1, n_t), variantie, marker='x', label='Gesimuleerde variantie', c='orange')
plt.plot(np.linspace(0, n_t + 1, n_t), np.linspace(0, n_t + 1, n_t) * 2 * D, label='Theoretische variantie')
plt.title('Variantie over tijd')
plt.xlabel('Tijd')
plt.ylabel('Variantie')
plt.legend()
plt.show()
# Opgave 4.4a
n_deeltjes = 10000
steps = 100
min_step = 0
max_step = 1
bin_size = 0.1
time = 1
# Maakt een array met alle richtingen en bepaald zo de posities op elk moment
richtingen = -np.random.random((n_deeltjes, steps))**0.5 + 1
posities = np.cumsum(richtingen, axis=1)
# Bereken de bins en de centra
bins = np.arange(min_step, max_step, bin_size) * time
centra = bins[:-1] + 0.5 * bin_size
# Berekent de frequenties en de PDF
frequencies, grenzen = np.histogram(posities[:,time - 1], bins)
kansdichtheid = frequencies / (bin_size * n_deeltjes)
# Plot de kansdichtheid
plt.scatter(centra, kansdichtheid, marker='x', label='Gesimuleerde kansdichtheid', c='orange')
plt.plot(np.linspace(0,1), np.linspace(0,1)* -2 + 2, label='Theoretische kansdichtheid')
plt.title(f'Kansdichtheid met binsize = {bin_size}')
plt.xlabel('Positie')
plt.ylabel(r'$\rho(x)$')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Opgave 4.4bFor the probability density it can happen that, because of the normalization, the maximum "probability" ends up above 1. For a probability density only the area being 1 matters, not that the sum of all probabilities is 1; a probability itself can never be greater than 1. Opgave 4.4cIn our homework we found that $\langle x \rangle = \frac{1}{3}$. This means that $\langle x \rangle(t) = \frac{1}{3}t$.
###Code
# Opgave 4.4d
# Plot de posities
plt.scatter(np.linspace(0, steps + 1, steps), np.mean(posities, axis=0), marker='x', label='Gesimuleerde gemiddelde positie', c='orange')
plt.plot(np.linspace(0, n_t + 1, n_t), np.linspace(0, n_t + 1, n_t) / 3, label='Theoretische gemiddelde positie')
plt.title('Gemiddelde positie over tijd')
plt.xlabel('Tijd')
plt.ylabel('Positie')
plt.legend()
plt.show()
# Opgave 4.4e
M1 = np.mean(posities, axis=0) # Eerste moment
M2 = np.mean(np.square(posities), axis=0) # Tweede moment
variance = M2 - np.square(M1) # Variantie
# Plot de variantie
plt.scatter(np.linspace(0, steps + 1, steps), variance, marker='x', label='Gesimuleerde variantie', c='orange')
plt.plot(np.linspace(0, n_t + 1, n_t), np.linspace(0, n_t + 1, n_t) / 18, label='Theoretische variantie')
plt.title('Variantie over tijd')
plt.xlabel('Tijd')
plt.ylabel('Variantie')
plt.legend()
plt.show()
# Opgave 4.5a
bin_size = 0.05
for i in range(1, 11):
    # This for-loop plots the PDF for t = 1 through t = 10
bins = np.arange(min_step, max_step, bin_size) * i
centra = bins[:-1] + 0.5 * bin_size
frequencies, grenzen = np.histogram(posities[:,i - 1], bins)
kansdichtheid = frequencies / (i * bin_size * n_deeltjes)
plt.plot(centra, kansdichtheid, label=f'PDF for t={i}', marker='x', linestyle='dotted')
plt.title(f'PDF over tijd met binsize = {bin_size}')
plt.xlabel('Positie')
plt.ylabel('PDF')
plt.xlim(-0.5, 6)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Opgave 4.5bThe average speed is $1/3$, so $\mu = \frac{1}{3}t$. Furthermore we see that $2\sigma^2 = 4Dt \Longrightarrow \sigma^2 = 2Dt$. From this it follows that $\alpha = \frac{\sqrt{t}}{\sqrt{4Dt\pi}} = \frac{1}{2\sqrt{D\pi}}$.
###Code
# Opgave 4.5c
bin_size = 0.01
# Bereken de bins en de centra
bins = np.arange(min_step, max_step, bin_size) * steps
centra = bins[:-1] + 0.5 * bin_size
# Bereken de frequencies en de PDF
frequencies, grenzen = np.histogram(posities[:,steps - 1], bins)
kansdichtheid = frequencies / (steps * bin_size * n_deeltjes)
plt.scatter(centra, kansdichtheid, marker='x')
plt.title(f'PDF op t={steps}')
plt.xlabel('Positie')
plt.ylabel('PDF')
plt.show()
# Opgave 4.5d
from scipy.optimize import curve_fit
def NormalFunc(x, mu, sigma):
    # model: normal distribution PDF with mean mu and standard deviation sigma
return 1/(sigma*(2*np.pi)**0.5)*np.exp((-(np.square(x-mu)))/(2* sigma* sigma))
# Opgave 4.5e
# Probeert de normaal verdeling te fitten op de PDF van t=100
p_fit, cov = curve_fit(NormalFunc, np.linspace(0, steps, steps-1), kansdichtheid, p0=[100, 30])
# Plot de normaal verdelingen
plt.scatter(centra, kansdichtheid, marker='x', label='Gesimuleerde PDF', c='orange')
plt.plot(np.linspace(0, steps, steps-1), NormalFunc(np.linspace(0, steps, steps-1), *p_fit), label='Gefitte PDF')
plt.title(f'PDF op t={steps}')
plt.xlabel('Positie')
plt.ylabel('PDF')
plt.legend()
plt.show()
# Opgave 4.5f
print(f'mu={p_fit[0]:.2f}±{np.sqrt(cov[0,0]):.2f}')
print(f'sigma={p_fit[1]:.3f}±{np.sqrt(cov[1,1]):.3f}')
# Opgave 4.5g
steps=2
# Bereken de bins en de centra
bins = np.arange(min_step, max_step, bin_size) * steps
centra = bins[:-1] + 0.5 * bin_size
# Bereken de frequencies en de PDF
frequencies, grenzen = np.histogram(posities[:,steps - 1], bins)
kansdichtheid = frequencies / (steps * bin_size * n_deeltjes)
# Probeert de normaal verdeling te fitten op de PDF van t=2
p_fit, cov = curve_fit(NormalFunc, centra, kansdichtheid, p0=[1, 1])  # fit against the bin centres so x and y have the same length
# Plot de PDF
plt.scatter(centra, kansdichtheid, marker='x', label='Gesimuleerde PDF', c='orange')
plt.plot(np.linspace(0, steps, 100), NormalFunc(np.linspace(0, steps, 100), *p_fit), label='Gefitte PDF')
plt.title(f'PDF op t={steps}')
plt.xlabel('Positie')
plt.ylabel('PDF')
plt.legend()
plt.show()
print(f'mu={p_fit[0]:.2f}±{np.sqrt(cov[0,0]):.2f}')
print(f'sigma={p_fit[1]:.3f}±{np.sqrt(cov[1,1]):.3f}')
###Output
_____no_output_____
###Markdown
We do not see a good fit, and the error margins are also very large (certainly the one for mu). So this does not follow the normal distribution.
###Code
# Opgave 4.6a
time_tags1 = np.loadtxt('data/Timetags1.txt')
time_tags2 = np.loadtxt('data/Timetags2.txt')
# The row of each timestamp is simply equal to the number of rows printed before it,
# so for the y coordinates we can just use a linspace between 0 and the length of the arrays
plt.scatter(np.linspace(0, np.shape(time_tags1)[0], len(time_tags1)), time_tags1, label='Serie 1')
plt.scatter(np.linspace(0, np.shape(time_tags2)[0], len(time_tags2)), time_tags2, label='Serie 2')
plt.xlabel('Regel')
plt.ylabel('Tijd [s]')
plt.title('Tijd over regels')
plt.legend()
plt.show()
# Opgave 4.6b
print(f'Duratie serie 1 [s]: {np.max(time_tags1)}; Duratie serie 2 [s]: {np.max(time_tags2)}')
print(f'Frequentie photonen serie 1 [/s]: {len(time_tags1) / np.max(time_tags1)}; Frequentie photonen serie 2 [/s]: {len(time_tags2) / np.max(time_tags2)}')
###Output
Duratie serie 1 [s]: 339.8384593; Duratie serie 2 [s]: 339.83815899999996
Frequentie photonen serie 1 [/s]: 195.88718750997467; Frequentie photonen serie 2 [/s]: 730.2740831997033
###Markdown
The laser was probably on during series 2, because that is where most photons were detected. Opgave 4.6cYou can never have a perfectly dark room, so you will always detect a few photons.
###Code
# Opgave 4.6d
bin_size = 0.05
# Bereken de bins en de centra voor serie 1
bins = np.arange(0, max(time_tags1), bin_size)
centra1 = (bins[:-1] + 0.5 * bin_size)[:200]
# Bereken de histogram voor serie 1
time_tags1_hist, grenzen = np.histogram(time_tags1, bins)
# Bereken de bins en de centra voor serie 2
bins = np.arange(0, max(time_tags2), bin_size)
centra2 = (bins[:-1] + 0.5 * bin_size)[:200]
# Bereken de histogram voor serie 2
time_tags2_hist, grenzen = np.histogram(time_tags2, bins)
# 10 s / 50 ms = 200, so the arrays only need the first 200 elements
time_tags1_filtered = np.cumsum(time_tags1_hist[:200])
time_tags2_filtered = np.cumsum(time_tags2_hist[:200])
plt.scatter(centra1, time_tags1_filtered, label='Serie 1')
plt.scatter(centra2, time_tags2_filtered, label='Serie 2')
plt.xlabel('Tijd [s]')
plt.ylabel('Waarnemingen')
plt.title('Aantal waarnemingen over tijd (tot t=10s)')
plt.legend()
plt.show()
# Opgave 4.6e
lagtime1 = np.diff(time_tags1)
lagtime2 = np.diff(time_tags2)
bin_size = 0.0001
bins = np.arange(0, np.max(lagtime1), bin_size)
centra = bins[:-1] + 0.5 * bin_size
frequencies1, grenzen1 = np.histogram(lagtime1, bins)
plt.scatter(centra, np.log(frequencies1), label='Serie 1')
plt.title('Lagtime voor serie 1')
plt.xlabel('Lagtime')
plt.ylabel('Frequentie [log]')
plt.legend()
plt.show()
bins = np.arange(0, np.max(lagtime2), bin_size)
centra = bins[:-1] + 0.5 * bin_size
frequencies2, grenzen2 = np.histogram(lagtime2, bins)
plt.scatter(centra, np.log(frequencies2), label='Serie 2')
plt.title('Lagtime voor serie 2')
plt.xlabel('Lagtime')
plt.ylabel('Frequentie [log]')
plt.legend()
plt.show()
###Output
/Users/julian/PycharmProjects/Df/venv/lib/python3.7/site-packages/ipykernel_launcher.py:12: RuntimeWarning: divide by zero encountered in log
if sys.path[0] == '':
/Users/julian/PycharmProjects/Df/venv/lib/python3.7/site-packages/ipykernel_launcher.py:25: RuntimeWarning: divide by zero encountered in log
###Markdown
For the series 1 plot we see a linear relation between the lag time and log(frequency), so there is an exponential relation between lag time and frequency. For series 2 we also see a linear relation, until the lag time becomes small, where it increases exponentially. This difference is explained by the laser being on in series 2: it emits bursts in rapid succession, so more photons are observed at small lag times. This is also why series 2 ends up much higher than series 1 in the previous plot: photons with small lag times are emitted more often.
###Code
# Opgave 4.6f
def exponentieel(x, a, b):
    # This is the standard exponential function
return np.exp(a*x + b)
bins = np.arange(0, np.max(lagtime1), bin_size)
centra = bins[:-1] + 0.5 * bin_size
plt.scatter(centra, frequencies1, label='Serie 1', marker='x', color='orange')
# Probeert de exponentiele functie op de lag van sessie 1 te fitten
p_fit, cov = curve_fit(exponentieel, centra, frequencies1, p0=[-150, 7.5])
plt.plot(centra, exponentieel(centra, *p_fit), label='Fit')
plt.title('Lagtime voor serie 1')
plt.xlabel('Lagtime')
plt.ylabel('Frequentie')
plt.legend()
plt.show()
print(f'Voor y=e^(ax+b): a={p_fit[0]:.2f}±{np.sqrt(cov[0,0]):.2f}, b={p_fit[1]:.2f}±{np.sqrt(cov[1,1]):.2f}')
# Opgave 4.6g
lagtime2_start = np.where(lagtime2 > 0.002)[0][0]
lagtime2_filtered = lagtime2[lagtime2_start:]
bins = np.arange(0.002, np.max(lagtime2_filtered), bin_size)
centra = bins[:-1] + 0.5 * bin_size
frequencies2_filtered, grenzen2_filtered = np.histogram(lagtime2_filtered, bins)
plt.scatter(centra, frequencies2_filtered, label='Serie 1', marker='x', color='orange')
# Probeert de exponentiele functie op de lag van sessie 2 te fitten, waarvoor geldt dat lagtime > 2ms
p_fit, cov = curve_fit(exponentieel, centra, frequencies2_filtered, p0=[-150, 7.5])
plt.plot(centra, exponentieel(centra, *p_fit), label='Fit')
plt.title('Lagtime voor serie 2, met lagtime > 2ms')
plt.xlabel('Lagtime')
plt.ylabel('Frequentie')
plt.legend()
plt.show()
print(f'Voor y=e^(ax+b): a={p_fit[0]:.2f}±{np.sqrt(cov[0,0]):.2f}, b={p_fit[1]:.2f}±{np.sqrt(cov[1,1]):.2f}')
###Output
_____no_output_____
###Markdown
In $y=e^{ax + b}$ the exponent must be dimensionless, and we know that x is in seconds, which means that a must have units of $s^{-1}$. In this case a is therefore the ratio between the time lag and the number of observed photons.We see that for series 1, with the laser off, the frequency matches the average frequency we had computed. For series 2, however, we see a difference, which can be explained by the fact that we filtered out all the high frequencies.
###Code
def autocorrelate(time, I, tau):
    # This is the autocorrelation function
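    # Normalize I to zero mean and unit variance, then for every lag tau find the matching
    # bin shift and average the product of the trace with its shifted copy.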
I = (I - np.mean(I)) / np.std(I)
autocorrelation=[]
for t in tau:
shift = np.argmax(time > t)
autocorrelation.append(np.mean(I[:-shift]*I[shift:]))
return autocorrelation
# Opgave 4.6h
tau = np.logspace(-4, 1, 50)
bin_size = 0.0001
# Here the bins and bin centres for both series are computed.
# The bins are used for the histogram, and the centres are used together with
# the histogram to compute the autocorrelation
bins1 = np.arange(0, max(time_tags1), bin_size)
centra1 = bins1[:-1] + 0.5 * bin_size
intensity1, grenzen = np.histogram(time_tags1, bins1)
corr1 = autocorrelate(bins1, intensity1, tau)
bins2 = np.arange(0, max(time_tags2), bin_size)
centra2 = bins2[:-1] + 0.5 * bin_size
intensity2, grenzen = np.histogram(time_tags2, bins2)
corr2 = autocorrelate(centra2, intensity2, tau)
plt.scatter(np.log(tau), corr1, marker='x')
plt.title('Autocorrelatie van serie 1')
plt.xlabel('log(Tau)')
plt.ylabel('Autocorrelatie')
plt.show()
plt.scatter(np.log(tau), corr2, marker='x')
plt.title('Autocorrelatie van serie 2')
plt.xlabel('log(Tau)')
plt.ylabel('Autocorrelatie')
plt.show()
###Output
_____no_output_____
###Markdown
Because the laser was off in series 1, roughly the same number of photons was measured at every time, so the curve was a straight line. As a result, the curve has little or no overlap with itself when it is shifted in time. The measured number of photons in series 2 is far less uniform, because the laser was on. Accordingly we see that under small shifts there is still some overlap, until the shift becomes too large, after which the autocorrelation goes to 0.
###Code
# Opgave 4.6i
def FCS(x, N, tau):
    # This is the standard autocorrelation function for Fluorescence Correlation Spectroscopy
return 1/(N* (1 + x/tau)*((1+ (1/25)*x/tau))**0.5)
plt.scatter(np.log(tau), corr2, marker='x', color='orange')
plt.title('Autocorrelatie van serie 2 met fit')
plt.xlabel('log(Tau)')
plt.ylabel('Autocorrelatie')
# Probeert de FCS functie te fitten op de autocorrelatie van sessie 2
p_fit, cov = curve_fit(FCS, tau, corr2, p0=[4, 0.001])
plt.plot(np.log(tau), FCS(tau, *p_fit), label='Fit')
plt.show()
print(f'Tau = {p_fit[1]:.4f}±{np.sqrt(cov[1,1]):.4f}')
###Output
_____no_output_____ |
examples/.ipynb_checkpoints/classification_comparison-checkpoint.ipynb | ###Markdown
Setup code
###Code
# Install kdcrf if running in Google Colab
try:
import google.colab
IN_COLAB = True
except:
IN_COLAB = False
if IN_COLAB:
!git clone https://github.com/fagonzalezo/sklearn-kdcrf.git
!mv sklearn-kdcrf/kdcrf .
%matplotlib inline
import gzip
import pandas as pd
import pylab as pl
import h5py
import os
os.getcwd()
os.chdir('/tf/home/sklearn-kdcrf')
print(os.getcwd())
print(os.listdir())
from kdcrf import KDClassifierRF
from kdcrf import RBFSamplerORF
##approximate kernel RFF
from sklearn.model_selection import train_test_split
import numpy as np
from kdcrf import KDClassifierRF
from kdcrf import RBFSamplerORF
from sklearn import datasets, svm
# Import datasets, classifiers and performance metrics
!pip install wget
import wget
from sklearn.preprocessing import MinMaxScaler
import h5py
def classify(data_train, targets_train, data_test, targets_test, gammas, experiment="1"):
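    # For every gamma, fit each classifier on the training data, store its test accuracy in
    # `scores`, and also append the score to an HDF5 results file named after `experiment`.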
scores = {}
classifiers = {#'svm':('gamma', svm.SVC()),
'kdc exact':('gamma', KDClassifierRF(approx='exact')),
'lrff+ 2000':('gamma', KDClassifierRF(approx='lrff+',
n_components=data_train.shape[1],
random_state=1)),
'dmrff 2000':('gamma', KDClassifierRF(approx='dmrff',
n_components=data_train.shape[1],
random_state=1)),
'dmorf 2000':('gamma', KDClassifierRF(approx='dmrff',
n_components=data_train.shape[1],
random_state=1,
sampler=RBFSamplerORF(n_components=data_train.shape[1], random_state=1))),
'lrff+ orf 2000':('gamma', KDClassifierRF(approx='lrff+',
n_components=data_train.shape[1],
random_state=1,
sampler=RBFSamplerORF(n_components=data_train.shape[1], random_state=1))),
}
for clfn in classifiers.keys():
scores[clfn] = []
for gamma in gammas:
print('gamma:', gamma,' ',end='')
for clfn, (gname, clf) in classifiers.items():
print('clfn:', clfn)
clf.set_params(**{gname:gamma})
clf.fit(data_train, targets_train)
score = clf.score(data_test, targets_test)
scores[clfn].append(score)
print('clfn: class ', score)
with h5py.File("/tf/home/sklearn-kdcrf/examples/experiment_" + experiment + "_score.h5", mode="a") as file:
file.create_dataset(clfn + "_" + str(gamma), data=score)
return classifiers, scores
###Output
_____no_output_____
###Markdown
Kernel Density Classification for letters
###Code
## https://archive.ics.uci.edu/ml/datasets/Letter+Recognition
letter = wget.download("https://archive.ics.uci.edu/ml/machine-learning-databases/letter-recognition/letter-recognition.data")
letters = pd.read_csv("letter-recognition.data", header=None)
print(letters.head())
print(letters.describe())
vector = letters.values[:,1:]
labels = letters.values[:,0]
X_train, X_test, y_train, y_test = train_test_split(vector, labels, test_size=0.3, random_state=42)
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
gammas = [2**i for i in range(-7,9)]
classifiers, scores = classify(X_train, y_train, X_test, y_test, gammas)
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
with h5py.File("usps.h5", 'r') as hf:
train = hf.get('train')
X_tr = train.get('data')[:]
y_tr = train.get('target')[:]
test = hf.get('test')
X_te = test.get('data')[:]
y_te = test.get('target')[:]
scaler = MinMaxScaler()
scaler.fit(X_tr)
X_tr = scaler.transform(X_tr)
X_te = scaler.transform(X_te)
gammas = [2**i for i in range(-7,4)]
classifiers, scores = classify(X_tr, y_tr, X_te, y_te, gammas)
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
# Import datasets, classifiers and performance metrics
# The digits dataset
digits = datasets.load_digits(n_class=9)
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = (data[:n_samples // 2],
digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2:],
digits.target[n_samples // 2:])
gammas = [2**i for i in range(-7,4)]
classifiers, scores = classify(data_train, targets_train, data_test, targets_test, gammas)
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
###Output
_____no_output_____
###Markdown
Moon Database
###Code
from sklearn.datasets import make_moons
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
X, y = make_moons(n_samples=1000, noise=0.2, random_state=0)
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
#y = y[:, np.newaxis]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
gammas = [2**i for i in range(-7,10)]
classifiers, scores = classify(X_train, y_train, X_test, y_test, gammas)
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
###Output
_____no_output_____
###Markdown
Forest database
###Code
forest = wget.download("http://archive.ics.uci.edu/ml//machine-learning-databases/covtype/covtype.data.gz")
dataset = pd.read_csv('covtype.data.gz', nrows=100, compression='gzip',
error_bad_lines=False)
dataset = dataset.to_numpy()
X_train, X_test, y_train, y_test = train_test_split(
dataset[:,:-1], dataset[:, -1], test_size=0.33, random_state=42)
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
gammas = [2**i for i in range(-9,4)]
classifiers, scores = classify(X_train, y_train, X_test, y_test, gammas)
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
###Output
_____no_output_____
###Markdown
MNIST
###Code
from requests import get
def download_file(url, file_name):
with open(file_name, "wb") as file:
response = get(url)
file.write(response.content)
download_file('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', 'train-images-idx3-ubyte.gz')
download_file('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', 'train-labels-idx1-ubyte.gz')
download_file('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', 't10k-images-idx3-ubyte.gz')
download_file('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', 't10k-labels-idx1-ubyte.gz')
def read_mnist(images_path: str, labels_path: str):
with gzip.open(labels_path, 'rb') as labelsFile:
labels = np.frombuffer(labelsFile.read(), dtype=np.uint8, offset=8)
with gzip.open(images_path,'rb') as imagesFile:
length = len(labels)
# Load flat 28x28 px images (784 px), and convert them to 28x28 px
features = np.frombuffer(imagesFile.read(), dtype=np.uint8, offset=16) \
.reshape(length, 784)
return features, labels
train = {}
test = {}
train['features'], train['labels'] = read_mnist('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz')
test['features'], test['labels'] = read_mnist('t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz')
random_train = np.random.choice(range(train['features'].shape[0]), 10000, replace=False)
random_test = np.random.choice(range(test['features'].shape[0]), 10000, replace=False)
train_images = train['features'][random_train,:]
train_labels = train['labels'][random_train]
test_images = test['features'][random_test,:]
test_labels = test['labels'][random_test]
scaler = MinMaxScaler()
scaler.fit(train_images)
train_images = scaler.transform(train_images)
test_images = scaler.transform(test_images)
train_labels
gammas = [2**i for i in range(-8,8)]
classifiers, scores = classify(train_images, train_labels, test_images, test_labels, gammas)
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
###Output
_____no_output_____
###Markdown
Gisette
###Code
wget.download("https://archive.ics.uci.edu/ml/machine-learning-databases/gisette/GISETTE/gisette_train.data")
wget.download("https://archive.ics.uci.edu/ml/machine-learning-databases/gisette/GISETTE/gisette_train.labels")
wget.download("https://archive.ics.uci.edu/ml/machine-learning-databases/gisette/GISETTE/gisette_valid.data")
wget.download("https://archive.ics.uci.edu/ml/machine-learning-databases/gisette/gisette_valid.labels")
train_data = pd.read_csv("gisette_train.data", header=None, sep=" ")
train_labels = pd.read_csv("gisette_train.labels", header=None, sep=" ")
test_data = pd.read_csv("gisette_valid.data", header=None, sep=" ")
test_labels = pd.read_csv("gisette_valid.labels", header=None, sep=" ")
print(train_data.head())
print(train_data.describe())
scaler = MinMaxScaler()
scaler.fit(train_data)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
gammas = [2**i for i in range(-3,16)]
classifiers, scores = classify(train_data, train_labels, test_data, test_labels, gammas)
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
###Output
_____no_output_____
###Markdown
Cifar database
###Code
from examples.load_cifar_10 import cifar10
train_images, train_labels, test_images, test_labels = cifar10(path='data', is_one_hot=False)
train_images.shape
random_train = np.random.choice(range(train_images.shape[0]), 10000, replace=False)
random_test = np.random.choice(range(test_images.shape[0]), 5000, replace=False)
train_images = train_images[random_train,:]
train_labels = train_labels[random_train]
test_images = test_images[random_test,:]
test_labels = test_labels[random_test]
scaler = MinMaxScaler()
scaler.fit(train_images)
train_images = scaler.transform(train_images)
test_images = scaler.transform(test_images)
gammas = [2**i for i in range(-8,8)]
classifiers, scores = classify(train_images, train_labels, test_images, test_labels, gammas)
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
###Output
_____no_output_____
###Markdown
Extract Features from cifar with BIT - GOOGLE
###Code
#@title Imports
!pip install tensorflow_hub
!pip install tensorflow_datasets
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
import time
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
import os
# Load model into KerasLayer
model_url = "https://tfhub.dev/google/bit/m-r101x3/1"
module = hub.KerasLayer(model_url)
def preprocess_image(image):
image = np.array(image)
# reshape into shape [batch_size, height, width, num_channels]
img_reshaped = tf.reshape(image, [1, image.shape[0], image.shape[1], image.shape[2]])
# Use `convert_image_dtype` to convert to floats in the [0,1] range.
image = tf.image.convert_image_dtype(img_reshaped, tf.float32)
return image
def preprocess_batch_images(image):
image = np.array(image)
# reshape into shape [batch_size, height, width, num_channels]
img_reshaped = tf.reshape(image, [image.shape[0], image.shape[1], image.shape[2], image.shape[3]])
# Use `convert_image_dtype` to convert to floats in the [0,1] range.
image = tf.image.convert_image_dtype(img_reshaped, tf.float32)
return image
module.build([None, 32, 32, 3])
#mdule.get_weights()
def display_image(position):
image = np.reshape(train_images[position,:], (3,32,32))
image = image.transpose((1,2,0))
plt.imshow(image)
display_image(4000)
train_images_reshape = np.reshape(train_images, (train_images.shape[0], 3,32,32))
train_images_reshape = train_images_reshape.transpose((0,2,3,1))
reshape_images_train = preprocess_batch_images(train_images_reshape)
features_images_train = module(reshape_images_train)
features_images_train.shape
test_images_reshape = np.reshape(test_images, (test_images.shape[0], 3,32,32))
test_images_reshape = test_images_reshape.transpose((0,2,3,1))
reshape_images_test = preprocess_batch_images(test_images_reshape)
features_images_test = module(reshape_images_test)
features_images_test.shape
from sklearn.linear_model import RidgeClassifier
ridge_classifier = RidgeClassifier()
ridge_classifier.fit(features_images_train, train_labels)
print(ridge_classifier.score(features_images_test, test_labels))
svc_classifier = svm.LinearSVC()
svc_classifier.fit(features_images_train, train_labels)
score = svc_classifier.score(features_images_test, test_labels)
print(score)
with h5py.File("/tf/home/sklearn-kdcrf/examples/experiment_cifar_svm_score.h5", mode="a") as file:
file.create_dataset("linear_svm_cifar", data=score)
kdc_classifier = KDClassifierRF(approx='exact')
kdc_classifier.fit(features_images_train, train_labels)
score = kdc_classifier.score(features_images_test, test_labels)
print(score)
with h5py.File("/tf/home/sklearn-kdcrf/examples/experiment_cifar_kdc_score.h5", mode="a") as file:
file.create_dataset("kdc_cifar", data=score)
#print(np.bincount(kdc_classifier.predict(features_images_test)))
print(kdc_classifier.predict_proba(features_images_test))
gammas = [2**i for i in range(-2,2)]
classifiers, scores = classify(features_images_train, train_labels, features_images_test, test_labels, gammas, "cifar")
pl.rcParams["figure.figsize"] = (15,8)
for clfn in classifiers.keys():
pl.plot(np.arange(len(gammas)), scores[clfn], label=clfn)
pl.axes().set_xticks(np.arange(len(gammas)))
pl.axes().set_xticklabels(gammas)
pl.setp(pl.axes().get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
pl.legend()
module = hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/4",
trainable=False)
features = module(reshape_images_train) # A batch with shape [batch_size, num_features].
with h5py.File("/tf/home/sklearn-kdcrf/examples/experiment_cifar_svm_score.h5", mode="r") as file:
print(file["linear_svm_cifar"][()])
###Output
0.5464
|
03_StellarDynamics_HW_Chap06.ipynb | ###Markdown
03_StellarDynamics_HW_Chap06 Question (1) Low Surface Density Part
###Code
# initialize the symbols
kappa, kappa_0, P, T, F_rad, F_conv, a, b, mu = sp.symbols(
'kappa kappa_0 P T F_rad F_conv a b mu')
# kappa for opacity
exprK = sp.Eq(kappa, kappa_0 * P**a * T**b)
exprK
# eq (6.121), pressure for photosphere with optical depth = 3/2
eqPh = sp.Eq(sp.simplify('P_ph'),
sp.simplify('2/3 * (1+a) * g / (kappa)'))
eqPh
# approx expression for F_rad in (6.123)
exprFr = sp.Eq(F_rad, sp.simplify('a * c / 4 * T^4'))
exprFr
# (6.124)
exprFc = sp.Eq(F_conv, sp.simplify('1/2 * rho * c_P * v * T'))
exprFc
# approx the radial-velocity fluctuation
v = sp.Symbol('v')
gamma = sp.Symbol('gamma')
exprV = sp.simplify('1 / 2 * ((r * k * T)/(mu * m_H))^(1/2)').subs(
sp.sympify('r'), gamma)
sp.Eq(v, exprV)
# (6.126) approx ρcT to internal energy
exprInternal = sp.simplify('3 / 2 * rho * (k * T / mu/ m_H)') * gamma
eqInternal = sp.Eq(sp.simplify('rho * c_P * T'), exprInternal)
eqInternal
# ideal gas law
eqIdeal = sp.Eq(P, sp.simplify('rho * k * T'))
eqIdeal = sp.solve(eqIdeal, sp.simplify('rho'))[0]
sp.Eq(sp.simplify('rho'), eqIdeal)
# substitute (6.124)
exprFc = exprFc.subs(
eqInternal.lhs, eqInternal.rhs).subs(
v, exprV).subs(sp.simplify('rho'), eqIdeal)
exprFc
# This is eq (6.127)
# for energy conservation, the radiative flux which enters the
# sphere shold equals to the convective flux leaving the
# stellar interior
sp.Eq(exprFc.lhs, exprFr.lhs)
# So we have
eqFcFr = sp.Eq(exprFc.rhs, exprFr.rhs)
eqP = sp.Eq(sp.simplify('P_ph'), sp.solve(eqFcFr, P)[0])
eqP
# So, the effective temperature for the fully convective star
# in low surface density is
## here we substitute kappa and P_ph
eqP = eqP.subs(eqPh.lhs, eqPh.rhs).subs(exprK.lhs, exprK.rhs).subs(P, eqP.rhs)
## here we symbolically solve for T
sp.solve(eqP, T)
###Output
_____no_output_____
###Markdown
The above solution is awful. But we can roughly see that the exponents of, e.g., $\kappa_0$ is the same as `eq (6.129)`. There is come inconsistent between above solution and `eq (6.129)`. However, since `eq. (6.129)` is just a approximation, let's just take the approximation made by the textbook.
###Code
# eq (6.129), effective temperature T for low surface density
eqTeff = sp.Eq(sp.simplify('T_eff'),
sp.simplify('(M / mu^((a+1)/2) / kappa_0 / R^2)^(1/(b+3.5*(1+a)))'))
eqTeff
###Output
_____no_output_____
###Markdown
- Population 1
###Code
# Population I effective temperature
eqTeff.subs(a, 0.7).subs(b, 5.3).subs(kappa_0, 6.9*10**-26)
###Output
_____no_output_____
###Markdown
- Population 2
###Code
# Population II effective temperature
eqTeff.subs(a, 0.6).subs(b, 9.4).subs(kappa_0, 6.1*10**-40)
###Output
_____no_output_____
###Markdown
Question (2) Low Surface Density Part
###Code
eqL = sp.Eq(sp.simplify('L'),
sp.simplify('4 * pi * sigma * R^2 * T_eff^4'))
# equation of luminosity
eqL = sp.Eq(sp.simplify('L'),
sp.simplify('4 * pi * sigma * R^2 * T_eff^4'))
eqL
# substitute for effective temperature
eqL = eqL.subs(eqTeff.lhs, eqTeff.rhs)
eqL
###Output
_____no_output_____
###Markdown
- Population 1
###Code
# Population I luminosity
eqL.subs(a, 0.7).subs(b, 5.3).subs(kappa_0, 6.9*10**-26)
###Output
_____no_output_____
###Markdown
- Population 2
###Code
# Population II luminosity
eqL.subs(a, 0.6).subs(b, 9.4).subs(kappa_0, 6.1*10**-40)
###Output
_____no_output_____
###Markdown
Question (1) High Surface Density PartFor high surface density, the convective transport is efficient. The temperature gradient is adiabatic. So we may have...$$P \propto \rho^{\gamma} \propto \rho T$$$$P \propto T^{\gamma / (\gamma - 1)}$$
###Code
K = sp.Symbol('K')
eqAdiabatic = sp.Eq(P, K * T**(gamma/(gamma-1)))
eqAdiabatic
###Output
_____no_output_____
###Markdown
So, we would have the relation between $P_{ph}$ and $P_{c}$
###Code
Pc, Tc = sp.symbols('P_c T_c')
P_ph, T_eff = sp.symbols('P_ph T_eff')
eqAdiabaticHigh = sp.Eq(
eqAdiabatic.subs(P, Pc).lhs / eqAdiabatic.subs(T, Tc).rhs * K,
eqAdiabatic.subs(P, P_ph).lhs / eqAdiabatic.subs(T, T_eff).rhs * K
).subs(gamma, 5/3)
eqAdiabaticHigh
# P_ph Substitute with (6.121)
eqHighSurface = eqAdiabaticHigh.subs(P_ph, eqPh.rhs)
eqHighSurface
# substitute kappa
eqHighSurface = eqHighSurface.subs(kappa, exprK.rhs).subs(T, T_eff).subs(P, P_ph)
eqHighSurface
# it is awful that I need to replace P_ph with Pc/Tc again
eqHighSurface = eqHighSurface.subs(P_ph,
sp.solve(eqAdiabaticHigh, P_ph)[0])
eqHighSurface
###Output
_____no_output_____
###Markdown
I am not quite sure why the textbook does not just give the core profile, but our lecturer 老闕 seems to have given it in class:$$\frac{P_c}{T_c^{2.5}} \propto M^{-0.5} R^{1.5}$$
###Code
# substitute core profile with above relation provided by 老闕
M, R = sp.symbols('M R')
eqHighSurface = eqHighSurface.subs(Pc, M**-0.5 * R**1.5 * Tc**2.5)
eqHighSurface
# it seems Tc should have zero exponent,
eqHighSurface = eqHighSurface.subs(Tc, 1)
eqHighSurface
## substitute for g = M / R^2
g = sp.Symbol('g')
eqHighSurface = eqHighSurface.subs(g, sp.simplify('M / R^2'))
eqHighSurface
## solve for T_eff
eqHighSurface = sp.Eq(T_eff, sp.solve(eqHighSurface, T_eff)[0])
eqHighSurface
###Output
_____no_output_____
###Markdown
- Population 1
###Code
# Population 1 effective temperature
eqHighSurface.subs(
a, 0.7).subs(b, 5.3).subs(kappa_0, 6.9*10**-26)
###Output
_____no_output_____
###Markdown
This result is pretty funny XD. I am not sure why the program does not expand the exponents, but we can try to work them out by hand:- exponent of M : 3.7 * 0.0523 = 0.1935, the same as (6.135)- exponent of R : 9.1 * 0.0523 = 0.4759, not the same as (6.135)The exponent of R is wrong, so either the relation 老闕 gave is wrong or I used it in the wrong place, but the exponent of M is correct. - Population 2
###Code
# Population II effective temperature
eqHighSurface.subs(
a, 0.6).subs(b, 9.4).subs(kappa_0, 6.1*10**-40)
###Output
_____no_output_____
###Markdown
This time both exponents are wrong, and I am not quite sure where the calculation went astray. Question (2) High Surface Density Part
###Code
# eq of luminosity
eqLHighSurface = sp.Eq(
sp.simplify('L'), sp.simplify('4 * pi * sigma * R^2 * T_eff^4'))
eqLHighSurface
eqLHighSurface = eqLHighSurface.subs(T_eff, eqHighSurface.rhs)
eqLHighSurface
###Output
_____no_output_____
###Markdown
- Population 1
###Code
# population I luminosity
eqLHighSurface.subs(
a, 0.7).subs(b, 5.3).subs(kappa_0, 6.9*10**-26)
# population II luminosity
eqLHighSurface.subs(
a, 0.6).subs(b, 9.4).subs(kappa_0, 6.1*10**-40)
###Output
_____no_output_____
###Markdown
Question (1) Ionization ZonesThis part of the textbook is hard to follow, but it seems to be asking us to just grind through the algebra. Use the same adiabatic condition.
###Code
eqAdiabatic
###Output
_____no_output_____
###Markdown
The textbook says that the ionization zone is convective and the entropy across it is constant. So that $S(x=1) - S(x=0) = 0$, where x denotes the fraction of hydrogen ionized.Also, argued in the textbook, - x = 1 : the center of the star- x = 0 : at the base of the photosphereYeah ... so, basically we would have $$S(x=1, P=P_c) = S(x=0, P=P_{ph})$$
###Code
# entropy per unit mass
exprEntropy = sp.simplify(
'''
X * k / m_H *
(5 / 2 * (1 + x + delta) +
chi / k / T +
ln(2 * pi * m_H / h^2)^(3/2) +
delta * ln(2 * pi * m_He / h^2)^(3/2) +
x * ln(2 * pi * m_e / h^2)^(3/2) +
(1 + x + delta) * ln(k * T)^(5/2) * (1 + x + delta) / P)
''')
S = sp.Symbol('S')
epEntropy = sp.Eq(S, exprEntropy)
epEntropy
# lol this is ugly
###Output
_____no_output_____
###Markdown
Get the relation here, $$S(x=1, P=P_c) = S(x=0, P=P_{ph})$$
###Code
# S(x=1, P=P_c) = S(x=0, P=P_{ph})
x, delta = sp.symbols('x delta')
eqBoundary = sp.Eq(epEntropy.subs(x, 1).subs(P, Pc).subs(T, Tc).rhs,
epEntropy.subs(x, 0).subs(P, P_ph).subs(T, T_eff).rhs)
eqBoundary
# lol, extremely messy; solving this by hand would be maddening
# substitute delta = Y / (4 X)
eqBoundary = eqBoundary.subs(delta, sp.simplify('Y / (4 * X)'))
eqBoundary
###Output
_____no_output_____
###Markdown
And then, substitute with $$\frac{P_c}{T_c^{2.5}} \propto M^{-0.5} R^{1.5}$$
###Code
eqBoundary = eqBoundary.subs(Pc, M**-0.5 * R**1.5 * Tc**2.5)
eqBoundary
###Output
_____no_output_____
###Markdown
Oh no, it looks like $T_c$ cannot be eliminated. Let's try `simplify`. Note: `simplify` is sympy's lazy shortcut for simplifying an expression; it automatically returns whatever form it considers simpler.
###Code
eqBoundary = sp.simplify(eqBoundary)
eqBoundary
###Output
_____no_output_____
###Markdown
Having come this far, we can only push on:- use (6.121) to substitute for $P_{ph}$- substitute for $\kappa$- then use the adiabatic equation to substitute away the remaining $P_{ph}$, leaving $T_{eff}$ to solve for
###Code
# use (6.121)
eqBoundary = eqBoundary.subs(P_ph, eqPh.rhs)
eqBoundary
# replace kappa
eqBoundary = eqBoundary.subs(kappa, exprK.rhs.subs(P, P_ph).subs(T, T_eff))
eqBoundary
# adiabatic condition Pc / Tc^2.5 = P_ph / T_eff^2.5
# but here we simply replace Pc / Tc^2.5 with M^{-0.5} R^{1.5}
eqAdiabaticHigh.subs(eqAdiabaticHigh.lhs, M**-0.5 * R**1.5)
eqBoundary = eqBoundary.subs(
P_ph,
sp.solve(
eqAdiabaticHigh.subs(eqAdiabaticHigh.lhs, M**-0.5 * R**1.5),
P_ph
)[0]
)
eqBoundary
###Output
_____no_output_____
###Markdown
This looks like a rather serious failure, but let's still have it write out the answer.
###Code
sp.solve(eqBoundary, T_eff)
###Output
_____no_output_____ |
Docs/DataPackage-Introduction.ipynb | ###Markdown
_visualization functions, you can safely skip the following cell_
###Code
using System.Data;
using Microsoft.DotNet.Interactive.Formatting;
using static Microsoft.DotNet.Interactive.Formatting.PocketViewTags;
static void Dump(DataPackage dp, int? limit = null) {
var rs = dp.Resources
.Where(x => x.Format == "csv")
.Select(x => (Heading: th(x.Name), Result: td(ReadAsHtmlTable(x.Read(), limit: limit))))
.ToList();
display(table(
thead(rs.Select(x => x.Heading)),
tbody(rs.Select(x => x.Result))
));
}
static object Dump(DataBoss.DataPackage.TabularDataResource xs, int? limit = null) => Dump(xs.Read(), limit: limit);
static object Dump(IDataReader xs, int? limit = null) => display(ReadAsHtmlTable(xs, limit: limit));
static object ReadAsHtmlTable(IDataReader xs, int? limit = null) {
try {
limit ??= int.MaxValue;
var rows = new List<object>();
for(var i = 0;xs.Read() && i < limit; ++i)
rows.Add(Enumerable.Range(0, xs.FieldCount).Select(x => td(xs.GetValue(x))).ToList());
return table(
thead(Enumerable.Range(0, xs.FieldCount).Select(x => th[style:"font-weight:bold"](xs.GetName(x)))),
tbody(rows.Select(x => tr(x))));
} finally {
xs.Dispose();
}
}
###Output
_____no_output_____
###Markdown
Defining a simple resource
###Code
var dp = new DataPackage();
dp.AddResource(xs => xs.WithName("numbers").WithData(Enumerable.Range(0, 2).Select(x => new { Value = x })));
Dump(dp);
###Output
_____no_output_____
###Markdown
**DataPackage.Load** supports directory paths containing a datapackage.json, zip files and http.
###Code
var countries = DataPackage.Load(@"https://datahub.io/core/country-list/r/country-list_zip.zip");
Dump(countries.GetResource("data_csv"), limit: 10);
###Output
_____no_output_____
###Markdown
Resource (DataReader) Transformation
###Code
var c2 = new DataPackage();
c2.AddResource(countries.GetResource("data_csv"));
c2.UpdateResource("data_csv", xs => xs
.WithName("countries") //resource can be renamed.
.Transform(x =>
{
var id = 0;
x.Transform("Code", (string value) => value.ToLower()); //typed transform
x.Add(0, "Id", r => ++id); //columns can be added at any existing ordinal
x.Transform("Name", (string value) => value.ToUpper());
x.Add("NameLen", r => r["Name"].ToString().Length); //record based
x.Add("Source", r => $"{r.Source["Name"]} ({r.Source["Code"]})"); //from non transformed source
})
);
Dump(c2, limit: 10);
###Output
_____no_output_____
###Markdown
Creating Resources Incrementally
###Code
var n = 0;
var numbers = Enumerable.Range(0, 3).Select(x => new { Value = ++n });
var myNumbers = new DataPackage();
void AddOrAppend<T>(DataPackage dp, string name, IEnumerable<T> rows) {
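    // If the resource does not exist yet it is added with `rows`; otherwise it is rewritten so
    // that its data becomes the previously stored rows followed by the new ones.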
dp.AddOrUpdateResource(name, xs => xs.WithData(rows), xs => xs.WithData(() => xs.Read().Concat(rows)));
}
AddOrAppend(myNumbers, "numbers", numbers.ToList());
Dump(myNumbers);
AddOrAppend(myNumbers, "numbers", numbers.ToList());
Dump(myNumbers);
###Output
_____no_output_____ |
assignment_3_regression_classification_3.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 3*--- Ridge Regression AssignmentWe're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.But not just for condos in Tribeca...- [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.- [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.- [ ] Do one-hot encoding of categorical features.- [ ] Do feature selection with `SelectKBest`.- [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html). Use the scaler's `fit_transform` method with the train set. Use the scaler's `transform` method with the test set.- [ ] Fit a ridge regression model with multiple features.- [ ] Get mean absolute error for the test set.- [ ] As always, commit your notebook to your fork of the GitHub repo.The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal. Stretch GoalsDon't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.- [ ] Add your own stretch goal(s) !- [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥- [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).- [ ] Learn more about feature selection: - ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance) - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html) - [mlxtend](http://rasbt.github.io/mlxtend/) library - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection) - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
import pandas as pd
import pandas_profiling
# Read New York City property sales data
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')
# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]
# SALE_PRICE was read as strings.
# Remove symbols, convert to integer
df['SALE_PRICE'] = (
df['SALE_PRICE']
.str.replace('$','')
.str.replace('-','')
.str.replace(',','')
.astype(int)
)
# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)
# Reduce cardinality for NEIGHBORHOOD feature
# Get a list of the top 10 neighborhoods
top10 = df['NEIGHBORHOOD'].value_counts()[:10].index
# At locations where the neighborhood is NOT in the top 10,
# replace the neighborhood with 'OTHER'
df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
df['SALE_PRICE'].value_counts()
df.shape
df['SALE_DATE'] = pd.to_datetime(df['SALE_DATE'])
df_copy = df[df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS']
df_copy = df_copy[df_copy['SALE_PRICE'] < 2000000]
df_copy = df_copy[df_copy['SALE_PRICE'] > 100000]
df_copy = df_copy.drop(['EASE-MENT'], axis=1)
df_copy['SALE_PRICE'].value_counts()
df_copy.head()
#SPLIT DATA
train = df_copy[df_copy['SALE_DATE'].dt.month < 4]
test = df_copy[df_copy['SALE_DATE'].dt.month ==4]
train['SALE_DATE'].describe()
train.head(1)
test.head(1)
test['SALE_PRICE'].mean()
#REMOVING HIGH-CARDINALITY
#AND 1-UNIQUE-VALUE FEATURES
df_copy.describe(include=object)
df_copy.describe()
target = 'SALE_PRICE'
high_cardinality = ['ADDRESS', 'APARTMENT_NUMBER',
'LAND_SQUARE_FEET', 'SALE_DATE',
'BUILDING_CLASS_CATEGORY', 'APARTMENT_NUMBER',
'BUILDING_CLASS_AT_PRESENT', 'BUILDING_CLASS_AT_TIME_OF_SALE',
'COMMERCIAL_UNITS', 'TOTAL_UNITS', 'GROSS_SQUARE_FEET']
features = train.columns.drop([target] + high_cardinality)
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
y_train
#ONE HOT ENCODING
import category_encoders as ce
encoder = ce.OneHotEncoder(use_cat_names=True)
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)
X_train.shape
X_train.head(1)
#SELECTING FEATURES W SELECTKBEST
from sklearn.feature_selection import f_regression, SelectKBest
selector = SelectKBest(score_func = f_regression, k = 10)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)
X_train_selected.shape, X_test_selected.shape
all_names = X_train.columns
selected_mask = selector.get_support()
selected_names = all_names[selected_mask]
unselected_names = all_names[~selected_mask]
print('Features Selected: ')
for name in selected_names:
print(name)
print('\n')
print('Features not Selected: ')
for name in unselected_names:
print(name)
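#FEATURE SCALING -- the assignment asks for it but it is skipped below.
#A minimal sketch (an addition, not part of the original solution): fit the scaler
#on the train set only, then transform the test set. The Ridge models further down
#could be fit on these scaled arrays instead of the raw selected features.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_selected)
X_test_scaled = scaler.transform(X_test_selected)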
#FITTING RIDGE REGRESSION
import math
train['SALE_PRICE'].mean()
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
#LINEAR REGRESSION FOR SELECTKBEST FEATURE
lin_reg = LinearRegression()
lin_reg.fit(X_train_selected, y_train)
print(f'MSE: {mean_squared_error(y_test, lin_reg.predict(X_test_selected))}')
print(f'MAE: {mean_absolute_error(y_test, lin_reg.predict(X_test_selected))}')
from sklearn.linear_model import Ridge
#RIDGE REGRESSION FOR SELECTKBEST FEATURES
ridge_reg = Ridge(alpha=1).fit(X_train_selected, y_train)
print(f'MSE: {mean_squared_error(y_test, ridge_reg.predict(X_test_selected))}')
print(f'MAE: {mean_absolute_error(y_test, ridge_reg.predict(X_test_selected))}')
#RIDGE REGRESSION FOR ALL FEATURES
ridge_reg = Ridge(alpha=1).fit(X_train, y_train)
print(f'MSE: {mean_squared_error(y_test, ridge_reg.predict(X_test))}')
print(f'MAE: {mean_absolute_error(y_test, ridge_reg.predict(X_test))}')
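#RIDGE WITH BUILT-IN ALPHA SEARCH -- one of the stretch goals; a minimal sketch
#using scikit-learn's RidgeCV on the SelectKBest features (alpha grid is an assumption)
from sklearn.linear_model import RidgeCV
ridge_cv = RidgeCV(alphas=[0.1, 1.0, 10.0, 100.0]).fit(X_train_selected, y_train)
print(f'Best alpha: {ridge_cv.alpha_}')
print(f'MAE: {mean_absolute_error(y_test, ridge_cv.predict(X_test_selected))}')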
#MSE AND MAE V. SALE_PRICE MEAN
sale_price_mean = []
for _ in y_test:
sale_price_mean.append(train['SALE_PRICE'].mean())
print(f'MSE: {mean_squared_error(y_test, sale_price_mean)}')
print(f'MAE: {mean_absolute_error(y_test, sale_price_mean)}')
#STANDARDIZE DATA -- not actually done below; a scaling sketch is included above
#RIDGE ALPHA SWEEP
alphas = []
mses = []
maes = []
for alpha in range(1, 2000, 1):
ridge_reg_split = Ridge(alpha=alpha).fit(X_train, y_train)
mse = mean_squared_error(y_test, ridge_reg_split.predict(X_test))
mae = mean_absolute_error(y_test, ridge_reg_split.predict(X_test))
# print(alpha, mse)
alphas.append(alpha)
mses.append(mse)
maes.append(mae)
%matplotlib inline
import matplotlib.pyplot as plt
plt.scatter(alphas, mses, color='pink');
###Output
_____no_output_____ |
Lessons&CourseWorks/3.ObjectTracking&Localization/4.IntroToKalmanFilter/1.NewMeanAndVariance/3. New Mean and Variance, solution.ipynb | ###Markdown
New Mean and VarianceNow let's take the formulas from the example below and use them to write a program that takes in two means and variances, and returns a *new*, updated mean and variance for a gaussian. This step is called the parameter or **measurement update** because it is the update that happens when an initial belief (represented by the blue Gaussian, below) is merged with a new piece of information, a measurement with some uncertainty (the orange Gaussian). As you've seen in the previous quizzes, the updated Gaussian will be a combination of these two Gaussians with a new mean that is in between both of theirs and a variance that is less than the smallest of the two given variances; this means that after a measurement, our new mean is more certain than that of the initial belief! Below is our usual Gaussian equation and imports.
###Code
# import math functions
from math import *
import matplotlib.pyplot as plt
import numpy as np
# gaussian function
def f(mu, sigma2, x):
''' f takes in a mean and squared variance, and an input x
and returns the gaussian value.'''
coefficient = 1.0 / sqrt(2.0 * pi *sigma2)
exponential = exp(-0.5 * (x-mu) ** 2 / sigma2)
return coefficient * exponential
###Output
_____no_output_____
###Markdown
QUIZ: Write an `update` function that performs the measurement update.This function should combine the given Gaussian parameters and return new values for the mean and squared variance.This function does not have to perform any exponential math, it simply has to follow the equations for the measurement update as seen in the image at the top of this notebook. You may assume that the given variances `var1` and `var2` are squared terms.
###Code
# the update function
def update(mean1, var1, mean2, var2):
''' This function takes in two means and two squared variance terms,
and returns updated gaussian parameters.'''
## TODO: Calculate the new parameters
new_mean = (var2*mean1 + var1*mean2)/(var2+var1)
new_var = 1/(1/var2 + 1/var1)
return [new_mean, new_var]
# test your implementation
new_params = update(10, 4, 12, 4)
print(new_params)
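# Illustrative sanity check (an addition, not part of the original quiz): the updated
# variance should never exceed the smaller of the two input variances.
assert update(10, 4, 12, 1)[1] <= min(4, 1)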
###Output
[11.0, 2.0]
###Markdown
Plot a GaussianPlot a Gaussian by looping through a range of x values and creating a resulting list of Gaussian values, `g`, as shown below. You're encouraged to see what happens if you change the values of `mu` and `sigma2`.
###Code
# display a gaussian over a range of x values
# define the parameters
mu = new_params[0]
sigma2 = new_params[1]
# define a range of x values
x_axis = np.arange(0, 20, 0.1)
# create a corresponding list of gaussian values
g = []
for x in x_axis:
g.append(f(mu, sigma2, x))
# plot the result
plt.plot(x_axis, g)
###Output
_____no_output_____ |
Pipeline/Assignment_5/isabel_schmuck_assignment_5/Assignment-5_Clustering.ipynb | ###Markdown
Introduction. In this notebook we will use a clustering algorithm to analyze our data (i.e. YouTube comments on a single video). This will help us extract topics of discussion. We use the embeddings generated in Assignment 4 as input. (This notebook will not run without first running the Assignment 4 notebook, as it relies on the data in the folder 'output/'.) Each of our comments has been assigned a vector that encodes information about its meaning: the closer two vectors are, the more similar the meaning. Each vector has 512 dimensions. Before we can cluster our data we need to reduce the embeddings' dimensionality to overcome the curse of dimensionality; we use the UMAP algorithm for this. After that we use the KMedoids algorithm to partition the embedding space and generate our clusters. We need to define the number of clusters we want; to find a good number of clusters, we use a simple optimization scheme. Once the clusters are created, we visualize them: we reduce the dimensionality of the embeddings again, this time to two dimensions, and render a scatterplot of our data. Furthermore, we want to analyze and interpret our clusters. To do this, we: - print some statistics about each of the clusters - print each cluster's medoid (the central sample) - print the cluster(s) we want to analyze further. Check that JupyterLab uses the correct Python interpreter with '!which python'. It should be something like '/opt/anaconda3/envs/[environment name]/bin/python' (on Mac). If not, try this: https://github.com/jupyter/notebook/issues/3146#issuecomment-352718675
###Code
!which python
###Output
/opt/anaconda3/envs/csma3/bin/python
###Markdown
Install dependencies:
###Code
install_packages = False
if install_packages:
!conda install -c conda-forge umap-learn -y
!conda install -c conda-forge scikit-learn-extra -y
###Output
_____no_output_____
###Markdown
Imports
###Code
#imports
import pandas as pd
import numpy as np
import os
import time
import matplotlib.pyplot as plt
import umap
from sklearn_extra.cluster import KMedoids
import seaborn as sns
#from sklearn.cluster import AgglomerativeClustering, DBSCAN, KMeans, OPTICS
from sklearn.metrics import silhouette_samples, silhouette_score, pairwise_distances
###Output
_____no_output_____
###Markdown
Functions to Save and load manually
###Code
# Save and load your data after clustering
def save_results():
data.to_pickle(output_path+'data_clustered'+'.pkl')
def load_results():
    # Return the DataFrame so the caller can rebind it; the original version
    # assigned to a local variable and silently discarded the loaded data.
    return pd.read_pickle(output_path + 'data_clustered' + '.pkl')
###Output
_____no_output_____
###Markdown
Set pandas print options. This will improve the readability of printed pandas dataframes.
###Code
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
###Output
_____no_output_____
###Markdown
Set global Parameters. Set your parameters here: output_path: files generated in this notebook will be saved here. model_type: define which model was used to produce the embeddings. (Check the name of the .npy-file containing the embeddings.)
###Code
output_path = "./output/"
model_type = 'Transformer' #@param ['DAN','Transformer','Transformer_Multilingual']
###Output
_____no_output_____
###Markdown
Load Data. Load the preprocessed data as a pandas dataframe, and load the embeddings as a numpy ndarray (a matrix in our case).
###Code
data = pd.read_pickle(output_path+'data_preprocessed'+'.pkl')
labels_default = np.zeros(len(data.index))-1
data['label_manual'] = labels_default
embeddings = np.load(output_path+'/embeddings'+model_type+'.npy', mmap_mode=None, allow_pickle=False, fix_imports=True, encoding='ASCII')
###Output
_____no_output_____
###Markdown
Dimensionality reduction with UMAP. We reduce the number of dimensions of our embeddings to make any clusters that are present more pronounced. The number of dimensions (num_dimensions) depends on the number of samples.
###Code
# Set the number of dimensions to reduce to
num_dimensions =100
reducer_clustering = umap.UMAP(n_neighbors=50,
n_components=num_dimensions,
metric='cosine',
#n_epochs=200,
learning_rate=.5,
init='spectral',
min_dist=0,
#spread=5.0,
#set_op_mix_ratio=1.0,
#local_connectivity=1.0,
#negative_sample_rate=5,
#transform_queue_size=4.0,
force_approximation_algorithm=True,
unique=True)
embeddings_umap = reducer_clustering.fit_transform(embeddings)
###Output
_____no_output_____
###Markdown
Optimize the Number of Clusters
###Code
#optimize number of clusters
optimize_number_of_clusters = True#@param {type:'boolean'}
min_clusters= 5
max_clusters=55
step=5
if optimize_number_of_clusters:
rows_list = []
inertias = []
n_clusters = []
silouette_scores = []
init_param = 'k-medoids++' #@param ['random', 'heuristic', 'k-medoids++']
random_state_param=134 #@param {type:'number'}
for i in range(min_clusters,max_clusters, step):
temp_clustering = KMedoids(n_clusters=i, metric='euclidean', init=init_param, max_iter=300, random_state=random_state_param).fit(embeddings_umap)
silhouette_avg = silhouette_score(embeddings_umap, temp_clustering.labels_)
print("n_clusters:",i, "silhouette_avg:",silhouette_avg)
silhouette_dict = {'number of clusters': i, 'silhouette average': silhouette_avg}
rows_list.append(silhouette_dict)
results = pd.DataFrame(rows_list)
sns.lineplot(x = 'number of clusters', y = 'silhouette average',data = results)
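    # A possible way to pick k automatically from the table above (an addition;
    # the assumption is simply that the highest average silhouette wins):
    best_k = results.loc[results['silhouette average'].idxmax(), 'number of clusters']
    print('Best number of clusters by silhouette:', best_k)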
###Output
n_clusters: 5 silhouette_avg: 0.282379
n_clusters: 10 silhouette_avg: 0.3443897
n_clusters: 15 silhouette_avg: 0.38070863
n_clusters: 20 silhouette_avg: 0.36556098
n_clusters: 25 silhouette_avg: 0.3519204
n_clusters: 30 silhouette_avg: 0.3452696
n_clusters: 35 silhouette_avg: 0.34582657
n_clusters: 40 silhouette_avg: 0.34683618
n_clusters: 45 silhouette_avg: 0.34131685
n_clusters: 50 silhouette_avg: 0.34406087
###Markdown
Clustering with KMedoids
###Code
number_of_clusters = 20
init_param = 'k-medoids++' #@param ['random', 'heuristic', 'k-medoids++']
clustering_model = KMedoids(n_clusters=number_of_clusters,
metric='cosine',
init=init_param,
max_iter=150,
random_state=None).fit(embeddings_umap)
clustering_model
labels = clustering_model.labels_
data["label_kmedoids"] = labels
print("cluster","members", data["label_kmedoids"].value_counts().sort_values())
clustering_model.inertia_
medoids_indices = clustering_model.medoid_indices_
#calculate distances
distances = np.diag(pairwise_distances(X = clustering_model.cluster_centers_[labels], Y = embeddings_umap[:], metric='cosine'))
data["distance_kmedoids"] = distances
###Output
_____no_output_____
###Markdown
Dimensionality Reduction for Visualization
###Code
num_dimensions =2
reducer_visualization = umap.UMAP(n_neighbors=50,
n_components=num_dimensions,
metric='cosine',
output_metric='euclidean',
#n_epochs=200,
learning_rate=.5,
init='spectral',
min_dist=.1,
spread=5.0,
set_op_mix_ratio=1.0,
local_connectivity=1.0,
negative_sample_rate=5,
transform_queue_size=4.0,
force_approximation_algorithm=True,
unique=True)
embeddings_umap_2d = reducer_visualization.fit_transform(embeddings)
###Output
/usr/local/lib/python3.8/site-packages/umap/umap_.py:1158: RuntimeWarning: divide by zero encountered in power
return 1.0 / (1.0 + a * x ** (2 * b))
###Markdown
Visualize clustering results
###Code
#@markdown Set the color palette used for visualizing different clusters
palette_param = "Spectral" #@param ['Accent','cubehelix', "tab10", 'Paired', "Spectral"]
#@markdown Set opacity of data points (1 = opaque, 0 = invisible)
alpha_param = 1 #@param {type:"slider", min:0, max:1, step:0.01}
sns.relplot(x = embeddings_umap_2d[:, 0], y = embeddings_umap_2d[:, 1], hue = data['label_kmedoids'], palette = palette_param,alpha = alpha_param,height = 10)
###Output
_____no_output_____
###Markdown
Highlight one cluster
###Code
## Choose a cluster to highlight:
cluster_num = 15
cluster_num_2 = 14
data['highlight'] = np.zeros(len(data.index))
data.loc[data['label_kmedoids'] == cluster_num, 'highlight'] = 1
data.loc[data['label_kmedoids'] == cluster_num_2, 'highlight'] = 2
sns.relplot(x = embeddings_umap_2d[:, 0], y = embeddings_umap_2d[:, 1], hue = data['highlight'], palette = "Accent",alpha = 0.8,height = 10)
###Output
_____no_output_____
###Markdown
Print Medoids and cluster statistics
###Code
# print the medoids
data.iloc[medoids_indices]
# print statistics for each cluster
data['label_kmedoids'].value_counts().sort_values()
for k,g in data.groupby(by = 'label_kmedoids'):
print(g.iloc[0]['label_kmedoids'],"number of samples: ",len(g.index),"mean distance from center: ", 100*np.mean(g['distance_kmedoids']), "Proportion of replies:", 100*np.sum(g['isReply'])/len(g.index))
###Output
0 number of samples: 256 mean distance from center: 0.005472637712955475 Proportion of replies: 27.734375
1 number of samples: 149 mean distance from center: 0.004751170126837678 Proportion of replies: 81.20805369127517
2 number of samples: 135 mean distance from center: 0.005244281419436447 Proportion of replies: 35.55555555555556
3 number of samples: 164 mean distance from center: 0.005976565080345608 Proportion of replies: 50.609756097560975
4 number of samples: 163 mean distance from center: 0.0031442363251699135 Proportion of replies: 78.52760736196319
5 number of samples: 87 mean distance from center: 0.005120450077811256 Proportion of replies: 83.9080459770115
6 number of samples: 102 mean distance from center: 0.0019379691366339102 Proportion of replies: 63.72549019607843
7 number of samples: 161 mean distance from center: 0.005908301318413578 Proportion of replies: 31.055900621118013
8 number of samples: 153 mean distance from center: 0.00507103068230208 Proportion of replies: 57.51633986928105
9 number of samples: 77 mean distance from center: 0.004970717782271095 Proportion of replies: 32.467532467532465
10 number of samples: 79 mean distance from center: 0.0025136561816907488 Proportion of replies: 100.0
11 number of samples: 104 mean distance from center: 0.0021179708710405976 Proportion of replies: 58.65384615384615
12 number of samples: 26 mean distance from center: 0.00011737530485333991 Proportion of replies: 96.15384615384616
13 number of samples: 147 mean distance from center: 0.0050500137149356306 Proportion of replies: 70.06802721088435
14 number of samples: 148 mean distance from center: 0.007213893695734441 Proportion of replies: 27.7027027027027
15 number of samples: 26 mean distance from center: 0.0011136899047414772 Proportion of replies: 3.8461538461538463
16 number of samples: 84 mean distance from center: 0.005139978020451963 Proportion of replies: 96.42857142857143
17 number of samples: 65 mean distance from center: 0.0036997062125010416 Proportion of replies: 50.76923076923077
18 number of samples: 111 mean distance from center: 0.006885678885737434 Proportion of replies: 45.945945945945944
19 number of samples: 55 mean distance from center: 0.004104050822206773 Proportion of replies: 67.27272727272727
###Markdown
Print Cluster. Print the comments within a cluster. Comments are sorted by their distance from the cluster medoid.
###Code
# Choose a cluster to print
cluster_number = 14
# Choose the number of samples to print
number_of_samples_to_print = 1000
data['label_kmedoids'] = data['label_kmedoids'].astype('category')
cluster = data[data['label_kmedoids']==cluster_number]
if cluster["text"].count()<=number_of_samples_to_print:
number_of_samples_to_print = cluster["text"].count()
cluster = cluster.sort_values(by='distance_kmedoids')
print("Number of samples in the cluster:", cluster["text"].count())
print("Average Distance from cluster center:", np.mean(cluster['distance_kmedoids']))
cluster['text']
###Output
Number of samples in the cluster: 148
Average Distance from cluster center: 7.213894e-05
###Markdown
Assign cluster labels manually. cluster_number: which cluster would you like to assign labels to? min_distance: the minimum distance from the cluster medoid for a data point to still get the specified label. max_distance: the maximum distance from the cluster medoid for a data point to still get the specified label. label_manual: your label.
###Code
#which cluster would you like to assign labels to?
cluster_number = 15
#your label
label_manual = 'rationality'
#the minimum distance from the cluster medoid for a data point to still get the specified label
min_distance = 0
#the maximum distance from the cluster medoid for a data point to still get the specified label
max_distance = 1000
# 2. Filter data by cluster label and specified label to filtered data
data.loc[(data['label_kmedoids']==cluster_number) & (data['distance_kmedoids'] <= max_distance) & (data['distance_kmedoids'] >= min_distance), 'label_manual'] = label_manual
data[data['label_kmedoids']==cluster_number].sort_values(by='distance_kmedoids')
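# Optionally persist the manually labelled data using the helper defined earlier
# (an addition; the assumption is that you want to keep these labels between sessions).
save_results()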
###Output
_____no_output_____ |
lab7/lab7-parte2.ipynb | ###Markdown
[](https://colab.research.google.com/github/eirasf/GCED-AA2/blob/main/lab7/lab7-parte2.ipynb) Lab 7: Residual neural networks. Prerequisites: install packages. For the second part of this Lab 7 we will need TensorFlow and TensorFlow-Datasets. In addition, as usual, we fix the random seed to ensure the reproducibility of the experiments.
###Code
import tensorflow as tf
import tensorflow_datasets as tfds
#Fix the seed so that the results are reproducible
import os
import numpy as np
import random
seed=1234
os.environ['PYTHONHASHSEED']=str(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
###Output
_____no_output_____
###Markdown
We also load the APIs we are going to use so that the code stays more readable.
###Code
#Keras API, Sequential model and the Dense layer
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense
#For plotting
from matplotlib import pyplot
###Output
_____no_output_____
###Markdown
Loading the dataset. Once again, we keep using the *german_credit_numeric* dataset already used in the previous labs.
###Code
# TODO: Load the german_credit dataset as ds_train
# Also set a batch size of 128 and make it repeat indefinitely
ds_train = ...
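# One possible way to load it (a sketch; the exact tfds call and split name are assumptions):
# ds_train = tfds.load('german_credit_numeric', split='train', as_supervised=True)
# ds_train = ds_train.batch(128).repeat()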
###Output
_____no_output_____
###Markdown
Visualizing the vanishing gradient. In this section we will visualize the gradient magnitudes, as we did in the first part. To do so, we keep the declaration of `GradientLoggingSequentialModel`.
###Code
class GradientLoggingSequentialModel(tf.keras.models.Sequential):
def __init__(self, **kwargs):
super().__init__(**kwargs)
        # During initialization we create a new attribute in which
        # we will record the history of gradient magnitudes for each layer
self.gradient_history = {}
def compile(self, **kwargs):
result = super().compile(**kwargs)
        # Once we know the architecture, we can initialize the gradient
        # history of each layer to an empty list.
for l in self.layers:
self.gradient_history[l.name] = []
return result
def _save_gradients(self, gradients):
        # At every training step we call this function to record the gradients.
        # The list 'gradients' contains the gradients of the layers, in order.
        # Each layer l has a number of gradients that matches l.trainable_variables.
        # With that in mind, we iterate over the gradients, compute their norms
        # and store the mean magnitude for each layer in the history.
i = 0
for layer in self.layers:
gradient_sizes = []
for lw in layer.trainable_variables:
g_size = np.linalg.norm(gradients[i].numpy())
gradient_sizes.append(g_size)
i += 1
mean_gradient_size = np.mean(gradient_sizes)
self.gradient_history[layer.name].append(mean_gradient_size)
def train_step(self, data):
        # We implement a custom training step based on
        # https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit#a_first_simple_example
        # We keep that example as is, only adding the call to
        # _save_gradients once the gradients are available.
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
        # Call added to record the gradients.
self._save_gradients(gradients)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
###Output
_____no_output_____
###Markdown
Building the block with a residual connection. In a conventional *feed-forward* neural network, the output of each layer is used as the input of the next layer. In contrast, a ResNet introduces blocks with residual connections, whose goal is to help gradients propagate. Although ResNets are usually used with convolutional networks, in this lab we will use a *feed-forward* network. To be able to use this kind of block in our architecture we will define a new model type called `DoubleDenseWithSkipModel`, consisting of what is shown in the image: - a first Dense layer, whose output feeds into... - a second, linear Dense layer (no activation), whose output feeds into... - a sum operation that adds it to the original input of the first layer; the output of this sum feeds into... - an activation function, whose output is the output of the block. We will use the sigmoid as the activation function in both cases. The input and output of the block must have the same dimension so that the sum can be performed; for simplicity, we also keep the output of the first layer at that same dimension. Our new class inherits from `Model`, so it must implement the `build` and `call` methods. In later cells we will add these blocks to a `Sequential` model as if they were layers. A possible sketch of this block is included after the skeleton below.
###Code
class DoubleDenseWithSkipModel(tf.keras.models.Model):
def __init__(self, **kwargs):
super(DoubleDenseWithSkipModel, self).__init__(**kwargs)
    # TODO: Complete the build method
def build(self, input_shape):
...
    # TODO: Complete the call method
def call(self, x):
...
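# A possible completion of the block above -- a sketch, not the only valid answer.
# It assumes sigmoid activations and that the block keeps the input width, as the
# markdown above requires; the class name is made up for illustration.
class DoubleDenseWithSkipModelExample(tf.keras.models.Model):
    def build(self, input_shape):
        units = int(input_shape[-1])
        # First Dense layer (sigmoid), same width as the input for simplicity.
        self.dense1 = tf.keras.layers.Dense(units, activation='sigmoid')
        # Second Dense layer is linear; the activation is applied after the skip sum.
        self.dense2 = tf.keras.layers.Dense(units)

    def call(self, x):
        out = self.dense2(self.dense1(x))
        # Residual connection: add the original block input, then activate.
        return tf.keras.activations.sigmoid(out + x)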
###Output
_____no_output_____
###Markdown
Create a *GradientLoggingSequentialModel* using the new layers. We create a *GradientLoggingSequentialModel* to fit the input data following the given specifications. It must include (besides the input and output layers) one Dense layer of 10 units with sigmoid activation and 10 of the new blocks.
###Code
# TODO - Define in 'model' a GradientLoggingSequentialModel network
# Use one Dense layer with 10 units and sigmoid activation, plus 10 DoubleDenseWithSkipModel layers
model = ...
#Build the model and show its summary
model.build()
print(model.summary())
###Output
_____no_output_____
###Markdown
Training the model. As in the first part, we set the loss function (binary cross-entropy), the optimizer (SGD with LR $10^{-3}$) and the metric we will use to evaluate the performance of the trained model (area under the curve).
###Code
#TODO - Compile the model. Use the run_eagerly=True option so the gradients can be recorded at every step
model.compile(...)
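# A possible call (a sketch; loss, optimizer and metric follow the markdown above):
# model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),
#               loss=tf.keras.losses.BinaryCrossentropy(),
#               metrics=[tf.keras.metrics.AUC()],
#               run_eagerly=True)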
###Output
_____no_output_____
###Markdown
We train the model using model.fit
###Code
#TODO - train the model using 8 steps per epoch. 10 epochs will be enough to check the vanishing of the gradients.
model.fit(...)
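# One possible call (a sketch): model.fit(ds_train, epochs=10, steps_per_epoch=8)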
###Output
_____no_output_____
###Markdown
Once you have found a learning-rate value that achieves fast convergence, store the loss history in the variable history_sgd so you can make comparisons.
###Code
pyplot.figure(figsize=(14, 6), dpi=80)
pyplot.boxplot(model.gradient_history.values())
pyplot.yscale('log')
pyplot.xticks(ticks=range(1,len(model.gradient_history)+1), labels=model.gradient_history.keys())
pyplot.show()
###Output
_____no_output_____ |
02ML/03Unsupervised/01Clustering/01kmeans.ipynb | ###Markdown
K-means clustering algorithm --- Clustering is a technique for finding groups of highly similar objects. K-means uses the following ideas: * the cluster center is the arithmetic mean of all points belonging to that cluster; * each point is closer to the center of its own cluster than to the center of any other cluster (similarity distance).
###Code
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=150,
n_features=2,
centers=3,
cluster_std=0.5,
shuffle=True,
random_state=0)
import matplotlib.pyplot as plt
%matplotlib inline
plt.scatter(X[:, 0], X[:, 1], c='white', marker='o', edgecolor='black', s=50)
plt.show()
from sklearn.cluster import KMeans
km = KMeans(n_clusters=3,
init='random',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
y_km
plt.scatter(X[y_km == 0, 0],
X[y_km == 0, 1],
s=50, c='lightgreen',
marker='s', edgecolor='black',
label='cluster 1')
plt.scatter(X[y_km == 1, 0],
X[y_km == 1, 1],
s=50, c='orange',
marker='o', edgecolor='black',
label='cluster 2')
plt.scatter(X[y_km == 2, 0],
X[y_km == 2, 1],
s=50, c='lightblue',
marker='v', edgecolor='black',
label='cluster 3')
plt.scatter(km.cluster_centers_[:, 0],
km.cluster_centers_[:, 1],
s=250, marker='*',
c='red', edgecolor='black',
label='centroids')
plt.legend(scatterpoints=1)
plt.show()
###Output
_____no_output_____
###Markdown
Choosing an appropriate number of clusters. Before running K-means you must explicitly specify the number of clusters. How can we decide which number of clusters is most appropriate? Looking at the inertia value lets us choose a suitable number. The inertia is computed after clustering as the sum of the distances between each cluster center and the data points in that cluster, so it expresses cluster cohesion: the smaller this value, the more cohesive, and hence better, the clustering. The code below clusters the data above for a range of cluster counts and plots the inertia value for each number of clusters.
###Code
ks = range(1,10)
inertias = []
for k in ks:
model = KMeans(n_clusters=k)
model.fit(X)
inertias.append(model.inertia_)
# Plot ks vs inertias
plt.plot(ks, inertias, '-o')
plt.xlabel('number of clusters, k')
plt.ylabel('inertia')
plt.show()
###Output
_____no_output_____
###Markdown
K-means practice with the Iris dataset
###Code
from sklearn import datasets
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
iris = datasets.load_iris()
print(iris.DESCR)
X = iris.data[:, :2]
y = iris.target
# Draw a scatter plot of the iris data to inspect it.
plt.scatter(X[:,0], X[:,1], c=y, cmap='gist_rainbow')
plt.xlabel('Sepal Length', fontsize=18)
plt.ylabel('Sepal Width', fontsize=18)
plt.colorbar(ticks=range(3))
plt.show()
# Cluster the iris data into 3 clusters.
km = KMeans(n_clusters = 3, random_state=21)
km.fit(X)
# Locations of the cluster centers
centers = km.cluster_centers_
print(centers)
# Class information assigned by the clustering
new_labels = km.labels_
# Draw scatter plots to check the clusters
fig, axes = plt.subplots(1, 2, figsize=(16,8))
axes[0].scatter(X[:, 0], X[:, 1], c=y, cmap='gist_rainbow',
edgecolor='k', s=50)
axes[1].scatter(X[:, 0], X[:, 1], c=new_labels, cmap='jet',
edgecolor='k', s=50)
axes[1].scatter(centers[:, 0], centers[:, 1], c='red', marker='*', s=250)
axes[0].set_xlabel('Sepal length', fontsize=18)
axes[0].set_ylabel('Sepal width', fontsize=18)
axes[1].set_xlabel('Sepal length', fontsize=18)
axes[1].set_ylabel('Sepal width', fontsize=18)
axes[0].set_title('Actual', fontsize=18)
axes[1].set_title('Predicted', fontsize=18)
plt.show()
###Output
_____no_output_____
###Markdown
Evaluating the clustering model. Clustering algorithms are unsupervised, so there is no target label and it is hard to evaluate a model by comparing predictions against ground truth. We can, however, evaluate properties of the clusters themselves. Intuitively, a good clustering has small distances between samples inside a cluster (i.e., dense clusters) and large distances between clusters (i.e., well-separated clusters). * Silhouette coefficient (silhouette score): expresses how similar the data inside one cluster are compared with the other clusters. * The silhouette takes values between -1 and 1, and **the closer it is to 1, the better separated the clusters are.** * scikit-learn's silhouette_score function is the average of the silhouette coefficients of all samples. $$s_i = \frac{b_i - a_i}{\max(a_i, b_i)}$$ * $s_i$: silhouette coefficient of sample i * $a_i$: average distance between sample i and all other samples in the same cluster * $b_i$: average distance between sample i and the samples in the nearest other cluster
###Code
import numpy as np
from sklearn.metrics import silhouette_score
from sklearn import datasets
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
# Create the feature matrix.
features, _ = make_blobs(n_samples = 1000,
n_features = 10,
centers = 2,
cluster_std = 0.5,
shuffle = True,
random_state = 1)
# Cluster the data with k-means and predict the classes.
model = KMeans(n_clusters=2, random_state=1).fit(features)
# Predicted classes
target_predicted = model.labels_
# Evaluate the model.
silhouette_score(features, target_predicted)
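# For comparison (an illustration added here): a deliberately poor choice of k
# should typically give a lower silhouette score on these well-separated blobs.
bad_model = KMeans(n_clusters=10, random_state=1).fit(features)
print(silhouette_score(features, bad_model.labels_))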
###Output
_____no_output_____ |
CaseStudy/CCE_DataGroups_Representation_BSPLINE.ipynb | ###Markdown
Format Results. The results were obtained by feeding MelodyShape with the encoded segments to compare.
###Code
def parse_result_line(encoded_line):
segments_id = encoded_line[:-1].split(";")
mp_a = segments_id[1]
m_a = segments_id[3]
mp_b = segments_id[5]
m_b = segments_id[7]
bspline_similarity = float(segments_id[9])
return (mp_a, m_a, mp_b, m_b, bspline_similarity)
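# Illustrative input format assumed by the parser above (an assumption inferred from
# the indices used -- alternating key/value fields separated by ';'):
# "mp;<midi_path_a>;m;<midi_a>;mp;<midi_path_b>;m;<midi_b>;sim;<bspline_similarity>\n"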
%%time
cnt_mels = 0
raw_scores_bspline = {}
f = open(BSPLINE_RAW_RESULTS_PATH, "r")
for current_line in f:
(mp_a, m_a, mp_b, m_b, bspline_similarity) = parse_result_line(current_line)
current_comp_id = mp_a + ";" + m_a + ";" + mp_b + ";" + m_b
raw_scores_bspline[current_comp_id] = bspline_similarity
cnt_mels += 1
print("Comparisons made: {0} - {1}".format(cnt_mels, current_comp_id))
Y_AXIS_MIDI_PATHS = [
"./CalebRascon/CORPUS/MIDI/",
"./CalebRascon/MIDI_Grammar_SOLO_LEN12/",
"./MilesDavis/MIDI_Grammar_SOLO_LEN12/",
"./CharlieParker/MIDI_Grammar_SOLO_LEN12/"
]
X_AXIS_MIDI_PATHS = [
"./CalebRascon/CORPUS/MIDI/",
"./CalebRascon/MIDI_Grammar_SOLO_LEN12/",
"./MilesDavis/MIDI_Grammar_SOLO_LEN12/",
"./CharlieParker/MIDI_Grammar_SOLO_LEN12/",
"./CalebRascon/MIDI_Grammar_TRADE_Caleb/",
"./CalebRascon/MIDI_Grammar_TRADE_Miles/",
"./CalebRascon/MIDI_Grammar_TRADE_CharlieParker/"
]
all_similarities_store = {}
for MIDI_path_query in Y_AXIS_MIDI_PATHS:
for MIDI_path_test in X_AXIS_MIDI_PATHS:
similarities_from_references = []
similarities_all_v_all = {}
for root_ref, dirs_ref, files_ref in os.walk(MIDI_path_query):
for name_ref in files_ref:
# print("+++++++++++++++++++++++++++++")
# print(name_ref)
similarities_from_reference = []
similarities_all_v_all[name_ref] = {}
for root, dirs, files in os.walk(MIDI_path_test):
for name in files:
# print(name)
current_comp_id = MIDI_path_query + ";" + name_ref + ";" + MIDI_path_test + ";" + name
if (name == name_ref) and (root == root_ref):
current_similarity = 0.0;
else:
current_similarity = raw_scores_bspline[current_comp_id]
similarities_from_reference.append(current_similarity)
similarities_all_v_all[name_ref][name] = current_similarity
# print(current_similarity)
similarities_from_references.append(similarities_from_reference)
similarities_df = pd.DataFrame(similarities_all_v_all)
similarities_df = similarities_df.reindex(sorted(similarities_df.columns), axis=1)
similarities_df = similarities_df.sort_index()
similarities_df = similarities_df.replace(0, np.NaN)
display(similarities_df)
print(similarities_df.mean(axis=1))
keyname_sim = MIDI_path_query+":"+MIDI_path_test
print(keyname_sim)
all_similarities_store[keyname_sim] = similarities_df.to_dict('index')
with open('./CCEvaluation_groups_BSPLINE.json', 'w') as outfile:
json.dump(all_similarities_store, outfile)
###Output
_____no_output_____ |
pytorch-tutorials/notebook/01-basic/pytorch_basics.ipynb | ###Markdown
pytorch basics + Author: xiaoran + Time: p.m. 2019-01-17. Demonstrates PyTorch's automatic differentiation mechanism and its interaction with numpy.
###Code
import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms
###Output
_____no_output_____
###Markdown
1. Autograd example 1
###Code
# 1. Create tensors from scalar values (vectors can be used as well)
x = torch.tensor(1.0, requires_grad=True)
w = torch.tensor(2.0, requires_grad=True)
b = torch.tensor(3.0, requires_grad=True)
# 2. Build an expression; torch constructs the computational graph for us
# With the values above the expression is y = 2 * x + 3
y = w * x + b
# 3. Compute the gradients of y with respect to all variables (x, w, b)
y.backward()
# 4. Print all the gradients
print(x.grad) # x.grad = w = 2
print(w.grad) # w.grad = x = 1
print(b.grad) # b.grad = 1
###Output
tensor(2.)
tensor(1.)
tensor(1.)
###Markdown
2. Autograd example 2
###Code
# 1. Randomly create 2-D tensors: input x of shape (10, 3) and output y of shape (10, 2)
x = torch.randn(10, 3)
y = torch.randn(10, 2)
# 2. Build a fully connected layer, y = w * x + b, with weight w mapping 3 inputs to 2 outputs and bias b of size 2; these are default, not yet optimized, parameters
linear = nn.Linear(3, 2)
print("w: ", linear.weight)
print("b: ", linear.bias)
# 3. The model above is just a single-layer MLP; define the loss function and the optimizer
loss_fun = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
# 4. Prediction is simply the forward pass
pred = linear(x)
# 5. Compute the loss
loss = loss_fun(pred, y)
# 6. Backpropagate the loss to get the gradients used to optimize the parameters
loss.backward()
# 6.1. Print the gradients of the loss
print("dL/dw: ", linear.weight.grad)
print("dL/db: ", linear.bias.grad)
# 7. Gradient descent with learning rate 0.01; only a single step is performed here
optimizer.step()
# 7.1 The optimizer-based step above could be replaced by the following two lines:
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)
# 8. Print the loss after one step of gradient descent
pred = linear(x)
loss = loss_fun(pred, y)
print("loss after 1 step optimization: ", loss.item())
###Output
w: Parameter containing:
tensor([[-0.1595, -0.4917, 0.1873],
[ 0.0073, -0.4014, -0.0299]], requires_grad=True)
b: Parameter containing:
tensor([-0.2682, -0.3715], requires_grad=True)
dL/dw: tensor([[ 0.1115, -0.4553, 0.9870],
[ 0.0586, -0.3679, 0.4290]])
dL/db: tensor([-0.4715, -0.9214])
loss after 1 step optimization: 1.4574180841445923
###Markdown
Gradient optimization in a loop
###Code
iter_k = 10
for i in range(iter_k):
    # 4. Prediction is simply the forward pass
pred = linear(x)
    # 5. Compute the loss
loss = loss_fun(pred, y)
print("loss after %d step optimization: %s" % (i+1, loss.item()))
    # 6. Backpropagate the loss to get the gradients. Note that optimizer.zero_grad()
    # is never called, so gradients accumulate across iterations -- this is why the
    # printed loss can increase over the loop.
loss.backward()
    # 6.1. Print the gradients of the loss
# print("dL/dw: ", linear.weight.grad)
# print("dL/db: ", linear.bias.grad)
    # 7. Gradient descent with learning rate 0.01; one step per iteration
optimizer.step()
    # 7.1 The optimizer-based step above could be replaced by the following two lines:
    # linear.weight.data.sub_(0.01 * linear.weight.grad.data)
    # linear.bias.data.sub_(0.01 * linear.bias.grad.data)
    # 8. The loss after each step of gradient descent is printed above
###Output
loss after 1 step optimization: 0.48850151896476746
loss after 2 step optimization: 0.49428820610046387
loss after 3 step optimization: 0.5042358636856079
loss after 4 step optimization: 0.5182933211326599
loss after 5 step optimization: 0.5361483693122864
loss after 6 step optimization: 0.5572361946105957
loss after 7 step optimization: 0.5807634592056274
loss after 8 step optimization: 0.6057454943656921
loss after 9 step optimization: 0.6310566067695618
loss after 10 step optimization: 0.6554887890815735
###Markdown
2. Getting data from numpy
###Code
# numpy array
x = np.array([[1,2],[3,4]])
# convert the numpy array to a torch tensor
y = torch.from_numpy(x)
# convert the torch tensor to a numpy array
z = y.numpy()
###Output
_____no_output_____
###Markdown
3. Input pipeline
###Code
# Download and construct CIFAR-10 dataset
train_dataset = torchvision.datasets.CIFAR10(root="../data/",
train=True,
transform=transforms.ToTensor(),
download=True)
image, label = train_dataset[0]
print(image.size())
print(label)
# Data loader (PyTorch provides multi-threaded, queue-based loading)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
# When iteration starts, the queue and worker threads begin loading data from files
data_iter = iter(train_loader)
# Each call returns a mini-batch of data and labels
images, labels = data_iter.next()
print(labels)
# In practice a for loop is normally used
for images, labels in train_loader:
# Train code should be written here.
pass
###Output
tensor([1, 8, 2, 7, 2, 1, 6, 6, 8, 9, 4, 9, 9, 0, 2, 9, 4, 7, 7, 6, 9, 1, 8, 3,
3, 1, 2, 8, 6, 3, 3, 7, 8, 4, 5, 3, 2, 9, 3, 5, 4, 0, 7, 5, 4, 3, 2, 1,
0, 6, 4, 1, 0, 8, 3, 0, 4, 1, 1, 0, 6, 0, 1, 3])
###Markdown
3.1 Input pipeline for a custom dataset. 1. Define your own task's data following the reference format of the class given below. 2. After that, use the approach shown above and specify batch_size. 3. Iterate with a for loop or with iter and next.
###Code
# 定制自己的客户数据集
class CustomDataset(torch.utils.data.Dataset):
def __init__(self):
# TODO
        # 1. Initialize file paths or a list of file names
pass
def __getitem__(self, index):
# TODO
        # 1. Read one sample from file (e.g. using numpy.fromfile, PIL.Image.open)
# 2. Preprocess the data (e.g. torchvision.Transform)
# 3. Return a data pair (e.g. image and label)
pass
def __len__(self):
# the total size of your dataset
size = 100
return size
# Usage
custom_dataset = CustomDataset()
train_loader = torch.utils.data.DataLoader(dataset=custom_dataset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
4. Pretrained models and transfer learning. 1. Example: resnet-18. 2. Remove the top layer and redefine it according to your own data. 3. Set each layer's state to control whether it is fine-tuned.
###Code
# Download and load the pretrained ResNet-18
resnet = torchvision.models.resnet18(pretrained=True)
# Freeze all parameters so that only the top layer will be fine-tuned
for param in resnet.parameters():
param.requires_grad = False
# Replace the top layer for fine-tuning (redefine the head for your own data)
label_size = 100 # number of classes in your data
resnet.fc = nn.Linear(resnet.fc.in_features, label_size)
# Forward pass (epochs, batches and iterators can be set up here)
images = torch.randn(64, 3, 224, 224)
outputs = resnet(images)
print(outputs) # (64, 100)
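# A minimal fine-tuning sketch (an illustration added here; only the new top layer is
# trainable, so just resnet.fc.parameters() is passed to the optimizer; the dummy
# targets are random and exist only to make the snippet self-contained):
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(resnet.fc.parameters(), lr=0.01)
labels = torch.randint(0, label_size, (64,))
loss = criterion(resnet(images), labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()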
###Output
tensor([[-0.1315, -0.3085, -0.3245, ..., 0.8461, -0.3385, 0.4021],
[-0.0519, 0.0103, -0.3423, ..., 0.2445, -0.6687, -0.3538],
[-0.1635, 0.3438, -0.1041, ..., 0.7054, -0.1408, -0.5326],
...,
[-0.0605, 0.0587, 0.2331, ..., 0.1945, -0.1341, -0.7930],
[ 0.0506, -0.0758, -0.2704, ..., -0.6851, -0.0645, -0.0620],
[ 0.2934, 0.9082, -0.0303, ..., 1.0034, -0.1470, -0.7424]],
grad_fn=<AddmmBackward>)
###Markdown
5. Save and load the entire model
###Code
# Save and load the entire model
torch.save(resnet, "resnet_model.ckpt")
model = torch.load("resnet_model.ckpt")
# Save and load only the model parameters (recommended)
torch.save(resnet.state_dict(), "resnet_params.ckpt")
resnet.load_state_dict(torch.load("resnet_params.ckpt"))
resnet
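# Illustration (added): after loading weights for inference, switch to eval mode
# so dropout/batch-norm layers behave deterministically.
resnet.eval()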
###Output
_____no_output_____ |
classification/hw8_part2_vw_stackoverflow_tags_10mln_my0.ipynb | ###Markdown
Open Machine Learning Course. Session #3. *Author: Yury Kashnitsky, research programmer at Mail.Ru Group* --- Assignment 8. Vowpal Wabbit for classifying Stackoverflow question tags. Plan: 1. Introduction 2. Data description 3. Data preprocessing 4. Training and validating models 5. Conclusion. 1. Introduction. In this assignment you will do roughly what I do every week at Mail.Ru Group: train models on a sample of several gigabytes. The assignment can be done on Windows with Python, but I recommend working under a *NIX system (for example, via Docker) and actively using bash. A bit of snobbery (sorry, but it's true): if you want to work at the world's best ML companies, you will need experience with bash under UNIX anyway. [Web form](https://docs.google.com/forms/d/1VaxYXnmbpeP185qPk2_V_BzbeduVUVyTdLPQwSCxDGA/edit) for answers. To complete the assignment you will need Vowpal Wabbit installed (already present in the course Docker container, see the instructions in the Wiki of our course [repository](https://github.com/Yorko/mlcourse_open)) and about 70 GB of disk space. I tested the solution not on some supercomputer but on a 2015 Macbook Pro (8 cores, 16 GB of RAM), and the heaviest model trained in about 12 minutes, so the assignment is perfectly doable on modest hardware. But if you ever plan to rent Amazon servers, you can try doing that now. Helpful materials: - an interactive CodeAcademy [tutorial](https://www.codecademy.com/en/courses/learn-the-command-line/lessons/environment/exercises/bash-profile) on UNIX command-line utilities (about an hour or an hour and a half) - an [article](https://habrahabr.ru/post/280562/) on how to rent a machine on Amazon (again: not required for the assignment, but good experience if you are doing it for the first time). 2. Data description. There are 10 GB of questions from StackOverflow – [download](https://drive.google.com/file/d/1ZU4J3KhJDrHVMj48fROFcTsTZKorPGlG/view) and unpack the archive. The data format is simple: *question text* (words separated by spaces) TAB *question tags* (separated by spaces). Here TAB is the tab character. Example of the first record in the sample:
###Code
!head -1 hw8_data/stackoverflow.10kk.tsv
!head -1 hw8_data/stackoverflow_10mln.tsv
###Output
is there a way to apply a background color through css at the tr level i can apply it at the td level like this my td background color e8e8e8 background e8e8e8 however the background color doesn t seem to get applied when i attempt to apply the background color at the tr level like this my tr background color e8e8e8 background e8e8e8 is there a css trick to making this work or does css not natively support this for some reason css css3 css-selectors
###Markdown
Here we have the question text, then a tab, and then the question tags: *css, css3* and *css-selectors*. In total there are 10 million such questions in the sample.
###Code
%%time
!wc -l stackoverflow_10mln.tsv
%%time
!wc -l hw8_data/stackoverflow.10kk.tsv
###Output
10000000 hw8_data/stackoverflow.10kk.tsv
CPU times: user 2.64 s, sys: 785 ms, total: 3.43 s
Wall time: 1min 53s
###Markdown
Note that I no longer want to load data of this size into RAM and, as long as possible, I will use efficient UNIX utilities – head, tail, wc, cat, cut and the like. 3. Data preprocessing. Let's select from our data all questions tagged *javascript, java, python, ruby, php, c++, c, go, scala* or *swift* and prepare a training set in Vowpal Wabbit format. We will solve a 10-class classification problem over these tags. In general, as we can see, each question can have several tags, but we will simplify the task: for each question we either pick exactly one of the listed tags or ignore the question if it has none of them. (VW does support multilabel classification, though – the --multilabel_oaa argument.) Implement the data-preparation code as a separate file `preprocess.py`. It should select the lines that contain the listed tags and rewrite them into a separate file in Vowpal Wabbit format. Details: - the script must work with command-line arguments: the input and output file paths - lines are processed one at a time (you can use tqdm to count iterations) - if a line contains no tabs or more than one tab, we consider it corrupted and skip it - otherwise we count how many tags from the list *javascript, java, python, ruby, php, c++, c, go, scala, swift* the line contains. If exactly one, we write the line to the output file in VW format: `label | text`, where `label` is a number from 1 to 10 (1 – *javascript*, ... 10 – *swift*). We skip lines with more or fewer than one of the tags of interest - colons and vertical bars must be removed from the question text if present – these are special characters in VW.
###Code
import os
from tqdm import tqdm
from time import time
import numpy as np
from sklearn.metrics import accuracy_score
###Output
_____no_output_____
###Markdown
You should end up with exactly this number of lines – 4389054. As you can see, the 10 GB were processed in about a minute and a half on my machine.
###Code
!python preprocess.py hw8_data/stackoverflow.10kk.tsv hw8_data/stackoverflow.vw
!wc -l hw8_data/stack.vw
!python preprocess.py stackoverflow_10mln.tsv stackoverflow.vw
###Output
10000000it [01:23, 119447.53it/s]
4389054 lines selected, 15 lines corrupted.
###Markdown
Split the sample into training, validation and test parts in equal proportion – 1463018 lines in each file. No shuffling is needed: the first 1463018 lines go to the training part `stackoverflow_train.vw`, the last 1463018 to the test part `stackoverflow_test.vw`, and the remaining ones to the validation part `stackoverflow_valid.vw`. Also save the answer vectors for the validation and test sets into separate files `stackoverflow_valid_labels.txt` and `stackoverflow_test_labels.txt`. The utilities `head`, `tail`, `split`, `cat` and `cut` will help here.
###Code
#!head -1463018 hw8_data/stackoverflow.vw > hw8_data/stackoverflow_train.vw
#!tail -1463018 hw8_data/stackoverflow.vw > hw8_data/stackoverflow_test.vw
#!tail -n+1463018 hw8_data/stackoverflow.vw | head -n+1463018 > hw8_data/stackoverflow_valid.vw
#!split -l 1463018 hw8_data/stackoverflow.vw hw8_data/stack
!mv hw8_data/stackaa hw8_data/stack_train.vw
!mv hw8_data/stackab hw8_data/stack_valid.vw
!mv hw8_data/stackac hw8_data/stack_test.vw
!cut -d '|' -f 1 hw8_data/stack_valid.vw > hw8_data/stack_valid_labels.txt
!cut -d '|' -f 1 hw8_data/stack_test.vw > hw8_data/stack_test_labels.txt
###Output
_____no_output_____
###Markdown
4. Training and validating models. Train Vowpal Wabbit on the `stackoverflow_train.vw` set 9 times, iterating over the parameters passes (1, 3, 5) and ngram (1, 2, 3). Set the remaining parameters as follows: bit_precision=28 and seed=17. Also tell VW that this is a 10-class problem. Check the accuracy on the `stackoverflow_valid.vw` set. Pick the best model and check its quality on the `stackoverflow_test.vw` set.
###Code
%%time
for p in [1,3,5]:
for n in [1,2,3]:
!vw --oaa 10 \
-d hw8_data/stack_train.vw \
--loss_function squared \
--passes {p} \
--ngram {n} \
-f hw8_data/stack_model_{p}_{n}.vw \
--bit_precision 28 \
--random_seed 17 \
--quiet \
-c
print ('stack_model_{}_{}.vw is ready'.format(p,n))
%%time
for p in [1,3,5]:
for n in [1,2,3]:
!vw -i hw8_data/stack_model_{p}_{n}.vw \
-t -d hw8_data/stack_valid.vw \
-p hw8_data/stack_valid_pred_{p}_{n}.txt \
--quiet
print ('stack_valid_pred_{}_{}.txt is ready'.format(p,n))
%%time
with open('hw8_data/stack_valid_labels.txt') as valid_labels_file :
valid_labels = [float(label) for label in valid_labels_file.readlines()]
scores=[]
best_valid_score=0
for p in [1,3,5]:
for n in [1,2,3]:
with open('hw8_data/stack_valid_pred_'+str(p)+'_'+str(n)+'.txt') as pred_file:
valid_pred = [float(label) for label in pred_file.readlines()]
#if (n,p) in [(2,3),(3,5),(2,1),(1,1)]:
acc_score=accuracy_score(valid_labels, valid_pred)
scores.append(((n,p),acc_score))
if acc_score>best_valid_score:
best_valid_score=acc_score
print(n,p,round(acc_score,4))
scores.sort(key=lambda tup: tup[1],reverse=True)
print(scores)
best_valid_score
###Output
_____no_output_____
###Markdown
Question 1. Which combination of parameters gives the highest accuracy on the validation set `stackoverflow_valid.vw`? - Bigrams and 3 passes over the data - Trigrams and 5 passes over the data - **Bigrams and 1 pass over the data** <-- - Unigrams and 1 pass over the data. Check the best model (by validation accuracy) on the test set.
###Code
!vw -i hw8_data/stack_model_1_2.vw \
-t -d hw8_data/stack_test.vw \
-p hw8_data/stack_test_pred_1_2.txt \
--quiet
%%time
with open('hw8_data/stack_test_labels.txt') as test_labels_file :
test_labels = [float(label) for label in test_labels_file.readlines()]
with open('hw8_data/stack_test_pred_1_2.txt') as pred_file:
test_pred = [float(label) for label in pred_file.readlines()]
test_acc_score=accuracy_score(test_labels, test_pred)
print(round(test_acc_score,4))
100*round(test_acc_score,4)-100*round(best_valid_score,4)
###Output
_____no_output_____
###Markdown
Question 2. How do the accuracies of the best (by validation accuracy) model compare on the validation and test sets? (Here % means a percentage point, i.e. a drop from 50% to 40% is 10%, not 20%.) - About 2% lower on the test set - About 3% lower on the test set - **The results are almost identical – they differ by less than 0.5%** <-- Now train VW, with the parameters selected on the validation set, on the union of the training and validation sets. Compute the accuracy on the test set.
###Code
!cat hw8_data/stack_train.vw hw8_data/stack_valid.vw > hw8_data/stack_merged.vw
%%time
!vw --oaa 10 \
-d hw8_data/stack_merged.vw \
--loss_function squared \
--passes 1 \
--ngram 2 \
-f hw8_data/stack_model_merged.vw \
--bit_precision 28 \
--random_seed 17 \
--quiet \
-c
%%time
!vw -i hw8_data/stack_model_merged.vw \
-t -d hw8_data/stack_test.vw \
-p hw8_data/stack_test_pred_merged.txt \
--quiet
%%time
with open('hw8_data/stack_test_labels.txt') as test_labels_file :
test_labels = [float(label) for label in test_labels_file.readlines()]
with open('hw8_data/stack_test_pred_merged.txt') as pred_file:
test_pred = [float(label) for label in pred_file.readlines()]
merged_acc_score=accuracy_score(test_labels, test_pred)
print(round(merged_acc_score,4))
100*round(merged_acc_score,4)-100*round(test_acc_score,4)
###Output
_____no_output_____ |
Jupyter_Notebooks/Interfaces/Network_Visualizer/.ipynb_checkpoints/Network-Interface-Dash-Plotly-checkpoint.ipynb | ###Markdown
Network Visualizations: Dash. References: http://dash.plotly.com/interactive-graphing https://dash.plotly.com/sharing-data-between-callbacks https://medium.com/analytics-vidhya/interactive-visualization-with-plotly-and-dash-f3f840b786fa https://dash-gallery.plotly.host/dash-cytoscape-lda/ (or without Plotly: http://jonathansoma.com/lede/algorithms-2017/classes/networks/networkx-graphs-from-source-target-dataframe/)
###Code
import re, json, warnings
import pandas as pd
import numpy as np
import networkx as nx
from networkx.readwrite import json_graph
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import nxviz as nxv
# Import (Jupyter) Dash -- App Functionality
import dash
from dash.dependencies import Input, Output
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from jupyter_dash import JupyterDash
# Ignore simple warnings.
warnings.simplefilter('ignore', DeprecationWarning)
# Declare directory location to shorten filepaths later.
abs_dir = "/Users/quinn.wi/Documents/SemanticData/"
###Output
_____no_output_____
###Markdown
App -- Dash + Plotly
###Code
%%time
with open(abs_dir + "Output/Graphs/JQA_Network_mergedEntities-correlation/network.json",
"r") as f:
G = json.load(f)
G = json_graph.node_link_graph(G, directed = True)
# external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
# For vanilla dash, use 'dash.Dash(__name__)'
app = JupyterDash(__name__)#, external_stylesheets = external_stylesheets)
fig = go.Figure()
app.layout = html.Div([
html.Div([
html.I("Node Selection & Steps Away (press enter after changing name or steps)"),
html.Br(),
dcc.Input(id = 'node-select', type = 'search', value = 'adams george', debounce=True), # Autocomplete possible?
dcc.Input(id = 'ego-steps', type = 'number', value = 1),
dcc.RangeSlider(id='edge-range', min=0, max=1, step=0.01, value=[0.9, 1],
marks = {0: '0', 0.25:'0.25', 0.5: '0.5', 0.75:'0.75', 1:'1'}),
html.Div(id="output-print"),
html.Br(),
]),
html.Div([
dcc.Graph(id='graph-output', figure = fig)
])
])
# Print input values with updates.
@app.callback(
Output("output-print", "children"),
[Input("node-select", "value"), Input('ego-steps', 'value'), Input('edge-range', 'value')]
)
def update_output(nodeSelect, steps, edge_range):
return f'Ego Node: {nodeSelect}, Steps Away from Ego: {steps}, Range of Edge Weight: {edge_range}'
# Use input values to update (subset) graph data.
@app.callback(
Output('graph-output', 'figure'),
[Input("node-select", "value"), Input('ego-steps', 'value'), Input('edge-range', 'value')]
)
def update_graph_data(nodeSelect, steps, edge_range):
ego = nx.ego_graph(G, nodeSelect, radius = steps)
lower_horizon = [(u, v, w) for (u, v, w) in ego.edges.data('weight') if w < edge_range[0]]
upper_horizon = [(u, v, w) for (u, v, w) in ego.edges.data('weight') if w >= edge_range[1]]
ego.remove_edges_from(lower_horizon + upper_horizon)
# Create Network Graph with Plotly
# Assign graph positions for each node.
pos = nx.spring_layout(ego, k=0.5, iterations=50) # nx.layout.circular_layout
for n, p in pos.items():
ego.nodes[n]['pos'] = p
# Create 'data' of edges for scatterplot.
edge_x = []
edge_y = []
for edge in ego.edges():
x0, y0 = ego.nodes[edge[0]]['pos']
x1, y1 = ego.nodes[edge[1]]['pos']
edge_x.append(x0)
edge_x.append(x1)
edge_x.append(None)
edge_y.append(y0)
edge_y.append(y1)
edge_y.append(None)
# Create 'trace' of scatter plot.
edge_trace = go.Scatter(
x = edge_x, y = edge_y,
line = dict(width = 0.5, color = '#888'),
hoverinfo = 'none',
mode = 'lines')
# Create 'data' of nodes.
node_x = []
node_y = []
node_degree = []
node_modularity = []
node_label = []
for node in ego.nodes():
x, y = ego.nodes[node]['pos']
node_x.append(x)
node_y.append(y)
node_degree.append(ego.nodes[node]['degree'])
node_modularity.append(ego.nodes[node]['modularity'])
node_label.append(node)
# Create 'trace' of nodes.
node_trace = go.Scatter(
x = node_x, y = node_y,
mode = 'markers',
hoverinfo = 'text',
marker = dict(
showscale = True,
# colorscale options
#'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
#'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
#'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
colorscale = 'Rainbow',
reversescale = False,
color = node_modularity,
size = 10,
colorbar = dict(
thickness = 15,
title = 'Node Community',
xanchor = 'left',
titleside = 'right'
),
line_width = 2))
# Creage visualization fith Plotly.go
fig = go.Figure(data = [edge_trace, node_trace],
layout = go.Layout(
title = f'{nodeSelect} Ego Network',
titlefont_size = 16,
showlegend = False,
hovermode = 'closest',
margin = dict(b = 20, l = 5, r = 5, t = 40),
annotations = [ dict(
text = "",
showarrow = True,
xref = "paper", yref = "paper",
x = 0.005, y = -0.002 ) ],
xaxis = dict(showgrid = False, zeroline = False, showticklabels = False),
yaxis = dict(showgrid = False, zeroline = False, showticklabels = False),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)')
)
return fig
if __name__ == "__main__":
app.run_server(mode = 'inline', debug = True) # mode = 'inline' for JupyterDash
###Output
_____no_output_____
###Markdown
App -- Dash + Plotly + Pandas. Declare Functions
###Code
%%time
def create_nodes_edges_dfs(networkx_graph, ego_node, weight_min, weight_max):
G = json_graph.node_link_graph(networkx_graph, directed = True)
df = nx.to_pandas_edgelist(G)
# Create first-degree edges & 'step' for node size.
first_degree = df.query('(source == @ego_node) & (@weight_min < weight <= @weight_max)')
first_degree.loc[:,'step'] = 20
first_degree.loc[:,'color'] = '#ff4d4d'
# Create second-degree edges.
# Rule: a source in df that is a target in 'first_degree' = a second degree.
# That is, it is the target of first_degree nodes.
second_degree = df.loc[df['source'].isin(first_degree['target'].tolist())] \
.query('(@weight_min <weight <= @weight_max)')
second_degree['step'] = 10
second_degree['color'] = '#ffe6e6'
# Join first- & second-degree edges.
edges = pd.concat([first_degree, second_degree])
# Create nodes.
nodes = set(edges['source'].tolist() + edges['target'].tolist() + [ego_node])
nodes = pd.DataFrame(nodes, columns = ['target']) # 'target' because 'step' info about target, not source.
# Merge node information with step.
nodes = pd.merge(nodes,
edges[['target', 'step', 'color']].drop_duplicates(),
on = 'target', how = 'inner') \
.rename(columns = {'target':'label'})
# Create label + step for ego.
nodes = nodes.append({'label':ego_node, 'step':40, 'color':'#800000'},
ignore_index = True)
return nodes, edges
# https://blog.datasciencedojo.com/network-theory-game-of-thrones/
def make_graph(nodes_df, edges_df):
g = nx.Graph()
for i,row in nodes_df.iterrows():
keys = row.index.tolist()
values = row.values
# The dict contains all attributes
g.add_node(row['label'], **dict(zip(keys,values)))
for i,row in edges_df.iterrows():
keys = row.index.tolist()
values = row.values
g.add_edge(row['source'], row['target'], **dict(zip(keys,values)))
return g
###Output
CPU times: user 4 µs, sys: 0 ns, total: 4 µs
Wall time: 5.72 µs
###Markdown
Run App
###Code
%%time
with open(abs_dir + "Output/Graphs/JQA_Network_mergedEntities-correlation/network.json",
"r") as f:
G = json.load(f)
# external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
# For vanilla dash, use 'dash.Dash(__name__)'
app = JupyterDash(__name__)#, external_stylesheets = external_stylesheets)
fig = go.Figure()
app.layout = html.Div([
html.Div([
html.I("Node Selection (press enter after changing name)"),
html.Br(),
dcc.Input(id = 'node-select', type = 'search', value = 'adams george', debounce=True), # Autocomplete possible?
dcc.RangeSlider(id='edge-range', min=0, max=1, step=0.01, value=[0.6, 1],
marks = {0: '0', 0.25:'0.25', 0.5: '0.5', 0.75:'0.75', 1:'1'}),
html.Div(id="output-print"),
html.Br(),
]),
html.Div([
dcc.Graph(id='graph-output', figure = fig)
])
])
# Print input values with updates.
@app.callback(
Output("output-print", "children"),
[Input("node-select", "value"), Input('edge-range', 'value')]
)
def update_output(nodeSelect, edge_range):
return f'Ego Node: {nodeSelect}, Range of Edge Weight: {edge_range}'
# Use input values to update (subset) graph data.
@app.callback(
Output('graph-output', 'figure'),
[Input("node-select", "value"), Input('edge-range', 'value')]
)
def update_graph_data(nodeSelect, edge_range):
nodes, edges = create_nodes_edges_dfs(G, nodeSelect, edge_range[0], edge_range[1])
ego = make_graph(nodes, edges)
# Assign graph positions for each node.
pos = nx.spring_layout(ego, k=0.5, iterations=50) # nx.layout.circular_layout
for n, p in pos.items():
ego.nodes[n]['pos'] = p
# Create Network Graph with Plotly
# Create 'data' of edges for scatterplot.
edge_x = []
edge_y = []
edge_weight = []
for edge in ego.edges():
x0, y0 = ego.nodes[edge[0]]['pos']
x1, y1 = ego.nodes[edge[1]]['pos']
edge_x.append(x0)
edge_x.append(x1)
edge_x.append(None)
edge_y.append(y0)
edge_y.append(y1)
edge_y.append(None)
edge_weight.append(ego.edges[edge]['weight'])
# Create 'trace' of scatter plot.
edge_trace = go.Scatter(
x = edge_x, y = edge_y,
line = dict(width = 0.5,
color = '#888',
# width = edge_weight,
# color = node_color,
),
hoverinfo = 'none',
mode = 'lines')
# edge_trace['line'] = dict(width = edge_weight, color = '#888')
# Create 'data' of nodes.
node_x = []
node_y = []
node_size = []
node_color = []
node_label = []
for node in ego.nodes():
x, y = ego.nodes[node]['pos']
node_x.append(x)
node_y.append(y)
node_size.append(ego.nodes[node]['step'])
node_color.append(ego.nodes[node]['color'])
node_label.append(node)
# Create 'trace' of nodes.
node_trace = go.Scatter(
x = node_x, y = node_y,
mode = 'markers',
text = node_label, textposition = 'top center',
hoverinfo = 'text',
marker = dict(
color = node_color,
size = node_size,
opacity = 1,
line = dict(width = 1, color = '#330000'))
)
    # Create visualization with Plotly.go
fig = go.Figure(data = [edge_trace, node_trace],
layout = go.Layout(
title = f'{nodeSelect} Ego Network',
titlefont_size = 16,
showlegend = False,
hovermode = 'closest',
margin = dict(b = 20, l = 5, r = 5, t = 40),
annotations = [ dict(
text = "",
showarrow = True,
xref = "paper", yref = "paper",
x = 0.005, y = -0.002 ) ],
xaxis = dict(showgrid = False, zeroline = False, showticklabels = False),
yaxis = dict(showgrid = False, zeroline = False, showticklabels = False),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)')
)
return fig
if __name__ == "__main__":
app.run_server(mode = 'inline', debug = True) # mode = 'inline' for JupyterDash
###Output
_____no_output_____ |
AI Professional/6 - Reinforcement Learning Explained/Module 2/Module 2_Ex2.1A Greedy.ipynb | ###Markdown
RIHAD VARIAWA, Data Scientist - Who has fun LEARNING, EXPLORING & GROWING DAT257x: Reinforcement Learning Explained Lab 2: Bandits Exercise 2.1A: Greedy policy
###Code
import numpy as np
import sys
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.bandit import BanditEnv
from lib.simulation import Experiment
###Output
_____no_output_____
###Markdown
Let's define an interface for a policy. For a start, the policy should know how many actions it can take and be able to take a particular action according to that policy
###Code
#Policy interface
class Policy:
#num_actions: (int) Number of arms [indexed by 0 ... num_actions-1]
def __init__(self, num_actions):
self.num_actions = num_actions
def act(self):
pass
def feedback(self, action, reward):
pass
###Output
_____no_output_____
###Markdown
Now let's implement a greedy policy based on the policy interface. The greedy policy always takes the action with the highest estimated reward so far (i.e. it acts greedily). This is implemented in the act() function. In addition, we will maintain the name of the policy (name), the rewards it has accumulated for each action (total_rewards), and the number of times each action has been performed (total_counts).
###Code
#Greedy policy
class Greedy(Policy):
def __init__(self, num_actions):
Policy.__init__(self, num_actions)
self.name = "Greedy"
self.total_rewards = np.zeros(num_actions, dtype = np.longdouble)
self.total_counts = np.zeros(num_actions, dtype = np.longdouble)
def act(self):
current_averages = np.divide(self.total_rewards, self.total_counts, where = self.total_counts > 0)
current_averages[self.total_counts <= 0] = 0.5 #Correctly handles Bernoulli rewards; over-estimates otherwise
current_action = np.argmax(current_averages)
return current_action
def feedback(self, action, reward):
self.total_rewards[action] += reward
self.total_counts[action] += 1
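# --- Illustrative usage sketch (not part of the original lab): exercise the policy by hand.
# The three-arm count and variable names below are arbitrary choices for illustration.
sanity_policy = Greedy(3)
first_action = sanity_policy.act()          # no arm tried yet, so every arm sits at the 0.5 prior
sanity_policy.feedback(first_action, 1.0)   # a reward of 1 makes this arm the greedy choice next time
assert sanity_policy.act() == first_action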
###Output
_____no_output_____
###Markdown
We are now ready to perform our first simulation. Let's set some parameters.
###Code
evaluation_seed = 8026
num_actions = 5
trials = 10000
distribution = "bernoulli"
###Output
_____no_output_____
###Markdown
Now, put the pieces together and run the experiment.
###Code
env = BanditEnv(num_actions, distribution, evaluation_seed)
agent = Greedy(num_actions)
experiment = Experiment(env, agent)
experiment.run_bandit(trials)
###Output
Distribution: bernoulli [ 0.4561754 0.22507755 0.82070893 0.05221751 0.03428511]
Optimal arm: 2
|
longrun/Matching Market v20-longrun.ipynb | ###Markdown
Matching Market This simple model consists of a buyer, a supplier, and a market. The buyer represents a group of customers whose willingness to pay for a single unit of the good is captured by a vector of prices _wtp_. You can initiate the buyer with a set_quantity function which randomly assigns the willingness to pay according to your specifications. You may ask for these willingness-to-pay quantities with a _get_bids_ function. The supplier is similar, but instead the supplier is willing to be paid to sell a unit of technology. The supplier for instance may have non-zero variable costs that make them unwilling to produce the good unless they receive a specified price. Similarly the supplier has a get_asks function which returns a list of desired prices. The willingness to pay or sell is set randomly using uniform random distributions. The resulting list of bids is effectively a demand curve. Likewise the list of asks is effectively a supply curve. A more complex determination of bids and asks is possible, for instance using time of year to vary the quantities being demanded. New in version 20 - fixed a bug in the clearing mechanism, included a logic check to avoid weird behavior around zero Microeconomic FoundationsThe market assumes the presence of an auctioneer which will create a _book_, which seeks to match the bids and the asks as much as possible. If the auctioneer is neutral, then it is incentive compatible for the buyer and the supplier to truthfully announce their bids and asks. The auctioneer will find a single price which clears as much of the market as possible. Clearing the market means that as many willing swaps happen as possible. You may ask the market object at what price the market clears with the get_clearing_price function. You may also ask the market how many units were exchanged with the get_units_cleared function. Agent-Based ObjectsThe following section presents three objects which can be used to make an agent-based model of an efficient, two-sided market.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import random as rnd
import pandas as pd
import numpy as np
import time
import datetime
import calendar
import json
import statistics
# fix what is missing with the datetime/time/calendar package
def add_months(sourcedate,months):
month = sourcedate.month - 1 + months
year = int(sourcedate.year + month / 12 )
month = month % 12 + 1
day = min(sourcedate.day,calendar.monthrange(year, month)[1])
return datetime.date(year,month,day)
# measure how long it takes to run the script
startit = time.time()
dtstartit = datetime.datetime.now()
###Output
_____no_output_____
###Markdown
classes buyers and sellersBelow we are constructing the buyers and sellers in classes.
###Code
class Seller():
def __init__(self, name):
self.name = name
self.wta = []
self.step = 0
self.prod = 2000
self.lb_price = 10
self.lb_multiplier = 0
self.ub_price = 20
self.ub_multiplier = 0
self.init_reserve = 500000
self.reserve = 500000
self.init_unproven_reserve = 0
self.unproven_reserve = 0
#multiple market idea, also 'go away from market'
self.subscr_market = {}
self.last_price = 15
self.state_hist = {}
self.cur_scenario = ''
self.count = 0
self.storage = 0
self.q_to_market = 0
self.ratio_sold = 0
self.ratio_sold_hist = []
# the supplier has n quantities that they can sell
# they may be willing to sell this quantity anywhere from a lower price of l
# to a higher price of u
def set_quantity(self):
self.count = 0
self.update_price()
n = self.prod
l = self.lb_price + self.lb_multiplier
u = self.ub_price + self.ub_multiplier
wta = []
for i in range(n):
p = rnd.uniform(l, u)
wta.append(p)
if len(wta) < self.reserve:
self.wta = wta
else:
self.wta = wta[0:(self.reserve-1)]
self.prod = self.reserve
if len(self.wta) > 0:
self.wta = self.wta #sorted(self.wta, reverse=False)
self.q_to_market = len(self.wta)
def get_name(self):
return self.name
def get_asks(self):
return self.wta
def extract(self, cur_extraction):
if self.reserve > 0:
self.reserve = self.reserve - cur_extraction
else:
self.prod = 0
    # production costs rise by up to 100% as the reserve is depleted
def update_price(self):
depletion = (self.init_reserve - self.reserve) / self.init_reserve
self.ub_multiplier = int(self.ub_price * depletion)
self.lb_multiplier = int(self.lb_price * depletion)
def return_not_cleared(self, not_cleared):
self.count = self.count + (len(self.wta) - len(not_cleared))
self.wta = not_cleared
def get_price(self, price):
self.last_price = price
def update_production(self):
if (self.step/12).is_integer():
if self.prod > 0 and self.q_to_market > 0:
rp_ratio = self.reserve / self.prod
self.ratio_sold = self.count / self.q_to_market
self.ratio_sold_hist.append(self.ratio_sold)
yearly_average = statistics.mean(self.ratio_sold_hist[-12:])
if (rp_ratio > 15) and (yearly_average > .9):
self.prod = int(self.prod * 1.1)
if print_details:
print("%s evaluate production" % self.name)
if (self.unproven_reserve > 0) and (self.cur_scenario == 'PACES'):
self.reserve = self.reserve + int(0.1 * self.init_unproven_reserve)
self.unproven_reserve = self.unproven_reserve - int(0.1 * self.init_unproven_reserve)
def evaluate_timestep(self):
self.update_production()
    # record every step into a dictionary; not very pythonic, look into vars()
def book_keeping(self):
self.state_hist[self.step] = self.__dict__
class Buyer():
def __init__(self, name):
self.name = name
self.type = 0
self.rof = 0
self.wtp = []
self.step = 0
self.offset= 0
self.base_demand = 0
self.max_demand = 0
self.lb_price = 10
self.ub_price = 20
self.last_price = 15
self.subscr_market = {}
self.state_hist = {}
self.cur_scenario = ''
self.count = 0
self.real_demand = 0
self.storage_cap = 1
self.storage = 0
self.storage_q = 0
# the supplier has n quantities that they can buy
# they may be willing to sell this quantity anywhere from a lower price of l
# to a higher price of u
def set_quantity(self):
self.count = 0
self.update_price()
n = int(self.consumption(self.step))
l = self.lb_price
u = self.ub_price
wtp = []
for i in range(n):
p = rnd.uniform(l, u)
wtp.append(p)
self.wtp = wtp #sorted(wtp, reverse=True)
    # simple getter; a little too obvious
def get_name(self):
return self.name
# return list of willingness to pay
def get_bids(self):
return self.wtp
def consumption(self, x):
# make it initialise to seller
b = self.base_demand
m = self.max_demand
y = b + m * (.5 * (1 + np.cos(((x+self.offset)/6)*np.pi)))
self.real_demand = y
s = self.storage_manager()
return(y+s)
def update_price(self):
# adjust Q
if self.type == 1: #home
if (self.step/12).is_integer():
self.base_demand = home_savings[self.cur_scenario] * self.base_demand
self.max_demand = home_savings[self.cur_scenario] * self.max_demand
if self.type == 2: # elec for eu + us
if (self.step/12).is_integer():
cur_elec_df = elec_space['RELATIVE'][self.cur_scenario]
period_now = add_months(period_null, self.step)
index_year = int(period_now.strftime('%Y'))
#change_in_demand = cur_elec_df[index_year]
self.base_demand = self.base_demand * cur_elec_df[index_year]
self.max_demand = self.max_demand * cur_elec_df[index_year]
if self.type == 3: #indu
if (self.step/12).is_integer():
if (self.rof == 0) and (self.cur_scenario == 'PACES'):
#cur_df = economic_growth['ECONOMIC GROWTH'][self.cur_scenario]
period_now = add_months(period_null, self.step)
index_year = int(period_now.strftime('%Y'))
#growth = cur_df[index_year]
growth = np.arctan((index_year-2013)/10)/(.5*np.pi)*.05+0.03
self.base_demand = (1 + growth) * self.base_demand
self.max_demand = (1 + growth) * self.max_demand
else:
cur_df = economic_growth['ECONOMIC GROWTH'][self.cur_scenario]
period_now = add_months(period_null, self.step)
index_year = int(period_now.strftime('%Y'))
growth = cur_df[index_year]
self.base_demand = (1 + growth) * self.base_demand
self.max_demand = (1 + growth) * self.max_demand
## adjust P now to get_price, but adress later
## moved to get_price, rename update_price function (?)
#self.lb_price = self.last_price * .75
#self.ub_price= self.last_price * 1.25
def return_not_cleared(self, not_cleared):
self.count = self.count + (len(self.wtp)-len(not_cleared))
self.wtp = not_cleared
def get_price(self, price):
self.last_price = price
if self.last_price > 100:
self.last_price = 100
self.lb_price = self.last_price * .75
self.ub_price= self.last_price * 1.25
# writes complete state to a dictionary, see if usefull
def book_keeping(self):
self.state_hist[self.step] = self.__dict__
# there has to be some accountability for uncleared bids of the buyers
def evaluate_timestep(self):
if self.type==1:
not_cleared = len(self.wtp)
#total_demand = self.real_demand + self.storage_q
storage_delta = self.storage_q - not_cleared
self.storage = self.storage + storage_delta
if print_details:
print(self.name, storage_delta)
def storage_manager(self):
# check if buyer is household buyer
if self.type==1:
if self.storage < 0:
self.storage_q = -self.storage
else:
self.storage_q = 0
return(self.storage_q)
else:
return(0)
###Output
_____no_output_____
###Markdown
Construct the marketFor the market two classes are made. The market itself, which controls the buyers and the sellers, and the book. The market has a book where the results of the clearing procedure are stored.
###Code
# the book is an object of the market used for the clearing procedure
class Book():
def __init__(self):
self.ledger = pd.DataFrame(columns = ("role","name","price","cleared"))
def set_asks(self,seller_list):
# ask each seller their name
# ask each seller their willingness
# for each willingness append the data frame
for seller in seller_list:
seller_name = seller.get_name()
seller_price = seller.get_asks()
ar_role = np.full((1,len(seller_price)),'seller', dtype=object)
ar_name = np.full((1,len(seller_price)),seller_name, dtype=object)
ar_cleared = np.full((1,len(seller_price)),'in process', dtype=object)
temp_ledger = pd.DataFrame([*ar_role,*ar_name,seller_price,*ar_cleared]).T
temp_ledger.columns= ["role","name","price","cleared"]
self.ledger = self.ledger.append(temp_ledger, ignore_index=True)
def set_bids(self,buyer_list):
# ask each seller their name
# ask each seller their willingness
# for each willingness append the data frame
for buyer in buyer_list:
buyer_name = buyer.get_name()
buyer_price = buyer.get_bids()
ar_role = np.full((1,len(buyer_price)),'buyer', dtype=object)
ar_name = np.full((1,len(buyer_price)),buyer_name, dtype=object)
ar_cleared = np.full((1,len(buyer_price)),'in process', dtype=object)
temp_ledger = pd.DataFrame([*ar_role,*ar_name,buyer_price,*ar_cleared]).T
temp_ledger.columns= ["role","name","price","cleared"]
self.ledger = self.ledger.append(temp_ledger, ignore_index=True)
def update_ledger(self,ledger):
self.ledger = ledger
def get_ledger(self):
return self.ledger
def clean_ledger(self):
self.ledger = pd.DataFrame(columns = ("role","name","price","cleared"))
class Market():
def __init__(self, name):
self.name= name
self.count = 0
self.last_price = ''
self.book = Book()
self.b = []
self.s = []
self.buyer_list = []
self.seller_list = []
self.buyer_dict = {}
self.seller_dict = {}
self.ledger = ''
self.seller_analytics = {}
self.buyer_analytics = {}
def book_keeping_all(self):
for i in self.buyer_dict:
self.buyer_dict[i].book_keeping()
for i in self.seller_dict:
self.seller_dict[i].book_keeping()
def add_buyer(self,buyer):
if buyer.subscr_market[self.name] == 1:
self.buyer_list.append(buyer)
def add_seller(self,seller):
if seller.subscr_market[self.name] == 1:
self.seller_list.append(seller)
def set_book(self):
self.book.set_bids(self.buyer_list)
self.book.set_asks(self.seller_list)
def get_bids(self):
# this is a data frame
ledger = self.book.get_ledger()
rows= ledger.loc[ledger['role'] == 'buyer']
# this is a series
prices=rows['price']
# this is a list
bids = prices.tolist()
return bids
def get_asks(self):
# this is a data frame
ledger = self.book.get_ledger()
rows = ledger.loc[ledger['role'] == 'seller']
# this is a series
prices=rows['price']
# this is a list
asks = prices.tolist()
return asks
    # return the price at which the market clears
    # note: this can fail when there are more buyers than sellers
def get_clearing_price(self):
# buyer makes a bid starting with the buyer which wants it most
b = self.get_bids()
s = self.get_asks()
# highest to lowest
self.b=sorted(b, reverse=True)
# lowest to highest
self.s=sorted(s, reverse=False)
# find out whether there are more buyers or sellers
# then drop the excess buyers or sellers; they won't compete
n = len(b)
m = len(s)
# there are more sellers than buyers
# drop off the highest priced sellers
if (m > n):
s = s[0:n]
matcher = n
# There are more buyers than sellers
# drop off the lowest bidding buyers
else:
b = b[0:m]
matcher = m
        # - It's possible that not all items sold actually clear the market here
        # - Produces an error when one of the two lists is empty,
        #   something like "can't compare string and float"
count = 0
for i in range(matcher):
if (self.b[i] > self.s[i]):
count +=1
self.last_price = self.b[i]
# copy count to market object
self.count = count
return self.last_price
# TODO: Annotate the ledger
# this procedure takes up 80% of processing time
def annotate_ledger(self,clearing_price):
ledger = self.book.get_ledger()
# logic test
# b or s can not be zero, probably error or unreliable results
# so annote everything as false in that case and move on
b = self.get_bids()
s = self.get_asks()
if (len(s)==0 or len(b)==0):
new_col = [ 'False' for i in range(len(ledger['cleared']))]
ledger['cleared'] = new_col
self.book.update_ledger(ledger)
return
# end logic test
for index, row in ledger.iterrows():
if (row['role'] == 'seller'):
if (row['price'] < clearing_price):
ledger.loc[index,'cleared'] = 'True'
else:
ledger.loc[index,'cleared'] = 'False'
else:
if (row['price'] > clearing_price):
ledger.loc[index,'cleared'] = 'True'
else:
ledger.loc[index,'cleared'] = 'False'
self.book.update_ledger(ledger)
def get_units_cleared(self):
return self.count
def clean_ledger(self):
self.ledger = ''
self.book.clean_ledger()
def run_it(self):
self.pre_clearing_operation()
self.clearing_operation()
self.after_clearing_operation()
# pre clearing empty out the last run and start
# clean ledger is kind of sloppy, rewrite functions to overide the ledger
def pre_clearing_operation(self):
self.clean_ledger()
def clearing_operation(self):
self.set_book()
clearing_price = self.get_clearing_price()
if print_details:
print(self.name, clearing_price)
self.annotate_ledger(clearing_price)
def after_clearing_operation(self):
for agent in self.seller_list:
name = agent.name
cur_extract = len(self.book.ledger[(self.book.ledger['cleared'] == 'True') &
(self.book.ledger['name'] == name)])
agent.extract(cur_extract)
agent.get_price(self.last_price)
self.seller_analytics[name] = cur_extract
if cur_extract >0:
agent_asks = agent.get_asks()
agent_asks = sorted(agent_asks, reverse=False)
not_cleared = agent_asks[cur_extract:len(agent_asks)]
agent.return_not_cleared(not_cleared)
for agent in self.buyer_list:
name = agent.name
cur_extract = len(self.book.ledger[(self.book.ledger['cleared'] == 'True') &
(self.book.ledger['name'] == name)])
agent.get_price(self.last_price)
self.buyer_analytics[name] = cur_extract
if cur_extract >0:
agent_bids = agent.get_bids()
agent_bids = sorted(agent_bids, reverse=True)
not_cleared = agent_bids[cur_extract:len(agent_bids)]
agent.return_not_cleared(not_cleared)
# cleaning up the books
self.book_keeping_all()
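# --- Illustrative usage sketch (not part of the original model run below) ---
# A tiny hand-built market to show the clearing mechanics described above.
# All names and numbers here are made up for illustration only.
_demo_seller = Seller('demo_seller')
_demo_seller.prod = 30
_demo_seller.subscr_market = {'demo': 1}
_demo_seller.set_quantity()

_demo_buyer = Buyer('demo_buyer')
_demo_buyer.base_demand = 10
_demo_buyer.max_demand = 10
_demo_buyer.subscr_market = {'demo': 1}
_demo_buyer.set_quantity()

_demo_market = Market('demo')
_demo_market.add_seller(_demo_seller)
_demo_market.add_buyer(_demo_buyer)
_demo_market.set_book()
_demo_price = _demo_market.get_clearing_price()
_demo_units = _demo_market.get_units_cleared()
# _demo_price / _demo_units now hold the clearing price and the number of swaps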
###Output
_____no_output_____
###Markdown
ObserverThe observer holds the clock and collects data. In this setup it tells the market another tick has passed and it is time to act. The market will instruct the other agents. The observer initializes the model, thereby making real objects out of the classes defined above.
###Code
class Observer():
def __init__(self, init_buyer, init_seller, timesteps, scenario):
self.init_buyer = init_buyer
self.init_seller = init_seller
self.init_market = init_market
self.maxrun = timesteps
self.cur_scenario = scenario
self.buyer_dict = {}
self.seller_dict = {}
self.market_dict = {}
self.timetick = 0
self.gas_market = ''
self.market_hist = []
self.seller_hist = []
self.buyer_hist = []
self.market_origin = []
self.market_origin_df = pd.DataFrame(columns=['seller_analytics','buyer_analytics'])
self.all_data = {}
def set_buyer(self, buyer_info):
for name in buyer_info:
self.buyer_dict[name] = Buyer('%s' % name)
            self.buyer_dict[name].offset = buyer_info[name]['offset']
self.buyer_dict[name].base_demand = buyer_info[name]['b']
self.buyer_dict[name].max_demand = buyer_info[name]['m']
self.buyer_dict[name].lb_price = buyer_info[name]['lb_price']
self.buyer_dict[name].ub_price = buyer_info[name]['ub_price']
self.buyer_dict[name].type = buyer_info[name]['type']
self.buyer_dict[name].rof = buyer_info[name]['rof']
self.buyer_dict[name].cur_scenario = self.cur_scenario
self.buyer_dict[name].subscr_market = dict.fromkeys(init_market,0)
for market in buyer_info[name]['market']:
self.buyer_dict[name].subscr_market[market] = 1
def set_seller(self, seller_info):
for name in seller_info:
self.seller_dict[name] = Seller('%s' % name)
self.seller_dict[name].prod = seller_info[name]['prod']
self.seller_dict[name].lb_price = seller_info[name]['lb_price']
self.seller_dict[name].ub_price = seller_info[name]['ub_price']
self.seller_dict[name].reserve = seller_info[name]['reserve']
self.seller_dict[name].init_reserve = seller_info[name]['reserve']
self.seller_dict[name].unproven_reserve = seller_info[name]['UP_reserve']
self.seller_dict[name].init_unproven_reserve = seller_info[name]['UP_reserve']
#self.seller_dict[name].rof = seller_info[name]['rof']
self.seller_dict[name].cur_scenario = self.cur_scenario
self.seller_dict[name].subscr_market = dict.fromkeys(init_market,0)
for market in seller_info[name]['market']:
self.seller_dict[name].subscr_market[market] = 1
def set_market(self, market_info):
for name in market_info:
self.market_dict[name] = Market('%s' % name)
#add suplliers and buyers to this market
for supplier in self.seller_dict.values():
self.market_dict[name].add_seller(supplier)
for buyer in self.buyer_dict.values():
self.market_dict[name].add_buyer(buyer)
self.market_dict[name].seller_dict = self.seller_dict
self.market_dict[name].buyer_dict = self.buyer_dict
def update_buyer(self):
for i in self.buyer_dict:
self.buyer_dict[i].step += 1
self.buyer_dict[i].set_quantity()
def update_seller(self):
for i in self.seller_dict:
self.seller_dict[i].step += 1
self.seller_dict[i].set_quantity()
def evaluate_timestep(self):
for i in self.buyer_dict:
self.buyer_dict[i].evaluate_timestep()
for i in self.seller_dict:
self.seller_dict[i].evaluate_timestep()
def get_reserve(self):
reserve = []
for name in self.seller_dict:
reserve.append(self.seller_dict[name].reserve)
return reserve
def get_data(self):
for name in self.seller_dict:
self.all_data[name] = self.seller_dict[name].state_hist
for name in self.buyer_dict:
self.all_data[name] = self.buyer_dict[name].state_hist
def run_it(self):
# Timing
# time initialising
startit_init = time.time()
# initialise, setting up all the agents (firstrun not really needed anymore, since outside the loop)
# might become useful again if run_it is used for parametersweep
first_run = True
if first_run:
self.set_buyer(self.init_buyer)
self.set_seller(self.init_seller)
self.set_market(self.init_market)
first_run=False
# time init stop
stopit_init = time.time() - startit_init
if print_details:
print('%s : initialisation time' % stopit_init)
# building the multiindex for origin dataframe
listing = []
for m in self.market_dict:
listing_buyer = [(runname, m,'buyer_analytics',v.name) for v in self.market_dict[m].buyer_list]
listing = listing + listing_buyer
listing_seller = [(runname, m,'seller_analytics',v.name) for v in self.market_dict[m].seller_list]
listing = listing + listing_seller
multi_listing = pd.MultiIndex.from_tuples(listing)
# recording everything in dataframes, more dependable than lists?
#reserve_df = pd.DataFrame(data=None, columns=[i for i in self.seller_dict])
#iterables = [[i for i in self.market_dict], ['buyer_analytics', 'seller_analytics']]
#index = pd.MultiIndex.from_product(iterables)
market_origin_df = pd.DataFrame(data=None, columns=multi_listing)
for period in range(self.maxrun):
# time the period
startit_period = time.time()
self.timetick += 1
period_now = add_months(period_null, self.timetick-1)
if print_details:
print('#######################################')
print(period_now.strftime('%Y-%b'), self.cur_scenario)
# update the buyers and sellers (timetick+ set Q)
self.update_buyer()
self.update_seller()
# real action on the market
for market in self.market_dict:
if market != 'lng':
self.market_dict[market].run_it()
self.market_dict['lng'].run_it()
#tell buyers timetick has past
self.evaluate_timestep()
# data collection
for name in self.market_dict:
p_clearing = self.market_dict[name].last_price
q_sold = self.market_dict[name].count
self.market_hist.append([period_now.strftime('%Y-%b'), p_clearing, q_sold, name])
for name in self.seller_dict:
reserve = self.seller_dict[name].reserve
produced = self.seller_dict[name].count
self.seller_hist.append([period_now.strftime('%Y-%b'), reserve, produced, name])
for name in self.buyer_dict:
storage = self.buyer_dict[name].storage
consumed = self.buyer_dict[name].count
self.buyer_hist.append([period_now.strftime('%Y-%b'), storage, consumed, name])
            # capture the origin of what is sold on each market;
            # the analytics dictionaries are reused every step,
            # so a DataFrame is used to record the per-period values
for name in self.market_dict:
seller_analytics = self.market_dict[name].seller_analytics
buyer_analytics = self.market_dict[name].buyer_analytics
for seller in seller_analytics:
market_origin_df.loc[period_now.strftime('%Y-%b'),
(runname, name,'seller_analytics',seller)] = seller_analytics[seller]
for buyer in buyer_analytics:
market_origin_df.loc[period_now.strftime('%Y-%b'),
(runname, name,'buyer_analytics',buyer)] = buyer_analytics[buyer]
# recording the step_info
# since this operation can take quite a while, print after every operation
period_time = time.time() - startit_period
if print_details:
print('%.2f : seconds to clear period' % period_time)
        # save the dataframe as an attribute
self.market_origin_df = market_origin_df
###Output
_____no_output_____
###Markdown
Example MarketIn the following code example we use the buyer and supplier objects to create a market. At the market a single price is announced which causes as many units of goods to be swapped as possible. The buyers and sellers stop trading when it is no longer in their own interest to continue.
###Code
# import scenarios
inputfile = 'economic growth scenarios.xlsx'
# economic growth percentages
economic_growth = pd.read_excel(inputfile, sheetname='ec_growth', index_col=0, header=[0,1])
## demand for electricity import scenarios spaced by excel
#elec_space = pd.read_excel(inputfile, sheetname='elec_space', skiprows=1, index_col=0, header=0)
# demand for electricity import scenarios spaced by excel
elec_space = pd.read_excel(inputfile, sheetname='elec_space', index_col=0, header=[0,1])
# gasdemand home (percentage increases)
home_savings = {'PACES': 1.01, 'TIDES': .99, 'CIRCLES': .97}
# multilevel ecgrowth
economic_growth2 = pd.read_excel(inputfile, sheetname='ec_growth', index_col=0, header=[0,1])
#economic_growth2['ECONOMIC GROWTH']
# reading excel initialization data back
read_file = 'init_buyers_sellers_lng.xlsx'
df_buyer = pd.read_excel(read_file,orient='index',sheetname='buyers')
df_seller = pd.read_excel(read_file,orient='index',sheetname='sellers')
df_buyer['market'] = [eval(i) for i in df_buyer['market'].values]
df_seller['market'] = [eval(i) for i in df_seller['market'].values]
init_buyer = df_buyer.to_dict('index')
init_seller = df_seller.to_dict('index')
#init_market = {'eu', 'us','as'}, construct markets by unique values
market = []
for i in init_seller:
for x in init_seller[i]['market']: market.append(x)
for i in init_buyer:
for x in init_buyer[i]['market']: market.append(x)
market = list(set(market))
init_market = market
# set the starting time
period_null= datetime.date(2013,1,1)
###Output
C:\Anaconda3\lib\site-packages\pandas\util\_decorators.py:118: FutureWarning: The `sheetname` keyword is deprecated, use `sheet_name` instead
return func(*args, **kwargs)
###Markdown
run the modelTo run the model we create the observer. The observer creates all the other objects and runs the model.
###Code
# create observer and run the model
# first data about buyers then sellers and then model ticks
years = 35
# timestep = 12
print_details = False
run_market = {}
run_seller = {}
run_buyer = {}
run_market_origin = {}
run_market_origin_df = {}
for i in ['PACES', 'CIRCLES', 'TIDES']:
runname = i
dtrunstart = datetime.datetime.now()
print('\n%s scenario %d year run started' %(i,years))
obser1 = Observer(init_buyer, init_seller, years*12, i)
obser1.run_it()
#get the info from the observer
run_market[i] = obser1.market_hist
run_seller[i] = obser1.seller_hist
run_buyer[i] = obser1.buyer_hist
run_market_origin_df[i] = obser1.market_origin_df
#run_data[i] = obser1.all_data
dtrunstop = datetime.datetime.now()
print('%s scenario %d year run finished' %(i,years))
print('this run took %s (h:m:s) to complete'% (dtrunstop - dtrunstart))
# timeit
stopit = time.time()
dtstopit = datetime.datetime.now()
print('it took us %s seconds to get to this conclusion' % (stopit-startit))
print('in another notation (h:m:s) %s'% (dtstopit - dtstartit))
###Output
it took us 8947.212706804276 seconds to get to this conclusion
in another notation (h:m:s) 2:29:07.212706
###Markdown
Operations Research FormulationThe market can also be formulated as a very simple linear program or linear complementarity problem. It is clearer and easier to implement this market clearing mechanism with agents. One merit of the agent-based approach is that we don't need linear or linearizable supply and demand functions. The auctioneer is effectively following a very simple linear program subject to constraints on units sold. The auctioneer is, in the primal model, maximizing the consumer utility received by customers, with respect to the price being paid, subject to a fixed supply curve. On the dual side the auctioneer is minimizing the cost of production for the supplier, with respect to quantity sold, subject to a fixed demand curve. It is the presumed neutrality of the auctioneer which justifies the honest statement of supply and demand. An alternative formulation is a linear complementarity problem. Here the presence of an optimal space of trades ensures that there is a Pareto optimal front of possible trades. The perfect opposition of interests in dividing the consumer and producer surplus means that this is a zero-sum game. Furthermore the solution to this zero-sum game maximizes societal welfare and is therefore the Hicks optimal solution. Next StepsA possible addition to this model would be to have a weekly varying demand from customers, for instance caused by the use of natural gas as a heating agent. This would require the bids and asks to be time varying, and for the market to be run over successive time periods. A second addition would be to create transport costs, or enable intermediate goods to be produced. This would need a more elaborate market operator. Another possible addition would be to add a profit maximizing broker. This may require adding belief, fictitious play, or message passing. The object-orientation of the models will probably need to be further rationalized. Right now the market requires very particular ordering of calls to function correctly. Time of last runTime and date of the last run of this notebook file
###Code
# print the time of last run
print('last run of this notebook:')
time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
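# --- Hedged sketch (added for illustration): the clearing rule described above as a tiny linear program.
# Assumes SciPy is available; it is not used anywhere else in this notebook.
# The bids/asks below are made-up numbers; this is welfare maximisation with unit quantities.
from scipy.optimize import linprog

_bids = [20, 18, 15, 11]   # buyers' willingness to pay
_asks = [10, 12, 14, 19]   # sellers' willingness to accept
_nb, _ns = len(_bids), len(_asks)
# Variables: x_i = fraction of bid i served, y_j = fraction of ask j used.
# linprog minimises, so negate the bids to maximise total surplus.
_c = [-b for b in _bids] + list(_asks)
_A_eq = [[1.0] * _nb + [-1.0] * _ns]   # total bought == total sold
_b_eq = [0.0]
_res = linprog(_c, A_eq=_A_eq, b_eq=_b_eq, bounds=[(0, 1)] * (_nb + _ns))
_units_cleared = round(sum(_res.x[:_nb]))   # 3 trades for these numbers
_total_surplus = -_res.fun                  # consumer + producer surplus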
###Output
last run of this notebook:
###Markdown
Plotting scenario runsFor the scenario runs we vary the external factors according to the scenarios. Real plotting is done in a separate visualization file
###Code
plt.subplots()
for market in init_market:
for i in run_market:
run_df = pd.DataFrame(run_market[i])
run_df = run_df[run_df[3]==market]
run_df.set_index(0, inplace=True)
run_df.index = pd.to_datetime(run_df.index)
run_df.index.name = 'month'
run_df.rename(columns={1: 'price', 2: 'quantity'}, inplace=True)
run_df = run_df['price'].resample('A').mean().plot(label=i, title=market)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel('€/MWh')
plt.xlabel('Year')
plt.show();
###Output
_____no_output_____
###Markdown
saving data for laterTo keep this file as clear as possible and for efficiency we visualize the results in a separate file. To transfer the model run data we use the JSON library (and possibly Excel).
###Code
today = datetime.date.today().strftime('%Y%m%d')
outputexcel = '.\exceloutput\%srun.xlsx' %today
writer = pd.ExcelWriter(outputexcel)
def write_to_excel():
for i in run_market:
run_df = pd.DataFrame(run_market[i])
run_df.set_index(0, inplace=True)
run_df.index = pd.to_datetime(run_df.index)
run_df.index.name = 'month'
run_df.rename(columns={1: 'price', 2: 'quantity'}, inplace=True)
run_df.to_excel(writer, sheet_name=i)
# uncomment if wanted to write to excel file
#write_to_excel()
# Writing JSON data
# market data
data = run_market
with open('marketdata.json', 'w') as f:
json.dump(data, f)
# seller/reserve data
data = run_seller
with open('sellerdata.json', 'w') as f:
json.dump(data, f)
# buyer data
data = run_buyer
with open('buyerdata.json', 'w') as f:
json.dump(data, f)
# complex dataframes do not work well with Json, so use Pickle
# Merge Dataframes
result = pd.concat([run_market_origin_df[i] for i in run_market_origin_df], axis=1)
#pickle does the job
result.to_pickle('marketdataorigin.pickle', compression='infer', protocol=4)
# testing if complex frames did what it is expected to do
df_pickle = result
for i in df_pickle.columns.levels[0]:
scen=i
market='eu'
df = df_pickle[scen][market]['seller_analytics']
df.index = pd.to_datetime(df.index)
df.resample('A').sum().plot.area(title='%s %s'%(scen,market))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
###Output
_____no_output_____ |
assets/covid19/PROJECT_COVID19_GLOBAL_WK2.ipynb | ###Markdown
================================= STARTING FROM THE BOTTOM ================================= 1. OBTAIN DATA2. SCRUB DATA3. EXPLORE DATA4. MODEL DATA5. INTERPRET DATA MVP -- Minimum Viable Product: (simplest submission possible, no modeling)
###Code
import pandas as pd
import numpy as np
## GLOBAL WK 2
train_file = "https://raw.githubusercontent.com/danielcaraway/data/master/covid19-global-forecasting-week-2/train.csv"
test_file = "https://raw.githubusercontent.com/danielcaraway/data/master/covid19-global-forecasting-week-2/test.csv"
sub_file = "https://raw.githubusercontent.com/danielcaraway/data/master/covid19-global-forecasting-week-2/submission.csv"
train = pd.read_csv(train_file)
test = pd.read_csv(test_file)
sub = pd.read_csv(sub_file)
# BEFORE
sub.head()
# AFTER
sub['ConfirmedCases'] = 100
sub['Fatalities'] = 18
sub.head()
sub.to_csv('submission.csv', index=False)
from google.colab import files
files.download("submission.csv")
###Output
_____no_output_____
###Markdown
Ok so what's the problem with this picture? Well, we're saying that every single country on every single day has exactly 100 cases of COVID and exactly 18 deaths. BUT, we have proved we can manipulate the submission DF so we've got that going for us which is nice. Now, it would also be nice to actually take into account the country and the date, right?
###Code
merged = pd.merge(sub, test, on="ForecastId", how="left")
df = merged.copy()
df
###Output
_____no_output_____
###Markdown
OK great! Now we have the country AND the date with our forecast ID!! So we know we can successfully merge our testing df into our submission df. But... our ConfirmedCases and Fatalities are still 100 and 18 without regard to the country...
###Code
df['Date'] = pd.to_datetime(df['Date'])
df['days_from'] = df['Date'] - (df['Date'].min())
df['days_from'] = df['days_from'] / np.timedelta64(1, 'D')
df['CC_v2'] = df.apply(lambda x: x['days_from']*x['days_from'] , axis=1)
df['F_v2'] = df.apply(lambda x: x['days_from'] * 2 , axis=1)
df
###Output
_____no_output_____
###Markdown
OK great! Now each country is different! I call this VAMPIRE DATA where the number of people bitten (infected) grows super-linearly (quadratically in days, per the toy formula above) and the number of deaths is simply linear (because not everyone dies from bites, duh)
###Code
spain = df[df['Country_Region'] == 'Spain']
spain.head()
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
graph_df = df[['days_from', 'CC_v2', 'F_v2']]
data = pd.melt(graph_df, id_vars=['days_from'], value_vars=['CC_v2','F_v2'])
data.head()
ax = sns.lineplot(x="days_from", y="value",
hue="variable", style="variable", data=data)
###Output
_____no_output_____
###Markdown
================================= STARTING CONNOR FOR REAL ================================= STEP 1: GET THAT DATA
###Code
import pandas as pd
import numpy as np
## GLOBAL WK 2
train_file = "https://raw.githubusercontent.com/danielcaraway/data/master/covid19-global-forecasting-week-2/train.csv"
test_file = "https://raw.githubusercontent.com/danielcaraway/data/master/covid19-global-forecasting-week-2/test.csv"
sub_file = "https://raw.githubusercontent.com/danielcaraway/data/master/covid19-global-forecasting-week-2/submission.csv"
train = pd.read_csv(train_file)
test = pd.read_csv(test_file)
sub = pd.read_csv(sub_file)
###Output
_____no_output_____
###Markdown
STEP 2: PREP THAT DATA* Deal with states + countries* Deal with datetimes* Deal with categoricals (LabelEncoder)
###Code
# subset = train.sample(n=500)
# subset
# train = subset.copy()
def use_country(state, country):
if pd.isna(state):
return country
else:
return state
train['Province_State'] = train.apply(lambda x: use_country(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'] = test.apply(lambda x: use_country(x['Province_State'], x['Country_Region']), axis=1)
train_d = pd.get_dummies(train)
test_d = pd.get_dummies(test)
train_d
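# Hedged sketch: the LabelEncoder route mentioned in the markdown above, as an
# alternative to get_dummies. The '_enc' column names are my own invention.
from sklearn.preprocessing import LabelEncoder
state_le = LabelEncoder()
country_le = LabelEncoder()
train['Province_State_enc'] = state_le.fit_transform(train['Province_State'])
train['Country_Region_enc'] = country_le.fit_transform(train['Country_Region'])
# the fitted encoders would be reused (with .transform) on the test frame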
###Output
_____no_output_____
###Markdown
STEP 3: MODEL THAT DATA* GridSearchCV* XGBRegressor
###Code
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
import time

param_grid = {'n_estimators': [1000]}
le = LabelEncoder()

def gridSearchCV(model, X_Train, y_Train, param_grid, cv=10, scoring='neg_mean_squared_error'):
    # Run a cross-validated grid search and return the best fitted estimator.
    start = time.time()
    search = GridSearchCV(model, param_grid, cv=cv, scoring=scoring)
    search.fit(X_Train, y_Train)
    print('grid search finished in %.2f seconds' % (time.time() - start))
    return search.best_estimator_

X_Train = train.copy()
X_Test = test.copy()
y1_Train = X_Train['ConfirmedCases']
y2_Train = X_Train['Fatalities']
# XGBoost needs numeric features, so encode the date string once as an integer (yyyymmdd).
X_Train['Date'] = X_Train['Date'].str.replace('-', '').astype(int)
X_Test['Date'] = X_Test['Date'].str.replace('-', '').astype(int)
# A single global grid search would also need the categorical columns encoded first;
# the per-(country, state) models fitted in the loop below are what is actually used.
# model = XGBRegressor()
# model1 = gridSearchCV(model, X_Train, y1_Train, param_grid, 10, 'neg_mean_squared_error')
# model2 = gridSearchCV(model, X_Train, y2_Train, param_grid, 10, 'neg_mean_squared_error')
countries = set(X_Train['Country_Region'])
#models_C = {}
#models_F = {}
df_out = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
for country in countries:
    # only the states/provinces that actually belong to this country
    # (NaN states were already replaced by the country name further up)
    states = X_Train.loc[X_Train.Country_Region == country, 'Province_State'].unique()
    for state in states:
        X_Train_CS = X_Train.loc[(X_Train.Country_Region == country) & (X_Train.Province_State == state),
                                 ['Province_State', 'Country_Region', 'Date', 'ConfirmedCases', 'Fatalities']]
        y1_Train_CS = X_Train_CS.loc[:, 'ConfirmedCases']
        y2_Train_CS = X_Train_CS.loc[:, 'Fatalities']
        X_Train_CS = X_Train_CS.loc[:, ['Province_State', 'Country_Region', 'Date']]
        X_Train_CS['Country_Region'] = le.fit_transform(X_Train_CS['Country_Region'])
        X_Train_CS['Province_State'] = le.fit_transform(X_Train_CS['Province_State'])
        X_Test_CS = X_Test.loc[(X_Test.Country_Region == country) & (X_Test.Province_State == state),
                               ['Province_State', 'Country_Region', 'Date', 'ForecastId']]
        X_Test_CS_Id = X_Test_CS.loc[:, 'ForecastId']
        X_Test_CS = X_Test_CS.loc[:, ['Province_State', 'Country_Region', 'Date']]
        X_Test_CS['Country_Region'] = le.fit_transform(X_Test_CS['Country_Region'])
        X_Test_CS['Province_State'] = le.fit_transform(X_Test_CS['Province_State'])
        #models_C[country] = gridSearchCV(model, X_Train_CS, y1_Train_CS, param_grid, 10, 'neg_mean_squared_error')
        #models_F[country] = gridSearchCV(model, X_Train_CS, y2_Train_CS, param_grid, 10, 'neg_mean_squared_error')
        # Fit one model per (country, state) for each target.
        model1 = XGBRegressor(n_estimators=1000)
        model1.fit(X_Train_CS, y1_Train_CS)
        y1_pred = model1.predict(X_Test_CS)
        model2 = XGBRegressor(n_estimators=1000)
        model2.fit(X_Train_CS, y2_Train_CS)
        y2_pred = model2.predict(X_Test_CS)
        df = pd.DataFrame({'ForecastId': X_Test_CS_Id, 'ConfirmedCases': y1_pred, 'Fatalities': y2_pred})
        df_out = pd.concat([df_out, df], axis=0)
    # Done with the state loop
# Done with the country loop
###Output
_____no_output_____
###Markdown
SIDEQUEST: More on XGBoost
###Code
b_train = train.copy()
b_test = test.copy()
###Output
_____no_output_____
###Markdown
California Test
###Code
b_train['Date'].min()
b_train_ca = b_train[b_train['Province_State'] == 'California']
b_train = b_train_ca.copy()
b_train['Date'] = pd.to_datetime(b_train['Date'])
b_train['days_from'] = b_train['Date'] - (b_train['Date'].min())
b_train['days_from'] = b_train['days_from'] / np.timedelta64(1, 'D')
b_train
b_train_y1 = b_train['ConfirmedCases']
b_train_y2 = b_train['Fatalities']
# b_train_X = b_train.drop(['ConfirmedCases','Fatalities'], axis=1)
# NOTE: ConfirmedCases stays in the features here, so predicting y1 (ConfirmedCases) leaks the target;
# the commented line above drops it as well, which is the safer setup.
b_train_X = b_train.drop(['Fatalities', 'Date'], axis=1)
b_train_X
## CA TEST
# b_train_X_ca = b_train_X[b_train_X['Province_State'] == 'California']
# b_train_X = b_train_X_ca.copy()
b_train_X_d = pd.get_dummies(b_train_X)
# b_train_X_d['']
import xgboost as xgb
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
boston = load_boston()
# x, y = boston.data, boston.target
x,y = b_train_X_d, b_train_y1
# x,y = b_train_X_d, b_train_y2
xtrain, xtest, ytrain, ytest=train_test_split(x, y, test_size=0.15)
xgbr = xgb.XGBRegressor()
print(xgbr)
xgbr.fit(xtrain, ytrain)
# - cross validataion
scores = cross_val_score(xgbr, xtrain, ytrain, cv=5)
print("Mean cross-validation score: %.2f" % scores.mean())
kfold = KFold(n_splits=10, shuffle=True)
kf_cv_scores = cross_val_score(xgbr, xtrain, ytrain, cv=kfold )
print("K-fold CV average score: %.2f" % kf_cv_scores.mean())
ypred = xgbr.predict(xtest)
mse = mean_squared_error(ytest, ypred)
print("MSE: %.2f" % mse)
print("RMSE: %.2f" % np.sqrt(mse))
x_ax = range(len(ytest))
plt.scatter(x_ax, ytest, s=5, color="blue", label="original")
plt.plot(x_ax, ypred, lw=0.8, color="red", label="predicted")
plt.legend()
plt.show()
# BUILDING FORECAST ID:
boston.data
boston.data.shape
boston.target.shape
###Output
_____no_output_____
###Markdown
BOSTON* K-fold CV average score: 0.89* MSE: 11.69* RMSE: 3.42 V1* K-fold CV average score: -107.07* MSE: 36809.57* RMSE: 191.86 V2 -- California* K-fold CV average score: 0.61* MSE: 139454.95* RMSE: 373.44 V3 -- California, days_from, get_dummies* K-fold CV average score: 0.93* MSE: 9027.82* RMSE: 95.01
###Code
print(ypred)
###Output
[5.5687274e+02 7.5082043e+02 3.1586111e-02 3.1586111e-02 3.1586111e-02
3.1586111e-02 5.5687274e+02 3.1586111e-02 2.2094861e+02 3.1586111e-02]
###Markdown
SIDEQUEST TO MAKE SURE I CAN BUILD THE DF
###Code
train.head()
test.head(50)
sub.head()
countries = set(train['Country_Region'])
states = set(train['Province_State'])
for country in countries:
    # train model
    # run model
    # make predictions
    # print predictions
    pass  # placeholder -- the real per-country loop is implemented a few cells below

from fbprophet import Prophet
def get_prof_preds_for(df, n):
m = Prophet(daily_seasonality=True)
m.fit(df)
future = m.make_future_dataframe(periods=n)
forecast = m.predict(future)
return forecast
# fig1 = m.plot(forecast)
sm = train[['Date','ConfirmedCases']]
sm.columns = ['ds', 'y']
get_prof_preds_for(sm, 43)
big_df = pd.DataFrame()
for country in list(countries)[:3]:
df = train[train['Country_Region'] == country]
sm = df[['Date','ConfirmedCases']]
sm.columns = ['ds', 'y']
results = get_prof_preds_for(sm, 30)
new_df = results[['ds', 'trend']]
new_df['country'] = country
big_df = big_df.append(new_df)
print(results)
# train model
# run model
# make predictions
# print predictions
big_df
df
###Output
_____no_output_____ |
Code/2. Pandas for Machine Learning.ipynb | ###Markdown
Essential Pandas for Machine Learning Agenda1. Introduction to Pandas2. Understanding Series & DataFrames3. Loading CSV,JSON4. Connecting databases5. Descriptive Statistics6. Accessing subsets of data - Rows, Columns, Filters7. Handling Missing Data8. Dropping rows & columns9. Handling Duplicates10. Function Application - map, apply, groupby, rolling, str11. Merge, Join & Concatenate12. Pivot-tables13. Normalizing JSON 1. Introduction to Pandas* High Performance, Easy-to-use open source library for Data Analysis* Creates tabular format of data from different sources like csv, json, database.* Have utilities for descriptive statistics, aggregation, handling missing data* Database utilities like merge, join are available* Fast, Programmable & Easy alternative to spreadsheets
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
2. Understanding Series & DataFrames* Series represents one column* Combine multiple columns to create a table (i.e. a DataFrame)
###Code
ser1 = pd.Series(data=[1,2,3,4,5], index=list('abcde'))
ser1
ser2 = pd.Series(data=[11,22,33,44,55], index=list('abcde'))
ser2
###Output
_____no_output_____
###Markdown
* Creating DataFrame from above two series* Data corresponding to same index belongs to same row
###Code
df = pd.DataFrame({'A':ser1, 'B':ser2})
df
###Output
_____no_output_____
###Markdown
* Creating a random dataframe of 10 X 10
###Code
pd.DataFrame(data=np.random.randint(1,10,size=(10,10)), index=list('ABCDEFGHIJ'), columns=list('abcdefghij'))
###Output
_____no_output_____
###Markdown
3. Loading CSV,JSON
###Code
hr_data = pd.read_csv('https://raw.githubusercontent.com/zekelabs/data-science-complete-tutorial/master/Data/HR_comma_sep.csv.txt')
hr_data.info()
hr_data_itr = pd.read_csv('https://raw.githubusercontent.com/zekelabs/data-science-complete-tutorial/master/Data/HR_comma_sep.csv.txt', chunksize=5000)
for hr_data in hr_data_itr:
print (hr_data.info())
pd.read_json('https://raw.githubusercontent.com/zekelabs/data-science-complete-tutorial/master/Data/movie.json.txt')
###Output
_____no_output_____
###Markdown
4. Connecting Databases
###Code
!pip install pysqlite3
import sqlite3
con = sqlite3.connect('Data/database.sqlite')
pd.read_sql_query("SELECT * FROM Reviews LIMIT 5",con)
###Output
_____no_output_____
###Markdown
* import MySQLdb* mysql_cn= MySQLdb.connect(host='myhost', port=3306,user='myusername', passwd='mypassword', db='information_schema')* df_mysql = pd.read_sql('select * from VIEWS;', con=mysql_cn) 5. Descriptive Statistics* Pandas api's for understanding data
###Code
hr_data = pd.read_csv('https://raw.githubusercontent.com/zekelabs/data-science-complete-tutorial/master/Data/HR_comma_sep.csv.txt')
hr_data.head()
hr_data.tail()
hr_data.info()
hr_data.describe()
hr_data.salary.value_counts()
###Output
_____no_output_____
###Markdown
6. Accessing subset of data - rows, columns, filters* Get all columns with categorical values
###Code
cat_cols_data = hr_data.select_dtypes('object')
cat_cols_data.head()
###Output
_____no_output_____
###Markdown
* Rename column names
###Code
hr_data.rename(columns={'sales':'department'},inplace=True)
hr_data.head()
###Output
_____no_output_____
###Markdown
* Select columns by column name
###Code
hr_data.columns
hr_data[['satisfaction_level','last_evaluation','number_project']].head()
hr_data.satisfaction_level[:5]
hr_data['satisfaction_level'][:5]
movie_data = pd.read_json('https://raw.githubusercontent.com/zekelabs/data-science-complete-tutorial/master/Data/movie.json.txt')
movie_data
###Output
_____no_output_____
###Markdown
* Access data by index values
###Code
movie_data.loc['Scarface']
movie_data.loc['Scarface':'Vertigo']
movie_data['Scarface':'Vertigo']
movie_data.iloc[1]
movie_data.iloc[1:4]
movie_data[1:4]
###Output
_____no_output_____
###Markdown
* Filtering rows based on conditions
###Code
movie_data[ (movie_data['Adam Cohen'] > 3)]
movie_data[ ((movie_data['Adam Cohen'] > 3) & (movie_data['David Smith'] > 4))]
###Output
_____no_output_____
###Markdown
7. Handling missing data* Machine Learning algorithms don't expect missing data* If a column has more than 40% of its data missing, we may drop the column* For rows with important column values missing, drop the rows
###Code
movie_data
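# Hedged sketch (added): operationalise the "40% missing" rule mentioned above.
# 'missing_frac' and 'cols_to_drop' are my own variable names, not part of the original tutorial.
missing_frac = movie_data.isnull().mean()            # fraction of NaNs per column
cols_to_drop = missing_frac[missing_frac > 0.4].index
# movie_data.drop(columns=cols_to_drop) would drop any column above the threshold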
###Output
_____no_output_____
###Markdown
* Get all the rows for which column 'Bill Duffy' is missing
###Code
movie_data['Bill Duffy'].notnull()
movie_data[movie_data['Bill Duffy'].notnull()]
###Output
_____no_output_____
###Markdown
* Get all the rows for which 'Bill Duffy' is null
###Code
movie_data[movie_data['Bill Duffy'].isnull()]
###Output
_____no_output_____
###Markdown
8. Dropping Rows & Columns
###Code
titanic_data = pd.read_csv('https://raw.githubusercontent.com/zekelabs/data-science-complete-tutorial/master/Data/titanic-train.csv.txt')
titanic_data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
PassengerId 891 non-null int64
Survived 891 non-null int64
Pclass 891 non-null int64
Name 891 non-null object
Sex 891 non-null object
Age 714 non-null float64
SibSp 891 non-null int64
Parch 891 non-null int64
Ticket 891 non-null object
Fare 891 non-null float64
Cabin 204 non-null object
Embarked 889 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 83.6+ KB
###Markdown
* Dropping the 'Cabin' column as it has only 204 non-null values out of 891 rows
###Code
titanic_data.drop(['Cabin'],axis=1,inplace=True)
titanic_data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 11 columns):
PassengerId 891 non-null int64
Survived 891 non-null int64
Pclass 891 non-null int64
Name 891 non-null object
Sex 891 non-null object
Age 714 non-null float64
SibSp 891 non-null int64
Parch 891 non-null int64
Ticket 891 non-null object
Fare 891 non-null float64
Embarked 889 non-null object
dtypes: float64(2), int64(5), object(4)
memory usage: 76.6+ KB
###Markdown
* Now, drop all rows with missing values* We don't pass inplace = True, so this doesn't modify the dataframe
###Code
titanic_data.dropna().info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 712 entries, 0 to 890
Data columns (total 11 columns):
PassengerId 712 non-null int64
Survived 712 non-null int64
Pclass 712 non-null int64
Name 712 non-null object
Sex 712 non-null object
Age 712 non-null float64
SibSp 712 non-null int64
Parch 712 non-null int64
Ticket 712 non-null object
Fare 712 non-null float64
Embarked 712 non-null object
dtypes: float64(2), int64(5), object(4)
memory usage: 66.8+ KB
###Markdown
* Consider only selected columns to check if they contain NA
###Code
titanic_data.info()
titanic_data.dropna(subset=['Embarked','Age']).info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 712 entries, 0 to 890
Data columns (total 11 columns):
PassengerId 712 non-null int64
Survived 712 non-null int64
Pclass 712 non-null int64
Name 712 non-null object
Sex 712 non-null object
Age 712 non-null float64
SibSp 712 non-null int64
Parch 712 non-null int64
Ticket 712 non-null object
Fare 712 non-null float64
Embarked 712 non-null object
dtypes: float64(2), int64(5), object(4)
memory usage: 66.8+ KB
###Markdown
* Another approach to handling missing data is to fill in the missing values
###Code
titanic_data.info()
titanic_data.fillna({'Age':0,'Embarked':'Unknown'}).info()
titanic_data.Age.fillna(method='ffill')[:5]
#Other options are 'bfill'
###Output
_____no_output_____
###Markdown
9. Handling Duplicates* Sometimes it is difficult to ensure that data is not duplicated.* It becomes the responsibility of the data cleaning step to make sure duplicated data is deleted
###Code
df = pd.DataFrame({'A':[1,1,3,4,5,1], 'B':[1,1,3,7,8,1], 'C':[3,1,1,6,7,1]})
df
df.duplicated()
df[df.duplicated()]
df[df.duplicated(subset=['A','B'])]
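# Hedged sketch (added): the markdown above talks about deleting duplicates,
# so here is the corresponding call; keep='first' retains the first occurrence.
# 'df_deduped' / 'df_deduped_ab' are illustrative names, not used elsewhere.
df_deduped = df.drop_duplicates()
df_deduped_ab = df.drop_duplicates(subset=['A', 'B'], keep='first')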
###Output
_____no_output_____
###Markdown
10. Function Application* map for transforming one column to another* Can be applied only to series
###Code
titanic_data_age = titanic_data[titanic_data.Age.notnull()]
titanic_data['age_category'] = titanic_data.Age.map(lambda age: 'Kid' if age < 18 else 'Adult')
titanic_data.head()
###Output
_____no_output_____
###Markdown
* apply function can be done to Series as well as DataFrames
###Code
titanic_data.Age.apply('sum')
titanic_data.Age.apply(lambda age: 'Kid' if age < 18 else 'Adult')[:10]
###Output
_____no_output_____
###Markdown
* apply on dataframes helps us deal with multiple columns* With axis=1, func receives one row at a time
###Code
#e will be each row
def func(e):
if e.Sex == 'male':
return e.Fare * 2
else:
return e.Fare
titanic_data.apply(func,axis=1)[:5]
###Output
_____no_output_____
###Markdown
* groupby - It splits the data into groups, applies a function to each group separately, and combines the results into a data structure
###Code
titanic_data.groupby(['Sex']).Age.mean()
titanic_data.groupby(['Sex']).Age.agg(['mean','min','max'])
###Output
_____no_output_____
###Markdown
* Rolling for window-based operations
###Code
titanic_data.Age.rolling(window=5,min_periods=1).agg(['sum','min'])
###Output
_____no_output_____
###Markdown
* For columns containing strings, we have str utilities
###Code
titanic_data[titanic_data.Name.str.contains('Mr')]
###Output
_____no_output_____
###Markdown
11. Append, Merge, Join & Concatenate* Append for stacking dataframes
###Code
df1 = pd.DataFrame(data=np.random.randint(1,10,size=(10,3)), columns=list('ABC'))
df2 = pd.DataFrame(data=np.random.randint(1,10,size=(10,3)), columns=list('ABC'))
df1
df2
df1.append(df2, ignore_index=True)
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3','K4','K5'],
'A': ['A0', 'A1', 'A2', 'A3','A4','A5'],
'B': ['B0', 'B1', 'B2', 'B3','B4','B5']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3','K6','K7'],
'C': ['C0', 'C1', 'C2', 'C3','C6','C7'],
'D': ['D0', 'D1', 'D2', 'D3','D6','D7']})
left.merge(right, on='key')
left.merge(right, on='key', how='left')
###Output
_____no_output_____
###Markdown
* join for combining data based on index values
###Code
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']},
index=['K0', 'K1', 'K2'])
right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
'D': ['D0', 'D2', 'D3']},
index=['K0', 'K2', 'K3'])
left.join(right)
###Output
_____no_output_____
###Markdown
12. Pivot Tables* A useful way to extract important information from data
###Code
sales_data = pd.read_excel('https://github.com/zekelabs/data-science-complete-tutorial/blob/master/Data/sales-funnel.xlsx?raw=true')
sales_data
pd.pivot_table(sales_data, index=['Manager','Rep'], values=['Account','Price'], aggfunc=[np.sum, np.mean])
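# A hedged variation: spread one field across columns and fill gaps with 0
# (assumes the sheet has a 'Status' column, as in the common sales-funnel example)
pd.pivot_table(sales_data, index=['Manager'], columns=['Status'], values=['Price'],
               aggfunc=np.sum, fill_value=0)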
###Output
_____no_output_____
###Markdown
13. Normalizing JSON* JSON data will not always be flat; it can be hierarchical
###Code
data = [ {'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
from pandas.io.json import json_normalize
json_normalize(data)
json_normalize(data,'counties',['state',['info', 'governor']])
###Output
_____no_output_____ |
more-advanced-materials/ML-demos/knet-tutorial/60.rnn.ipynb | ###Markdown
Introduction to Recurrent Neural Networks (c) Deniz Yuret, 2019 * Objectives: learn about RNNs, the RNN layer, compare with MLP on a tagging task. * Prerequisites: [MLP models](40.mlp.ipynb) * New functions: [RNN](http://denizyuret.github.io/Knet.jl/latest/reference/Knet.RNN), [adam](http://denizyuret.github.io/Knet.jl/latest/reference/Knet.adam) ([imagesource](http://colah.github.io/posts/2015-08-Understanding-LSTMs)) In this notebook we will see how to implement a recurrent neural network (RNN) in Knet. In RNNs, connections between units form a directed cycle, which allows them to keep a persistent state over time. This gives them the ability to process sequences of arbitrary length one element at a time, while keeping track of what happened at previous elements. One can view the current state of the RNN as a representation for the sequence processed so far. We will build a part-of-speech tagger using a large annotated corpus of English. We will represent words with numeric vectors appropriate as inputs to a neural network. These word vectors will be initialized randomly and learned during training just like other model parameters. We will compare three network architectures: (1) an MLP which tags each word independently of its neighbors, (2) a simple RNN that can represent the neighboring words to the left, (3) a bidirectional RNN that can represent both left and right contexts. As can be expected, 1 < 2 < 3 in performance. More surprisingly, the three models are very similar to each other: we will see that their model diagrams are identical except for the horizontal connections that carry information across the sequence.
###Code
# Setup display width, load packages, import symbols
ENV["COLUMNS"] = 72
using Pkg; for p in ("Knet","Plots"); haskey(Pkg.installed(),p) || Pkg.add(p); end
using Random: shuffle!
using Base.Iterators: flatten
using Knet: Knet, AutoGrad, param, param0, mat, RNN, relu, Data, adam, progress, nll, zeroone
###Output
_____no_output_____
###Markdown
The Brown Corpus. To introduce recurrent neural networks (RNNs) we will train a part-of-speech tagger using the [Brown Corpus](https://en.wikipedia.org/wiki/Brown_Corpus). We will train three models (an MLP, a unidirectional RNN, and a bidirectional RNN) and observe significant performance differences.
###Code
include(Knet.dir("data/nltk.jl"))
(data,words,tags) = brown()
println("The Brown Corpus has $(length(data)) sentences, $(sum(length(p[1]) for p in data)) tokens, with a word vocabulary of $(length(words)) and a tag vocabulary of $(length(tags)).")
###Output
The Brown Corpus has 57340 sentences, 1161192 tokens, with a word vocabulary of 56057 and a tag vocabulary of 472.
###Markdown
`data` is an array of `(w,t)` pairs each representing a sentence, where `w` is a sequence of word ids, and `t` is a sequence of tag ids. `words` and `tags` contain the strings for the ids.
###Code
println.(summary.((data,words,tags)));
###Output
57340-element Array{Tuple{Array{UInt16,1},Array{UInt16,1}},1}
56057-element Array{String,1}
472-element Array{String,1}
###Markdown
Here is what the first sentence looks like with ids and with strings:
###Code
(w,t) = first(data)
display(permutedims(Int[w t]))
display(permutedims([words[w] tags[t]]))
###Output
_____no_output_____
###Markdown
Chain of layers
###Code
# Let's define a chain of layers
struct Chain
layers
Chain(layers...) = new(layers)
end
(c::Chain)(x) = (for l in c.layers; x = l(x); end; x)
(c::Chain)(x,y) = nll(c(x),y)
###Output
_____no_output_____
###Markdown
Dense layers
###Code
# Redefine dense layer (See mlp.ipynb):
struct Dense; w; b; f; end
Dense(i::Int,o::Int,f=identity) = Dense(param(o,i), param0(o), f)
(d::Dense)(x) = d.f.(d.w * mat(x,dims=1) .+ d.b)
###Output
_____no_output_____
###Markdown
Word Embeddings`data` has each sentence tokenized into an array of words and each word mapped to a `UInt16` id. To use these words as inputs to a neural network we further map each word to a Float32 vector. We will keep these vectors in the columns of a size (X,V) matrix where X is the embedding dimension and V is the vocabulary size. The vectors will be initialized randomly, and trained just like any other network parameter. Let's define an embedding layer for this purpose:
###Code
struct Embed; w; end
Embed(vocabsize::Int,embedsize::Int) = Embed(param(embedsize,vocabsize))
(e::Embed)(x) = e.w[:,x]
###Output
_____no_output_____
###Markdown
This is what the words, word ids and embeddings for a sentence look like: (note the identical id and embedding for the 2nd and 5th words)
###Code
embedlayer = Embed(length(words),8)
(w,t) = data[52855]
display(permutedims(words[w]))
display(permutedims(Int.(w)))
display(embedlayer(w))
###Output
_____no_output_____
###Markdown
RNN layers
###Code
@doc RNN
###Output
_____no_output_____
###Markdown
The three taggers: MLP, RNN, biRNN. Tagger0 (MLP): This is what Tagger0 looks like. Every tag is predicted independently. The prediction of each tag only depends on the corresponding word. Tagger1 (RNN): In Tagger1, the RNN layer takes its previous output as an additional input. The prediction of each tag is based on words to the left. Tagger2 (biRNN): In Tagger2 there are two RNNs: the forward RNN reads the sequence from left to right, and the backward RNN reads it from right to left. The prediction of each tag depends on all the words in the sentence.
###Code
Tagger0(vocab,embed,hidden,output)= # MLP Tagger
Chain(Embed(vocab,embed),Dense(embed,hidden,relu),Dense(hidden,output))
Tagger1(vocab,embed,hidden,output)= # RNN Tagger
Chain(Embed(vocab,embed),RNN(embed,hidden,rnnType=:relu),Dense(hidden,output))
Tagger2(vocab,embed,hidden,output)= # biRNN Tagger
Chain(Embed(vocab,embed),RNN(embed,hidden,rnnType=:relu,bidirectional=true),Dense(2hidden,output));
###Output
_____no_output_____
###Markdown
Sequence Minibatching. Minibatching is a bit more complicated with sequences than with simple classification problems; this section can be skipped on a first reading. In addition to the input and minibatch sizes, there is also the time dimension to consider. To keep things simple we will concatenate all sentences into one big sequence, then split this sequence into equal-sized chunks. The input to the tagger will be of size (B,T), where B is the minibatch size and T is the chunk size. The input to the RNN layer will be of size (X,B,T), where X is the embedding size.
###Code
BATCHSIZE = 64
SEQLENGTH = 32;
function seqbatch(x,y,B,T)
N = length(x) ÷ B
x = permutedims(reshape(x[1:N*B],N,B))
y = permutedims(reshape(y[1:N*B],N,B))
d = []; for i in 0:T:N-T
push!(d, (x[:,i+1:i+T], y[:,i+1:i+T]))
end
return d
end
allw = vcat((x->x[1]).(data)...)
allt = vcat((x->x[2]).(data)...)
d = seqbatch(allw, allt, BATCHSIZE, SEQLENGTH);
###Output
_____no_output_____
###Markdown
This may be a bit more clear if we look at an example minibatch:
###Code
(x,y) = first(d)
words[x]
###Output
_____no_output_____
###Markdown
Embedding a minibatch. Julia indexing allows us to get the embeddings for this minibatch in one go as an (X,B,T) array, where X is the embedding size, B is the minibatch size, and T is the subsequence length.
###Code
embedlayer = Embed(length(words),128)
summary(embedlayer(x))
###Output
_____no_output_____
###Markdown
Experiments
###Code
# shuffle and split minibatches into train and test portions
shuffle!(d)
dtst = d[1:10]
dtrn = d[11:end]
length.((dtrn,dtst))
# For running experiments we will use the Adam algorithm which typically converges faster than SGD.
function trainresults(file,maker,savemodel)
if (print("Train from scratch? "); readline()[1]=='y')
model = maker()
takeevery(n,itr) = (x for (i,x) in enumerate(itr) if i % n == 1)
results = ((nll(model,dtst), zeroone(model,dtst))
for x in takeevery(100, progress(adam(model,repeat(dtrn,5)))))
results = reshape(collect(Float32,flatten(results)),(2,:))
Knet.save(file,"model",(savemodel ? model : nothing),"results",results)
Knet.gc() # To save gpu memory
else
isfile(file) || download("http://people.csail.mit.edu/deniz/models/tutorial/$file",file)
model,results = Knet.load(file,"model","results")
end
println(minimum(results,dims=2))
return model,results
end
VOCABSIZE = length(words)
EMBEDSIZE = 128
HIDDENSIZE = 128
OUTPUTSIZE = length(tags);
# 2.35e-01 100.00%┣┫ 2780/2780 [00:13/00:13, 216.36i/s] [0.295007; 0.0972656]
t0maker() = Tagger0(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE)
(t0,r0) = trainresults("tagger113a.jld2",t0maker,false);
# 1.49e-01 100.00%┣┫ 2780/2780 [00:19/00:19, 142.58i/s] [0.21358; 0.0616211]
t1maker() = Tagger1(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE)
(t1,r1) = trainresults("tagger113b.jld2",t1maker,false);
# 9.37e-02 100.00%┣┫ 2780/2780 [00:25/00:25, 109.77i/s] [0.156669; 0.044043]
t2maker() = Tagger2(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE)
(t2,r2) = trainresults("tagger113c.jld2",t2maker,true);
using Plots; default(fmt=:png,ls=:auto,ymirror=true)
plot([r0[2,:], r1[2,:], r2[2,:]]; xlabel="x100 updates", ylabel="error",
ylim=(0,0.15), yticks=0:0.01:0.15, labels=["MLP","RNN","biRNN"])
plot([r0[1,:], r1[1,:], r2[1,:]]; xlabel="x100 updates", ylabel="loss",
ylim=(0,.5), yticks=0:0.1:.5, labels=["MLP","RNN","biRNN"])
###Output
_____no_output_____
###Markdown
Playground. Below, you can type and tag your own sentences:
###Code
wdict=Dict{String,UInt16}(); for (i,w) in enumerate(words); wdict[w]=i; end
unk = UInt16(length(words))
wid(w) = get(wdict,w,unk)
function tag(tagger,s::String)
w = permutedims(split(s))
t = tags[(x->x[1]).(argmax(Array(tagger(wid.(w))),dims=1))]
vcat(w,t)
end
tag(t2,readline())
###Output
stdin> colorless green ideas sleep furiously
|
Lending_Club_4_Clustering.ipynb | ###Markdown
Clustering. Perform data clustering (try several methods for this purpose, at least 3) and check if there are any segments of borrowers; use appropriate methods to determine the optimal number of clusters (40 points)
###Code
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
#importing the PCA scaling library
from sklearn.decomposition import PCA
from sklearn.decomposition import IncrementalPCA
from sklearn.linear_model import Ridge, Lasso
# Import KNN Regressor machine learning library
from sklearn.neighbors import KNeighborsRegressor
from sklearn import metrics
# Import stats from scipy
from scipy import stats
# Import zscore for scaling
from scipy.stats import zscore
#importing the metrics
from sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score
from sklearn import preprocessing
# importing the Polynomial features
from sklearn.preprocessing import PolynomialFeatures
#importing kmeans clustering library
from sklearn.cluster import KMeans
from sklearn.utils import resample
from sklearn.datasets import make_classification
from numpy import where
from sklearn.metrics import silhouette_score, silhouette_samples
dummies_loan_status = pd.read_csv('dummies_loan_status.csv')
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_row', None)
dummies_loan_status.head()
dummies_loan_status.drop('Unnamed: 0', axis = 1, inplace = True)
dummies_loan_status.shape
# define dataset
X = dummies_loan_status.drop('loan_status', axis = 1)
y = dummies_loan_status.loan_status
X.shape, y.shape
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver = 'full')
df_pca = pca.fit_transform(X)
loan_pca = pd.DataFrame(df_pca, columns=['c1', 'c2'], index=X.index)
loan_pca.head()
sns.distplot(loan_pca.c1).set(title = 'Density graph c1')
plt.show()
sns.distplot(loan_pca.c2).set(title = 'Density graph c2')
plt.show()
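# The block below shifts each component to be strictly positive, log-transforms it,
# removes points outside the 1.5*IQR whiskers, and then standardises the result.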
for col in loan_pca:
if loan_pca[col].min() <= 0:
loan_pca[col] = loan_pca[col] + np.abs(loan_pca[col].min()) + 1
loan_pca = np.log(loan_pca)
q1 = loan_pca.quantile(0.25)
q3 = loan_pca.quantile(0.75)
iqr = q3 - q1
low_boundary = (q1 - 1.5 * iqr)
upp_boundary = (q3 + 1.5 * iqr)
num_of_outliers_L = (loan_pca[iqr.index] < low_boundary).sum()
num_of_outliers_U = (loan_pca[iqr.index] > upp_boundary).sum()
outliers = pd.DataFrame({'lower_boundary':low_boundary, 'upper_boundary':upp_boundary,'num_of_outliers__lower_boundary':num_of_outliers_L, 'num_of_outliers__upper_boundary':num_of_outliers_U})
outliers
for row in outliers.iterrows():
loan_pca = loan_pca[(loan_pca[row[0]] >= row[1]['lower_boundary']) & (loan_pca[row[0]] <= row[1]['upper_boundary'])]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(loan_pca)
loan_pca_std = scaler.transform(loan_pca)
loan_pca_df = pd.DataFrame(data=loan_pca_std, index=loan_pca.index, columns=loan_pca.columns)
sns.distplot(loan_pca_df.c1).set(title = 'Density graph c1')
plt.show()
sns.distplot(loan_pca_df.c2).set(title = 'Density graph c2')
plt.show()
loan_pca_df.agg(['mean', 'std', 'max', 'min']).round(2)
loan_pca = loan_pca_df.sample(1000, random_state=42)
sns.lmplot('c1', 'c2', data = loan_pca, fit_reg=False).set(title = 'Scatterplot')
plt.show()
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
model_AggCl = AgglomerativeClustering(linkage='ward', affinity='euclidean', n_clusters=3).fit(loan_pca)
loan_pca['AggCl'] = model_AggCl.labels_
silhouette_score(loan_pca[['c1', 'c2']], loan_pca['AggCl']).round(4)
sns.lmplot('c1', 'c2', data = loan_pca, hue = 'AggCl', fit_reg=False).set(title='Groups visualisation')
plt.show()
model_link = linkage(loan_pca.iloc[:,0:2], method = 'ward', metric = 'euclidean')
fig = plt.figure(figsize=(20, 10))
dn = dendrogram(model_link)
plt.show()
clusters = fcluster(model_link, 3, criterion='maxclust')
loan_pca['linkage'] = clusters
sns.lmplot('c1', 'c2', data = loan_pca, fit_reg=False, hue = 'linkage')
plt.show()
silhouette_score(loan_pca[['c1', 'c2']], loan_pca['linkage']).round(4)
from sklearn.cluster import DBSCAN
model_DBSCAN = DBSCAN(eps = 0.3, min_samples=30, leaf_size=60).fit(loan_pca.iloc[:,0:2])
loan_pca['DBSCAN'] = model_DBSCAN.labels_
sns.lmplot('c1', 'c2', data = loan_pca, fit_reg=False, hue = 'DBSCAN')
plt.show()
silhouette_score(loan_pca[['c1', 'c2']], loan_pca['DBSCAN']).round(4)
ssd = [] # Sum of squared distances
range_n_clusters = [2, 3, 4, 5, 6, 7, 8]
for num_clusters in range_n_clusters:
kmeans = KMeans(n_clusters=num_clusters, max_iter=1000)
kmeans.fit(loan_pca.iloc[:,0:2])
ssd.append(kmeans.inertia_) #Sum of squared distances
# plot the SSDs for each n_clusters
# ssd
plt.plot(ssd)
range_n_clusters = [2, 3, 4, 5, 6, 7, 8]
for num_clusters in range_n_clusters:
# intialise kmeans
kmeans = KMeans(n_clusters=num_clusters, max_iter=1000)
kmeans.fit(loan_pca.iloc[:,0:2])
cluster_labels = kmeans.labels_
# silhouette score
silhouette_avg = silhouette_score(loan_pca.iloc[:,0:2], cluster_labels)
print("For n_clusters={0}, the silhouette score is {1}".format(num_clusters, silhouette_avg))
model_km = KMeans(n_clusters=4, max_iter=1000, random_state=42)
model_km.fit(loan_pca.iloc[:,0:2])
loan_pca['KMeans'] = model_km.labels_
sns.lmplot('c1', 'c2', data = loan_pca, fit_reg=False, hue = 'KMeans')
plt.scatter(model_km.cluster_centers_[:, 0], model_km.cluster_centers_[:, 1], c='black', s=100, alpha=0.5)
plt.show()
silhouette_score(loan_pca[['c1', 'c2']], loan_pca['KMeans']).round(4)
loan_pca
###Output
_____no_output_____ |
GIR/tools/get_cmip6_data.ipynb | ###Markdown
Getting 1pctCO2 nbp / nep / fgco2 / tas data from ESGF
###Code
def get_annual_CMIP6_data_esgf(activity, table, variable, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
try:
result = esgf_search(activity_id=activity, table_id=table, variable_id=variable, experiment_id=experiment,institution_id=institution, source_id=source, member_id=member)
if not result:
print('No results for this request')
return None
# select results with only the latest datestamp:
latest = sorted([x.split('/')[15] for x in result])[-1]
result = [x for x in result if x.split('/')[15]==latest]
# remove duplicate results
result_1 = []
for item in result:
if item.split('/')[-1] in [x.split('/')[-1] for x in result_1]:
continue
else:
result_1 += [item]
ds = xr.open_mfdataset(result_1, combine='by_coords')
dims = list(ds[variable].dims)
dims.remove('time')
lat_dim_name = [x for x in dims if 'lat' in x][0]
weights = np.cos(np.deg2rad(ds[variable][lat_dim_name]))
weights.name = "weights"
ta_timeseries = ds[variable].weighted(weights).mean(dims)
return ta_timeseries.groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member+'_'+variable+'_'+experiment)
except:
print('retrieval failed')
return None
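# Hypothetical usage sketch (the institution/source/member IDs below are only illustrative):
# tas_1pct = get_annual_CMIP6_data_esgf('CMIP', 'Amon', 'tas', '1pctCO2',
#                                       'MOHC', 'UKESM1-0-LL', 'r1i1p1f2')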
pd.Series(['v20190306','v20200429']).sort_values()
search_results = esgf_search(table_id='Lmon', variable_id='nbp',experiment_id='1pctCO2-bgc')
search_df = pd.DataFrame([x.split('/')[-10:-1] for x in search_results],columns=['activity','institution','source','experiment','member','table','variable','grid','date'])
chosen_indices = []
for i,row in search_df.iterrows():
duplicates = search_df.query("activity==\'"+row.loc['activity']+"\' & table==\'"+row.loc['table']+"\' & variable==\'"+row.loc['variable']+"\' & experiment==\'"+row.loc['experiment']+"\' & institution==\'"+row.loc['institution']+"\' & source==\'"+row.loc['source']+"\' & member==\'"+row.loc['member']+"\' & grid==\'"+row.loc['grid']+"\'")
duplicate_dates = duplicates.loc[:,'date'].sort_values()
latest_date = duplicate_dates.iloc[-1]
if row.loc['date'] == latest_date:
chosen_indices += [i]
else:
continue
search_df = search_df.loc[chosen_indices].drop_duplicates()
experiments = ['1pctCO2','1pctCO2-bgc','1pctCO2-rad']
variables = ['nep','nbp','fgco2','fco2nat','tas']
tables = dict(zip(variables,['Emon','Lmon','Omon','Amon','Amon']))
search_dfs = []
for experiment in experiments:
for variable in variables:
search_results = esgf_search(table_id=tables[variable], variable_id=variable,experiment_id=experiment)
search_df = pd.DataFrame([x.split('/')[-10:-1] for x in search_results],columns=['activity','institution','source','experiment','member','table','variable','grid','date'])
chosen_indices = []
for i,row in search_df.iterrows():
duplicates = search_df.query("activity==\'"+row.loc['activity']+"\' & table==\'"+row.loc['table']+"\' & variable==\'"+row.loc['variable']+"\' & experiment==\'"+row.loc['experiment']+"\' & institution==\'"+row.loc['institution']+"\' & source==\'"+row.loc['source']+"\' & member==\'"+row.loc['member']+"\' & grid==\'"+row.loc['grid']+"\'")
duplicate_dates = duplicates.loc[:,'date'].sort_values()
latest_date = duplicate_dates.iloc[-1]
if row.loc['date'] == latest_date:
chosen_indices += [i]
else:
continue
search_df = search_df.loc[chosen_indices].drop_duplicates()
search_dfs += [search_df]
search_df = pd.concat(search_dfs,axis=0)
# filter search results by date
chosen_models = search_df.loc[(search_df.experiment.isin(['1pctCO2-bgc','1pctCO2-rad']))&(search_df.variable.isin(['fco2nat','fgco2']))].source.unique()
search_df.loc[(search_df.experiment.isin(['1pctCO2-bgc','1pctCO2-rad']))].source.unique()
# get area weights for each model
def get_model_area(source,area_var='areacella'):
try:
files_area = esgf_search(variable_id=area_var, source_id=source)
ds_area = xr.open_dataset(files_area[0])
return ds_area
except:
print('retrieval failed')
return None
model_areacella = {}
for model in chosen_models:
print('getting '+model+' areacella')
    model_areacella[model] = get_model_area(model)
model_areacello = {}
for model in chosen_models:
print('getting '+model+' areacello')
model_areacello[model] = get_model_area(model,'areacello')
## switches (TODO):
## if the regridded grid label (gr) is available, prefer it over the native grid (gn)
hist_info = gs_stores.loc[(gs_stores.experiment_id=='historical')&(gs_stores.variable_id=='tas')&(gs_stores.table_id=='Amon')]
hist_data = []
for index,row in hist_info.iterrows():
hist_data += [get_annual_CMIP6_data_gstore(row.loc['activity_id'], row.loc['table_id'], row.loc['variable_id'], row.loc['experiment_id'], row.loc['institution_id'], row.loc['source_id'], row.loc['member_id'])]
pd.concat(hist_data,axis=1).to_csv('./cmip6_data/historical_tas.csv')
def get_annual_CMIP6_data_gstore_nbp(activity, table, variable, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
area_query = gs_stores.query("variable_id=='areacella' & source_id==\'"+source+"\'")
if area_query.empty:
files_area = esgf_search(variable_id='areacella', activity_id=activity, institution_id=institution, source_id=source)
if not files_area:
print('No areacella for this request')
return None
ds_area = xr.open_dataset(files_area[0])
else:
ds_area = xr.open_zarr(gcs.get_mapper(area_query.zstore.values[0]), consolidated=True)
query = gs_stores.query("activity_id==\'"+activity+"\' & table_id==\'"+table+"\' & variable_id==\'"+variable+"\' & experiment_id==\'"+experiment+"\' & institution_id==\'"+institution+"\' & source_id==\'"+source+"\' & member_id==\'"+member+"\'")
if query.empty:
print('No results for this request')
return None
# create a mutable-mapping-style interface to the store
mapper = gcs.get_mapper(query.zstore.values[0])
# open it using xarray and zarr
ds = xr.open_zarr(mapper, consolidated=True)
if source=='E3SM-1-1' and variable=='tas' and experiment=='piControl':
ds = xr.open_mfdataset(esgf_search(activity_id=activity, table_id=table, variable_id=variable, experiment_id=experiment, institution_id=institution, source_id=source, member_id=member)[7:],combine='by_coords')
coords = list(ds[variable].coords.keys())
if 'latitude' in coords:
dims = ['latitude','longitude']
_dims = ['lat','lon']
else:
dims = ['lat','lon']
_dims = ['latitude','longitude']
if not dims[0] in list(ds_area['areacella'].coords.keys()):
ds_area = ds_area.rename(dict(zip(_dims,dims)))
total_area = ds_area.areacella.sum(dim=dims)
ta_timeseries = (ds[variable] * ds_area.areacella).sum(dim=dims)
return ta_timeseries.groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member+'_'+experiment)
nbp_cmip6=gs_stores.loc[(gs_stores.variable_id=='nbp')&(gs_stores.table_id=='Lmon')]
nbp_data = []
for index,row in nbp_cmip6.iterrows():
    nbp_data += [get_annual_CMIP6_data_gstore_nbp(row.loc['activity_id'], row.loc['table_id'], row.loc['variable_id'], row.loc['experiment_id'], row.loc['institution_id'], row.loc['source_id'], row.loc['member_id'])]
pd.concat(nbp_data,axis=1).to_csv('./cmip6_data/nbp.csv')
def get_annual_CMIP6_data_gstore_fgco2(activity, table, variable, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
area_query = gs_stores.query("variable_id=='areacello' & source_id==\'"+source+"\'")
if area_query.empty:
files_area = esgf_search(variable_id='areacello', activity_id=activity, institution_id=institution, source_id=source)
if not files_area:
print('No areacello for this request')
return None
ds_area = xr.open_dataset(files_area[0])
else:
ds_area = xr.open_zarr(gcs.get_mapper(area_query.zstore.values[0]), consolidated=True)
query = gs_stores.query("activity_id==\'"+activity+"\' & table_id==\'"+table+"\' & variable_id==\'"+variable+"\' & experiment_id==\'"+experiment+"\' & institution_id==\'"+institution+"\' & source_id==\'"+source+"\' & member_id==\'"+member+"\'")
if query.empty:
print('No results for this request')
return None
# create a mutable-mapping-style interface to the store
mapper = gcs.get_mapper(query.zstore.values[0])
# open it using xarray and zarr
ds = xr.open_zarr(mapper, consolidated=True)
if source=='E3SM-1-1' and variable=='tas' and experiment=='piControl':
ds = xr.open_mfdataset(esgf_search(activity_id=activity, table_id=table, variable_id=variable, experiment_id=experiment, institution_id=institution, source_id=source, member_id=member)[7:],combine='by_coords')
dims = list(ds[variable].dims)
dims.remove('time')
# total_area = ds_area.areacello.sum(dim=dims)
ta_timeseries = (ds[variable] * ds_area.areacello).sum(dim=dims)
return ta_timeseries.groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member+'_'+experiment)
gs_stores['isme'] = gs_stores['ism']+'_'+gs_stores['experiment_id']
fgco2_cmip6=gs_stores.loc[(gs_stores.variable_id=='fgco2')&(gs_stores.table_id=='Omon')&(gs_stores.grid_label=='gn')&(gs_stores.isme.isin(nbp_cmip6.isme))]
fgco2_data = []
for index,row in fgco2_cmip6.loc[237786:].iterrows():
fgco2_data += [get_annual_CMIP6_data_gstore_fgco2(row.loc['activity_id'], row.loc['table_id'], row.loc['variable_id'], row.loc['experiment_id'], row.loc['institution_id'], row.loc['source_id'], row.loc['member_id'])]
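# The block below unrolls the body of get_annual_CMIP6_data_gstore_fgco2 for the last `row`
# of the loop above; it is handy for inspecting a retrieval that fails part-way through.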
activity, table, variable, experiment, institution, source, member = row.loc['activity_id'], row.loc['table_id'], row.loc['variable_id'], row.loc['experiment_id'], row.loc['institution_id'], row.loc['source_id'], row.loc['member_id']
area_query = gs_stores.query("variable_id=='areacello' & source_id==\'"+source+"\'")
if area_query.empty:
files_area = esgf_search(variable_id='areacello', activity_id=activity, institution_id=institution, source_id=source)
if not files_area:
print('No areacello for this request')
# return None
ds_area = xr.open_dataset(files_area[0])
else:
ds_area = xr.open_zarr(gcs.get_mapper(area_query.zstore.values[0]), consolidated=True)
query = gs_stores.query("activity_id==\'"+activity+"\' & table_id==\'"+table+"\' & variable_id==\'"+variable+"\' & experiment_id==\'"+experiment+"\' & institution_id==\'"+institution+"\' & source_id==\'"+source+"\' & member_id==\'"+member+"\'")
if query.empty:
print('No results for this request')
# return None
# create a mutable-mapping-style interface to the store
mapper = gcs.get_mapper(query.zstore.values[0])
# open it using xarray and zarr
ds = xr.open_zarr(mapper, consolidated=True)
if source=='E3SM-1-1' and variable=='tas' and experiment=='piControl':
ds = xr.open_mfdataset(esgf_search(activity_id=activity, table_id=table, variable_id=variable, experiment_id=experiment, institution_id=institution, source_id=source, member_id=member)[7:],combine='by_coords')
dims = list(ds[variable].dims)
dims.remove('time')
total_area = ds_area.areacello.sum(dim=dims)
ta_timeseries = (ds[variable] * ds_area.areacello).sum(dim=dims)
pd.concat(fgco2_data,axis=1).to_csv('./cmip6_data/fgco2.csv')
abrupt_4x_ism = gs_stores.loc[(gs_stores.experiment_id=='abrupt-4xCO2')&(gs_stores.variable_id.isin(['tas','rlut','rsdt','rsut']))]
abrupt_4x_ism = list(set([x for x in abrupt_4x_ism.ism if abrupt_4x_ism.loc[abrupt_4x_ism.ism==x].shape[0]>=4]))
onepct_ism = gs_stores.loc[(gs_stores.experiment_id=='1pctCO2')&(gs_stores.variable_id.isin(['tas','rlut','rsdt','rsut']))]
onepct_ism = list(set([x for x in onepct_ism.ism if onepct_ism.loc[onepct_ism.ism==x].shape[0]>=4]))
piControl_ism = gs_stores.loc[(gs_stores.experiment_id=='piControl')&(gs_stores.variable_id=='tas')]
piControl_ism = list(set(piControl_ism.ism))
areacella_s_gs = [x.split('_')[1] for x in list(set(gs_stores.loc[(gs_stores.variable_id=='areacella')].ism))]
areacella_list = esgf_search(activity_id='CMIP', variable_id='areacella')
areacella_list_nodupl = []
for item in areacella_list:
if item.split('/')[-1] in [x.split('/')[-1] for x in areacella_list_nodupl]:
continue
else:
areacella_list_nodupl += [item]
areacella_ism_list = list(set([x.split('/')[8]+'_'+x.split('/')[9]+'_'+x.split('/')[11] for x in areacella_list_nodupl]))
areacella_s_esgf = list(set([x.split('_')[1] for x in areacella_ism_list]))
areacella_s_all = list(set(areacella_s_gs).union(areacella_s_esgf))
abrupt_4x_ism_areacella_exist = [x for x in abrupt_4x_ism if x.split('_')[1] in areacella_s_all]
onepct_ism_areacella_exist = [x for x in onepct_ism if x.split('_')[1] in areacella_s_all]
piControl_ism_areacella_exist = [x for x in piControl_ism if x.split('_')[1] in areacella_s_all]
def get_cmip6_data_gs(ism,var,exp):
print('getting '+ism+' '+var)
ism_split = ism.split('_')
_out = get_annual_CMIP6_data_gstore('CMIP', 'Amon', var, exp, ism_split[0], ism_split[1], ism_split[2])
print('got '+ism)
return _out
onepct_tas_df_list = []
for ism in onepct_ism_areacella_exist:
onepct_tas_df_list += [get_cmip6_data_gs(ism,'tas','1pctCO2')]
onepct_rlut_df_list = []
for ism in onepct_ism_areacella_exist:
onepct_rlut_df_list += [get_cmip6_data_gs(ism,'rlut','1pctCO2')]
onepct_rsut_df_list = []
for ism in onepct_ism_areacella_exist:
onepct_rsut_df_list += [get_cmip6_data_gs(ism,'rsut','1pctCO2')]
onepct_rsdt_df_list = []
for ism in onepct_ism_areacella_exist:
onepct_rsdt_df_list += [get_cmip6_data_gs(ism,'rsdt','1pctCO2')]
def get_annual_CMIP6_data_gstore_co2mass(activity, table, variable, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
query = gs_stores.query("activity_id==\'"+activity+"\' & table_id==\'"+table+"\' & variable_id==\'"+variable+"\' & experiment_id==\'"+experiment+"\' & institution_id==\'"+institution+"\' & source_id==\'"+source+"\' & member_id==\'"+member+"\'")
if query.empty:
print('No results for this request')
return None
# create a mutable-mapping-style interface to the store
mapper = gcs.get_mapper(query.zstore.values[0])
# open it using xarray and zarr
ds = xr.open_zarr(mapper, consolidated=True)
return ds[variable].groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member)
onepct_co2mass_ism = gs_stores.loc[(gs_stores.experiment_id=='1pctCO2')&(gs_stores.variable_id=='co2mass')].ism
def get_cmip6_data_gs_co2mass(ism,var,exp):
print('getting '+ism+' '+var)
ism_split = ism.split('_')
_out = get_annual_CMIP6_data_gstore_co2mass('CMIP', 'Amon', var, exp, ism_split[0], ism_split[1], ism_split[2])
print('got '+ism)
return _out
onepct_co2mass_df_list = []
for ism in onepct_co2mass_ism:
onepct_co2mass_df_list += [get_cmip6_data_gs_co2mass(ism,'co2mass','1pctCO2')]
onepct_co2_esgf_results = esgf_search(activity_id='CMIP', table_id='Amon', variable_id='co2mass', experiment_id='1pctCO2')
onepct_co2_esgf_ism = [x.split('/')[8]+'_'+x.split('/')[9]+'_'+x.split('/')[11] for x in onepct_co2_esgf_results]
onepct_co2_esgf_ism = [x for x in onepct_co2_esgf_ism if not x in list(onepct_co2mass_ism)]
def get_annual_CMIP6_data_esgf_co2mass(activity, table, variable, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
result = esgf_search(activity_id=activity, table_id=table, variable_id=variable, experiment_id=experiment,institution_id=institution, source_id=source, member_id=member)
if not result:
print('No results for this request')
return None
# select results with only the latest datestamp:
latest = sorted([x.split('/')[15] for x in result])[-1]
result = [x for x in result if x.split('/')[15]==latest]
# remove duplicate results
result_1 = []
for item in result:
if item.split('/')[-1] in [x.split('/')[-1] for x in result_1]:
continue
else:
result_1 += [item]
ds = xr.open_mfdataset(result_1, combine='by_coords')
return ds[variable].groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member)
for x in onepct_co2_esgf_ism:
onepct_co2mass_df_list += [get_annual_CMIP6_data_esgf_co2mass('CMIP','Amon','co2mass','1pctCO2',x.split('_')[0],x.split('_')[1],x.split('_')[2])]
def get_annual_CMIP6_data_gstore_co2(activity, table, variable, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
query = gs_stores.query("activity_id==\'"+activity+"\' & table_id==\'"+table+"\' & variable_id==\'"+variable+"\' & experiment_id==\'"+experiment+"\' & institution_id==\'"+institution+"\' & source_id==\'"+source+"\' & member_id==\'"+member+"\'")
if query.empty:
print('No results for this request')
return None
# create a mutable-mapping-style interface to the store
mapper = gcs.get_mapper(query.zstore.values[0])
# open it using xarray and zarr
ds = xr.open_zarr(mapper, consolidated=True)
area_query = gs_stores.query("variable_id=='areacella' & source_id==\'"+source+"\'")
if area_query.empty:
files_area = esgf_search(variable_id='areacella', activity_id=activity, institution_id=institution, source_id=source)
if not files_area:
print('No areacella for this request')
return None
ds_area = xr.open_dataset(files_area[0])
else:
ds_area = xr.open_zarr(gcs.get_mapper(area_query.zstore.values[0]), consolidated=True)
coords = list(ds[variable].coords.keys())
if 'lat' in coords:
dims = ['lat','lon']
else:
dims = ['latitude','longitude']
total_area = ds_area.areacella.sum(dim=dims)
ta_timeseries = (ds[variable].isel(plev=0) * ds_area.areacella).sum(dim=dims) / total_area
return ta_timeseries.groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member)
onepct_co2_ism = gs_stores.loc[(gs_stores.experiment_id=='1pctCO2')&(gs_stores.variable_id=='co2')].ism
def get_cmip6_data_gs_co2(ism,var,exp):
print('getting '+ism+' '+var)
ism_split = ism.split('_')
_out = get_annual_CMIP6_data_gstore_co2('CMIP', 'Amon', var, exp, ism_split[0], ism_split[1], ism_split[2])
print('got '+ism)
return _out
onepct_co2_ism_areacella_exist = [x for x in onepct_co2_ism if x.split('_')[1] in areacella_s_all]
onepct_co2_df_list = []
for ism in onepct_co2_ism_areacella_exist:
onepct_co2_df_list += [get_cmip6_data_gs_co2(ism,'co2','1pctCO2')]
def get_annual_CMIP6_data_esgf_co2(activity, table, variable, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
result = esgf_search(activity_id=activity, table_id=table, variable_id=variable, experiment_id=experiment,institution_id=institution, source_id=source, member_id=member)
if not result:
print('No results for this request')
return None
# select results with only the latest datestamp:
latest = sorted([x.split('/')[15] for x in result])[-1]
result = [x for x in result if x.split('/')[15]==latest]
# remove duplicate results
result_1 = []
for item in result:
if item.split('/')[-1] in [x.split('/')[-1] for x in result_1]:
continue
else:
result_1 += [item]
ds = xr.open_mfdataset(result_1, combine='by_coords')
files_area = esgf_search(variable_id='areacella', activity_id=activity, institution_id=institution, source_id=source)
if not files_area:
print('No areacella for this request')
return None
ds_area = xr.open_dataset(files_area[0])
coords = list(ds[variable].coords.keys())
if 'lat' in coords:
dims = ['lat','lon']
else:
dims = ['latitude','longitude']
total_area = ds_area.areacella.sum(dim=dims)
ta_timeseries = (ds[variable].isel(plev=0) * ds_area.areacella).sum(dim=dims) / total_area
return ta_timeseries.groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member)
onepct_co2_esgf_results = esgf_search(activity_id='CMIP', table_id='Amon', variable_id='co2', experiment_id='1pctCO2')
onepct_co2_esgf_ism = [x.split('/')[8]+'_'+x.split('/')[9]+'_'+x.split('/')[11] for x in onepct_co2_esgf_results]
for ism in [x for x in set(onepct_co2_esgf_ism) if not x in onepct_co2_ism_areacella_exist]:
print('getting '+ism)
onepct_co2_df_list += [get_annual_CMIP6_data_esgf_co2('CMIP', 'Amon', 'co2', '1pctCO2', ism.split('_')[0], ism.split('_')[1], ism.split('_')[2])]
pd.concat(onepct_co2_df_list,axis=1).to_csv('./cmip6_data/onepct_co2.csv')
piControl_df_list = []
for ism in piControl_ism_areacella_exist:
piControl_df_list += [get_cmip6_data_gs(ism,'tas','piControl')]
ESM3_picontrol = xr.open_mfdataset(esgf_search(activity_id='CMIP', table_id='Amon', variable_id='tas', experiment_id='piControl', institution_id='E3SM-Project', source_id='E3SM-1-0', member_id='r1i1p1f1'),combine='by_coords')
# ds_area = xr.open_dataset(files_area[0])
# coords = list(ds[variable].coords.keys())
# if 'lat' in coords:
# dims = ['lat','lon']
# else:
# dims = ['latitude','longitude']
# total_area = ds_area.areacella.sum(dim=dims)
# ta_timeseries = (ds[variable].isel(plev=0) * ds_area.areacella).sum(dim=dims) / total_area
# return ta_timeseries.groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member)
ESM3_area = xr.open_dataset(esgf_search(activity_id='CMIP', variable_id='areacella',source_id='E3SM-1-0')[-1])
dims=['lat','lon']
total_area = ESM3_area.areacella.sum(dim=dims)
ta_timeseries = (ESM3_picontrol['tas'] * ESM3_area.areacella).sum(dim=dims) / total_area
ESM3_picontrol_tas = ta_timeseries.groupby('time.year').mean('time').to_pandas().rename('E3SM-Project_E3SM-1-0_r1i1p1f1')
ESM3_picontrol_tas.plot()
piControl_rlut_df_list = []
for ism in piControl_ism_areacella_exist:
piControl_rlut_df_list += [get_cmip6_data_gs(ism,'rlut','piControl')]
piControl_rsut_df_list = []
for ism in piControl_ism_areacella_exist:
piControl_rsut_df_list += [get_cmip6_data_gs(ism,'rsut','piControl')]
piControl_rsdt_df_list = []
for ism in piControl_ism_areacella_exist:
piControl_rsdt_df_list += [get_cmip6_data_gs(ism,'rsdt','piControl')]
abrutp4x_tas_df_list = []
for ism in abrupt_4x_ism_areacella_exist:
abrutp4x_tas_df_list += [get_cmip6_data_gs(ism,'tas','abrupt-4xCO2')]
abrutp4x_rlut_df_list = []
for ism in abrupt_4x_ism_areacella_exist:
abrutp4x_rlut_df_list += [get_cmip6_data_gs(ism,'rlut','abrupt-4xCO2')]
abrutp4x_rsut_df_list = []
for ism in abrupt_4x_ism_areacella_exist:
abrutp4x_rsut_df_list += [get_cmip6_data_gs(ism,'rsut','abrupt-4xCO2')]
abrutp4x_rsdt_df_list = []
for ism in abrupt_4x_ism_areacella_exist:
abrutp4x_rsdt_df_list += [get_cmip6_data_gs(ism,'rsdt','abrupt-4xCO2')]
abrutp4x_rsdt_df = pd.concat(abrutp4x_rsdt_df_list,axis=1)
abrutp4x_rsut_df = pd.concat(abrutp4x_rsut_df_list,axis=1)
abrutp4x_rlut_df = pd.concat(abrutp4x_rlut_df_list,axis=1)
abrutp4x_tas_df = pd.concat(abrutp4x_tas_df_list,axis=1)
piControl_tas_df = pd.concat(piControl_df_list,axis=1)
piControl_rsdt_df = pd.concat(piControl_rsdt_df_list,axis=1)
piControl_rsut_df = pd.concat(piControl_rsut_df_list,axis=1)
piControl_rlut_df = pd.concat(piControl_rlut_df_list,axis=1)
onepct_tas_df = pd.concat(onepct_tas_df_list,axis=1)
onepct_rlut_df = pd.concat(onepct_rlut_df_list,axis=1)
onepct_rsut_df = pd.concat(onepct_rsut_df_list,axis=1)
onepct_rsdt_df = pd.concat(onepct_rsdt_df_list,axis=1)
onepct_co2mass_df = pd.concat(onepct_co2mass_df_list,axis=1)
# onepct_co2mass_df.to_csv('./cmip6_data/onepct_co2mass.csv')
# onepct_tas_df.to_csv('./cmip6_data/onepct_tas.csv')
# onepct_rlut_df.to_csv('./cmip6_data/onepct_rlut.csv')
# onepct_rsut_df.to_csv('./cmip6_data/onepct_rsut.csv')
# onepct_rsdt_df.to_csv('./cmip6_data/onepct_rsdt.csv')
# piControl_rlut_df.to_csv('./cmip6_data/piControl_rlut.csv')
# piControl_rsut_df.to_csv('./cmip6_data/piControl_rsut.csv')
# piControl_rsdt_df.to_csv('./cmip6_data/piControl_rsdt.csv')
# abrutp4x_rsdt_df.to_csv('./cmip6_data/abrupt-4xCO2_rsdt.csv')
# abrutp4x_rsut_df.to_csv('./cmip6_data/abrupt-4xCO2_rsut.csv')
# abrutp4x_rlut_df.to_csv('./cmip6_data/abrupt-4xCO2_rlut.csv')
# abrutp4x_tas_df.to_csv('./cmip6_data/abrupt-4xCO2_tas.csv')
# piControl_tas_df.to_csv('./cmip6_data/piControl_tas.csv')
def get_annual_CMIP6_data_esgf(activity, table, variable, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
result = esgf_search(activity_id=activity, table_id=table, variable_id=variable, experiment_id=experiment,institution_id=institution, source_id=source, member_id=member)
if not result:
print('No results for this request')
return None
# select results with only the latest datestamp:
latest = sorted([x.split('/')[15] for x in result])[-1]
result = [x for x in result if x.split('/')[15]==latest]
# remove duplicate results
result_1 = []
for item in result:
if item.split('/')[-1] in [x.split('/')[-1] for x in result_1]:
continue
else:
result_1 += [item]
ds = xr.open_mfdataset(result_1, combine='by_coords')
files_area = esgf_search(variable_id='areacella', activity_id=activity, institution_id=institution, source_id=source)
if not files_area:
print('No areacella for this request')
return None
ds_area = xr.open_dataset(files_area[0])
coords = list(ds[variable].coords.keys())
if 'lat' in coords:
dims = ['lat','lon']
else:
dims = ['latitude','longitude']
total_area = ds_area.areacella.sum(dim=dims)
ta_timeseries = (ds[variable] * ds_area.areacella).sum(dim=dims) / total_area
return ta_timeseries.groupby('time.year').mean('time').to_pandas().rename(institution+'_'+source+'_'+member)
def get_annual_CMIP6_data_esgf_multivar(activity, table, variables, experiment, institution, source, member):
# eg activity='CMIP', table='Amon', variable='tas', experiment='historical', institution="NCAR", source="CESM2", member="r10i1p1f1"
result = esgf_search(activity_id=activity, table_id=table, experiment_id=experiment,institution_id=institution, source_id=source, member_id=member)
result = [x for x in result if x.split('/')[13] in variables]
if not result:
print('No results for this request')
return None
# select results with only the latest datestamp:
# latest = sorted([x.split('/')[15] for x in result])[-1]
# result = [x for x in result if x.split('/')[15]==latest]
# remove duplicate results
result_1 = []
for item in result:
if item.split('/')[-1] in [x.split('/')[-1] for x in result_1]:
continue
else:
result_1 += [item]
ds = xr.open_mfdataset(result_1, combine='by_coords')
files_area = esgf_search(variable_id='areacella', activity_id=activity, institution_id=institution, source_id=source)
if not files_area:
print('No areacella for this request')
return None
ds_area = xr.open_dataset(files_area[0])
coords = list(ds[variables].coords.keys())
if 'lat' in coords:
dims = ['lat','lon']
else:
dims = ['latitude','longitude']
total_area = ds_area.areacella.sum(dim=dims)
ta_timeseries = (ds[variables] * ds_area.areacella).sum(dim=dims) / total_area
_out = ta_timeseries.groupby('time.year').mean('time').to_dataframe()[variables]
return pd.concat([_out],axis=1,keys=[institution+'_'+source+'_'+member])
piControl_list = esgf_search(activity_id='CMIP', table_id='Amon', variable_id='tas', experiment_id='piControl')
piControl_list_nodupl = []
for item in piControl_list:
if item.split('/')[-1] in [x.split('/')[-1] for x in piControl_list_nodupl]:
continue
else:
piControl_list_nodupl += [item]
abrupt4x_list = esgf_search(activity_id='CMIP', table_id='Amon', variable_id='tas', experiment_id='abrupt-4xCO2')
abrupt4x_list_nodupl = []
for item in abrupt4x_list:
if item.split('/')[-1] in [x.split('/')[-1] for x in abrupt4x_list_nodupl]:
continue
else:
abrupt4x_list_nodupl += [item]
areacella_list = esgf_search(activity_id='CMIP', variable_id='areacella')
areacella_list_nodupl = []
for item in areacella_list:
if item.split('/')[-1] in [x.split('/')[-1] for x in areacella_list_nodupl]:
continue
else:
areacella_list_nodupl += [item]
abrupt4x_ism_list = list(set([x.split('/')[8]+'_'+x.split('/')[9]+'_'+x.split('/')[11] for x in abrupt4x_list_nodupl]))
piControl_ism_list = list(set([x.split('/')[8]+'_'+x.split('/')[9]+'_'+x.split('/')[11] for x in piControl_list_nodupl]))
areacella_ism_list = list(set([x.split('/')[8]+'_'+x.split('/')[9]+'_'+x.split('/')[11] for x in areacella_list_nodupl]))
areacella_s_list = list(set([x.split('_')[1] for x in areacella_ism_list]))
piControl_ism_areacella_exist = [x for x in piControl_ism_list if x.split('_')[1] in areacella_s_list]
abrupt4x_ism_areacella_exist = [x for x in abrupt4x_ism_list if x.split('_')[1] in areacella_s_list]
abrupt4x_tas_df = pd.read_csv('./cmip6_data/abrupt-4xCO2_tas.csv',index_col=0)
esgf_abrupt4x_list = [x for x in abrupt4x_ism_areacella_exist if not x in abrupt4x_tas_df.columns]
piControl_tas_df = pd.read_csv('./cmip6_data/piControl_tas.csv',index_col=0)
esgf_piControl_list = [x for x in piControl_ism_areacella_exist if not x in piControl_tas_df.columns]
def get_CMIP6_data(ism,exp='abrupt-4xCO2',var='tas',multivar=False):
ism_split = ism.split('_')
if multivar:
_out = get_annual_CMIP6_data_esgf_multivar('CMIP', 'Amon', var, exp, ism_split[0], ism_split[1], ism_split[2])
else:
_out = get_annual_CMIP6_data_esgf('CMIP', 'Amon', var, exp, ism_split[0], ism_split[1], ism_split[2])
print(ism+' complete')
return _out
abrupt4x_df_list_esgf = []
for x in esgf_abrupt4x_list:
abrupt4x_df_list_esgf += [get_CMIP6_data(x,'abrupt-4xCO2',['tas','rlut','rsut','rsdt'],True)]
# P1=multiprocessing.Pool(processes=8)
# abrupt4xCO2_df = P1.starmap(get_CMIP6_data,[(x,'abrupt-4xCO2',['tas','rlut','rsut','rsdt'],True) for x in abrupt4x_ism_areacella_exist])
# P1.close
piControl_df_list_esgf = []
for x in piControl_ism_areacella_exist:
piControl_df_list_esgf += [get_CMIP6_data(x,'piControl','tas')]
# P1=multiprocessing.Pool(processes=8)
# piControl_df = P1.starmap(get_CMIP6_data,[(x,'piControl','tas',False) for x in piControl_ism_areacella_exist])
# P1.close
###Output
NASA-GISS_GISS-E2-1-G_r1i1p1f2 complete
NASA-GISS_GISS-E2-1-G_r1i1p1f3 complete
|
examples/nn-meter_for_bench_dataset.ipynb | ###Markdown
Use nn-Meter for the Bench Dataset. In this notebook, we show nn-Meter examples of latency prediction for the bench dataset.
###Code
import os
import nn_meter
from nn_meter.dataset import bench_dataset
datasets = bench_dataset()
for data in datasets:
print(os.path.basename(data))
# dataset schema: for each model we store the model id, the graph in nn-Meter IR graph format, and the latency numbers on four devices
import jsonlines
test_data = datasets[0]
with jsonlines.open(test_data) as data_reader:
True_lat = []
Pred_lat = []
for i, item in enumerate(data_reader):
print('dict keys:',list(item.keys()))
print('model id',item['id'])
print('cpu latency',item['cortexA76cpu_tflite21'])
print('adreno640gpu latency',item['adreno640gpu_tflite21'])
print('adreno630gpu latency',item['adreno630gpu_tflite21'])
print('intelvpu latency',item['myriadvpu_openvino2019r2'])
print('model graph is stored in nn-meter IR:',item['graph'])
break
# list all supporting latency predictors
predictors = nn_meter.list_latency_predictors()
for p in predictors:
print(f"[Predictor] {p['name']}: version={p['version']}")
# specify basic predictor
predictor_name = 'adreno640gpu_tflite21' # user can change text here to test other predictors
predictor_version = 1.0
import warnings
warnings.filterwarnings('ignore')
import jsonlines
import nn_meter
# load predictor
predictor = nn_meter.load_latency_predictor(predictor_name, predictor_version)
# view latency prediction demo in one model group of the dataset
test_data = datasets[0]
with jsonlines.open(test_data) as data_reader:
True_lat = []
Pred_lat = []
for i, item in enumerate(data_reader):
if i >= 20: # only show the first 20 results to save space
break
graph = item["graph"]
pred_lat = predictor.predict(graph, model_type="nnmeter-ir")
real_lat = item[predictor_name]
print(f'[RESULT] {os.path.basename(test_data)}[{i}]: predict: {pred_lat}, real: {real_lat}')
if real_lat != None:
True_lat.append(real_lat)
Pred_lat.append(pred_lat)
if len(True_lat) > 0:
rmse, rmspe, error, acc5, acc10, _ = nn_meter.latency_metrics(Pred_lat, True_lat)
print(
f'[SUMMARY] The first 20 cases from {os.path.basename(test_data)} on {predictor_name}: rmse: {rmse}, 5%accuracy: {acc5}, 10%accuracy: {acc10}'
)
# apply nn-Meter prediction for all data
for filename in datasets:
print(f'Start testing {os.path.basename(filename)} ...')
True_lat = []
Pred_lat = []
with jsonlines.open(filename) as data_reader:
for item in data_reader:
graph = item["graph"]
pred_lat = predictor.predict(graph, model_type="nnmeter-ir")
real_lat = item[predictor_name]
if real_lat != None:
True_lat.append(real_lat)
Pred_lat.append(pred_lat)
if len(True_lat) > 0:
rmse, rmspe, error, acc5, acc10, _ = nn_meter.latency_metrics(Pred_lat, True_lat)
print(
f'{filename} on {predictor_name}: rmse: {rmse}, 5%accuracy: {acc5}, 10%accuracy: {acc10}'
)
###Output
_____no_output_____ |
coursework/00-Crash-Course-Topics/01-Crash-Course-Pandas/01-Series.ipynb | ###Markdown
Copyright Pierian Data. For more information, visit us at www.pieriandata.com. Series: The first main data type we will learn about for pandas is the Series data type. Let's import Pandas and explore the Series object. A Series is very similar to a NumPy array (in fact it is built on top of the NumPy array object). What differentiates the NumPy array from a Series is that a Series can have axis labels, meaning it can be indexed by a label instead of just a number location. It also doesn't need to hold numeric data; it can hold any arbitrary Python object. Let's explore this concept through some examples:
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Creating a Series: You can convert a list, NumPy array, or dictionary to a Series:
###Code
labels = ['a','b','c']
my_list = [10,20,30]
arr = np.array([10,20,30])
d = {'a':10,'b':20,'c':30}
###Output
_____no_output_____
###Markdown
Using Lists
###Code
pd.Series(data=my_list)
pd.Series(data=my_list,index=labels)
pd.Series(my_list,labels)
###Output
_____no_output_____
###Markdown
Using NumPy Arrays
###Code
pd.Series(arr)
pd.Series(arr,labels)
###Output
_____no_output_____
###Markdown
Using Dictionaries
###Code
pd.Series(d)
###Output
_____no_output_____
###Markdown
Data in a Series: A pandas Series can hold a variety of object types:
###Code
pd.Series(data=labels)
# Even functions (although unlikely that you will use this)
pd.Series([sum,print,len])
###Output
_____no_output_____
###Markdown
Using an Index: The key to using a Series is understanding its index. Pandas makes use of these index names or numbers to allow for fast lookups of information (it works like a hash table or dictionary). Let's see some examples of how to grab information from a Series. Let us create two Series, ser1 and ser2:
###Code
ser1 = pd.Series([1,2,3,4],index = ['USA', 'Germany','USSR', 'Japan'])
ser1
ser2 = pd.Series([1,2,5,4],index = ['USA', 'Germany','Italy', 'Japan'])
ser2
ser1['USA']
###Output
_____no_output_____
###Markdown
Operations between Series are then also aligned based on the index:
###Code
ser1 + ser2
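# Indices present in only one Series give NaN; a small sketch of avoiding that with fill_value
ser1.add(ser2, fill_value=0)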
###Output
_____no_output_____ |
Exercise py/PY0101EN-2-2-Lists.ipynb | ###Markdown
Lists in Python. Welcome! This notebook will teach you about lists in the Python programming language. By the end of this lab, you'll know the basic list operations in Python, including indexing, list operations, and copying/cloning lists. Table of Contents: About the Dataset; Lists (Indexing, List Content, List Operations, Copy and Clone List); Quiz on Lists. Estimated time needed: 15 min.

About the Dataset: Imagine you received album recommendations from your friends and compiled all of the recommendations into a table, with specific information about each album. The table has one row for each album and several columns:

- **artist** - Name of the artist
- **album** - Name of the album
- **released_year** - Year the album was released
- **length_min_sec** - Length of the album (hours,minutes,seconds)
- **genre** - Genre of the album
- **music_recording_sales_millions** - Music recording sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
- **claimed_sales_millions** - Album's claimed sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
- **date_released** - Date on which the album was released
- **soundtrack** - Indicates if the album is the movie soundtrack (Y) or (N)
- **rating_of_friends** - Indicates the rating from your friends from 1 to 10

The dataset can be seen below:

| Artist | Album | Released | Length | Genre | Music recording sales (millions) | Claimed sales (millions) | Released | Soundtrack | Rating (friends) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Michael Jackson | Thriller | 1982 | 00:42:19 | Pop, rock, R&B | 46 | 65 | 30-Nov-82 | | 10.0 |
| AC/DC | Back in Black | 1980 | 00:42:11 | Hard rock | 26.1 | 50 | 25-Jul-80 | | 8.5 |
| Pink Floyd | The Dark Side of the Moon | 1973 | 00:42:49 | Progressive rock | 24.2 | 45 | 01-Mar-73 | | 9.5 |
| Whitney Houston | The Bodyguard | 1992 | 00:57:44 | Soundtrack/R&B, soul, pop | 26.1 | 50 | 25-Jul-80 | Y | 7.0 |
| Meat Loaf | Bat Out of Hell | 1977 | 00:46:33 | Hard rock, progressive rock | 20.6 | 43 | 21-Oct-77 | | 7.0 |
| Eagles | Their Greatest Hits (1971-1975) | 1976 | 00:43:08 | Rock, soft rock, folk rock | 32.2 | 42 | 17-Feb-76 | | 9.5 |
| Bee Gees | Saturday Night Fever | 1977 | 1:15:54 | Disco | 20.6 | 40 | 15-Nov-77 | Y | 9.0 |
| Fleetwood Mac | Rumours | 1977 | 00:40:01 | Soft rock | 27.9 | 40 | 04-Feb-77 | | 9.5 |

Lists: Indexing. We are going to take a look at lists in Python. A list is a sequenced collection of different objects such as integers, strings, and other lists. The address of each element within a list is called an index. An index is used to access and refer to items within a list. To create a list, type the list within square brackets [ ], with your content inside the brackets and separated by commas. Let's try it!
###Code
# Create a list
L = ["Michael Jackson", 10.1, 1982]
L
###Output
_____no_output_____
###Markdown
We can use negative and regular indexing with a list:
###Code
# Print the elements on each index
print('the same element using negative and positive indexing:\n Postive:',L[0],
'\n Negative:' , L[-3] )
print('the same element using negative and positive indexing:\n Postive:',L[1],
'\n Negative:' , L[-2] )
print('the same element using negative and positive indexing:\n Postive:',L[2],
'\n Negative:' , L[-1] )
###Output
_____no_output_____
###Markdown
List Content Lists can contain strings, floats, and integers. We can nest other lists, and we can also nest tuples and other data structures. The same indexing conventions apply for nesting:
###Code
# Sample List
["Michael Jackson", 10.1, 1982, [1, 2], ("A", 1)]
###Output
_____no_output_____
###Markdown
List Operations We can also perform slicing in lists. For example, if we want the last two elements, we use the following command:
###Code
# Sample List
L = ["Michael Jackson", 10.1,1982,"MJ",1]
L
###Output
_____no_output_____
###Markdown
###Code
# List slicing
L[3:5]
###Output
_____no_output_____
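###Markdown
 The same last two elements can also be taken with a negative slice, which works regardless of the list's length (added for illustration):
###Code
# Negative slicing counts from the end of the list
L[-2:]
###Output
 _____no_output_____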
###Markdown
We can use the method extend to add new elements to the list:
###Code
# Use extend to add elements to list
L = [ "Michael Jackson", 10.2]
L.extend(['pop', 10])
L
###Output
_____no_output_____
###Markdown
Another similar method is append. If we apply append instead of extend, we add one element to the list:
###Code
# Use append to add elements to list
L = [ "Michael Jackson", 10.2]
L.append(['pop', 10])
L
###Output
_____no_output_____
###Markdown
Each time we apply a method, the list changes. If we apply extend we add two new elements to the list. The list L is then modified by adding two new elements:
###Code
# Use extend to add elements to list
L = [ "Michael Jackson", 10.2]
L.extend(['pop', 10])
L
###Output
_____no_output_____
###Markdown
If we append the list ['a','b'] we have one new element consisting of a nested list:
###Code
# Use append to add elements to list
L.append(['a','b'])
L
###Output
_____no_output_____
###Markdown
As lists are mutable, we can change them. For example, we can change the first element as follows:
###Code
# Change the element based on the index
A = ["disco", 10, 1.2]
print('Before change:', A)
A[0] = 'hard rock'
print('After change:', A)
###Output
_____no_output_____
###Markdown
We can also delete an element of a list using the del command:
###Code
# Delete the element based on the index
print('Before change:', A)
del(A[0])
print('After change:', A)
###Output
_____no_output_____
###Markdown
We can convert a string to a list using split. For example, the method split translates every group of characters separated by a space into an element in a list:
###Code
# Split the string, default is by space
'hard rock'.split()
###Output
_____no_output_____
###Markdown
We can use the split function to separate strings on a specific character. We pass the character we would like to split on into the argument, which in this case is a comma. The result is a list, and each element corresponds to a set of characters that have been separated by a comma:
###Code
# Split the string by comma
'A,B,C,D'.split(',')
###Output
_____no_output_____
###Markdown
Copy and Clone List When we set one variable B equal to A, both A and B reference the same list in memory:
###Code
# Copy (copy by reference) the list A
A = ["hard rock", 10, 1.2]
B = A
print('A:', A)
print('B:', B)
###Output
_____no_output_____
###Markdown
Initially, the value of the first element in B is set as hard rock. If we change the first element in A to banana, we get an unexpected side effect. As A and B are referencing the same list, if we change list A, then list B also changes. If we check the first element of B we get banana instead of hard rock:
###Code
# Examine the copy by reference
print('B[0]:', B[0])
A[0] = "banana"
print('B[0]:', B[0])
###Output
_____no_output_____
###Markdown
This is demonstrated in the following figure: You can clone list **A** by using the following syntax:
###Code
# Clone (clone by value) the list A
B = A[:]
B
###Output
_____no_output_____
###Markdown
Variable **B** references a new copy or clone of the original list; this is demonstrated in the following figure: Now if you change A, B will not change:
###Code
print('B[0]:', B[0])
A[0] = "hard rock"
print('B[0]:', B[0])
###Output
_____no_output_____
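###Markdown
 One caveat worth adding: B = A[:] makes a shallow copy, so any nested list inside A is still shared between A and B. When a fully independent copy is needed, copy.deepcopy can be used, as sketched below:
###Code
import copy
A = ["hard rock", 10, [1, 2]]
B = A[:]                 # shallow copy: the inner list is still shared
C = copy.deepcopy(A)     # deep copy: nothing is shared
A[2].append(3)
print('B[2]:', B[2])     # [1, 2, 3] because the inner list is shared with A
print('C[2]:', C[2])     # [1, 2] stays independent
###Output
 _____no_output_____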
###Markdown
Quiz on List Create a list a_list, with the following elements 1, hello, [1,2,3] and True.
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- Your answer is below:a_list = [1, 'hello', [1, 2, 3] , True]a_list--> Find the value stored at index 1 of a_list.
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- Your answer is below:a_list[1]--> Retrieve the elements stored at index 1, 2 and 3 of a_list.
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- Your answer is below:a_list[1:4]--> Concatenate the following lists A = [1, 'a'] and B = [2, 1, 'd']:
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____ |
sandbox/feature_drugs_and_outcomes.ipynb | ###Markdown
Module/Variable/Data setup
###Code
%run ../src/python/helpers.py
%matplotlib inline
from numpy import nan, arange
from pandas import read_feather
import seaborn as sns
import ipywidgets as w
from quilt.data.hsiaoyi0504 import aeolus_top5drugs
#VARIABLES
cl = ['atc_1st','atc_2nd','atc_3rd','atc_4th','drug_concept_name']
data = read_feather(aeolus_top5drugs.aeolus_top5drugs._data())
plot_settings()
###Output
/usr/local/Cellar/python/3.6.5_1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
return f(*args, **kwds)
###Markdown
For a drug class, how many unique adverse reactions are reported?
###Code
d = dropdown(cl)
d
###Output
_____no_output_____
###Markdown
classes in above chosen drug class
###Code
out= 'outcome_concept_name'
grpd = data.groupby([d.label,out])[out].count().sort_values(ascending=False)
grpd.name = 'n'
grpd2 = grpd.groupby(level=d.label)
series = grpd2.count()
p = plot(series,kind='barh')
q = p.set_title(p.get_ylabel().upper(),rotation=0,weight='bold')
q = p.set_ylabel("")
q = p.set_xlabel("Number of unique adverse reactions",weight="bold")
###Output
_____no_output_____
###Markdown
What are the top adverse reactions in the chosen drug class?
###Code
ds = dropdown(series.index)
ds
i = w.IntSlider(min=1,max=10,step=1,value=5)
i
###Output
_____no_output_____
###Markdown
indices for most frequent ADRs
###Code
sub = data.query('{0} in @ds.label'.format(d.label)).groupby([d.label,out])[out].count().sort_values(ascending=False).head(i.value)
sub.name= "n"
sub = sub.reset_index()
p = sns.barplot(y=d.label,x='n',hue=out,data=sub,orient='h')
p.legend(bbox_to_anchor=(1.7,1))
q = p.set_ylabel('')
###Output
_____no_output_____
###Markdown
How do the ADRs break down by report year?
###Code
outs = sub.iloc[:,1].values
sub2 = data.query('({0} == @ds.label) & ({1} in @outs)'.format(d.label, out))  # reconstructed: the '== @ds.label' comparison was garbled by e-mail obfuscation in the source
series2 = freqXbyY(sub2,'report_year','id')
p = plot(series2,kind='bar')
q = p.set_title(p.get_ylabel().upper(),rotation=0,weight='bold')
q = p.set_ylabel(p.get_ylabel(),rotation=0)
q = p.set_xlabel("Number of Reports",weight="bold")
###Output
_____no_output_____
###Markdown
subset data by chosen year(s)
###Code
labels = series2.index
mds = w.SelectMultiple(options = labels ,value = tuple(labels))
mds
###Output
_____no_output_____
###Markdown
How many are reported within these ADRs across the sexes?
###Code
#plot variables for filtering/wrangling
bars = 'gender_code'
x = 'report_year'
count = 'id'
dat = clean_gender(sub2).query('report_year in @mds.label')
#main
sub_dat = dat[[bars,x,count]]
plot_sub_dat = sub_dat.groupby([bars,x]).count().reset_index(level=bars).pivot(columns=bars)
plot_sub_dat.columns = plot_sub_dat.columns.droplevel(level=0)
plot_sub_dat.plot.bar()
###Output
_____no_output_____
###Markdown
How many are reported within this class across ages?
###Code
#plot variables for filtering/wrangling
grp = 'age_cat'
#main
dat[[d.label,grp]].groupby([grp]).count().plot.bar()
###Output
_____no_output_____
###Markdown
How many are reported within this class across ages for each sex?
###Code
#plot variables for filtering/wrangling
bars = 'gender_code'
x = 'age_cat'
count = 'id'
#want to filter dataset for M/F gender and if report year was clicked or selected
sub_dat = clean_gender(dat)[[bars,x,count]]
#main
plot_sub_dat = sub_dat.groupby([bars,x]).count().reset_index(level=bars).pivot(columns=bars)
plot_sub_dat.columns = plot_sub_dat.columns.droplevel(level=0)
plot_sub_dat.plot.bar()
###Output
_____no_output_____ |
Yue/.ipynb_checkpoints/Data Preprocess-checkpoint.ipynb | ###Markdown
Check label quality
###Code
plt.figure(figsize=(5,5))
plt.hist(lung[lung['Label'] == 'Stroma']['Stroma'].values, bins=100)
sns.despine()
plt.xlabel('Stroma marker probability')
plt.ylabel('Count')
plt.show()
plt.figure(figsize=(5,5))
plt.hist(lung[lung['Label'] == 'Immune']['Immune'].values, bins=100)
sns.despine()
plt.xlabel('Immune marker probability')
plt.ylabel('Count')
plt.show()
plt.figure(figsize=(5,5))
plt.hist(lung[lung['Label'] == 'Tumor']['Tumor'].values, bins=100)
sns.despine()
plt.xlabel('Tumor marker probability')
plt.ylabel('Count')
plt.show()
###Output
_____no_output_____
###Markdown
Quality filter Filter out cells with label marker probability less than 50%
###Code
prob_thre = 0.5
# Keep only cells whose probability for their assigned label reaches the threshold.
# (Assumption: every 'Label' value has a matching marker-probability column, e.g. 'Stroma', 'Immune', 'Tumor'.)
quality_mask = lung.apply(lambda row: row[row['Label']] >= prob_thre, axis=1)
lung = lung[quality_mask]
lung['Tumor'].max()
print(lung1['Tumor'].max())
print(lung2['Tumor'].max())
print(lung3['Tumor'].max())
lung['EulerNumber'].value_counts()
###Output
_____no_output_____ |
05Natural Language Processing/01Lexical Processing/02Basic Lexical Processing/07TF-IDF Representation/tf-idf.ipynb | ###Markdown
TF-IDF model
###Code
# load all necessary libraries
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
pd.set_option('max_colwidth', 100)
###Output
_____no_output_____
###Markdown
Let's build a basic bag of words model on three sample documents
###Code
documents = ["Gangs of Wasseypur is a great movie. Wasseypur is a town in Bihar.", "The success of a song depends on the music.", "There is a new movie releasing this week. The movie is fun to watch."]
print(documents)
documents = ["Vapour, Bangalore has a really great terrace seating and an awesome view of the Bangalore skyline",
"The beer at Vapour, Bangalore was amazing. My favorites are the wheat beer and the ale beer.",
"Vapour, Bangalore has the best view in Bangalore."]
print(documents)
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
# add stemming and lemmatisation in the preprocess function
def preprocess(document):
'changes document to lower case and removes stopwords'
# change sentence to lower case
document = document.lower()
# tokenize into words
words = word_tokenize(document)
# remove stop words
words = [word for word in words if word not in stopwords.words("english")]
# stem
#words = [stemmer.stem(word) for word in words]
# join words to make sentence
document = " ".join(words)
return document
documents = [preprocess(document) for document in documents]
print(documents)
###Output
['vapour , bangalore really great terrace seating awesome view bangalore skyline', 'beer vapour , bangalore amazing . favorites wheat beer ale beer .', 'vapour , bangalore best view bangalore .']
###Markdown
Creating a tf-idf model using the TfidfVectorizer function
###Code
vectorizer = TfidfVectorizer()
tfidf_model = vectorizer.fit_transform(documents)
print(tfidf_model) # prints the (row, column) index and the tf-idf value of each non-zero cell
# print the full sparse matrix
print(tfidf_model.toarray())
pd.DataFrame(tfidf_model.toarray(), columns = vectorizer.get_feature_names())
###Output
_____no_output_____
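###Markdown
 To make the numbers above less opaque, a small spot-check based on scikit-learn's documented defaults (smooth_idf=True, norm='l2'): the idf weight is ln((1 + n) / (1 + df)) + 1, and each row is then L2-normalised. The term chosen here ('view') is just an example.
###Code
import numpy as np
n_docs = len(documents)
df_view = sum('view' in doc.split() for doc in documents)      # document frequency of 'view'
manual_idf = np.log((1 + n_docs) / (1 + df_view)) + 1          # smoothed idf formula
sklearn_idf = vectorizer.idf_[vectorizer.vocabulary_['view']]  # idf learned by the vectorizer
print(manual_idf, sklearn_idf)
###Output
 _____no_output_____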
###Markdown
Let's create a tf-idf model on the spam dataset.
###Code
# load data
spam = pd.read_csv("SMSSpamCollection.txt", sep = "\t", names=["label", "message"])
spam.head()
###Output
_____no_output_____
###Markdown
Let's take a subset of the data (first 50 rows only) and create a tf-idf model on that.
###Code
spam = spam.iloc[0:50,:]
print(spam)
# extract the messages from the dataframe
messages = [message for message in spam.message]
print(messages)
# preprocess messages using the preprocess function
messages = [preprocess(message) for message in messages]
print(messages)
# tf-idf model
vectorizer = TfidfVectorizer()
tfidf_model = vectorizer.fit_transform(messages)
# Let's look at the dataframe
tfidf = pd.DataFrame(tfidf_model.toarray(), columns = vectorizer.get_feature_names())
tfidf
# token names
print(vectorizer.get_feature_names())
###Output
['000', '07732584351', '08000930705', '08002986030', '08452810075over18', '09061701461', '100', '11', '12', '150p', '16', '20', '2005', '21st', '2nd', '4403ldnw1a7rw18', '4txt', '50', '6days', '81010', '87077', '87121', '87575', '8am', '900', 'abiola', 'actin', 'aft', 'ahead', 'ahhh', 'aids', 'already', 'alright', 'always', 'amore', 'amp', 'anymore', 'anything', 'apologetic', 'apply', 'arabian', 'ard', 'around', 'ask', 'available', 'back', 'badly', 'bit', 'blessing', 'breather', 'brother', 'buffet', 'bugis', 'burns', 'bus', 'ca', 'call', 'callers', 'callertune', 'calls', 'camcorder', 'camera', 'car', 'cash', 'catch', 'caught', 'chances', 'charged', 'cheers', 'chgs', 'child', 'cine', 'claim', 'clear', 'click', 'co', 'code', 'colour', 'com', 'comin', 'comp', 'confirm', 'convincing', 'copy', 'cost', 'could', 'crave', 'crazy', 'credit', 'cried', 'csh11', 'cup', 'cuppa', 'customer', 'da', 'darling', 'date', 'day', 'dbuk', 'decide', 'decided', 'delivery', 'dinner', 'done', 'dont', 'dun', 'early', 'eat', 'eating', 'eg', 'egg', 'eh', 'endowed', 'england', 'enough', 'entitled', 'entry', 'even', 'fa', 'fainting', 'fair', 'fallen', 'fear', 'feel', 'ffffffffff', 'final', 'fine', 'finish', 'first', 'forced', 'forget', 'free', 'freemsg', 'friends', 'frying', 'fulfil', 'fun', 'get', 'getting', 'go', 'goals', 'goes', 'going', 'gon', 'got', 'gota', 'granted', 'great', 'gt', 'ha', 'hello', 'help', 'hep', 'hey', 'hl', 'home', 'hope', 'hopefully', 'hor', 'hospital', 'hospitals', 'hours', 'housework', 'http', 'hungry', 'immunisation', 'inches', 'info', 'invite', 'jackpot', 'joking', 'jurong', 'kept', 'kl341', 'know', 'knows', 'la', 'lar', 'latest', 'lccltd', 'learn', 'left', 'lesson', 'let', 'letter', 'like', 'link', 'live', 'lives', 'll', 'lol', 'look', 'lor', 'love', 'lt', 'lunch', 'macedonia', 'make', 'man', 'mark', 'may', 'maybe', 'meet', 'melle', 'membership', 'message', 'messages', 'minnaminunginte', 'miss', 'missed', 'mmmmmm', 'mobile', 'mobiles', 'mom', 'month', 'months', 'msg', 'na', 'nah', 'name', 'national', 'naughty', 'need', 'net', 'network', 'news', 'next', 'nigeria', 'nokia', 'nurungu', 'oh', 'ok', 'oni', 'oops', 'oru', 'packing', 'patent', 'pay', 'per', 'pizza', 'please', 'pls', 'pobox', 'poboxox36504w45wq', 'point', 'pounds', 'press', 'prize', 'promise', 'qjkgighjjgcbl', 'question', 'quick', 'rate', 'rcv', 're', 'really', 'receive', 'receivea', 'remember', 'reply', 'replying', 'request', 'reward', 'right', 'ringtone', 'rodger', 'room', 'roommate', 'sarcastic', 'saturday', 'say', 'scotland', 'searching', 'see', 'seeing', 'selected', 'send', 'seriously', 'set', 'sick', 'six', 'slice', 'sms', 'smth', 'soon', 'sooner', 'speak', 'spell', 'spoilt', 'std', 'steed', 'still', 'stock', 'str', 'stubborn', 'stuff', 'subscription', 'sucker', 'suckers', 'sucks', 'sunday', 'sure', 'sweet', 'take', 'talk', 'tb', 'tea', 'team', 'tell', 'telling', 'text', 'texting', 'thank', 'thanks', 'that', 'think', 'tho', 'though', 'till', 'times', 'timings', 'tkts', 'today', 'tomo', 'tomorrow', 'tonight', 'treat', 'tried', 'try', 'trying', 'tsandcs', 'turn', 'txt', 'tyler', 'uk', 'update', 'ur', 'urgent', 'us', 'use', 'usf', 'vaguely', 'valid', 'valued', 've', 'vettam', 'wait', 'wales', 'want', 'wanted', 'wap', 'wat', 'watching', 'watts', 'way', 'weak', 'week', 'weekend', 'well', 'wet', 'wif', 'win', 'winner', 'wkly', 'wo', 'wonderful', 'wont', 'word', 'words', 'work', 'world', 'worried', 'www', 'xuhui', 'xxx', 'xxxmobilemovieclub', 'yeah', 'yes', 'yummy', 'yup', 'ú1']
|
_tensorflow_keras/keras/DL_snippets.ipynb | ###Markdown
note warn
###Code
from mymods.lauthom import *
def svg_model(model, filename):
"""Visualise Keras NN model as flowchart"""
plot_model(model, to_file=filename)
return SVG(model_to_dot(model).create(prog='dot', format='svg'))
svg_model(happyModel, 'HappyModel.png')
###Output
_____no_output_____
###Markdown
###Code
# Shuffle the training set
order = np.argsort(np.random.random(train_labels.shape))
class PrintDot(keras.callbacks.Callback):
    """Display training progress by printing a single dot for each completed epoch."""
    def on_epoch_end(self, epoch, logs=None):
        if epoch % 100 == 0: print('\nEpoch: {}'.format(epoch))
        print('.', end='')
    def on_train_begin(self, logs=None):
        print('\nTraining model')
    def on_train_end(self, logs=None):
        print('\nModel is trained')
dict_ = {
"string": "string",
"array": [1, 2, 3],
"bool": True,
"object": {
"foo": "bar"
}
}
dictify(dict_)
# 'RGB'->'BGR'
x = x[..., ::-1]
# Freeze layers (Keras)
for layer in model.layers[:25]:
layer.trainable = False
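# Note (added): changes to layer.trainable only take effect once the model is compiled again,
# so a (re)compile call such as model.compile(optimizer=..., loss=...) should follow this loop.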
sys.path
dir(sys)
sys.tracebacklimit = 0 # default = 1000
x = np.arange(5).reshape(-1, 1)
x
x.reshape(-1, 1)
r = np.random.randint(-5, 5, 5)
r
r.reshape(-1, 1)
# Broadcasting to random shifted range
x + r
y = np.zeros_like(x + r)  # allocate the target array first; x + r broadcasts to shape (5, 5)
y[:] = x + r
y
###Output
_____no_output_____ |
python_programm.ipynb | ###Markdown
###Code
a = int(input("enter the number"))
b = int(input("enter the number"))
c = int(input("enter the number"))
d = int(input("enter the number"))
e = int(input("enter the number"))
# function
def divisibility_check(k):
if (k%3==0):
print(str(k) +" is divisible by 3")
else:
print(str(k) +" is not divisible by 3")
divisibility_check(a)
divisibility_check(b)
divisibility_check(c)
divisibility_check(d)
divisibility_check(d)
a = int(input("enter the number"))
b = int(input("enter the number"))
c = int(input("enter the number"))
d = int(input("enter the number"))
e = int(input("enter the number"))
# function
def divisibility_check_3 (k):
if (k%3==0):
print(str(k) +" is divisible by 3")
else:
print(str(k) + "is not divisible by 3")
def divisibility_check_2 (p):
  if (p%2==0):
    print(str(p) +" is divisible by 2")
  else:
    print(str(p) +" is not divisible by 2")
divisibility_check_3(a)
divisibility_check_3(b)
divisibility_check_3(c)
divisibility_check_3(d)
divisibility_check_2(e)
for i in range(1,10):
a = int(input("enter the number"))
if (a%3 ==0 ):
print(str(a)+ "is divisible by 3")
else:
print(str(a)+ "is not divisible by 3")
store = ["banana","mango","guava","pineapple","oranga"]
store.append("grapes")
store.remove("mango")
store.insert(0,"mango")
store.append("grapes")
store.sort()
print(store)
#tuple
fruits=("banana","mango","guava","pineapple","orange")
student=[("shree eswar","maths","85"),("aravind","maths","89")]
print(student)
print(fruits)
fruits={"banana","mango","orange","pineapple","orange"}
print(fruits)
student={"name" : "eswar" , "subjects" : "maths" , "marks" : "89"}
print(student)
#list of student dictionary
student={"name" : "eswar" , "subjects" : "maths" , "marks" : "89" }, {"name" : "tridev" , "subjects" : "science" , "marks" : "89" },{"name" : "anand" , "subject" : "english" , "marks" : "87" }
print(student)
# TRY
a = int(input("enter the number"))
b = int(input("enter the number"))
c = int(input("enter the number"))
d = int(input("enter the number"))
e = int(input("enter the munber"))
#function
def divisibility_check(k):
if (k%3==0):
print(str(k) +" divisible by 3")
else:
print(str(k) +" not divisible by 3")
divisibility_check(a)
divisibility_check(b)
divisibility_check(c)
divisibility_check(d)
divisibility_check(e)
# TRY
a = int(input("enter the numbre"))
b = int(input("enter the number"))
c = int(input("enter the number"))
d = int(input("enter the number"))
e = int(input("enter the number"))
f = int(input("enter the number"))
# function
def divisibility_check_3 (k):
if (k%3==0):
print(str(k) +" is divisible by 3")
else:
print(str(k) + "is not divisible by 3")
def divisibility_check_2 (p):
  if (p%2==0):
    print(str(p) +" is divisible by 2")
  else:
    print(str(p) +" is not divisible by 2")
divisibility_check_3(a)
divisibility_check_3(b)
divisibility_check_3(c)
divisibility_check_2(d)
divisibility_check_2(e)
divisibility_check_2(f)
# TRY
for i in range(1,10):
  a = int(input("enter the number"))
  if (a%3==0):
    print(str(a) +" is divisible by 3")
  else:
    print(str(a) +" is not divisible by 3")
# TRY
store = ["banana","apple","orange","pineapple","grapes"]
store.append("pear")
store.remove("apple")
store.insert(0,"mango")
print(store)
# TRY
#tuple
store = ("banana","apple","orange","pear","pineapple","grapes")
student = ("Eswar","maths","88"),("Tridev","maths","90")
print(store)
print(student)
# program to make a simple calculator
# This function adds two numbers
def add(a,b):
return a + b
# This function subtracts two numbers
def subtract(a,b):
return a - b
# This function multiplies two numbers
def multiply(a,b):
return a * b
# This function divides two numbers
def divide(a,b):
return a / b
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
# take input for the user
choice = input("enter choice(1/2/3/4)")
#check if choice is one of the four option
if choice in ('1','2','3','4'):
num1 = float(input("enter first number:"))
num2 = float(input("enter second number:"))
if choice == '1':
print(num1,"+",num2,"=",add(num1,num2))
elif choice == '2':
print(num1,"-",num2,"=",subtract(num1,num2))
elif choice == '3':
print(num1,"*",num2,"=",multiply(num1,num2))
elif choice == '4':
print(num1,"/",num2,"=",divide(num1,num2))
else:
    print("invalid input")
a = float(input("enter the first term : "))
d = float(input("enter the difference : "))
for n in range(1,11):
    print(a + (n-1)*d)
###Output
_____no_output_____ |
Example_LeastSquares.ipynb | ###Markdown
Linear least squares. Here the goal is to show how a model fitting problem can be decomposed and solved by ADMM. For this purpose, this notebook considers least-squares fitting. Problem: minimize $\frac{1}{2} \; \| A \; x - b \|_2^2$. Steps: 1. Devise a computation graph representing the problem as a bipartite graph; 2. Implement nodes as Java classes extending org.admm4j.core.Node; 3. Create the JSON input defining the graph; 4. Execute admm4j; 5. Import and analyze results.
###Code
import numpy as np
import matplotlib.pyplot as plt
import json
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
###Output
_____no_output_____
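###Markdown
 For reference, a sketch of the standard consensus-ADMM updates for this problem (the usual scaled-dual formulation; the exact scheme implemented by LeastSquaresNode and AveragingNode may differ in detail). With the rows split into blocks $(A_i, b_i)$ held by $N$ workers, each iteration performs $x_i^{k+1} = (A_i^T A_i + \rho I)^{-1}\left(A_i^T b_i + \rho (z^k - u_i^k)\right)$ on every worker, the master averages $z^{k+1} = \frac{1}{N}\sum_i \left(x_i^{k+1} + u_i^k\right)$, and each worker updates its scaled dual variable $u_i^{k+1} = u_i^k + x_i^{k+1} - z^{k+1}$.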
###Markdown
Generate data for linear regression
###Code
np.random.seed(1234)
num_points = 40
x = np.array([-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]) # underlying values
A = -20 + 40 * np.random.rand(num_points, x.shape[0]) # features
noise = np.random.normal(0, 1, (num_points,x.shape[0])) # noise
b = np.dot(A+noise, x) # target values
###Output
_____no_output_____
###Markdown
Step 1. Devise a computation graph representing the problem as a bipartite graph. The data is distributed across worker nodes: each worker solves a least-squares problem using its local data, and the master averages the results collected from the workers. Step 2. Implement nodes as Java classes extending org.admm4j.core.Node. The following classes are implemented: 1. org.admm4j.demo.ml.linearmodel.LeastSquaresNode.java, 2. org.admm4j.demo.common.AveragingNode.java. Step 3. Create the JSON input defining the graph. The JSON input has a well-defined structure that allows an arbitrary bipartite graph to be defined. Here the JSON input is created in Python.
###Code
# here we cluster the data into 3 clusters with data being distributed across 4 workers having 9 points in each node
num_workers = 4
points_per_node = 10
# init nodesI.
nodesI = []
for i in range(0, num_workers):
# define node
node = {}
node['name'] = 'worker{}'.format(i)
node['class'] = 'org.admm4j.demo.ml.linearmodel.LeastSquaresNode'
node['neighbors'] = ['master']
node['input'] = {'A': A[i*points_per_node:(i+1)*points_per_node:,:].tolist(),\
'b': b[i*points_per_node:(i+1)*points_per_node].tolist()}
# add to the list of nodesI
nodesI.append(node)
# init nodesII
nodesII = []
node = {}
node['name'] = 'master'
node['class'] = 'org.admm4j.demo.common.AveragingNode'
node['neighbors'] = None
node['input'] = None
nodesII.append(node)
# init whole json model
graph = {'nodesI': nodesI, 'nodesII': nodesII}
###Output
_____no_output_____
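###Markdown
 Before looking at the JSON itself, a minimal NumPy sketch of the local solve each worker is assumed to perform (the x-update sketched above); this is only an illustration, not the actual Java implementation in LeastSquaresNode.
###Code
# Illustrative x-update for a single worker, using the first worker's slice of the data
rho = 1.0
Ai, bi = A[:points_per_node], b[:points_per_node]   # this worker's local block
z = np.zeros(Ai.shape[1])                            # consensus variable from the master
ui = np.zeros(Ai.shape[1])                           # scaled dual variable
xi = np.linalg.solve(Ai.T @ Ai + rho * np.eye(Ai.shape[1]),
                     Ai.T @ bi + rho * (z - ui))
print(xi)
###Output
 _____no_output_____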
###Markdown
Show JSON model
###Code
print(json.dumps(graph))
###Output
{"nodesI": [{"name": "worker0", "class": "org.admm4j.demo.ml.linearmodel.LeastSquaresNode", "neighbors": ["master"], "input": {"A": [[-12.339221984844308, 4.884350841593275, -2.4908904397154217, 11.41434334855077, 11.199032324752139, -9.096295788694334, -8.941429794276132, 12.074887101400769, 18.325574147348206, 15.037305389683787], [-5.687309201685334, 0.039805020938349145, 7.338517406885451, 8.508081079316007, -5.189969808384202, 2.4478474426249974, 0.12332661231238973, -19.44926201637271, 10.913064864494963, 15.305647625444664], [-5.404560643945109, 4.615847137339749, -16.98475033428094, -5.2470397599210195, 17.325604079300867, 6.055125729063096, -4.111896890953833, 11.549205717629821, -7.326555113245149, 2.723946105042767], [14.765095582449035, -2.5530630441728235, 12.085905683206363, -14.249327019417418, 8.170438844733418, 8.183252327582903, -11.248315773036456, 16.9947051446226, -2.314369783832934, 16.372638358898904], [-17.607631088805924, -12.628516647447455, -18.105788847939394, 6.99523774329321, 3.7849911973779555, 1.3324065199500232, -18.267037492207862, 2.457323202535914, -6.813262175163398, 0.11867332450473356], [-15.524227297023847, 4.287748248739383, 2.637785722021256, -19.729437520399888, 4.697668352171881, 16.484915457326174, 11.620965322281336, 19.68325864753446, 18.352070486114663, 11.678565411665591], [-8.58996159901961, 4.996668212236443, -0.8762481731730176, -12.172992853364072, -4.707301918739741, -17.845052594150538, -1.9340636695656386, 19.28018966087818, -15.04229198052148, -15.224764082950063], [9.54092224573387, 3.4921453385593857, -1.134698627185287, -15.714927312245347, -10.831257381575284, 15.998607793467016, -3.329858487892274, 1.4340665012646348, -19.751659336514823, -7.974331769187955], [-2.5242731129755924, 4.485959882630301, 16.72792301522292, 5.02946679850141, 8.23990260327093, -14.00665136040291, 9.842536365468668, 13.240279697341514, 5.349030758039163, -2.4676047551028972], [-13.897089013019785, 2.7363846098876046, 1.1289711034024208, 18.05715055014373, -0.7856328595993567, 0.10238253530201291, 1.4751277169763881, 12.76808268256633, -17.715374476445604, 6.776869722981953]], "b": [167.8014631136075, 62.66084163893335, 61.85876967735084, 43.59102523711901, 100.46959050748987, 322.0654342334289, -69.12321347557518, -114.5521939570391, -0.6249390647239714, 14.664174074604995]}}, {"name": "worker1", "class": "org.admm4j.demo.ml.linearmodel.LeastSquaresNode", "neighbors": ["master"], "input": {"A": [[10.6846651351789, 8.324614479104152, 11.874687349007864, 2.310433137097977, 18.633461279685108, -14.113724004280112, -18.814119978583378, 3.7557397049908694, -15.4373720502935, 18.032394003364885], [-6.97170342298611, -12.255252393848913, -1.6875340441028968, 16.816102843723513, 15.16276646058703, -9.895369798138791, -6.079648285226153, -12.69645073678765, 16.071842054839685, 8.261126526871898], [9.066338462485632, 16.003513472388306, 11.16655203077297, 3.966191224171695, -8.354990204085006, -13.944189423702715, -6.593013634022604, 6.302071086312779, -17.066298254695273, -17.799744183751052], [-7.0722074431529425, 3.619272178519445, 14.155942685025671, -8.517502999999639, -13.077310927408314, -14.639151760046287, 19.786153145771777, -12.820085221224327, -7.298127078912167, 2.7316561863642903], [-19.62605701999022, 16.02594484620368, 19.089657236903484, 2.275787165473396, -16.609046264232393, -6.679901370835996, 9.13714705478688, -14.30258506632731, 2.0987575799018217, -9.078269612652576], [18.97980552349039, 6.7114762458015065, -9.773868542634414, 
-15.667540232690849, 11.047228926955288, 11.299119704008081, 10.464156572299764, 16.57612453077295, 6.344911277696905, 2.7347032629172965], [-11.929772301880867, 7.931855022070447, 18.087816392619096, 15.598531485680205, 19.742694514526946, 12.74814040815702, 1.8048866471887948, -1.9498378014360682, 15.622287517154291, 18.930591644746755], [3.736453182758787, -5.357020091181166, -7.076212281350021, 14.856930201947918, -11.374637480464166, 9.397807542487854, -5.375236505312575, 12.064103940110407, 11.309423679015048, 8.05421516946772], [4.911063464127963, -0.2526941701155181, 13.621508004739546, 8.483879477745894, -2.2436407433659014, -18.758605554666694, -5.470409592256079, 9.228871656413553, -0.9773370766543366, -6.223321195119894], [5.6352173998407515, -14.951787135424022, -13.14138956143001, 9.483459747540866, -14.918824257103903, -5.214005002134927, 4.173360193441056, -15.87582244577411, 12.09496729348841, 17.822129433684978]], "b": [-154.2527330789328, 95.64737865597965, -314.4273142554111, -21.261041922213945, -86.69463285062157, 39.04708737181019, 90.44855208686263, 109.9037421709396, -116.93547417648769, 160.8636049624077]}}, {"name": "worker2", "class": "org.admm4j.demo.ml.linearmodel.LeastSquaresNode", "neighbors": ["master"], "input": {"A": [[19.1615527973575, 15.24928985355708, 5.107276858559008, 17.219461336106626, 8.991598124097557, 8.667115442214246, -18.356857336632647, -2.4207289933010046, -8.717208675356488, -6.6001612443229085], [-16.658919718223988, 10.433965882295443, 0.3708978865581578, 6.441896704338088, 5.212577699371259, -5.162926901698901, -2.1303939307662105, -3.395671192096273, -0.7844596794637368, 19.332942935538647], [-5.063051705858506, -19.503720730891455, 16.876133511961157, 14.931042813768315, -5.93012713649645, 5.205324500091685, -5.6869216263323565, -11.487202494061401, -11.067231075093913, -3.217448330766892], [-17.08359115687733, 6.033562805467692, 10.2274459580375, 17.28404905247249, -4.9438336167200845, -8.122908662018663, -5.122227500233993, 13.10797435214537, 16.045581410011792, -2.9078486057001527], [-19.912422767075135, -18.321390217413192, -14.328504673489416, 3.929487404206668, -15.741579119525566, -8.113640553027285, -17.664320403514687, 4.852994410054464, -19.092780955008667, 14.202187445266269], [-7.746511885349104, 10.351337790156165, 2.5310044055946435, -18.473432712854073, 2.620668962393516, 6.354939545928346, -12.669346024292887, 11.896452323645544, 4.494697178757953, 2.2261353779894755], [5.179661784895373, 7.447203013244081, -10.384698499088536, 11.517111519181586, 14.25909828238283, 9.111840623944765, 7.693805736638446, -1.1059624371926091, 14.274917677380031, 6.589733356172243], [-6.664962308987018, 0.7258199702525303, -4.212456662193098, -18.758016065745547, 3.0735937253248338, 14.01006981395443, 18.059554606945802, 7.319151884330669, -16.95349807215888, -3.5968822891005203], [-18.32394404913053, -13.032526736984833, 2.616080222590142, -13.084353176005585, 2.6644191177778467, 0.5601701179816452, 14.754835501895073, 8.813889917238114, 16.143653882811932, -8.895400070665115], [-9.733382712740482, 7.988130514456429, 16.062755849957576, 19.347693004863125, 5.6365124970498, -6.799704458591563, 4.267008649166403, 12.886391543035622, 5.118602618446463, -15.283077699971589]], "b": [-310.6161928172746, 98.98627416631402, -73.06594910606296, 54.648972331108105, 182.7282271222881, 80.40928105825154, 58.14428781345251, 68.7150780857935, 231.01697557009018, -86.33690243776405]}}, {"name": "worker3", "class": 
"org.admm4j.demo.ml.linearmodel.LeastSquaresNode", "neighbors": ["master"], "input": {"A": [[-8.564893065953747, 19.469888613514584, -2.7279660603630234, 2.9693765513225756, 1.089683066969208, -12.093878024899492, -13.371662711006636, 0.039618316887942484, 19.60816240341257, -4.240327547654129], [8.560530611064596, -12.96382922306909, -17.172152849982293, -13.695076547500108, -13.537679459646563, -8.65451670951443, 1.4682410225626406, 10.92954859572842, 15.213743251088175, 5.49565787829621], [14.104089610182122, 3.9182148609770806, 7.753866412082516, 1.487066925864049, 15.569203338627837, -17.909342949727215, 11.327348031718714, -14.193069235932306, -17.652062960482112, -17.64021676577847], [-17.921150805278984, 0.639443537663432, -3.8001511960274605, 19.985983750807748, -15.657199577894229, -6.904627900208755, 19.9131972823241, -3.9554243304780243, 15.333430658925039, 2.765543221097353], [18.12963558577666, 15.415489766209461, 11.171423663277793, -18.7276774785531, 18.50474119133905, 0.7854177692560995, -11.68835954184022, 15.003912269546028, -11.033940839896168, -14.453626938523616], [9.009571500248082, 18.95155638029989, 1.419275063813906, -2.208080079980377, -19.1155528792596, 4.2395336126197805, 18.591860958267205, 18.703659238677403, 17.201152370421596, -12.612806720158707], [4.918486334457633, -3.4814932094747135, -5.477551911780644, -18.53588234620634, 14.725948382875117, 6.913128505125968, -16.501135437531055, 15.478637569455785, 11.298582030615869, -7.30782354948159], [12.732551495480273, 0.3022027296939278, -19.152267929277276, -2.6591296708487313, -2.1474777618482364, -10.447200423710807, 13.209829273643017, 9.79056706452213, 3.4591600384066226, -0.2852858973807173], [-0.5057648063522819, -9.330372163083185, 4.200444057588676, 10.14174883961148, -9.176630881870654, 0.8921313252990402, -16.066858791457555, 8.545466998779148, 15.361623760639283, 2.6821768491201325], [19.779263155889282, -12.850409189497674, -19.51199654822288, -1.7200607777167676, 17.270077674198035, 13.84098752871499, -1.0668049571895963, 16.10220104190941, -10.96017894650541, -7.833850453736545]], "b": [-20.44540006479864, 219.89844967341367, -341.5911450034827, 175.29022288956494, -254.14064726692754, 3.8387699972140013, 63.718747647836764, 55.35952498827272, 83.84482783940277, -44.722765767189614]}}], "nodesII": [{"name": "master", "class": "org.admm4j.demo.common.AveragingNode", "neighbors": null, "input": null}]}
###Markdown
Save JSON input file
###Code
filename = 'least_squares_input.json'
fout = open(filename, 'w')
json.dump(graph, fout, indent=4)
fout.close()
###Output
_____no_output_____
###Markdown
Step 4. Execute admm4j. The following parameters are provided: 1. -input, 2. -output, 3. -nvar, 4. -rho. Note: -nvar and -rho are provided on the command line.
###Code
!java -jar admm4j-demo/target/admm4j-demo-1.0-jar-with-dependencies.jar\
-input least_squares_input.json\
-output least_squares_output.json\
-nvar 10\
-rho 1
###Output
_____no_output_____
###Markdown
Step 5. Import and analyze results
###Code
fin = open('least_squares_output.json', 'r')
res = json.loads(fin.read())
fin.close()
###Output
_____no_output_____
###Markdown
Show results and evaluate performance
###Code
x = res.get('nodesI')[0].get('variables').get('master')
print('Coefficients', x)
print('Mean squared error: %.2f' % mean_squared_error(b, np.dot(A,x)))
###Output
Coefficients [-4.795597453255772, -3.968367529875729, -2.6521910090508722, -2.2005638387809294, -0.9914573623761608, 1.1220004863876587, 2.0058960115138804, 2.8389483775574624, 4.185975599976671, 5.25831356764516]
Mean squared error: 61.18
###Markdown
It can be seen that reasonable results are obtained. Comparing with LinearRegression from scikit-learn
###Code
model = LinearRegression(fit_intercept=True).fit(A, b)
#print(model.intercept_)
print('Coefficients', model.coef_)
print('Mean squared error: %.2f' % mean_squared_error(b, model.predict(A)))
###Output
Coefficients [-4.80284843 -3.96067251 -2.64496968 -2.18549362 -0.98656868 1.10584387
2.009803 2.88621879 4.18509478 5.27464015]
Mean squared error: 60.30
###Markdown
The difference can be due to the choice of scaling parameter $\rho$. Example 2: Different structure of the computation graph. The purpose of this example is to show that the computation graph can be defined in many different ways as long as its structure represents a bipartite graph. In this graph, workers solve least-squares problems using local data, masters coordinate the nodes connected to them, and a connector connects the masters. Both the connector and the masters implement simple averaging. A similar modeling approach can be adopted to address more complex problems. Create the JSON input defining the graph
###Code
# here we cluster the data into 3 clusters with data being distributed across 4 workers having 9 points in each node
num_workers = 4
points_per_node = 10
# init nodesI.
nodesI = []
node = {}
node['name'] = 'connector'
node['class'] = 'org.admm4j.demo.common.AveragingNode'
node['neighbors'] = None
node['input'] = None
nodesI.append(node)
for i in range(0, num_workers):
# define node
node = {}
node['name'] = 'worker{}'.format(i)
node['class'] = 'org.admm4j.demo.ml.linearmodel.LeastSquaresNode'
node['neighbors'] = None
node['input'] = {'A': A[i*points_per_node:(i+1)*points_per_node:,:].tolist(),\
'b': b[i*points_per_node:(i+1)*points_per_node].tolist()}
# add to the list of nodesI
nodesI.append(node)
# init nodesII
nodesII = []
node = {}
node['name'] = 'master0'
node['class'] = 'org.admm4j.demo.common.AveragingNode'
node['neighbors'] = ['worker0', 'worker1', 'connector']
node['input'] = None
nodesII.append(node)
node = {}
node['name'] = 'master1'
node['class'] = 'org.admm4j.demo.common.AveragingNode'
node['neighbors'] = ['worker2', 'worker3', 'connector']
node['input'] = None
nodesII.append(node)
# init whole json model
graph = {'nodesI': nodesI, 'nodesII': nodesII}
###Output
_____no_output_____
###Markdown
Show JSON model of the graph
###Code
print(json.dumps(graph))
###Output
{"nodesI": [{"name": "connector", "class": "org.admm4j.demo.common.AveragingNode", "neighbors": null, "input": null}, {"name": "worker0", "class": "org.admm4j.demo.ml.linearmodel.LeastSquaresNode", "neighbors": null, "input": {"A": [[-12.339221984844308, 4.884350841593275, -2.4908904397154217, 11.41434334855077, 11.199032324752139, -9.096295788694334, -8.941429794276132, 12.074887101400769, 18.325574147348206, 15.037305389683787], [-5.687309201685334, 0.039805020938349145, 7.338517406885451, 8.508081079316007, -5.189969808384202, 2.4478474426249974, 0.12332661231238973, -19.44926201637271, 10.913064864494963, 15.305647625444664], [-5.404560643945109, 4.615847137339749, -16.98475033428094, -5.2470397599210195, 17.325604079300867, 6.055125729063096, -4.111896890953833, 11.549205717629821, -7.326555113245149, 2.723946105042767], [14.765095582449035, -2.5530630441728235, 12.085905683206363, -14.249327019417418, 8.170438844733418, 8.183252327582903, -11.248315773036456, 16.9947051446226, -2.314369783832934, 16.372638358898904], [-17.607631088805924, -12.628516647447455, -18.105788847939394, 6.99523774329321, 3.7849911973779555, 1.3324065199500232, -18.267037492207862, 2.457323202535914, -6.813262175163398, 0.11867332450473356], [-15.524227297023847, 4.287748248739383, 2.637785722021256, -19.729437520399888, 4.697668352171881, 16.484915457326174, 11.620965322281336, 19.68325864753446, 18.352070486114663, 11.678565411665591], [-8.58996159901961, 4.996668212236443, -0.8762481731730176, -12.172992853364072, -4.707301918739741, -17.845052594150538, -1.9340636695656386, 19.28018966087818, -15.04229198052148, -15.224764082950063], [9.54092224573387, 3.4921453385593857, -1.134698627185287, -15.714927312245347, -10.831257381575284, 15.998607793467016, -3.329858487892274, 1.4340665012646348, -19.751659336514823, -7.974331769187955], [-2.5242731129755924, 4.485959882630301, 16.72792301522292, 5.02946679850141, 8.23990260327093, -14.00665136040291, 9.842536365468668, 13.240279697341514, 5.349030758039163, -2.4676047551028972], [-13.897089013019785, 2.7363846098876046, 1.1289711034024208, 18.05715055014373, -0.7856328595993567, 0.10238253530201291, 1.4751277169763881, 12.76808268256633, -17.715374476445604, 6.776869722981953]], "b": [167.8014631136075, 62.66084163893335, 61.85876967735084, 43.59102523711901, 100.46959050748987, 322.0654342334289, -69.12321347557518, -114.5521939570391, -0.6249390647239714, 14.664174074604995]}}, {"name": "worker1", "class": "org.admm4j.demo.ml.linearmodel.LeastSquaresNode", "neighbors": null, "input": {"A": [[10.6846651351789, 8.324614479104152, 11.874687349007864, 2.310433137097977, 18.633461279685108, -14.113724004280112, -18.814119978583378, 3.7557397049908694, -15.4373720502935, 18.032394003364885], [-6.97170342298611, -12.255252393848913, -1.6875340441028968, 16.816102843723513, 15.16276646058703, -9.895369798138791, -6.079648285226153, -12.69645073678765, 16.071842054839685, 8.261126526871898], [9.066338462485632, 16.003513472388306, 11.16655203077297, 3.966191224171695, -8.354990204085006, -13.944189423702715, -6.593013634022604, 6.302071086312779, -17.066298254695273, -17.799744183751052], [-7.0722074431529425, 3.619272178519445, 14.155942685025671, -8.517502999999639, -13.077310927408314, -14.639151760046287, 19.786153145771777, -12.820085221224327, -7.298127078912167, 2.7316561863642903], [-19.62605701999022, 16.02594484620368, 19.089657236903484, 2.275787165473396, -16.609046264232393, -6.679901370835996, 9.13714705478688, -14.30258506632731, 2.0987575799018217, 
-9.078269612652576], [18.97980552349039, 6.7114762458015065, -9.773868542634414, -15.667540232690849, 11.047228926955288, 11.299119704008081, 10.464156572299764, 16.57612453077295, 6.344911277696905, 2.7347032629172965], [-11.929772301880867, 7.931855022070447, 18.087816392619096, 15.598531485680205, 19.742694514526946, 12.74814040815702, 1.8048866471887948, -1.9498378014360682, 15.622287517154291, 18.930591644746755], [3.736453182758787, -5.357020091181166, -7.076212281350021, 14.856930201947918, -11.374637480464166, 9.397807542487854, -5.375236505312575, 12.064103940110407, 11.309423679015048, 8.05421516946772], [4.911063464127963, -0.2526941701155181, 13.621508004739546, 8.483879477745894, -2.2436407433659014, -18.758605554666694, -5.470409592256079, 9.228871656413553, -0.9773370766543366, -6.223321195119894], [5.6352173998407515, -14.951787135424022, -13.14138956143001, 9.483459747540866, -14.918824257103903, -5.214005002134927, 4.173360193441056, -15.87582244577411, 12.09496729348841, 17.822129433684978]], "b": [-154.2527330789328, 95.64737865597965, -314.4273142554111, -21.261041922213945, -86.69463285062157, 39.04708737181019, 90.44855208686263, 109.9037421709396, -116.93547417648769, 160.8636049624077]}}, {"name": "worker2", "class": "org.admm4j.demo.ml.linearmodel.LeastSquaresNode", "neighbors": null, "input": {"A": [[19.1615527973575, 15.24928985355708, 5.107276858559008, 17.219461336106626, 8.991598124097557, 8.667115442214246, -18.356857336632647, -2.4207289933010046, -8.717208675356488, -6.6001612443229085], [-16.658919718223988, 10.433965882295443, 0.3708978865581578, 6.441896704338088, 5.212577699371259, -5.162926901698901, -2.1303939307662105, -3.395671192096273, -0.7844596794637368, 19.332942935538647], [-5.063051705858506, -19.503720730891455, 16.876133511961157, 14.931042813768315, -5.93012713649645, 5.205324500091685, -5.6869216263323565, -11.487202494061401, -11.067231075093913, -3.217448330766892], [-17.08359115687733, 6.033562805467692, 10.2274459580375, 17.28404905247249, -4.9438336167200845, -8.122908662018663, -5.122227500233993, 13.10797435214537, 16.045581410011792, -2.9078486057001527], [-19.912422767075135, -18.321390217413192, -14.328504673489416, 3.929487404206668, -15.741579119525566, -8.113640553027285, -17.664320403514687, 4.852994410054464, -19.092780955008667, 14.202187445266269], [-7.746511885349104, 10.351337790156165, 2.5310044055946435, -18.473432712854073, 2.620668962393516, 6.354939545928346, -12.669346024292887, 11.896452323645544, 4.494697178757953, 2.2261353779894755], [5.179661784895373, 7.447203013244081, -10.384698499088536, 11.517111519181586, 14.25909828238283, 9.111840623944765, 7.693805736638446, -1.1059624371926091, 14.274917677380031, 6.589733356172243], [-6.664962308987018, 0.7258199702525303, -4.212456662193098, -18.758016065745547, 3.0735937253248338, 14.01006981395443, 18.059554606945802, 7.319151884330669, -16.95349807215888, -3.5968822891005203], [-18.32394404913053, -13.032526736984833, 2.616080222590142, -13.084353176005585, 2.6644191177778467, 0.5601701179816452, 14.754835501895073, 8.813889917238114, 16.143653882811932, -8.895400070665115], [-9.733382712740482, 7.988130514456429, 16.062755849957576, 19.347693004863125, 5.6365124970498, -6.799704458591563, 4.267008649166403, 12.886391543035622, 5.118602618446463, -15.283077699971589]], "b": [-310.6161928172746, 98.98627416631402, -73.06594910606296, 54.648972331108105, 182.7282271222881, 80.40928105825154, 58.14428781345251, 68.7150780857935, 231.01697557009018, 
-86.33690243776405]}}, {"name": "worker3", "class": "org.admm4j.demo.ml.linearmodel.LeastSquaresNode", "neighbors": null, "input": {"A": [[-8.564893065953747, 19.469888613514584, -2.7279660603630234, 2.9693765513225756, 1.089683066969208, -12.093878024899492, -13.371662711006636, 0.039618316887942484, 19.60816240341257, -4.240327547654129], [8.560530611064596, -12.96382922306909, -17.172152849982293, -13.695076547500108, -13.537679459646563, -8.65451670951443, 1.4682410225626406, 10.92954859572842, 15.213743251088175, 5.49565787829621], [14.104089610182122, 3.9182148609770806, 7.753866412082516, 1.487066925864049, 15.569203338627837, -17.909342949727215, 11.327348031718714, -14.193069235932306, -17.652062960482112, -17.64021676577847], [-17.921150805278984, 0.639443537663432, -3.8001511960274605, 19.985983750807748, -15.657199577894229, -6.904627900208755, 19.9131972823241, -3.9554243304780243, 15.333430658925039, 2.765543221097353], [18.12963558577666, 15.415489766209461, 11.171423663277793, -18.7276774785531, 18.50474119133905, 0.7854177692560995, -11.68835954184022, 15.003912269546028, -11.033940839896168, -14.453626938523616], [9.009571500248082, 18.95155638029989, 1.419275063813906, -2.208080079980377, -19.1155528792596, 4.2395336126197805, 18.591860958267205, 18.703659238677403, 17.201152370421596, -12.612806720158707], [4.918486334457633, -3.4814932094747135, -5.477551911780644, -18.53588234620634, 14.725948382875117, 6.913128505125968, -16.501135437531055, 15.478637569455785, 11.298582030615869, -7.30782354948159], [12.732551495480273, 0.3022027296939278, -19.152267929277276, -2.6591296708487313, -2.1474777618482364, -10.447200423710807, 13.209829273643017, 9.79056706452213, 3.4591600384066226, -0.2852858973807173], [-0.5057648063522819, -9.330372163083185, 4.200444057588676, 10.14174883961148, -9.176630881870654, 0.8921313252990402, -16.066858791457555, 8.545466998779148, 15.361623760639283, 2.6821768491201325], [19.779263155889282, -12.850409189497674, -19.51199654822288, -1.7200607777167676, 17.270077674198035, 13.84098752871499, -1.0668049571895963, 16.10220104190941, -10.96017894650541, -7.833850453736545]], "b": [-20.44540006479864, 219.89844967341367, -341.5911450034827, 175.29022288956494, -254.14064726692754, 3.8387699972140013, 63.718747647836764, 55.35952498827272, 83.84482783940277, -44.722765767189614]}}], "nodesII": [{"name": "master0", "class": "org.admm4j.demo.common.AveragingNode", "neighbors": ["worker0", "worker1", "connector"], "input": null}, {"name": "master1", "class": "org.admm4j.demo.common.AveragingNode", "neighbors": ["worker2", "worker3", "connector"], "input": null}]}
###Markdown
Save JSON input file
###Code
filename = 'least_squares_input2.json'
fout = open(filename, 'w')
json.dump(graph, fout, indent=4)
fout.close()
###Output
_____no_output_____
###Markdown
Execute admm4j. The following parameters are provided: 1. -input, 2. -output, 3. -nvar, 4. -rho. Note: -nvar and -rho are provided on the command line.
###Code
!java -jar admm4j-demo/target/admm4j-demo-1.0-jar-with-dependencies.jar\
-input least_squares_input2.json\
-output least_squares_output2.json\
-nvar 10\
-rho 1
###Output
_____no_output_____
###Markdown
Import and analyze results
###Code
fin = open('least_squares_output2.json', 'r')
res = json.loads(fin.read())
fin.close()
# all nodes converged to the same variable values
x = res.get('nodesII')[0].get('variables').get('worker0')
print('Coefficients', x)
print('Mean squared error: %.2f' % mean_squared_error(b, np.dot(A,x)))
###Output
Coefficients [-4.794264220736139, -3.966588460214021, -2.652229092031883, -2.1995418098782498, -0.9911065704421048, 1.1224337798368926, 2.004735160304724, 2.8373401876842315, 4.185473461295792, 5.256226347660851]
Mean squared error: 61.18
|
_notebooks/2022_02_23_Tabular_Modeling.ipynb | ###Markdown
Tabular Modeling> We use decision trees and random forest to predict the price of used cars.- toc: true - badges: true- comments: true- categories: [jupyter]- image: images/used_cars.jpg
###Code
#hide
from google.colab import drive
drive.mount('/content/drive')
#hide
%cd /content/drive/MyDrive/Kaggle/"Used-cars price"
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#hide
#skip
![ -e /content ] && pip install -Uqq fastbook treeinterpreter waterfallcharts dtreeviz
#hide
from fastbook import *
from pandas.api.types import is_string_dtype, is_numeric_dtype, is_categorical_dtype
from pandas.core.common import SettingWithCopyError
from fastai.tabular.all import *
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from dtreeviz.trees import *
from IPython.display import Image, display_svg, SVG
pd.options.display.max_rows = 20
pd.options.display.max_columns = 8
###Output
_____no_output_____
###Markdown
Modern machine learning can be distilled down to a couple of key techniques that are widely applicable. Recent studies have shown that the vast majority of datasets can be best modeled with just two methods: - Ensembles of decision trees (i.e., random forests and gradient boosting machines), mainly for structured data (such as you might find in a database table at most companies) - Multilayered neural networks learned with SGD (i.e., shallow and/or deep learning), mainly for unstructured data (such as audio, images, and natural language). Deep learning often gives superior results for unstructured data, but for many kinds of structured data these two approaches tend to give quite similar results. But ensembles of decision trees tend to train faster, are often easier to interpret, do not require special GPU hardware for inference at scale, and often require less hyperparameter tuning. Most importantly, the critical step of interpreting a model of tabular data is significantly easier for decision tree ensembles. There are tools and methods for answering the pertinent questions, like: Which columns in the dataset were the most important for your predictions? How are they related to the dependent variable? How do they interact with each other? And which particular features were most important for some particular observation? Therefore, ensembles of decision trees are our first approach for analyzing a new tabular dataset. The Dataset The dataset we use is the used cars dataset of [Craigslist from Kaggle](https://www.kaggle.com/austinreese/craigslist-carstrucks-data), which is the world's largest collection of used vehicles for sale.
###Code
#hide
#!kaggle datasets download -d austinreese/craigslist-carstrucks-data
#hide
#!unzip \*.zip && rm *.zip
###Output
_____no_output_____
###Markdown
Look at the Data
###Code
df = pd.read_csv('vehicles.csv', low_memory=False)
df.info()
drop_columns = ['id', 'url', 'region_url', 'image_url', 'description',
'size', 'county', 'posting_date', 'VIN', 'paint_color']
df = df.drop(columns = drop_columns)
###Output
_____no_output_____
###Markdown
At this point, a good next step is to handle ordinal columns. This refers to columns containing strings or similar, but where those strings have a natural ordering. For instance, here are the levels of conditions:
###Code
df['condition'].unique()
###Output
_____no_output_____
###Markdown
We can tell Pandas about a suitable ordering of these levels like so:
###Code
conditions = 'new', 'like new', 'excellent', 'good', 'fair', 'salvage'
df['condition'] = df['condition'].astype('category')
df['condition'].cat.set_categories(conditions, ordered=True, inplace=True)
df['cylinders'].unique()
cylinders = '12 cylinders', '10 cylinders', '8 cylinders', '6 cylinders', '5 cylinders',
'4 cylinders', '3 cylinders', 'other'
df['cylinders'] = df['cylinders'].astype('category')
df['cylinders'].cat.set_categories(cylinders, ordered=True, inplace=True)
###Output
/usr/local/lib/python3.7/dist-packages/pandas/core/arrays/categorical.py:2631: FutureWarning: The `inplace` parameter in pandas.Categorical.set_categories is deprecated and will be removed in a future version. Removing unused categories will always return a new Categorical object.
res = method(*args, **kwargs)
###Markdown
The most important data column is the dependent variable—that is, the one we want to predict. Recall that a model's metric is a function that reflects how good the predictions are. It's important to note what metric is being used for a project. Generally, selecting the metric is an important part of the project setup. Let's look at the price column.
###Code
df[df['price']<10000]['price'].sample(n=1000, random_state=1).hist(bins=25);
###Output
_____no_output_____
###Markdown
In this case we will use root mean squared log error (RMSLE) between the actual and predicted prices. It is an extension of Mean Squared Error (MSE) that is mainly used when predictions can deviate widely, which is the case with used car prices. Prices range from 0 up to many thousands, and we don't want to punish large absolute deviations as heavily as MSE would. Note that when the price is equal to zero, this probably means that the price value is missing. We can see this in one example.
###Code
np.array(df.loc[10,:])
###Output
_____no_output_____
###Markdown
We need to do only a small amount of processing to use this: we take the log of the prices, so that rmse of that value will give us what we ultimately need:
###Code
dep_var = 'price'
cond = df.price > 0
df = df[cond]
try:
df.loc[:,'price'] = np.log(df['price'])
except SettingWithCopyError:
pass
###Output
_____no_output_____
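###Markdown
 To make the metric concrete, a small sketch (the helper name here is just illustrative): because every remaining price is positive, the RMSE computed on the log-transformed prices is exactly the RMSLE on the original prices.
###Code
# Sketch: RMSLE on raw prices == RMSE on log prices (all prices are > 0 after the filter above)
def rmsle(pred_price, true_price):
    return np.sqrt(np.mean((np.log(pred_price) - np.log(true_price))**2))
###Output
 _____no_output_____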
###Markdown
Decision trees
###Code
procs = [Categorify, FillMissing]
###Output
_____no_output_____
###Markdown
Categorify is a TabularProc that replaces a column with a numeric categorical column. FillMissing is a TabularProc that replaces missing values with the median of the column, and creates a new Boolean column that is set to True for any row where the value was missing. We want to ensure that the model is able to predict the future, which means that if we are going to have a useful validation set, we also want the validation set to be later in time than the training set. The Kaggle data runs up to 2022, so we will define a training set which consists only of the cars with a model year before 2019, and a validation set consisting of the cars from 2019 onward.
###Code
df = df[df['year'].notnull()]
cond = (df.year<2019)
train_idx = np.where( cond)[0]
valid_idx = np.where(~cond)[0]
splits = (list(train_idx),list(valid_idx))
cont,cat = cont_cat_split(df, 1, dep_var=dep_var)
to = TabularPandas(df, procs, cat, cont, y_names=dep_var, splits=splits)
###Output
_____no_output_____
###Markdown
A TabularPandas behaves a lot like a fastai Datasets object, including providing train and valid attributes:
###Code
len(to.train),len(to.valid)
#hide_output
to.show(3)
save_pickle('to.pkl',to)
###Output
_____no_output_____
###Markdown
Creating the Decision Tree
###Code
#hide
to = load_pickle('to.pkl')
xs,y = to.train.xs,to.train.y
valid_xs,valid_y = to.valid.xs,to.valid.y
###Output
_____no_output_____
###Markdown
Now that our data is all numeric, and there are no missing values, we can create a decision tree:
###Code
m = DecisionTreeRegressor(max_leaf_nodes=4)
m.fit(xs, y);
###Output
_____no_output_____
###Markdown
To keep it simple, we've told sklearn to just create four leaf nodes. To see what it's learned, we can display the tree:
###Code
draw_tree(m, xs, size=10, leaves_parallel=True, precision=2)
###Output
_____no_output_____
###Markdown
We can show the same information using Terence Parr's powerful dtreeviz library:
###Code
samp_idx = np.random.permutation(len(y))[:500]
dtreeviz(m, xs.iloc[samp_idx], y.iloc[samp_idx], xs.columns, dep_var,
fontname='DejaVu Sans', scale=1.6, label_fontsize=10,
orientation='LR')
###Output
/usr/local/lib/python3.7/dist-packages/sklearn/base.py:451: UserWarning: X does not have valid feature names, but DecisionTreeRegressor was fitted with feature names
"X does not have valid feature names, but"
###Markdown
This shows a chart of the distribution of the data for each split point. We can clearly see that there's a problem with our year data: there are cars made in the year 1927, apparently! For modeling purposes, 1927 is fine, but as you can see this outlier makes visualization of the values we are interested in more difficult. So, let's replace any year earlier than 1980 with 1980:
###Code
xs.loc[xs['year']<1980, 'year'] = 1980
valid_xs.loc[valid_xs['year']<1980, 'year'] = 1980
###Output
_____no_output_____
###Markdown
That change makes the split much clearer in the tree visualization, even though it doesn't actually change the result of the model in any significant way. This is a great example of how resilient decision trees are to data issues!
###Code
m = DecisionTreeRegressor(max_leaf_nodes=4).fit(xs, y)
dtreeviz(m, xs.iloc[samp_idx], y.iloc[samp_idx], xs.columns, dep_var,
fontname='DejaVu Sans', scale=1.6, label_fontsize=10,
orientation='LR')
###Output
/usr/local/lib/python3.7/dist-packages/sklearn/base.py:451: UserWarning: X does not have valid feature names, but DecisionTreeRegressor was fitted with feature names
"X does not have valid feature names, but"
###Markdown
Let's now have the decision tree algorithm build a bigger tree. Here, we are not passing in any stopping criteria such as max_leaf_nodes:
###Code
m = DecisionTreeRegressor()
m.fit(xs, y);
def r_mse(pred,y): return round(math.sqrt(((pred-y)**2).mean()), 6)
def m_rmse(m, xs, y): return r_mse(m.predict(xs), y)
m_rmse(m, xs, y)
m_rmse(m, valid_xs, valid_y)
###Output
_____no_output_____
###Markdown
It looks like we might be overfitting pretty badly. Here's why:
###Code
m.get_n_leaves(), len(xs)
###Output
_____no_output_____
###Markdown
We've got too many nodes! Indeed, sklearn's default settings allow it to continue splitting nodes until there is only one item in each leaf node. Let's change the stopping rule to tell sklearn to ensure every leaf node contains at least 25 auction records:
###Code
m = DecisionTreeRegressor(min_samples_leaf=25)
m.fit(to.train.xs, to.train.y)
m_rmse(m, xs, y), m_rmse(m, valid_xs, valid_y)
###Output
_____no_output_____
###Markdown
That looks much better. Let's check the number of leaves again:
###Code
m.get_n_leaves()
###Output
_____no_output_____
###Markdown
Much more reasonable! Building a decision tree is a good way to create a model of our data. It is very flexible, since it can clearly handle nonlinear relationships and interactions between variables. Random Forest In essence a random forest is a model that averages the predictions of a large number of decision trees, which are generated by randomly varying various parameters that specify what data is used to train the tree and other tree parameters. Bagging is a particular approach to "ensembling," or combining the results of multiple models together. To see how it works in practice, let's get started on creating our own random forest! Creating a Random Forest In the following function definition n_estimators defines the number of trees we want, max_samples defines how many rows to sample for training each tree, and max_features defines how many columns to sample at each split point (where 0.5 means "take half the total number of columns"). We can also specify when to stop splitting the tree nodes, effectively limiting the depth of the tree, by including the same min_samples_leaf parameter we used in the last section. Finally, we pass n_jobs=-1 to tell sklearn to use all our CPUs to build the trees in parallel. By creating a little function for this, we can more quickly try different variations in the rest of this chapter:
###Code
def rf(xs, y, n_estimators=40, max_samples=200_000,
max_features=0.5, min_samples_leaf=5, **kwargs):
return RandomForestRegressor(n_jobs=-1, n_estimators=n_estimators,
max_samples=max_samples, max_features=max_features,
min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y)
m = rf(xs, y);
m_rmse(m, xs, y), m_rmse(m, valid_xs, valid_y)
###Output
_____no_output_____
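###Markdown
To make the bagging idea above concrete, here is a short illustrative sketch (not the code used by `rf`) that trains a handful of decision trees on bootstrap samples and averages their predictions — which is essentially what RandomForestRegressor automates for us:
###Code
# illustration of bagging: average the predictions of trees trained on bootstrap samples
from sklearn.tree import DecisionTreeRegressor

def simple_bagged_preds(xs, y, valid_xs, n_trees=10, seed=42):
    rng = np.random.default_rng(seed)
    all_preds = []
    for _ in range(n_trees):
        idxs = rng.integers(0, len(y), len(y))            # bootstrap sample (with replacement)
        t = DecisionTreeRegressor(min_samples_leaf=25)
        t.fit(xs.iloc[idxs], y.iloc[idxs])
        all_preds.append(t.predict(valid_xs))
    return np.stack(all_preds).mean(0)                    # average over the small ensemble

r_mse(simple_bagged_preds(xs, y, valid_xs), valid_y)
###Output
_____no_output_____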
###Markdown
To see the impact of n_estimators, let's get the predictions from each individual tree in our forest (these are in the estimators_ attribute):
###Code
import warnings
warnings.filterwarnings("ignore")
preds = np.stack([t.predict(valid_xs) for t in m.estimators_])
r_mse(preds.mean(0), valid_y)
plt.plot([r_mse(preds[:i+1].mean(0), valid_y) for i in range(40)]);
###Output
_____no_output_____
###Markdown
Model InterpretationFor tabular data, model interpretation is particularly important. For a given model, the things we are most likely to be interested in are: - How confident are we in our predictions using a particular row of data? - For predicting with a particular row of data, what were the most important factors, and how did they influence that prediction? - Which columns are the strongest predictors, which can we ignore? - Which columns are effectively redundant with each other, for purposes of prediction? - How do predictions vary, as we vary these columns? Tree Variance for Prediction Confidence
###Code
preds = np.stack([t.predict(valid_xs) for t in m.estimators_])
preds.shape
preds_std = preds.std(0)
###Output
_____no_output_____
###Markdown
Here are the standard deviations for the predictions for the first five prices, that is, the first five rows of the validation set:
###Code
preds_std[:5]
###Output
_____no_output_____
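###Markdown
One simple way to act on these standard deviations (a small added sketch, assuming we want to flag the rows the model is least sure about) is to rank the validation rows by how much the individual trees disagree:
###Code
# rows where the trees disagree the most are the predictions we trust the least
least_certain = np.argsort(preds_std)[-5:]   # indices of the 5 widest spreads
preds_std[least_certain]
###Output
_____no_output_____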
###Markdown
Feature Importance
###Code
def rf_feat_importance(m, df):
return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}
).sort_values('imp', ascending=False)
fi = rf_feat_importance(m, xs)
fi[:10]
###Output
_____no_output_____
###Markdown
A plot of the feature importances shows the relative importances more clearly:
###Code
def plot_fi(fi):
return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False)
plot_fi(fi[:10]);
###Output
_____no_output_____
###Markdown
Removing Low-Importance Variables It seems likely that we could use just a subset of the columns by removing the variables of low importance and still get good results. Let's try just keeping those with a feature importance greater than 0.005:
###Code
to_keep = fi[fi.imp>0.005].cols
len(to_keep)
xs_imp = xs[to_keep]
valid_xs_imp = valid_xs[to_keep]
m = rf(xs_imp, y)
###Output
_____no_output_____
###Markdown
Removing Redundant Features
###Code
cluster_columns(xs_imp)
###Output
_____no_output_____
###Markdown
In this chart, the pairs of columns that are most similar are the ones that were merged together early, far from the "root" of the tree at the left. In our case it seems that there are no closely correlated features. Partial Dependence As we've seen, the most important predictors include year, lat, and odometer. We'd like to understand the relationship between these predictors and sale price. It's a good idea to first check how the values of these features are distributed (for categorical features the Pandas value_counts method works well; here we use histograms), to see how common each value is:
###Code
ax = valid_xs['lat'].hist()
ax = valid_xs['year'].hist()
###Output
_____no_output_____
###Markdown
Partial dependence plots try to answer the question: if a row varied on nothing other than the feature in question, how would it impact the dependent variable?For instance, how does year impact sale price, all other things being equal?
###Code
from sklearn.inspection import plot_partial_dependence
fig, ax = plt.subplots(figsize=(12, 4))
plot_partial_dependence(m, valid_xs_imp, ['odometer', 'lat'], grid_resolution=20, ax=ax);
###Output
_____no_output_____ |
002_Basic_Analysis.ipynb | ###Markdown
Basic analysisThis notebook carries out a very basic analysis of the data. You don't need to do anything beyond understanding it.
###Code
df = pd.read_csv('data/acetylcholinesterase_02_bioactivity_data_preprocessed.csv')
df
df['molecule_chembl_id'].unique().shape
# Dynamic range of the standard value
df['standard_value'].max(), df['standard_value'].min()
###Output
_____no_output_____
###Markdown
Histogram of SMILES string lengths
###Code
df['canonical_len'] = df['canonical_smiles'].apply(lambda x: len(x))
df['canonical_len'].hist(bins=100)
#Average length is approximately 50 characters
# Max and min
max_sequence_len = df['canonical_len'].max()
max_sequence_len, df['canonical_len'].min()
max_len_idx = df['canonical_len'].argmax()
min_len_idx = df['canonical_len'].argmin()
# Example of the longest molecule
df.iloc[max_len_idx].canonical_smiles
# Example of the shortest molecule
df.iloc[min_len_idx].canonical_smiles
###Output
_____no_output_____
###Markdown
Character histogram
###Code
from collections import Counter
# Join all the SMILES strings into a single one
text = ''
for cs in df['canonical_smiles']:
text = text + cs
#count how many times each character appears
vocab_hist = dict(Counter(text))
vocab_hist
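# (added sketch) actually plot the character histogram described above;
# assumes matplotlib is available in this environment
import matplotlib.pyplot as plt
chars, counts = zip(*sorted(vocab_hist.items(), key=lambda kv: kv[1], reverse=True))
plt.figure(figsize=(12, 4))
plt.bar(chars, counts)
plt.title('Character frequency in SMILES strings')
plt.show()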
###Output
_____no_output_____ |
testing/northern_cropmask/update_training_data.ipynb | ###Markdown
Update training data with manually drawn polygonsThis notebook will merge manually drawn crop/non-crop polygons (drawn in either QGIS or ArcGIS) with the training data collected using Collect Earth.During each iteration of this procedure, update the suffix of the output file with the date of creation in the format YYYYMMDD; this will help keep track of which iteration of training data is used for which set of classifications.****Filename guide:** `_training_data_.geojson`: The training dataset that includes CEO data, manually collected polygons, and any pre-existing datasets.* `ceo_td_polys.geojson` : training data polygons retrieved from Collect Earth; these are combined with the manually collected polygons and any pre-existing datasets to produce the `_training_data_.geojson` file
###Code
import pandas as pd
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
###Output
/env/lib/python3.6/site-packages/geopandas/_compat.py:88: UserWarning: The Shapely GEOS version (3.7.2-CAPI-1.11.0 ) is incompatible with the GEOS version PyGEOS was compiled with (3.9.1-CAPI-1.14.2). Conversions between both will be slow.
shapely_geos_version, geos_capi_version_string
###Markdown
Analysis Parameters
###Code
date_suffix='20210803'
ceo_td_path = 'data/ceo_td_polys.geojson' #shouldn't need to change this
manual_poly_path = 'data/northern_manual_crop_polys_20210803.shp' #the file you've been adding new TD polygons to in GIS.
###Output
_____no_output_____
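###Markdown
If you prefer not to type the suffix by hand, the small optional sketch below (not part of the original workflow) derives today's date in the required YYYYMMDD format; you could assign it to `date_suffix` instead of the hard-coded string above:
###Code
# optional: derive a YYYYMMDD suffix automatically from today's date
from datetime import date
auto_suffix = date.today().strftime('%Y%m%d')
auto_suffix
###Output
_____no_output_____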
###Markdown
Open vector files
###Code
#add manually collected polygons
manual = gpd.read_file(manual_poly_path)
ceo = gpd.read_file(ceo_td_path)
###Output
_____no_output_____
###Markdown
Reclassify Class field
###Code
manual['Class'] = np.where(manual['Class']=='crop', 1, manual['Class'])
manual['Class'] = np.where(manual['Class']=='non-crop', 0, manual['Class'])
###Output
_____no_output_____
###Markdown
Merge files together
###Code
training_data = pd.concat([manual,ceo]).reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Ensure the Class field is of integer type
###Code
training_data['Class'] = training_data['Class'].astype(int)
###Output
_____no_output_____
###Markdown
Counts for each class
###Code
print('No. of samples: '+str(len(training_data)))
print('Crop samples = '+str(len(training_data[training_data['Class']==1])))
print('Non-Crop samples = '+str(len(training_data[training_data['Class']==0])))
###Output
No. of samples: 3508
Crop samples = 1149
Non-Crop samples = 2359
###Markdown
Export to diskThis file will be the new training data to pass into the `1_Extract_training_data.ipynb` notebook
###Code
training_data.to_file('data/northern_training_data_'+date_suffix+'.geojson', driver='GeoJSON')
###Output
_____no_output_____ |
content/06_reactive_grasp.ipynb | ###Markdown
Reactive GRASPIn reactive GRASP the probability of selecting an RCL size is proportional to the historical performance of that RCL size. Here GRASP **learns** the bias of RCL sizes needed to get the best results. Let $q_i = f^* / A_i$ and $p_i = \dfrac{q_i}{\sum_{j=1}^{m} q_j}$, where $f^*$ is the incumbent (best cost) and $A_i$ is the mean cost found with $r_i$; larger $q_i$ indicates more suitable values of $r_i$ Imports
###Code
import numpy as np
import sys
###Output
_____no_output_____
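###Markdown
As a quick numeric illustration of the update rule above (the values here are invented purely for the example), suppose three RCL sizes have been tried and the incumbent cost is $f^* = 100$:
###Code
# toy example of the reactive probability update
f_star = 100.0                       # incumbent (best) cost found so far
A = np.array([110.0, 125.0, 150.0])  # mean cost observed for each RCL size r_i
q = f_star / A                       # quality score of each size
p = q / q.sum()                      # probability of selecting each size
print(q.round(3), p.round(3))
###Output
_____no_output_____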
###Markdown
`metapy` imports
###Code
# install metapy if running in Google Colab
if 'google.colab' in sys.modules:
!pip install meta-py
from metapy.tsp import tsp_io as io
from metapy.tsp.euclidean import gen_matrix, plot_tour
from metapy.tsp.objective import OptimisedSimpleTSPObjective
from metapy.local_search.hill_climbing import (HillClimber, TweakTwoOpt)
from metapy.tsp.grasp import (SemiGreedyConstructor, GRASP)
###Output
_____no_output_____
###Markdown
Load problem
###Code
#load file
file_path = 'https://raw.githubusercontent.com/TomMonks/meta-py/main/data/st70.tsp'
#number of rows in the file that are meta_data
md_rows = 6
#read the coordinates
cities = io.read_coordinates(file_path, md_rows)
matrix = gen_matrix(cities, as_integer=True)
###Output
_____no_output_____
###Markdown
ImplementationTo implement we create two new classes:* `MonitoredLocalSearch` - wraps HillClimber and notifies observering classes when a GRASP local search phase is complete.* `ReactiveRCLSizer` - observes GRASP local search, tracks average performance of RCL sizes, and updates prob of choosing different r's at a user specified frequency:
###Code
class MonitoredLocalSearch:
'''
Extends a local search class and provides the observer pattern.
An external object can observe the local search object and catch the
termination event (end of local search). The observer is notified and
passed the results of the local search.
Use cases:
----------
In GRASP this is useful for an algorithm sizing the RCL and learning
on average how different sizes of RCL perform.
'''
def __init__(self, local_search):
'''
Constructor:
Params:
------
local_search: Object
Must implement .solve(), best_cost, best_solution
'''
self.local_search = local_search
self.observers = []
def register_observer(self, observer):
'''
register an object to observe the local search
The observer should implement
local_search_terminated(*args, **kwargs)
'''
self.observers.append(observer)
def set_init_solution(self, solution):
'''
Set the initial solution
Params:
--------
solution: np.ndarray
vector representing the initial solution
'''
self.local_search.set_init_solution(solution)
def solve(self):
'''
Run the local search.
At the end of the run all observers are notified.
'''
# run local search
self.local_search.solve()
# notify observers after search terminates.
best = self.local_search.best_cost
solution = self.local_search.best_solutions[0]
self.notify_observers(best, solution)
def notify_observers(self, *args, **kwargs):
'''
Observers must implement `local_search_terminated()`
method.
Params:
------
*args: list
variable number of arguments
**kwargs: dict
key word arguments
'''
for o in self.observers:
o.local_search_terminated(*args, **kwargs)
def _get_best_cost(self):
'''
best cost from internal local_search object
'''
return self.local_search.best_cost
def _get_best_solutions(self):
'''
get best solutions from local_search object
'''
return self.local_search.best_solutions
best_cost = property(_get_best_cost, doc='best cost')
best_solutions = property(_get_best_solutions, doc='best solution')
class ReactiveRCLSizer:
'''
Dynamically update the probability of selecting a
value of r for the size of the RCL.
Implements Reactive GRASP.
'''
def __init__(self, r_list, local_search, freq=None, random_seed=None):
'''
Constructor
Params:
-------
r_list: list
vector of sizes for RCL e.g. [1, 2, 3, 4, 5]
local_search: MonitoredLocalSearch
local_search to monitor
freq: int, optional (default=None)
Frequency in iterations at which the probabilities are updated.
When set to None it defaults to the length of r_list * 2
random_seed: int, optional (default=None)
Control random sampling for reproducible result
'''
# list of r sizes
self.r_list = r_list
# set of indexes to work with probabilities
self.elements = np.arange(len(r_list))
# probability of choosing r (initially uniform)
self.probs = np.full(len(r_list), 1/len(r_list))
# mean performance of size r
self.means = np.full(len(r_list), 1.0)
# runs of size r
self.allocations = np.full(len(r_list), 0)
# local search to monitor
self.local_search = local_search
# frequency of updating probs
if freq is None:
self.freq = len(self.r_list)
else:
self.freq = freq
# number of iterations within frequency
self.iter = 0
# current r index
self.index = -1
# to init run one of each r value
self.init = True
# imcumbent solution cost
self.best_cost = -np.inf
# register sizer as observer of the local search
local_search.register_observer(self)
# random no. gen
self.rng = np.random.default_rng(random_seed)
def local_search_terminated(self, *args, **kwargs):
'''
Termination of the local search
'''
# iteration complete
self.iter += 1
# get the best cost found in the iteration
iter_cost = args[0]
        # record that an iteration took place with index i
self.allocations[self.index] += 1
# update running mean
mean_x = self.means[self.index]
n = self.allocations[self.index]
self.means[self.index] += (iter_cost - mean_x) / n
self.update_r()
# update incumbent cost if required
if iter_cost > self.best_cost:
self.best_cost = iter_cost
# update probs if freq met.
if self.iter >= self.freq and not self.init:
self.iter = 0
self.update_probability()
def update_probability(self):
'''
Let $q_i = f^* / A_i$
        and $p_i = \dfrac{q_i}{\sum_{j=1}^{m} q_j}$
where
$f^*$ is the incumbent (cost)
$A_i$ is the mean cost found with r_i
larger q_i indicates more suitable values of r_i
'''
q = self.best_cost / self.means
self.probs = q / q.sum()
def update_r(self):
'''
update the size of r
Note that the implementation ensures that all r values are run
for at least one iteration of the algorithm.
'''
# initial bit of logic makes sure there is at least one run of all probabilities
if self.init:
self.index += 1
if self.index >= len(self.r_list):
self.init = False
self.index = self.rng.choice(self.elements, p=self.probs)
else:
self.index = self.rng.choice(self.elements, p=self.probs)
def get_size(self):
'''
Return the selected size of the RCL
The selection is done using a discrete distribution
self.r_probs.
'''
return self.r_list[self.index]
###Output
_____no_output_____
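###Markdown
Before wiring these classes into GRASP, here is a tiny self-contained sketch (using made-up stub objects, purely for illustration) of how the observer pattern in `MonitoredLocalSearch` delivers results to a registered observer:
###Code
# minimal illustration of the observer pattern used above (stub objects only)
class _StubLocalSearch:
    '''Pretends to be a local search with a fixed best cost and solution.'''
    def __init__(self):
        self.best_cost = -1234.0
        self.best_solutions = [np.arange(5)]
    def set_init_solution(self, solution):
        pass
    def solve(self):
        pass

class _PrintObserver:
    def local_search_terminated(self, *args, **kwargs):
        print('observer notified with best cost:', args[0])

demo_search = MonitoredLocalSearch(_StubLocalSearch())
demo_search.register_observer(_PrintObserver())
demo_search.solve()
###Output
_____no_output_____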
###Markdown
Running Reactive GRASP
###Code
def compose_grasp(tour, matrix, max_iter=50, freq=10, rcl_min=2, rcl_max=15,
seeds=(None, None)):
'''
Compose the REACTIVE GRASP algorithm
'''
# objective function
obj = OptimisedSimpleTSPObjective(-matrix)
# Two-opt tweaks
tweaker = TweakTwoOpt()
# local search = first improvement hill climbing
ls = MonitoredLocalSearch(HillClimber(obj, tour, tweaker))
# semi-greedy constructor and RCL sizer
sizer = ReactiveRCLSizer(np.arange(rcl_min, rcl_max), ls, freq=freq,
random_seed=seeds[0])
constructor = SemiGreedyConstructor(sizer, tour, -matrix,
random_seed=seeds[1])
# GRASP framework
solver = GRASP(constructor, ls, max_iter=max_iter)
return solver
tour = np.arange(len(cities))
solver = compose_grasp(tour, matrix, seeds=(42, 101))
print("\nRunning REACTIVE GRASP")
solver.solve()
print("\n** GRASP OUTPUT ***")
print(f"best cost:\t{solver.best}")
print("best solutions:")
print(solver.best_solution)
fig, ax = plot_tour(solver.best_solution, cities, figsize=(12,9))
###Output
Running REACTIVE GRASP
** GRASP OUTPUT ***
best cost: -738.0
best solutions:
[ 0 35 22 12 28 69 34 56 14 23 1 18 54 48 25 7 27 2 31 6 3 17 41 5
40 43 13 19 29 67 26 45 24 44 38 60 39 8 42 16 20 33 11 59 32 61 53 47
66 10 63 64 55 50 51 9 4 52 65 62 21 58 68 30 37 46 36 49 57 15]
|
2-python packages/practice 2.ipynb | ###Markdown
The Series Data Structure
###Code
import pandas as pd
animals = ['Tiger', 'Bear', 'Moose']
pd.Series(animals)
numbers = [1, 2, 3]
pd.Series(numbers)
numbers = [1., 2., 3.]
pd.Series(numbers)
numbers = [1, 2, None]
pd.Series(numbers)
numbers = [1, 2, 3.5]
pd.Series(numbers)
carsm = ['subaru', 'nissan', 'toyota']
pd.Series(carsm)
import numpy as np
np.nan == None
np.nan == np.nan
np.isnan(np.nan)
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
s.index
s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])
s
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey'])
s
###Output
_____no_output_____
###Markdown
Querying a Series
###Code
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
s.iloc[3]
s.loc['Golf']
s[3]
s['Golf']
sports = {99: 'Bhutan',
100: 'Scotland',
101: 'Japan',
102: 'South Korea'}
s = pd.Series(sports)
s
s = pd.Series([100.00, 120.00, 101.00, 3.00])
s
total = 0
for item in s:
total+=item
print(total)
total = 1
for item in s:
total=item
print(total)
total = 0
for item in s:
total-=item
print(total)
total = np.sum(s)
print(total)
s = pd.Series(np.random.randint(0,1000, 10000))
s
s.head()
len(s)
%%timeit -n 100
summary = 0
for item in s:
summary+=item
%%timeit -n 100
summary = np.sum(s)
s+=2 #adds two to each item in s using broadcasting
s.head()
for label, value in s.iteritems():
s.set_value(label, value+2)
s.head()
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,100))
for label, value in s.iteritems():
s.loc[label]= value+2
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,100))
s+=2
s = pd.Series([1, 2, 3])
s.loc['Animal'] = 'Bears'
s
original_sports = pd.Series({'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'})
cricket_loving_countries = pd.Series(['Australia',
'Barbados',
'Pakistan',
'England'],
index=['Cricket',
'Cricket',
'Cricket',
'Cricket'])
all_countries = original_sports.append(cricket_loving_countries)
all_countries
original_sports
cricket_loving_countries
all_countries.loc['Cricket']
all_countries.iloc[4]
###Output
_____no_output_____
###Markdown
Distributions in NumPy
###Code
np.random.binomial(1, 0.5)
np.random.binomial(1000, 0.5)
np.random.binomial(1000, 0.5)/1000
chance_of_tornado = 0.01/100
np.random.binomial(100000, chance_of_tornado)
chance_of_tornado = 0.01
tornado_events = np.random.binomial(1, chance_of_tornado, 1000000)
two_days_in_a_row = 0
for j in range(1,len(tornado_events)-1):
if tornado_events[j]==1 and tornado_events[j-1]==1:
two_days_in_a_row+=1
print('{} tornadoes back to back in {} years'.format(two_days_in_a_row, 1000000/365))
np.random.uniform(0, 1)
distribution = np.random.normal(0.75,size=1000)
np.sqrt(np.sum((np.mean(distribution)-distribution)**2)/len(distribution))
np.std(distribution)
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df
df.head()
df.loc['Store 2']
type(df.loc['Store 2'])
df.loc['Store 1']
df.loc['Store 1', 'Cost']
df.T
df.T.loc['Cost']
df['Cost']
df.loc['Store 1']['Cost']
df.loc[:,['Name', 'Cost']]
df.drop('Store 1')
df
copy_df = df.copy()
copy_df = copy_df.drop('Store 1')
copy_df
copy_df.drop
del copy_df['Name']
copy_df
df['Location'] = None
df
df['casier'] = ['peter', 'tom', 'paul']
df
###Output
_____no_output_____
###Markdown
costs = df['Cost']costs
###Code
df
copy_df
###Output
_____no_output_____
###Markdown
Merging Dataframes
###Code
df = pd.DataFrame([{'Name': 'MJ', 'Item Purchased': 'Sponge', 'Cost': 22.50},
{'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50},
{'Name': 'Filip', 'Item Purchased': 'Spoon', 'Cost': 5.00}],
index=['Store 1', 'Store 1', 'Store 2'])
df
df['Date'] = ['December 1', 'January 1', 'mid-May']
df
df['Delivered'] = True
df
df['Feedback'] = ['Positive', None, 'Negative']
df
adf = df.reset_index()
adf['Date'] = pd.Series({0: 'December 1', 2: 'mid-May'})
adf
staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR'},
{'Name': 'Sally', 'Role': 'Course liasion'},
{'Name': 'James', 'Role': 'Grader'}])
staff_df = staff_df.set_index('Name')
print(staff_df)
student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business'},
{'Name': 'Mike', 'School': 'Law'},
{'Name': 'Sally', 'School': 'Engineering'}])
student_df = student_df.set_index('Name')
student_df
staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR'},
{'Name': 'Sally', 'Role': 'Course liasion'},
{'Name': 'James', 'Role': 'Grader'}])
staff_df = staff_df.set_index('Name')
student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business'},
{'Name': 'Mike', 'School': 'Law'},
{'Name': 'Sally', 'School': 'Engineering'}])
student_df = student_df.set_index('Name')
print(staff_df.head())
print()
print(student_df.head())
pd.merge(staff_df, student_df, how='outer', left_index=True, right_index=True)
pd.merge(staff_df, student_df, how='inner', left_index=True, right_index=True)
pd.merge(staff_df, student_df, how='left', left_index=True, right_index=True)
pd.merge(staff_df, student_df, how='right', left_index=True, right_index=True)
staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR', 'Location': 'State Street'},
{'Name': 'Sally', 'Role': 'Course liasion', 'Location': 'Washington Avenue'},
{'Name': 'James', 'Role': 'Grader', 'Location': 'Washington Avenue'}])
student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business', 'Location': '1024 Billiard Avenue'},
{'Name': 'Mike', 'School': 'Law', 'Location': 'Fraternity House #22'},
{'Name': 'Sally', 'School': 'Engineering', 'Location': '512 Wilson Crescent'}])
pd.merge(staff_df, student_df, how='left', left_on='Name', right_on='Name')
staff_df = pd.DataFrame([{'First Name': 'Kelly', 'Last Name': 'Desjardins', 'Role': 'Director of HR'},
{'First Name': 'Sally', 'Last Name': 'Brooks', 'Role': 'Course liasion'},
{'First Name': 'James', 'Last Name': 'Wilde', 'Role': 'Grader'}])
student_df = pd.DataFrame([{'First Name': 'James', 'Last Name': 'Hammond', 'School': 'Business'},
{'First Name': 'Mike', 'Last Name': 'Smith', 'School': 'Law'},
{'First Name': 'Sally', 'Last Name': 'Brooks', 'School': 'Engineering'}])
staff_df
student_df
pd.merge(staff_df, student_df, how='inner', left_on=['First Name','Last Name'], right_on=['First Name','Last Name'])
df = pd.DataFrame(['A+', 'A', 'A-', 'B+', 'B', 'B-', 'C+', 'C', 'C-', 'D+', 'D'],
index=['excellent', 'excellent', 'excellent', 'good', 'good', 'good', 'ok', 'ok', 'ok', 'poor', 'poor'])
df.rename(columns={0: 'Grades'}, inplace=True)
df
df['Grades'].astype('category').head()
grades = df['Grades'].astype('category',
categories=['D', 'D+', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+'],
ordered=True)
grades.head()
grades > 'C'
df_test = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
df_test.isin({'A': [1, 3], 'B': [4, 7, 12]})
df.loc[(df['Grades'] == 'A+') & (df['Grades'] == 'D')]
df.loc[df['Grades'] != 'B+']
df_test = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
df_test.loc[~df_test['A'].isin({'A': [1, 3], 'B': [4, 7, 12]})]
###Output
_____no_output_____
###Markdown
Time
###Code
pd.Timestamp('9/1/2016 10:05AM')
pd.Period('1/2016')
pd.Period('3/5/2016')
t1 = pd.Series(list('abc'), [pd.Timestamp('2016-09-01'), pd.Timestamp('2016-09-02'), pd.Timestamp('2016-09-03')])
t1
type(t1.index)
t2 = pd.Series(list('def'), [pd.Period('2016-09'), pd.Period('2016-10'), pd.Period('2016-11')])
t2
type(t2.index)
d1 = ['2 June 2013', 'Aug 29, 2014', '2015-06-26', '7/12/16']
ts3 = pd.DataFrame(np.random.randint(10, 100, (4,2)), index=d1, columns=list('ab'))
ts3
ts3.index = pd.to_datetime(ts3.index)
ts3
pd.to_datetime('4.7.12', dayfirst=True)
pd.Timestamp('9/3/2016')-pd.Timestamp('9/1/2016')
pd.Timestamp('9/2/2016 8:10AM') + pd.Timedelta('12D 3H')
dates = pd.date_range('10-01-2016', periods=9, freq='2W-SUN')
dates
df.index.ravel
import matplotlib
import seaborn as sns
sns.set(style='white', context='notebook', palette='deep')
sns.set_style('white')
%matplotlib inline
matplotlib.style.use('ggplot')
###Output
_____no_output_____
###Markdown
Scikit-learn
###Code
from sklearn.datasets import make_blobs
import numpy as np
X, y = make_blobs(n_samples=1000, centers=20, random_state=123)
labels = ["b", "r"]
y = np.take(labels, (y < 10))
print(X)
print(y[:5])
# X is a 2 dimensional array, with 1000 rows and 2 columns
print(X.shape)
# y is a vector of 1000 elements
print(y.shape)
# Rows and columns can be accessed with lists, slices or masks
print(X[[1, 2, 3]]) # rows 1, 2 and 3
print(X[:5]) # 5 first rows
print(X[500:510, 0]) # values from row 500 to row 510 at column 0
print(X[y == "b"][:5]) # 5 first rows for which y is "b"
from sklearn.datasets import load_wine
data = load_wine()
data.target[[10, 80, 140]]
list(data.target_names)
from sklearn.cluster import DBSCAN
import numpy as np
X = np.array([[1, 2], [2, 2], [2, 3],[8, 7], [8, 8], [25, 80]])
clustering = DBSCAN(eps=3, min_samples=2).fit(X)
clustering.labels_
clustering
from sklearn.decomposition import NMF
from sklearn.datasets import load_digits
X = load_digits().data
%timeit NMF(n_components=16, tol=1e-2).fit(X)
class Estimator(object):
def fit(self, X, y=None):
"""Fits estimator to data."""
# set state of ``self``
return self
###Output
_____no_output_____ |
.ipynb_checkpoints/WaveformExtraction_thy_Post-checkpoint.ipynb | ###Markdown
Patient 8 THY 3M Littmann Data
###Code
#image = Image.open('3M.bmp')
image = Image.open('3M_thy_post_s.bmp')
image
x = image.size[0]
y = image.size[1]
print(x)
print(y)
matrix = []
points = []
integrated_density = 0
for i in range(x):
matrix.append([])
for j in range(y):
matrix[i].append(image.getpixel((i,j)))
#integrated_density += image.getpixel((i,j))[1]
#points.append(image.getpixel((i,j))[1])
###Output
_____no_output_____
###Markdown
Extract Red Line Position
###Code
redMax = 0
xStore = 0
yStore = 0
for xAxis in range(x):
for yAxis in range(y):
currentPoint = matrix[xAxis][yAxis]
if currentPoint[0] == 255 and currentPoint[1] < 10 and currentPoint[2] < 10:
redMax = currentPoint[0]
xStore = xAxis
yStore = yAxis
print(xStore, yStore)
###Output
313 279
###Markdown
Extract Blue Points
###Code
redline_pos = 279
gain = 120
absMax = 0
littmannArr = []
points_vertical = []
theOne = 0
for xAxis in range(x):
for yAxis in range(y):
currentPoint = matrix[xAxis][yAxis]
# Pickup Blue points
if currentPoint[2] == 255 and currentPoint[0] < 220 and currentPoint[1] < 220:
points_vertical.append(yAxis)
#print(points_vertical)
# Choose the largest amplitude
for item in points_vertical:
if abs(item-redline_pos) > absMax:
absMax = abs(item-redline_pos)
theOne = item
littmannArr.append((theOne-redline_pos)*gain)
absMax = 0
theOne = 0
points_vertical = []
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(littmannArr, linewidth=0.6, color='blue')
###Output
_____no_output_____
###Markdown
Ascul Pi Data
###Code
pathBase = 'C://Users//triti//OneDrive//Dowrun//Text//Manuscripts//Data//TianHaoyang//AusculPi_Post//'
filename = 'Numpy_Array_File_2020-06-24_18_15_52.npy'
line = pathBase + filename
arr = np.load(line)
arr
arr.shape
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(arr[0], linewidth=1.0, color='black')
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(arr[:,100], linewidth=1.0, color='black')
start = 1675
end = 2040
start_adj = int(start * 2583 / 3000)
end_adj = int(end * 2583 / 3000)
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(arr[start_adj:end_adj,240], linewidth=0.6, color='black')
start_adj-end_adj
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(littmannArr, linewidth=0.6, color='blue')
asculArr = arr[start_adj:end_adj,400]
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(asculArr, linewidth=0.6, color='black')
###Output
_____no_output_____
###Markdown
Preprocess the two arrays
###Code
asculArr_processed = []
littmannArr_processed = []
for ascul in asculArr:
asculArr_processed.append(math.fabs(ascul))
for item in littmannArr:
littmannArr_processed.append(math.fabs(item))
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(asculArr_processed, linewidth=0.6, color='black')
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(littmannArr_processed, linewidth=0.6, color='blue')
len(littmannArr)
len(asculArr)
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(asculArr_processed[:170], linewidth=0.6, color='black')
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(littmannArr_processed[:170], linewidth=0.6, color='blue')
###Output
_____no_output_____
###Markdown
Coefficient
###Code
stats.pearsonr(asculArr_processed, littmannArr_processed)
stats.pearsonr(asculArr_processed[:170], littmannArr_processed[:170])
###Output
_____no_output_____
###Markdown
Fitness
###Code
stats.chisquare(asculArr_processed[:80], littmannArr_processed[2:82])
def cosCalculate(a, b):
    # cosine similarity: sum(x*y) / (||x|| * ||y||)
    # note: the denominator sums the squares first and takes the square root at the end
    l = len(a)
    sumXY = 0
    sumXSquare = 0
    sumYSquare = 0
    for i in range(l):
        sumXY = sumXY + a[i]*b[i]
        sumXSquare = sumXSquare + a[i]**2
        sumYSquare = sumYSquare + b[i]**2
    cosValue = sumXY / (math.sqrt(sumXSquare) * math.sqrt(sumYSquare))
    return cosValue
cosCalculate(asculArr_processed, littmannArr_processed)
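# (added sketch) cross-check the value above with a vectorised NumPy equivalent;
# assumes np is already available in this session
a_vec = np.asarray(asculArr_processed)
b_vec = np.asarray(littmannArr_processed)
np.dot(a_vec, b_vec) / (np.linalg.norm(a_vec) * np.linalg.norm(b_vec))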
###Output
_____no_output_____
###Markdown
Cross Comparing Volunteer 3M vs Patient 8 Ascul Post
###Code
#image = Image.open('3M.bmp')
image = Image.open('3Ms.bmp')
image
x = image.size[0]
y = image.size[1]
matrix = []
points = []
integrated_density = 0
for i in range(x):
matrix.append([])
for j in range(y):
matrix[i].append(image.getpixel((i,j)))
#integrated_density += image.getpixel((i,j))[1]
#points.append(image.getpixel((i,j))[1])
redline_pos = 51
absMax = 0
littmannArr2 = []
points_vertical = []
theOne = 0
for xAxis in range(x):
for yAxis in range(y):
currentPoint = matrix[xAxis][yAxis]
# Pickup Blue points
if currentPoint[2] == 255 and currentPoint[0] < 220 and currentPoint[1] < 220:
points_vertical.append(yAxis)
#print(points_vertical)
# Choose the largest amplitude
for item in points_vertical:
if abs(item-redline_pos) > absMax:
absMax = abs(item-redline_pos)
theOne = item
littmannArr2.append((theOne-redline_pos)*800)
absMax = 0
theOne = 0
points_vertical = []
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(littmannArr2, linewidth=0.6, color='blue')
len(littmannArr2)
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(littmannArr2[:400], linewidth=0.6, color='blue')
asculArr
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(asculArr, linewidth=0.6, color='black')
asculArr_processed = []
littmannArr2_processed = []
for ascul in asculArr:
asculArr_processed.append(math.fabs(ascul))
for item in littmannArr2[:400]:
littmannArr2_processed.append(math.fabs(item))
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(asculArr_processed, linewidth=1.0, color='black')
fig = plt.figure()
s = fig.add_subplot(111)
s.plot(littmannArr2_processed[:314], linewidth=1.0, color='blue')
len(asculArr_processed)
len(littmannArr2_processed[:314])
stats.pearsonr(asculArr_processed, littmannArr2_processed[:314])
###Output
_____no_output_____ |
.ipynb_checkpoints/Handling Class Imbalance-checkpoint.ipynb | ###Markdown
1. Setup
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from imblearn.over_sampling import RandomOverSampler
from collections import Counter
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.metrics import precision_score, recall_score, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from sklearn.model_selection import RandomizedSearchCV
np.set_printoptions(suppress=True) # Suppress scientific notation where possible
from imblearn.over_sampling import SMOTE
from xgboost import XGBClassifier
from mlxtend.plotting import plot_decision_regions
%matplotlib inline
# make prettier plots
%config InlineBackend.figure_format = 'svg'
# to open pickled data
with open("MVP_patientlevel_intubation.pkl", 'rb') as picklefile:
patients = pickle.load(picklefile)
patients.head()
patients.shape
###Output
_____no_output_____
###Markdown
2. Improve balance using random oversampling of the minority class
###Code
ros = RandomOverSampler(random_state=0)
X = patients.iloc[:, 1:-1]
y = patients['Intubation']
X_resampled, y_resampled = ros.fit_sample(X, y)
# confirm target class is now balanced
Counter(y_resampled)
X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled, \
test_size=0.3, random_state=42)
scaler = MinMaxScaler()
X_train["Age"] = scaler.fit_transform(X_train["Age"].values.astype(float).reshape(-1,1))
X_test["Age"] = scaler.fit_transform(X_test["Age"].values.astype(float).reshape(-1,1))
###Output
<ipython-input-13-96beb5406f99>:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
X_test["Age"] = scaler.fit_transform(X_test["Age"].values.astype(float).reshape(-1,1))
###Markdown
3. Examine model performance after balancing classes
###Code
clf_ros = LogisticRegression().fit(X_train, y_train)
y_pred = clf_ros.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f'Model Accuracy: {round(accuracy, 4)*100}')
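# NOTE: print_confusion_matrix is called below but is not defined anywhere in the
# notebook as shown; the helper here is an assumed minimal stand-in (a seaborn
# heatmap) so that those calls run.
def print_confusion_matrix(confusion_matrix, class_names, figsize=(6, 4), fontsize=12):
    df_cm = pd.DataFrame(confusion_matrix, index=class_names, columns=class_names)
    fig = plt.figure(figsize=figsize)
    heatmap = sns.heatmap(df_cm, annot=True, fmt='d', cmap='Blues')
    heatmap.set_ylabel('True label', fontsize=fontsize)
    heatmap.set_xlabel('Predicted label', fontsize=fontsize)
    return fig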
conf_mat = confusion_matrix(y_true=y_test, y_pred=y_pred)
cm = print_confusion_matrix(conf_mat, ['Class 0', 'Class 1'])
# Precision = TP / (TP + FP)
# Recall = TP/P = True positive rate
# false positive rate = FP / true negatives = FP / (FP + TN)
from sklearn.metrics import roc_auc_score, roc_curve
fpr, tpr, thresholds = roc_curve(y_test, clf_ros.predict_proba(X_test)[:,1])
plt.plot(fpr, tpr,lw=2)
plt.plot([0,1],[0,1],c='violet',ls='--')
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve for COVID-19 Intubation');
print("ROC AUC score = ", roc_auc_score(y_test, clf_ros.predict_proba(X_test)[:,1]))
plt.savefig("ROC_resampling Logistic Regression")
from sklearn.metrics import f1_score
f1_score(y_test, y_pred)
from sklearn.metrics import fbeta_score
fbeta_score(y_test, y_pred, beta=0.5)
###Output
_____no_output_____
###Markdown
Now try using smote
###Code
from imblearn.over_sampling import SMOTE
X_smoted, y_smoted = SMOTE(random_state=42).fit_sample(X,y)
X_train, X_test, y_train, y_test = train_test_split(X_smoted, y_smoted, \
test_size=0.3, random_state=42)
scaler = MinMaxScaler()
X_train["Age"] = scaler.fit_transform(X_train["Age"].values.astype(float).reshape(-1,1))
scaler = MinMaxScaler()
X_test["Age"] = scaler.fit_transform(X_test["Age"].values.astype(float).reshape(-1,1))
Counter(y_smoted)
clf_smote = XGBClassifier().fit(X_train, y_train)
print("The score for xgBoost_Smote is")
print("Training: {:6.2f}%".format(100*clf_smote.score(X_train, y_train)))
print("Test set: {:6.2f}%".format(100*clf_smote.score(X_test, y_test)))
y_pred = clf_smote.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f'Model Accuracy: {round(accuracy, 4)*100}')
conf_mat = confusion_matrix(y_true=y_test, y_pred=y_pred)
cm = print_confusion_matrix(conf_mat, ['Not Intubated', 'Intubated'])
from sklearn.metrics import f1_score
f1_score(y_test, y_pred)
# Precision = TP / (TP + FP)
# Recall = TP/P = True positive rate
# false positive rate = FP / true negatives = FP / (FP + TN)
from sklearn.metrics import roc_auc_score, roc_curve
fpr, tpr, thresholds = roc_curve(y_test, clf_smote.predict_proba(X_test)[:,1])
plt.plot(fpr, tpr,lw=2)
plt.plot([0,1],[0,1],c='violet',ls='--')
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve for COVID-19 Intubation');
print("ROC AUC score = ", roc_auc_score(y_test, clf_smote.predict_proba(X_test)[:,1]))
plt.savefig("ROC_xgb smote_baseline")
###Output
ROC AUC score = 0.9071543348521999
|
project3/4. Zip Your Project Files and Submit.ipynb | ###Markdown
Project SubmissionWhen you are ready to submit your project, meaning you have checked the [rubric](https://review.udacity.com/!/rubrics/1428/view) and made sure that you have completed all tasks and answered all questions. Then you are ready to compress your files and submit your solution!The following steps assume:1. All cells have been *run* in Notebook 3 (and that progress has been saved).2. All questions in Notebook 3 have been answered.3. Your robot `sense` function in `robot_class.py` is complete.Please make sure all your work is saved before moving on. You do not need to change any code in the following cells; this code is to help you submit your project, only.---The first thing we'll do, is convert your notebooks into `.html` files; these files will save the output of each cell and any code/text that you have modified and saved in those notebooks. Note that the second notebook is not included because its completion does not affect whether you pass this project.
###Code
!jupyter nbconvert "1. Robot Moving and Sensing.ipynb"
!jupyter nbconvert "3. Landmark Detection and Tracking.ipynb"
###Output
[NbConvertApp] Converting notebook 1. Robot Moving and Sensing.ipynb to html
[NbConvertApp] Writing 329972 bytes to 1. Robot Moving and Sensing.html
[NbConvertApp] Converting notebook 3. Landmark Detection and Tracking.ipynb to html
[NbConvertApp] Writing 420549 bytes to 3. Landmark Detection and Tracking.html
###Markdown
Zip the project filesNext, we'll zip these notebook files and your `robot_class.py` file into one compressed archive named `project3.zip`.After completing this step you should see this zip file appear in your home directory, where you can download it as seen in the image below, by selecting it from the list and clicking **Download**.
###Code
!!apt-get -y update && apt-get install -y zip
!zip project3.zip -r . [email protected]
###Output
adding: 1. Robot Moving and Sensing.html (deflated 80%)
adding: robot_class.py (deflated 66%)
adding: 3. Landmark Detection and Tracking.html (deflated 79%)
|
notebooks/1_get_raw_data.ipynb | ###Markdown
Get Misc Dataframes
###Code
q = '''
select
sym
,count(*)
from
prices_m
where
date(datetime) = '2021-02-18'
group by
sym
order by
count(*) desc
limit 3
'''
df = pd.read_sql(q, db.conn)
df
###Output
_____no_output_____
###Markdown
View Tables, Indexes
###Code
q = '''
SELECT *
FROM sqlite_master
'''
pd.read_sql(q, db.conn)
q='''
SELECT DISTINCT DATE(date)
FROM prices_d
WHERE sym='BYND'
AND DATE(date) >= '{}'
--AND DATE(date) <= '{}'
ORDER BY date
'''.format('2021-03-01', '2021-02-06')
df = pd.read_sql(q, db.conn)
df
###Output
_____no_output_____
###Markdown
Manual db view/modify
###Code
#execute
assert 0
q = '''
ALTER TABLE trading_days
RENAME COLUMN Date TO date
'''
q = '''
drop table trading_days
'''
q='''
ALTER TABLE prices_interday
RENAME TO prices_d
'''
q='''
drop index index_prices_interday
'''
q='''
UPDATE prices_m
SET is_reg_hours = CASE
WHEN time(datetime) < time('09:30:00') THEN 0
WHEN time(datetime) > time('15:59:00') THEN 0
ELSE 1
END
WHERE DATE(datetime) >= '2020-11-02'
AND DATE(datetime) <= '2020-11-24'
'''
q='''
--UPDATE prices_m
SET datetime = DATETIME(datetime, '-1 hours')
WHERE DATE(datetime) >= '2020-11-02'
AND DATE(datetime) <= '2020-11-24'
'''
db.execute(q)
beeps()
get_df_prices('BYND', '2020-11-28', '2020-12-03')
dt_info = yf.Ticker('GME').info
q = '''
DELETE
FROM prices_d
WHERE DATE(date) = '{}'
'''.format('2021-03-02')
db.execute(q)
dt_info
def get_df_info(sym):
'''Returns dataframe containing general info about input symbol
Args:
sym (str): e.g. BYND
Returns:
df_info (pandas.DataFrame)
sym (str)
long_name (str)
sec (str)
ind (str)
quote_type (str)
fund_family (str)
summary (str)
timestamp (datetime)
'''
dt_info = yf.Ticker(sym).info
dt_info['timestamp'] = datetime.datetime.now()
dt_info['sector'] = dt_info.get('sector')
dt_col = {
'symbol':'sym',
'longName':'long_name',
'sector':'sec',
'industry':'ind',
'quoteType':'quote_type',
'fundFamily':'fund_family',
'longBusinessSummary':'summary',
'timestamp':'timestamp',
}
dt_info = {key:dt_info.get(key) for key in dt_col}
df_info = pd.DataFrame([dt_info])
df_info = df_info.rename(columns=dt_col)
return df_info
test = get_df_info('GME')
test
###Output
_____no_output_____ |
semana12-04-12-2020/.ipynb_checkpoints/analise-sofisticada-de-regressoes-checkpoint.ipynb | ###Markdown
Regularized Linear Regression and Bias vs. Variance
###Code
# used to manipulate directory paths
import os
# package used for matrix operations
import numpy as np
# plotting package
from matplotlib import pyplot as plt
# optimization package (used to choose some hyperparameters)
from scipy import optimize
# loads MATLAB-format datasets (.mat files)
from scipy.io import loadmat
# render matplotlib plots inline in the notebook
%matplotlib inline
###Output
_____no_output_____
###Markdown
Regularized Linear RegressionIn the first half of the exercise, you will implement regularized linear regression to predict the amount of water flowing out of a dam using the change in the water level of a reservoir. In the second half, you will run some diagnostics for debugging learning algorithms and examine the effects of bias vs. variance. Visualizing the datasetWe will begin by visualizing the dataset containing historical records of the change in water level, $ x $, and the amount of water flowing out of the dam, $ y $. This dataset is split into three parts:- A **training** set that your model will learn on: **X**, **y**; - A **cross-validation** set used to determine the regularization parameter: **Xval**, **yval**;- A **test** set used to evaluate performance. These are examples your model did not see during training: **Xtest**, **ytest**;
###Code
# loading the data from the ex5data1.mat file; all variables will be stored in a dictionary
dataset = loadmat(os.path.join('datasets', 'ex5data1.mat'))
# inspecting the structure of the dictionary
for keys, values in dataset.items():
print(keys)
# splitting the training, test and validation data out of the dictionary
# in addition, the data are converted to numpy vector format
X, y = dataset['X'], dataset['y'][:, 0]
Xtest, ytest = dataset['Xtest'], dataset['ytest'][:, 0]
Xval, yval = dataset['Xval'], dataset['yval'][:, 0]
# m = number of training examples
m = y.size
print(m)
# Visualizing the data with matplotlib
plt.figure(figsize = (10, 5))
plt.plot(X, y, 'ro', ms=10, mec='k', mew=1)
plt.xlabel('Mudança no nível da água (x)')
plt.ylabel('Água fluindo para fora da barragem (y)')
###Output
_____no_output_____
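###Markdown
As a quick sanity check on the three splits described above (a small added sketch, not part of the original exercise), we can print the shape of each set:
###Code
# sanity check: sizes of the training, validation and test sets
print('train:', X.shape, y.shape)
print('val:  ', Xval.shape, yval.shape)
print('test: ', Xtest.shape, ytest.shape)
###Output
_____no_output_____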
###Markdown
Regularized linear regression cost functionRecall that regularized linear regression has the following cost function:$$ J(\theta) = \frac{1}{2m} \left( \sum_{i=1}^m \left( h_\theta\left( x^{(i)} \right) - y^{(i)} \right)^2 \right) + \frac{\lambda}{2m} \left( \sum_{j=1}^n \theta_j^2 \right)$$where $\lambda$ is a regularization parameter that controls the degree of regularization (and thus helps prevent overfitting). The regularization term imposes a penalty on the overall cost J. As the magnitudes of the model parameters $\theta_j$ grow, the penalty grows as well. Note that you should not regularize the $\theta_0$ term. Regularized linear regression gradientCorrespondingly, the partial derivative of the regularized linear regression cost function is defined as:$$\begin{align}& \frac{\partial J(\theta)}{\partial \theta_0} = \frac{1}{m} \sum_{i=1}^m \left( h_\theta \left(x^{(i)} \right) - y^{(i)} \right) x_j^{(i)} & \qquad \text{for } j = 0 \\& \frac{\partial J(\theta)}{\partial \theta_j} = \left( \frac{1}{m} \sum_{i=1}^m \left( h_\theta \left( x^{(i)} \right) - y^{(i)} \right) x_j^{(i)} \right) + \frac{\lambda}{m} \theta_j & \qquad \text{for } j \ge 1\end{align}$$
###Code
def linearRegCostFunction(X, y, teta, lambda_= 0.0):
"""
Compute cost and gradient for regularized linear regression
with multiple variables. Computes the cost of using theta as
the parameter for linear regression to fit the data points in X and y.
Parameters
----------
X : array_like
The dataset. Matrix with shape (m x n + 1) where m is the
total number of examples, and n is the number of features
before adding the bias term.
y : array_like
The functions values at each datapoint. A vector of
shape (m, ).
theta : array_like
The parameters for linear regression. A vector of shape (n+1,).
lambda_ : float, optional
The regularization parameter.
Returns
-------
J : float
The computed cost function.
grad : array_like
The value of the cost function gradient w.r.t theta.
A vector of shape (n+1, ).
Instructions
------------
Compute the cost and gradient of regularized linear regression for
a particular choice of theta.
You should set J to the cost and grad to the gradient.
"""
    m = y.size # number of training examples
# You need to return the following variables correctly
J = 0
grad = np.zeros(teta.shape)
# ====================== YOUR CODE HERE ======================
    # computing the cost function
h = X.dot(teta)
J = (1 / (2 * m)) * np.sum(np.square(h - y)) + (lambda_ / (2 * m)) * np.sum(np.square(teta[1:]))
    # computing the gradient of the cost with respect to the parameters (partial derivatives)
grad = (1 / m) * (h - y).dot(X)
grad[1:] = grad[1:] + (lambda_ / m) * teta[1:]
# ============================================================
return J, grad
# hstack = concatenates along columns, vstack = concatenates along rows
#np.hstack((np.ones((m, 1)), X))
# axis = 1 concatenates along columns, axis = 0 concatenates along rows
#np.concatenate([np.ones((m, 1)), X], axis=1)
# defining an initial guess for the parameter values
teta = np.array([1, 1])
# adding a bias (intercept) column to the training features
X_bias = np.concatenate([np.ones((m, 1)), X], axis=1)
# computing the cost function value for these parameters
J, grad = linearRegCostFunction(X_bias, y, teta, 1)
print('Custo dos valores de teta = {}: {}'.format(teta, J))
print('Valores dos parâmetros teta para esse custo: {}'.format(grad))
###Output
Custo dos valores de teta = [1 1]: 303.9931922202643
Valores dos parâmetros teta para esse custo: [-15.30301567 598.25074417]
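###Markdown
As an extra sanity check on the analytic gradient above (a small added sketch, not part of the original exercise), we can compare it against a finite-difference approximation of the cost:
###Code
# finite-difference check of the analytic gradient at teta = [1, 1]
eps = 1e-4
num_grad = np.zeros_like(grad)
for j in range(teta.size):
    t_plus, t_minus = teta.astype(float).copy(), teta.astype(float).copy()
    t_plus[j] += eps
    t_minus[j] -= eps
    J_plus, _ = linearRegCostFunction(X_bias, y, t_plus, 1)
    J_minus, _ = linearRegCostFunction(X_bias, y, t_minus, 1)
    num_grad[j] = (J_plus - J_minus) / (2 * eps)
print('numerical:', num_grad)
print('analytic: ', grad)
###Output
_____no_output_____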
###Markdown
Training the linear regressionOnce your cost function and gradient are working correctly, the next cell will run the code in `trainLinearReg` to compute the optimal values of $\theta$. This training function uses the `scipy` optimization module to minimize the cost function.In this part, we set the regularization parameter $\lambda$ to zero. Because our current implementation of linear regression is trying to fit a two-dimensional $\theta$, regularization will not be extremely helpful for a $\theta$ of such low dimension. In later parts of the exercise, you will use polynomial regression with regularization.Finally, the code in the next cell should also plot the line of best fit, which should look similar to the figure below. The best-fit line tells us that the model is not a good fit for the data because the data have a nonlinear pattern. While visualizing the best fit as shown is one possible way to debug your learning algorithm, it is not always easy to visualize the data and the model. In the next section, you will implement a function to generate learning curves that can help you debug your learning algorithm even when it is not easy to visualize the data.
###Code
def trainLinearReg(linearRegCostFunction, X, y, lambda_ = 0.0, maxiter = 200):
"""
Trains linear regression using scipy's optimize.minimize.
Parameters
----------
X : array_like
The dataset with shape (m x n+1). The bias term is assumed to be concatenated.
y : array_like
Function values at each datapoint. A vector of shape (m,).
lambda_ : float, optional
The regularization parameter.
maxiter : int, optional
Maximum number of iteration for the optimization algorithm.
Returns
-------
theta : array_like
The parameters for linear regression. This is a vector of shape (n+1,).
"""
    # defining initial values for teta
    teta_inicial = np.zeros(X.shape[1])
    # creating a lambda wrapper around the cost function
    costFunction = lambda t: linearRegCostFunction(X, y, t, lambda_)
    # the wrapped cost function takes only a single argument
    options = {'maxiter': maxiter}
    # minimizing the cost function with scipy (by adjusting the parameters)
    res = optimize.minimize(costFunction, teta_inicial, jac = True, method = 'TNC', options = options)
return res
# understanding how a lambda function can be invoked (note that it works with a single argument instead of 4)
lambda_ = 0
costFunction = lambda teta: linearRegCostFunction(X_bias, y, teta, lambda_)
costFunction(np.array([1, 1]))
# obtaining the cost function value and the parameters after optimization
valores_otimizados = trainLinearReg(linearRegCostFunction, X_bias, y, lambda_ = 0)
print('Função de custo após a otimização: {}'.format(valores_otimizados.fun))
print('Valores de teta após a otimização: {}'.format(valores_otimizados.x))
# Visualizing the line obtained with the linear regression algorithm
plt.plot(X, y, 'ro', ms=10, mec='k', mew=1.5)
plt.xlabel('Mudança no nível da água (x)')
plt.ylabel('Água fluindo para fora da barragem (y)')
plt.plot(X, np.dot(X_bias, valores_otimizados.x), '--', lw=2)
###Output
_____no_output_____
###Markdown
Bias - VarianceAn important concept in machine learning is the bias-variance trade-off. Models with high bias are not complex enough for the data and tend to underfit, while models with high variance overfit the training data. Learning CurvesYou will now implement the code to generate the learning curves that will be useful for debugging learning algorithms. Recall that a learning curve plots the training and cross-validation error as a function of the training set size. Your job is to fill in the function **learningCurve** in the next cell so that it returns a vector of errors for the training set and the cross-validation set.To plot the learning curve, we need a training set error and a cross-validation set error for different training set sizes. To obtain different training set sizes, you should use different subsets of the original training set `X`. Specifically, for a training set size of $i$, you should use the first $i$ examples (i.e., `X[:i, :]`and `y[:i]`).After learning the parameters $\theta$, you should compute the error on the training and cross-validation sets. Recall that the training error for a dataset is defined as$$ J_{\text{train}} = \frac{1}{2m} \left[ \sum_{i=1}^m \left(h_\theta \left( x^{(i)} \right) - y^{(i)} \right)^2 \right] $$In particular, note that the training error does not include the regularization term. One way to compute the training error is to use your existing cost function and set $\lambda$ to 0 only when using it to compute the training error and the cross-validation error. When computing the training set error, make sure you compute it on the training subset (i.e., `X[:n, :]` and `y[:n]`) rather than on the entire training set. For the cross-validation error, however, you should compute it over the entire cross-validation set.
###Code
def learningCurve(X, y, Xval, yval, lambda_=0):
"""
Generates the train and cross validation set errors needed to plot a learning curve
returns the train and cross validation set errors for a learning curve.
In this function, you will compute the train and test errors for
dataset sizes from 1 up to m. In practice, when working with larger
datasets, you might want to do this in larger intervals.
Parameters
----------
X : array_like
The training dataset. Matrix with shape (m x n + 1) where m is the
total number of examples, and n is the number of features
before adding the bias term.
y : array_like
The functions values at each training datapoint. A vector of
shape (m, ).
Xval : array_like
The validation dataset. Matrix with shape (m_val x n + 1) where m is the
total number of examples, and n is the number of features
before adding the bias term.
yval : array_like
The functions values at each validation datapoint. A vector of
shape (m_val, ).
lambda_ : float, optional
The regularization parameter.
Returns
-------
error_train : array_like
A vector of shape m. error_train[i] contains the training error for
i examples.
error_val : array_like
A vecotr of shape m. error_val[i] contains the validation error for
i training examples.
Instructions
------------
Fill in this function to return training errors in error_train and the
cross validation errors in error_val. i.e., error_train[i] and
error_val[i] should give you the errors obtained after training on i examples.
Notes
-----
- You should evaluate the training error on the first i training
examples (i.e., X[:i, :] and y[:i]).
For the cross-validation error, you should instead evaluate on
the _entire_ cross validation set (Xval and yval).
- If you are using your cost function (linearRegCostFunction) to compute
the training and cross validation error, you should call the function with
the lambda argument set to 0. Do note that you will still need to use
lambda when running the training to obtain the theta parameters.
Hint
----
You can loop over the examples with the following:
for i in range(1, m+1):
# Compute train/cross validation errors using training examples
# X[:i, :] and y[:i], storing the result in
# error_train[i-1] and error_val[i-1]
....
"""
# number of training examples
m = y.size
# numpy arrays to store the prediction errors on the training and validation sets
erro_treinamento = np.zeros(m)
erro_validacao = np.zeros(m)
# ====================== YOUR CODE HERE ======================
for i in range(1, m + 1):
# train on the first i examples, using the regularization strength passed into this function
teta_t = trainLinearReg(linearRegCostFunction, X[:i], y[:i], lambda_ = lambda_)
# evaluate both errors with the regularization term switched off (lambda_ = 0), as instructed above
erro_treinamento[i - 1], _ = linearRegCostFunction(X[:i], y[:i], teta_t.x, lambda_ = 0)
erro_validacao[i - 1], _ = linearRegCostFunction(Xval, yval, teta_t.x, lambda_ = 0)
# =============================================================
return erro_treinamento, erro_validacao
###Output
_____no_output_____
###Markdown
When you have finished implementing the `learningCurve` function, running the next cell will print the learning curves and produce a plot similar to the figure below. In the learning curve figure, you can observe that both the training error and the cross-validation error remain high as the number of training examples is increased. This reflects a high bias problem in the model - the linear regression model is too simple and cannot fit our dataset well.
###Code
# add a bias (intercept) column to the training and validation feature matrices
X_bias = np.concatenate([np.ones((m, 1)), X], axis=1)
Xval_bias = np.concatenate([np.ones((yval.size, 1)), Xval], axis=1)
# compute the error values for the training data and the validation data
erro_treinamento, erro_validacao = learningCurve(X_bias, y, Xval_bias, yval, lambda_=0)
# plot the prediction error for the training data and the validation data
plt.figure(figsize = (10, 5))
plt.plot(np.arange(1, m+1), erro_treinamento, np.arange(1, m+1), erro_validacao, lw=2)
plt.title('Curva de aprendizado para regressão linear')
plt.legend(['Treinamento', 'Validação Cruzada'])
plt.xlabel('Número de exemplos treináveis')
plt.ylabel('Erro')
print('# Exemplos de Treinamento\tErro de Treinamento\tErro de Validação Cruzada')
for i in range(m):
print('{}\t\t\t\t{}\t{}'.format(i+1, erro_treinamento[i], erro_validacao[i]))
###Output
# Exemplos de Treinamento Erro de Treinamento Erro de Validação Cruzada
1 1.0176953929799205e-18 205.1210957127572
2 3.466571458294657e-09 110.30264057845221
3 3.2865950455012833 45.010231320931936
4 2.8426776893998307 48.36891082876348
5 13.154048809114924 35.86516473228544
6 19.443962512495464 33.829961665848444
7 20.098521655088877 31.97098567215687
8 18.172858695200024 30.862446202285934
9 22.609405424954733 31.13599809769148
10 23.261461592611813 28.93620747049214
11 24.31724958804416 29.55143162171058
12 22.373906495108915 29.43381813215488
###Markdown
Regularized Polynomial Regression The problem with our linear model is that it is too simple for the data and results in underfitting (high bias). In this part of the exercise, you will address this problem by adding more features. For polynomial regression, our hypothesis has the form: $$\begin{align}h_\theta(x) &= \theta_0 + \theta_1 \times (\text{waterLevel}) + \theta_2 \times (\text{waterLevel})^2 + \cdots + \theta_p \times (\text{waterLevel})^p \\& = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \cdots + \theta_p x_p\end{align}$$ Note that by defining $x_1 = (\text{waterLevel})$, $x_2 = (\text{waterLevel})^2$, $\cdots$, $x_p = (\text{waterLevel})^p$, we obtain a linear regression model in which the features are the various powers of the original value (waterLevel). Now you will add more features using the higher powers of the existing feature $x$ in the dataset. Your task in this part is to complete the code in the `polyFeatures` function in the next cell. The function should map the original training set $X$ of size $m \times 1$ into its higher powers. Specifically, when a training set $X$ of size $m \times 1$ is passed to the function, the function should return an $m \times p$ matrix `X_poly`, where column 1 holds the original values of $X$, column 2 holds the values of $X^2$, column 3 holds the values of $X^3$, and so on.
###Code
def polyFeatures(X, p):
"""
Maps X (1D vector) into the p-th power.
Parameters
----------
X : array_like
A data vector of size m, where m is the number of examples.
p : int
The polynomial power to map the features.
Returns
-------
X_poly : array_like
A matrix of shape (m x p) where p is the polynomial
power and m is the number of examples. That is:
X_poly[i, :] = [X[i], X[i]**2, X[i]**3 ... X[i]**p]
Instructions
------------
Given a vector X, return a matrix X_poly where the p-th column of
X contains the values of X to the p-th power.
"""
# initialize a numpy array to store the computed powers
X_polinomios = np.zeros((X.shape[0], p))
# ====================== YOUR CODE HERE ======================
# fill one column per polynomial degree
for i in range(p):
X_polinomios[:, i] = X[:, 0] ** (i + 1)
# ============================================================
return X_polinomios
###Output
_____no_output_____
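###Markdown
A quick sanity check of the mapping (with made-up values, not part of the original exercise): each column should hold an increasing power of the input.
###Code
# polyFeatures stacks increasing powers column by column
X_demo = np.array([[2.0], [3.0]])
polyFeatures(X_demo, 3)
# expected result:
# array([[ 2.,  4.,  8.],
#        [ 3.,  9., 27.]])
###Output
_____no_output_____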
###Markdown
You now have a function that maps features to a higher dimension. The next cell will apply it to the training set, the test set, and the cross-validation set.
###Code
def featureNormalize(X):
"""
Normalizes the features in X returns a normalized version of X where the mean value of each
feature is 0 and the standard deviation is 1. This is often a good preprocessing step to do when
working with learning algorithms.
Parameters
----------
X : array_like
A dataset which is an (m x n) matrix, where m is the number of examples,
and n is the number of dimensions for each example.
Returns
-------
X_norm : array_like
The normalized input dataset.
mu : array_like
A vector of size n corresponding to the mean for each dimension across all examples.
sigma : array_like
A vector of size n corresponding to the standard deviations for each dimension across
all examples.
"""
# compute the mean of the data
mu = np.mean(X, axis = 0)
X_norm = (X - mu)
# compute the standard deviation of the data
sigma = np.std(X_norm, axis = 0, ddof = 1)
# standardize the data to zero mean and unit standard deviation (z-score)
X_norm = (X - mu) / sigma
return X_norm, mu, sigma
# use polynomial features up to degree 8
p = 8
# map the training features to polynomial features and normalize them
X_polinomial = polyFeatures(X, p)
X_polinomial, mu, sigma = featureNormalize(X_polinomial)
X_polinomial = np.concatenate([np.ones((m, 1)), X_polinomial], axis=1)
# map the test features and normalize them (reusing the training mu and sigma)
X_polinomial_teste = polyFeatures(Xtest, p)
X_polinomial_teste -= mu
X_polinomial_teste /= sigma
# add a bias (intercept) column
X_polinomial_teste = np.concatenate([np.ones((ytest.size, 1)), X_polinomial_teste], axis=1)
# map the validation features and normalize them (reusing the training mu and sigma)
X_polinomial_validacao = polyFeatures(Xval, p)
X_polinomial_validacao -= mu
X_polinomial_validacao /= sigma
# add a bias (intercept) column
X_polinomial_validacao = np.concatenate([np.ones((yval.size, 1)), X_polinomial_validacao], axis=1)
print('Exemplos de treinamento normalizados: ')
X_polinomial[0, :]
###Output
Exemplos de treinamento normalizados:
###Markdown
After completing the `polyFeatures` function, we will go on to train polynomial regression using your linear regression cost function. Keep in mind that even though we have polynomial terms in our feature vector, we are still solving a linear regression optimization problem. The polynomial terms have simply turned into features that we can use for linear regression. We are using the same cost function and gradient that you wrote for the earlier part of this exercise. For this part of the exercise, you will use a polynomial of degree 8. It turns out that if we run the training directly on the projected data, it will not work well, because the features would be badly scaled (e.g., an example with $x = 40$ will now have a feature $x_8 = 40^8 = 6.5 \times 10^{12}$). Therefore, you will need to use normalization. Before learning the parameters $\theta$ for the polynomial regression, we first call `featureNormalize` and normalize the features of the training set, storing the mu and sigma parameters separately. After learning the parameters $\theta$, you should see two plots generated for polynomial regression with $\lambda = 0$, which should be similar to the ones here: You should see that the polynomial fit is able to follow the data points very well, thus obtaining a low training error. The figure on the right shows that the training error remains essentially zero for all numbers of training samples. However, the polynomial fit is very complex and even drops off at the extremes. This is an indicator that the polynomial regression model is overfitting the training data and will not generalize well. To better understand the problems with the unregularized model ($\lambda = 0$), you can see that the learning curve shows the same effect, where the training error is low but the cross-validation error is high. There is a gap between the training and cross-validation errors, indicating a high variance problem.
###Code
def plotFit(polyFeatures, min_x, max_x, mu, sigma, teta, p):
"""
Plots a learned polynomial regression fit over an existing figure.
Also works with linear regression.
Plots the learned polynomial fit with power p and feature normalization (mu, sigma).
Parameters
----------
polyFeatures : func
A function which generators polynomial features from a single feature.
min_x : float
The minimum value for the feature.
max_x : float
The maximum value for the feature.
mu : float
The mean feature value over the training dataset.
sigma : float
The feature standard deviation of the training dataset.
theta : array_like
The parameters for the trained polynomial linear regression.
p : int
The polynomial order.
"""
# plot over a range slightly wider than the min and max values to get
# an idea of how the fit behaves outside the range of the data points
x = np.arange(min_x - 15, max_x + 25, 0.05).reshape(-1, 1)
# map the plotting range to polynomial features and normalize them
X_polinomio = polyFeatures(x, p)
X_polinomio -= mu
X_polinomio /= sigma
# add the bias (intercept) column
X_polinomio = np.concatenate([np.ones((x.shape[0], 1)), X_polinomio], axis=1)
# plot the fitted curve
plt.plot(x, np.dot(X_polinomio, teta), '--', lw=2)
return None
lambda_ = 1
# obtain the optimized theta values
teta = trainLinearReg(linearRegCostFunction, X_polinomial, y, lambda_=lambda_, maxiter = 55)
# plot the data together with the learned polynomial curve
plt.figure(figsize = (10, 5))
plt.plot(X, y, 'ro', ms=10, mew=1.5, mec='k')
plotFit(polyFeatures, np.min(X), np.max(X), mu, sigma, teta.x, p)
plt.xlabel('Mudança no nível da água (x)')
plt.ylabel('Água fluindo para fora da barragem (y)')
plt.title('Ajuste de regressão polinomial (lambda =% f)' % lambda_)
plt.ylim([-20, 50])
plt.figure(figsize = (10, 5))
# compute the prediction errors for the training data and the validation data
erro_treinamento, erro_validacao = learningCurve(X_polinomial, y, X_polinomial_validacao, yval, lambda_)
plt.plot(np.arange(1, 1+m), erro_treinamento, np.arange(1, 1+m), erro_validacao)
plt.title('Curva de aprendizado de regressão polinomial (lambda =% f)' % lambda_)
plt.xlabel('Número de exemplos treináveis')
plt.ylabel('Erro')
plt.axis([0, 13, 0, 100])
plt.legend(['Treinamento', 'Validação Cruzada'])
# print the errors for the training data and the validation data
print('Regressão polinomial (lambda =% f) \n' % lambda_)
print('# Exemplos de Treinamento\tErro de Treinamento\tErro de Validação Cruzada')
for i in range(m):
print('{}\t\t\t\t{}\t{}'.format(i+1, erro_treinamento[i], erro_validacao[i]))
###Output
Regressão polinomial (lambda = 1.000000)
# Exemplos de Treinamento Erro de Treinamento Erro de Validação Cruzada
1 3.859500834000432e-18 160.72189969292504
2 1.1930763209757199e-15 160.12151057471988
3 2.071029708106108e-15 59.07163515099596
4 2.529302939607181e-14 77.99800443468749
5 2.619151881368912e-13 6.44903267892577
6 1.199041652627713e-09 10.829904725223551
7 1.1477531309141007e-08 27.917627576373725
8 0.0014422436133806741 18.841672110191364
9 0.00018516106251086653 31.270978495497804
10 0.014306064406882769 76.11884337225582
11 0.032763615207474964 38.04180051121988
12 0.030050528636162192 39.119109734143265
###Markdown
One way to combat the overfitting (high variance) problem is to add regularization to the model. In the next section, you will try different $\lambda$ parameters to see how regularization can lead to a better model. Tuning the regularization hyperparameter In this section, you will see how the regularization parameter affects the bias-variance tradeoff of regularized polynomial regression. You should now modify the lambda parameter and try $\lambda = 1, 100$. For each of these values, the script should generate a polynomial fit to the data and also a learning curve. For $\lambda = 1$, the generated plots should be similar to the figure below. You should see a polynomial fit that follows the trend of the data well (left) and a learning curve (right) showing that both the cross-validation and training errors converge to a relatively low value. This shows that the regularized polynomial regression model with $\lambda = 1$ does not suffer from high bias or high variance; in fact, it achieves a good tradeoff between bias and variance. For $\lambda = 100$, you should see a polynomial fit (figure below) that does not follow the data well. In this case, there is too much regularization and the model is unable to fit the training data. Selecting $\lambda$ using cross-validation In the previous parts of the exercise, you observed that the value of $\lambda$ can significantly affect the results of regularized polynomial regression on the training and cross-validation sets. In particular, a model without regularization ($\lambda = 0$) fits the training set well but does not generalize. Conversely, a model with too much regularization ($\lambda = 100$) does not fit the training set and the test set well. A good choice of $\lambda$ (e.g., $\lambda = 1$) can provide a good fit to the data. In this section, you will implement an automated method to select the $\lambda$ parameter. Concretely, you will use a cross-validation set to evaluate how good each value of $\lambda$ is. After selecting the best $\lambda$ value using the cross-validation set, we can evaluate the model on the test set to estimate how well it will perform on real, unseen data. Your task is to complete the code in the `validationCurve` function. Specifically, you should use the `trainLinearReg` function to train the model using different values of $\lambda$ and compute the training error and the cross-validation error. You should try $\lambda$ in the following range: {0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10}.
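As a quick sketch of the $\lambda = 100$ experiment described above (reusing the functions and arrays already defined in this notebook), the over-regularized case can be reproduced by re-running the fitting and learning-curve code with a different regularization strength:
###Code
# Sketch: reproduce the over-regularized (lambda = 100) case discussed above,
# assuming the earlier cells in this notebook have already been run
lambda_ = 100
teta = trainLinearReg(linearRegCostFunction, X_polinomial, y, lambda_=lambda_, maxiter=55)
erro_treinamento, erro_validacao = learningCurve(X_polinomial, y, X_polinomial_validacao, yval, lambda_)
###Output
_____no_output_____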
###Code
def validationCurve(X, y, Xval, yval):
"""
Generate the train and validation errors needed to plot a validation
curve that we can use to select lambda_.
Parameters
----------
X : array_like
The training dataset. Matrix with shape (m x n) where m is the
total number of training examples, and n is the number of features
including any polynomial features.
y : array_like
The functions values at each training datapoint. A vector of
shape (m, ).
Xval : array_like
The validation dataset. Matrix with shape (m_val x n) where m_val is the
total number of validation examples, and n is the number of features
including any polynomial features.
yval : array_like
The functions values at each validation datapoint. A vector of
shape (m_val, ).
Returns
-------
lambda_vec : list
The values of the regularization parameters which were used in
cross validation.
error_train : list
The training error computed at each value for the regularization
parameter.
error_val : list
The validation error computed at each value for the regularization
parameter.
Instructions
------------
Fill in this function to return training errors in `error_train` and
the validation errors in `error_val`. The vector `lambda_vec` contains
the different lambda parameters to use for each calculation of the
errors, i.e, `error_train[i]`, and `error_val[i]` should give you the
errors obtained after training with `lambda_ = lambda_vec[i]`.
Note
----
You can loop over lambda_vec with the following:
for i in range(len(lambda_vec))
lambda = lambda_vec[i]
# Compute train / val errors when training linear
# regression with regularization parameter lambda_
# You should store the result in error_train[i]
# and error_val[i]
....
"""
# candidate values for the regularization hyperparameter lambda
lambda_vec = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
# arrays to store the training and validation prediction errors
erro_treinamento = np.zeros(len(lambda_vec))
erro_validacao = np.zeros(len(lambda_vec))
# ====================== YOUR CODE HERE ======================
for i in range(len(lambda_vec)):
lambda_try = lambda_vec[i]
teta_t = trainLinearReg(linearRegCostFunction, X, y, lambda_ = lambda_try)
erro_treinamento[i], _ = linearRegCostFunction(X, y, teta_t.x, lambda_ = 0)
erro_validacao[i], _ = linearRegCostFunction(Xval, yval, teta_t.x, lambda_ = 0)
# ============================================================
return lambda_vec, erro_treinamento, erro_validacao
###Output
_____no_output_____
###Markdown
After completing the code, the next cell will run your function and plot a cross-validation curve of error vs. $\lambda$, which lets you select which $\lambda$ parameter to use. You should see a plot similar to the figure below. In this figure, we can see that the best value of $\lambda$ is around 3. Due to the randomness in the training and validation splits of the dataset, the cross-validation error can sometimes be lower than the training error.
###Code
lambda_vec, erro_treinamento, erro_validacao = validationCurve(X_polinomial, y, X_polinomial_validacao, yval)
plt.figure(figsize = (10, 5))
plt.plot(lambda_vec, erro_treinamento, '-o', lambda_vec, erro_validacao, '-o', lw = 2)
plt.legend(['Treinamento', 'Validação Cruzada'])
plt.xlabel('lambda')
plt.ylabel('Erro')
print('lambda\t\tErro de Treinamento\tErro de Validação')
for i in range(len(lambda_vec)):
print('{}\t\t{}\t{}'.format(lambda_vec[i], erro_treinamento[i], erro_validacao[i]))
###Output
lambda Erro de Treinamento Erro de Validação
0 0.030050528636162192 39.119109734143265
0.001 0.11274587795818126 9.844088638063784
0.003 0.1710418958031822 16.277693853361665
0.01 0.22147683088770645 16.912547865800423
0.03 0.2818279277763506 12.829495305555342
0.1 0.4593280639738924 7.586642390152084
0.3 0.9217613979717447 4.636819997428417
1 2.076199483729708 4.26060177469673
3 4.901371970848054 3.822928671071926
10 16.092272700585347 9.945554220768376
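###Markdown
A possible follow-up (a sketch, not part of the recorded output above): once the best $\lambda$ has been chosen from the validation curve (about 3 here), the generalization error can be estimated on the test set, reusing the functions and the mapped test set defined earlier in this notebook.
###Code
# Sketch: estimate the test error with the lambda selected above
# (assumes X_polinomial, X_polinomial_teste, y and ytest from the earlier cells)
best_lambda = 3
teta_best = trainLinearReg(linearRegCostFunction, X_polinomial, y, lambda_=best_lambda)
erro_teste, _ = linearRegCostFunction(X_polinomial_teste, ytest, teta_best.x, lambda_=0)
print('Test error (lambda = {}): {}'.format(best_lambda, erro_teste))
###Output
_____no_output_____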
|
notebooks/semisupervised/MNIST/baseline/augmented2/aug-mnist-16ex.ipynb | ###Markdown
Choose GPU
###Code
%env CUDA_DEVICE_ORDER=PCI_BUS_ID
%env CUDA_VISIBLE_DEVICES=1
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if len(gpu_devices)>0:
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
print(gpu_devices)
tf.keras.backend.clear_session()
###Output
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
###Markdown
dataset information
###Code
from datetime import datetime
dataset = "mnist"
dims = (28, 28, 1)
num_classes = 10
labels_per_class = 16 # full
batch_size = 128
datestring = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
datestring = (
str(dataset)
+ "_"
+ str(labels_per_class)
+ "____"
+ datestring
+ '_baseline_augmented'
)
print(datestring)
###Output
mnist_16____2020_08_26_22_36_42_823740_baseline_augmented
###Markdown
Load packages
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
from IPython import display
import pandas as pd
import umap
import copy
import os, tempfile
###Output
/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)
" (e.g. in jupyter console)", TqdmExperimentalWarning)
###Markdown
Load dataset
###Code
from tfumap.load_datasets import load_MNIST, mask_labels
X_train, X_test, X_valid, Y_train, Y_test, Y_valid = load_MNIST(flatten=False)
X_train.shape
if labels_per_class == "full":
X_labeled = X_train
Y_masked = Y_labeled = Y_train
else:
X_labeled, Y_labeled, Y_masked = mask_labels(
X_train, Y_train, labels_per_class=labels_per_class
)
###Output
_____no_output_____
###Markdown
Build network
###Code
from tensorflow.keras import datasets, layers, models
from tensorflow_addons.layers import WeightNormalization
def conv_block(filts, name, kernel_size = (3, 3), padding = "same", **kwargs):
return WeightNormalization(
layers.Conv2D(
filts, kernel_size, activation=None, padding=padding, **kwargs
),
name="conv"+name,
)
#CNN13
#See:
#https://github.com/vikasverma1077/ICT/blob/master/networks/lenet.py
#https://github.com/brain-research/realistic-ssl-evaluation
lr_alpha = 0.1
dropout_rate = 0.5
num_classes = 10
input_shape = dims
model = models.Sequential()
model.add(tf.keras.Input(shape=input_shape))
### conv1a
name = '1a'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv1b
name = '1b'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv1c
name = '1c'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# max pooling
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid', name="mp1"))
# dropout
model.add(layers.Dropout(dropout_rate, name="drop1"))
### conv2a
name = '2a'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha))
### conv2b
name = '2b'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv2c
name = '2c'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# max pooling
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid', name="mp2"))
# dropout
model.add(layers.Dropout(dropout_rate, name="drop2"))
### conv3a
name = '3a'
model.add(conv_block(name = name, filts = 512, kernel_size = (3,3), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv3b
name = '3b'
model.add(conv_block(name = name, filts = 256, kernel_size = (1,1), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv3c
name = '3c'
model.add(conv_block(name = name, filts = 128, kernel_size = (1,1), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# max pooling
model.add(layers.AveragePooling2D(pool_size=(3, 3), strides=2, padding='valid'))
model.add(layers.Flatten())
model.add(layers.Dense(256, activation=None, name='z'))
model.add(WeightNormalization(layers.Dense(256, activation=None)))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelufc1'))
model.add(WeightNormalization(layers.Dense(256, activation=None)))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelufc2'))
model.add(WeightNormalization(layers.Dense(num_classes, activation=None)))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1a (WeightNormalization) (None, 28, 28, 128) 2689
_________________________________________________________________
bn1a (BatchNormalization) (None, 28, 28, 128) 512
_________________________________________________________________
lrelu1a (LeakyReLU) (None, 28, 28, 128) 0
_________________________________________________________________
conv1b (WeightNormalization) (None, 28, 28, 128) 295297
_________________________________________________________________
bn1b (BatchNormalization) (None, 28, 28, 128) 512
_________________________________________________________________
lrelu1b (LeakyReLU) (None, 28, 28, 128) 0
_________________________________________________________________
conv1c (WeightNormalization) (None, 28, 28, 128) 295297
_________________________________________________________________
bn1c (BatchNormalization) (None, 28, 28, 128) 512
_________________________________________________________________
lrelu1c (LeakyReLU) (None, 28, 28, 128) 0
_________________________________________________________________
mp1 (MaxPooling2D) (None, 14, 14, 128) 0
_________________________________________________________________
drop1 (Dropout) (None, 14, 14, 128) 0
_________________________________________________________________
conv2a (WeightNormalization) (None, 14, 14, 256) 590593
_________________________________________________________________
bn2a (BatchNormalization) (None, 14, 14, 256) 1024
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 14, 14, 256) 0
_________________________________________________________________
conv2b (WeightNormalization) (None, 14, 14, 256) 1180417
_________________________________________________________________
bn2b (BatchNormalization) (None, 14, 14, 256) 1024
_________________________________________________________________
lrelu2b (LeakyReLU) (None, 14, 14, 256) 0
_________________________________________________________________
conv2c (WeightNormalization) (None, 14, 14, 256) 1180417
_________________________________________________________________
bn2c (BatchNormalization) (None, 14, 14, 256) 1024
_________________________________________________________________
lrelu2c (LeakyReLU) (None, 14, 14, 256) 0
_________________________________________________________________
mp2 (MaxPooling2D) (None, 7, 7, 256) 0
_________________________________________________________________
drop2 (Dropout) (None, 7, 7, 256) 0
_________________________________________________________________
conv3a (WeightNormalization) (None, 5, 5, 512) 2360833
_________________________________________________________________
bn3a (BatchNormalization) (None, 5, 5, 512) 2048
_________________________________________________________________
lrelu3a (LeakyReLU) (None, 5, 5, 512) 0
_________________________________________________________________
conv3b (WeightNormalization) (None, 5, 5, 256) 262913
_________________________________________________________________
bn3b (BatchNormalization) (None, 5, 5, 256) 1024
_________________________________________________________________
lrelu3b (LeakyReLU) (None, 5, 5, 256) 0
_________________________________________________________________
conv3c (WeightNormalization) (None, 5, 5, 128) 65921
_________________________________________________________________
bn3c (BatchNormalization) (None, 5, 5, 128) 512
_________________________________________________________________
lrelu3c (LeakyReLU) (None, 5, 5, 128) 0
_________________________________________________________________
average_pooling2d (AveragePo (None, 2, 2, 128) 0
_________________________________________________________________
flatten (Flatten) (None, 512) 0
_________________________________________________________________
z (Dense) (None, 256) 131328
_________________________________________________________________
weight_normalization (Weight (None, 256) 131841
_________________________________________________________________
lrelufc1 (LeakyReLU) (None, 256) 0
_________________________________________________________________
weight_normalization_1 (Weig (None, 256) 131841
_________________________________________________________________
lrelufc2 (LeakyReLU) (None, 256) 0
_________________________________________________________________
weight_normalization_2 (Weig (None, 10) 5151
=================================================================
Total params: 6,642,730
Trainable params: 3,388,308
Non-trainable params: 3,254,422
_________________________________________________________________
###Markdown
Augmentation
###Code
#https://github.com/tanzhenyu/image_augmentation/blob/master/image_augmentation/image/image_ops.py
IMAGE_DTYPES = [tf.uint8, tf.float32, tf.float16, tf.float64]
def _check_image_dtype(image):
assert image.dtype in IMAGE_DTYPES, "image with " + str(image.dtype) + " is not supported for this operation"
@tf.function
def invert(image, name=None):
"""Inverts the pixels of an `image`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "invert"):
if image.dtype == tf.uint8:
inv_image = 255 - image
else:
inv_image = 1. - image
return inv_image
@tf.function
def cutout(image, size=16, color=None, name=None):
"""This is an implementation of Cutout as described in "Improved
Regularization of Convolutional Neural Networks with Cutout" by
DeVries & Taylor (https://arxiv.org/abs/1708.04552).
It applies a random square patch of specified `size` over an `image`
and by replacing those pixels with value of `color`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
size: A 0-D int tensor or single int value that is divisible by 2.
color: A single pixel value (grayscale) or tuple of 3 values (RGB),
in case a single value is used for RGB image the value is tiled.
Gray color (128) is used by default.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "cutout"):
image_shape = tf.shape(image)
height, width, channels = image_shape[0], image_shape[1], image_shape[2]
loc_x = tf.random.uniform((), 0, width, tf.int32)
loc_y = tf.random.uniform((), 0, height, tf.int32)
ly, lx = tf.maximum(0, loc_y - size // 2), tf.maximum(0, loc_x - size // 2)
uy, ux = tf.minimum(height, loc_y + size // 2), tf.minimum(width, loc_x + size // 2)
gray = tf.constant(128)
if color is None:
if image.dtype == tf.uint8:
color = tf.repeat(gray, channels)
else:
color = tf.repeat(tf.cast(gray, tf.float32) / 255., channels)
else:
color = tf.convert_to_tensor(color)
color = tf.cast(color, image.dtype)
cut = tf.ones((uy - ly, ux - lx, channels), image.dtype)
top = image[0: ly, 0: width]
between = tf.concat([
image[ly: uy, 0: lx],
cut * color,
image[ly: uy, ux: width]
], axis=1)
bottom = image[uy: height, 0: width]
cutout_image = tf.concat([top, between, bottom], axis=0)
return cutout_image
@tf.function
def solarize(image, threshold, name=None):
"""Inverts the pixels of an `image` above a certain `threshold`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
threshold: A 0-D int / float tensor or int / float value for setting
inversion threshold.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "solarize"):
threshold = tf.cast(threshold, image.dtype)
inverted_image = invert(image)
solarized_image = tf.where(image < threshold, image, inverted_image)
return solarized_image
@tf.function
def solarize_add(image, addition, threshold=None, name=None):
"""Adds `addition` intensity to each pixel and inverts the pixels
of an `image` above a certain `threshold`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
addition: A 0-D int / float tensor or int / float value that is to be
added to each pixel.
threshold: A 0-D int / float tensor or int / float value for setting
inversion threshold. 128 (int) / 0.5 (float) is used by default.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "solarize_add"):
if threshold is None:
threshold = tf.image.convert_image_dtype(tf.constant(128, tf.uint8), image.dtype)
addition = tf.cast(addition, image.dtype)
added_image = image + addition
dark, bright = tf.constant(0, tf.uint8), tf.constant(255, tf.uint8)
added_image = tf.clip_by_value(added_image, tf.image.convert_image_dtype(dark, image.dtype),
tf.image.convert_image_dtype(bright, image.dtype))
return solarize(added_image, threshold)
@tf.function
def posterize(image, num_bits, name=None):
"""Reduces the number of bits used to represent an `image`
for each color channel.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
num_bits: A 0-D int tensor or integer value representing number of bits.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "posterize"):
orig_dtype = image.dtype
image = tf.image.convert_image_dtype(image, tf.uint8)
num_bits = tf.cast(num_bits, tf.int32)
mask = tf.cast(2 ** (8 - num_bits) - 1, tf.uint8)
mask = tf.bitwise.invert(mask)
posterized_image = tf.bitwise.bitwise_and(image, mask)
posterized_image = tf.image.convert_image_dtype(posterized_image, orig_dtype, saturate=True)
return posterized_image
@tf.function
def equalize(image, name=None):
"""Equalizes the `image` histogram. In case of an RGB image, equalization
individually for each channel.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "equalize"):
orig_dtype = image.dtype
image = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
image = tf.cast(image, tf.int32)
def equalize_grayscale(image_channel):
"""Equalizes the histogram of a grayscale (2D) image."""
bins = tf.constant(256, tf.int32)
histogram = tf.math.bincount(image_channel, minlength=bins)
nonzero = tf.where(tf.math.not_equal(histogram, 0))
nonzero_histogram = tf.reshape(tf.gather(histogram, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histogram) - nonzero_histogram[-1]) // (bins - 1)
# use a lut similar to PIL
def normalize(histogram, step):
norm_histogram = (tf.math.cumsum(histogram) + (step // 2)) // step
norm_histogram = tf.concat([[0], norm_histogram], axis=0)
norm_histogram = tf.clip_by_value(norm_histogram, 0, bins - 1)
return norm_histogram
return tf.cond(tf.math.equal(step, 0),
lambda: image_channel,
lambda: tf.gather(normalize(histogram, step), image_channel))
channels_first_image = tf.transpose(image, [2, 0, 1])
channels_first_equalized_image = tf.map_fn(equalize_grayscale, channels_first_image)
equalized_image = tf.transpose(channels_first_equalized_image, [1, 2, 0])
equalized_image = tf.cast(equalized_image, tf.uint8)
equalized_image = tf.image.convert_image_dtype(equalized_image, orig_dtype)
return equalized_image
@tf.function
def auto_contrast(image, name=None):
"""Normalizes `image` contrast by remapping the `image` histogram such
that the brightest pixel becomes 1.0 (float) / 255 (unsigned int) and
darkest pixel becomes 0.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "auto_contrast"):
orig_dtype = image.dtype
image = tf.image.convert_image_dtype(image, tf.float32)
min_val, max_val = tf.reduce_min(image, axis=[0, 1]), tf.reduce_max(image, axis=[0, 1])
norm_image = (image - min_val) / (max_val - min_val)
norm_image = tf.image.convert_image_dtype(norm_image, orig_dtype, saturate=True)
return norm_image
@tf.function
def blend(image1, image2, factor, name=None):
"""Blends an image with another using `factor`.
Args:
image1: An int or float tensor of shape `[height, width, num_channels]`.
image2: An int or float tensor of shape `[height, width, num_channels]`.
factor: A 0-D float tensor or single floating point value depicting
a weight above 0.0 for combining the example_images.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image1` and `image2`.
"""
_check_image_dtype(image1)
_check_image_dtype(image2)
assert image1.dtype == image2.dtype, "image1 type should exactly match type of image2"
if factor == 0.0:
return image1
elif factor == 1.0:
return image2
else:
with tf.name_scope(name or "blend"):
orig_dtype = image2.dtype
image1, image2 = tf.image.convert_image_dtype(image1, tf.float32), tf.image.convert_image_dtype(image2, tf.float32)
scaled_diff = (image2 - image1) * factor
blended_image = image1 + scaled_diff
blended_image = tf.image.convert_image_dtype(blended_image, orig_dtype, saturate=True)
return blended_image
@tf.function
def sample_pairing(image1, image2, weight, name=None):
"""Alias of `blend`. This is an implementation of SamplePairing
as described in "Data Augmentation by Pairing Samples for Images Classification"
by Inoue (https://arxiv.org/abs/1801.02929).
Args:
image1: An int or float tensor of shape `[height, width, num_channels]`.
image2: An int or float tensor of shape `[height, width, num_channels]`.
weight: A 0-D float tensor or single floating point value depicting
a weight factor above 0.0 for combining the example_images.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image1`.
"""
with tf.name_scope(name or "sample_pairing"):
paired_image = blend(image1, image2, weight)
return paired_image
@tf.function
def color(image, magnitude, name=None):
"""Adjusts the `magnitude` of color of an `image`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
magnitude: A 0-D float tensor or single floating point value above 0.0.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "color"):
tiled_gray_image = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
colored_image = blend(tiled_gray_image, image, magnitude)
return colored_image
@tf.function
def sharpness(image, magnitude, name=None):
"""Adjusts the `magnitude` of sharpness of an `image`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
magnitude: A 0-D float tensor or single floating point value above 0.0.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "sharpness"):
orig_dtype = image.dtype
image = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
image = tf.cast(image, tf.float32)
blur_kernel = tf.constant([[1, 1, 1],
[1, 5, 1],
[1, 1, 1]], tf.float32, shape=[3, 3, 1, 1]) / 13
blur_kernel = tf.tile(blur_kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
# add extra dimension to image before conv
blurred_image = tf.nn.depthwise_conv2d(image[None, ...], blur_kernel,
strides, padding="VALID")
blurred_image = tf.clip_by_value(blurred_image, 0., 255.)
# remove extra dimension
blurred_image = blurred_image[0]
mask = tf.ones_like(blurred_image)
extra_padding = tf.constant([[1, 1],
[1, 1],
[0, 0]], tf.int32)
padded_mask = tf.pad(mask, extra_padding)
padded_blurred_image = tf.pad(blurred_image, extra_padding)
blurred_image = tf.where(padded_mask == 1, padded_blurred_image, image)
sharpened_image = blend(blurred_image, image, magnitude)
sharpened_image = tf.cast(sharpened_image, tf.uint8)
sharpened_image = tf.image.convert_image_dtype(sharpened_image, orig_dtype)
return sharpened_image
@tf.function
def brightness(image, magnitude, name=None):
"""Adjusts the `magnitude` of brightness of an `image`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
magnitude: A 0-D float tensor or single floating point value above 0.0.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "brightness"):
dark = tf.zeros_like(image)
bright_image = blend(dark, image, magnitude)
return bright_image
@tf.function
def contrast(image, magnitude, name=None):
"""Adjusts the `magnitude` of contrast of an `image`.
Args:
image: An int or float tensor of shape `[height, width, num_channels]`.
magnitude: A 0-D float tensor or single floating point value above 0.0.
name: An optional string for name of the operation.
Returns:
A tensor with same shape and type as that of `image`.
"""
_check_image_dtype(image)
with tf.name_scope(name or "contrast"):
orig_dtype = image.dtype
image = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
grayed_image = tf.image.rgb_to_grayscale(image)
grayed_image = tf.cast(grayed_image, tf.int32)
bins = tf.constant(256, tf.int32)
histogram = tf.math.bincount(grayed_image, minlength=bins)
histogram = tf.cast(histogram, tf.float32)
mean = tf.reduce_sum(tf.cast(grayed_image, tf.float32)) / tf.reduce_sum(histogram)
mean = tf.clip_by_value(mean, 0.0, 255.0)
mean = tf.cast(mean, tf.uint8)
mean_image = tf.ones_like(grayed_image, tf.uint8) * mean
mean_image = tf.image.grayscale_to_rgb(mean_image)
contrast_image = blend(mean_image, image, magnitude)
contrast_image = tf.image.convert_image_dtype(contrast_image, orig_dtype, saturate=True)
return contrast_image
import tensorflow_addons as tfa
def get_augment(
augment_probability=0.25,
brightness_range=[1e-5, 1.5],
contrast_range=[1e-5, 1],
cutout_range=[0, 0.5],
rescale_range=[0.5, 1],
rescale_range_x_range=0.5,
rescale_range_y_range=0.5,
rotate_range=[-3.14, 3.14],
shear_x_range=[-0.3, 0.3],
shear_y_range=[-0.3, 0.3],
translate_x_range=0.3,
translate_y_range=0.3,
dims=(28, 28, 1),
):
def augment(image, label):
#image = tf.image.random_flip_left_right(image)
random_switch = tf.cast(
tf.random.uniform(
(1,), minval=0, maxval=1 + int(1 / augment_probability), dtype=tf.int32
)[0]
== 1,
tf.bool,
)
if random_switch:
return image, label
# Brightness 0-1
brightness_factor = tf.random.uniform(
(1,),
minval=brightness_range[0],
maxval=brightness_range[1],
dtype=tf.float32,
)[0]
image = brightness(image, brightness_factor)
# rescale 0.5-1
rescale_factor = tf.random.uniform(
(1,), minval=rescale_range[0], maxval=rescale_range[1], dtype=tf.float32
)[0]
image = tf.image.random_crop(image, [dims[0]*rescale_factor, dims[1]*rescale_factor, dims[2]])
image = tf.image.resize(image, [dims[0], dims[1]])
# sqeeze x or y
randint_hor = tf.random.uniform(
(2,),
minval=0,
maxval=tf.cast(rescale_range_x_range * dims[0], tf.int32),
dtype=tf.int32,
)[0]
randint_vert = tf.random.uniform(
(2,),
minval=0,
maxval=tf.cast(rescale_range_y_range * dims[1], tf.int32),
dtype=tf.int32,
)[0]
image = tf.image.resize(
image, (dims[0] + randint_vert * 2, dims[1] + randint_hor * 2)
)
image = tf.image.resize_with_pad(image, dims[0], dims[1])
image = tf.image.resize_with_crop_or_pad(
image, dims[0] + 3, dims[1] + 3
) # crop 6 pixels
image = tf.image.random_crop(image, size=dims)
# rotate -45 45
rotate_factor = tf.random.uniform(
(1,),
minval=rotate_range[0],
maxval=rotate_range[1],
dtype=tf.float32,
)[0]
image = tfa.image.rotate(image, rotate_factor, interpolation="BILINEAR",)
# shear_x -0.3, 3
shear_x_factor = tf.random.uniform(
(1,), minval=shear_x_range[0], maxval=shear_x_range[1], dtype=tf.float32
)[0]
img = tf.repeat(tf.cast(image * 255, tf.uint8), 3, axis=2)
image = tf.cast(tfa.image.shear_x(
img, shear_x_factor, replace=0
)[:,:,:1], tf.float32) / 255
# shear_y -0.3, 3
shear_y_factor = tf.random.uniform(
(1,), minval=shear_x_range[0], maxval=shear_y_range[1], dtype=tf.float32
)[0]
img = tf.repeat(tf.cast(image * 255, tf.uint8), 3, axis=2)
image = tf.cast(tfa.image.shear_y(
img, shear_y_factor, replace=0
)[:,:,:1], tf.float32) / 255.
#print(image.shape)
# translate x -0.3, 0.3
translate_x_factor = tf.random.uniform(
(1,), minval=0, maxval=translate_x_range * 2, dtype=tf.float32
)[0]
# translate y -0.3, 0.3
translate_y_factor = tf.random.uniform(
(1,), minval=0, maxval=translate_y_range * 2, dtype=tf.float32
)[0]
image = tf.image.resize_with_crop_or_pad(
image,
dims[0] + tf.cast(translate_x_factor * dims[0], tf.int32),
dims[1] + tf.cast(translate_x_factor * dims[1], tf.int32),
) # crop 6 pixels
image = tf.image.random_crop(image, size=dims)
# contrast 0-1
contrast_factor = tf.random.uniform(
(1,), minval=contrast_range[0], maxval=contrast_range[1], dtype=tf.float32
)[0]
image = tf.image.adjust_contrast(image, contrast_factor)
image = image - tf.reduce_min(image)
# cutout 0-0.5
cutout_factor = tf.random.uniform(
(1,), minval=cutout_range[0], maxval=cutout_range[1], dtype=tf.float32
)[0]
image = cutout(image, tf.cast(cutout_factor * dims[0], tf.int32))
image = tf.clip_by_value(image, 0.0,1.0)
return image, label
return augment
augment = get_augment(
augment_probability=0.1,
brightness_range=[0.5, 1],
contrast_range=[0.5, 2],
cutout_range=[0, 0.75],
rescale_range=[0.75, 1],
rescale_range_x_range=0.9,
rescale_range_y_range=0.9,
rotate_range=[-0.5, 0.5],
shear_x_range=[-0.3, 0.3],
shear_y_range=[-0.3, 0.3],
translate_x_range=0.2,
translate_y_range=0.2,
dims=(28, 28, 1),
)
nex = 10
for i in range(5):
fig, axs = plt.subplots(ncols=nex +1, figsize=((nex+1)*2, 2))
axs[0].imshow(np.squeeze(X_train[i]), cmap = plt.cm.Greys)
axs[0].axis('off')
for ax in axs.flatten()[1:]:
aug_img = np.squeeze(augment(X_train[i], Y_train[i])[0])
ax.matshow(aug_img, cmap = plt.cm.Greys, vmin=0, vmax=1)
ax.axis('off')
###Output
_____no_output_____
###Markdown
train
###Code
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_accuracy', min_delta=0, patience=100, verbose=1, mode='auto',
baseline=None, restore_best_weights=True
)
import tensorflow_addons as tfa
opt = tf.keras.optimizers.Adam(1e-4)
opt = tfa.optimizers.MovingAverage(opt)
loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.2, from_logits=True)
model.compile(opt, loss = loss, metrics=['accuracy'])
Y_valid_one_hot = tf.keras.backend.one_hot(
Y_valid, num_classes
)
Y_labeled_one_hot = tf.keras.backend.one_hot(
Y_labeled, num_classes
)
from livelossplot import PlotLossesKerasTF
# plot losses callback
plotlosses = PlotLossesKerasTF()
train_ds = (
tf.data.Dataset.from_tensor_slices((X_labeled, Y_labeled_one_hot))
.repeat()
.shuffle(len(X_labeled))
.map(augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
.batch(batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
)
steps_per_epoch = int(len(X_train)/ batch_size)
history = model.fit(
train_ds,
epochs=500,
validation_data=(X_valid, Y_valid_one_hot),
callbacks = [early_stopping, plotlosses],
steps_per_epoch = steps_per_epoch,
)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
submodel = tf.keras.models.Model(
[model.inputs[0]], [model.get_layer('z').output]
)
z = submodel.predict(X_train)
np.shape(z)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
z_valid = submodel.predict(X_valid)
np.shape(z_valid)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z_valid.reshape(len(z_valid), np.product(np.shape(z_valid)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_valid.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(embedding[:, 0], embedding[:, 1], c=Y_valid.flatten(), s= 1, alpha = 1, cmap = plt.cm.tab10)
predictions = model.predict(X_valid)
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(embedding[:, 0], embedding[:, 1], c=np.argmax(predictions, axis=1), s= 1, alpha = 1, cmap = plt.cm.tab10)
Y_test_one_hot = tf.keras.backend.one_hot(
Y_test, num_classes
)
result = model.evaluate(X_test, Y_test_one_hot)
###Output
_____no_output_____
###Markdown
save results
###Code
# save score, valid embedding, weights, results
from tfumap.paths import MODEL_DIR, ensure_dir
save_folder = MODEL_DIR / 'semisupervised-keras' / dataset / str(labels_per_class) / datestring
ensure_dir(save_folder)
###Output
_____no_output_____
###Markdown
save weights
###Code
encoder = tf.keras.models.Model(
[model.inputs[0]], [model.get_layer('z').output]
)
encoder.save_weights((save_folder / "encoder").as_posix())
classifier = tf.keras.models.Model(
[tf.keras.Input(tensor=model.get_layer('weight_normalization').input)], [model.outputs[0]]
)
print([i.name for i in classifier.layers])
classifier.save_weights((save_folder / "classifier").as_posix())
###Output
_____no_output_____
###Markdown
save score
###Code
Y_test_one_hot = tf.keras.backend.one_hot(
Y_test, num_classes
)
result = model.evaluate(X_test, Y_test_one_hot)
np.save(save_folder / 'test_loss.npy', result)
###Output
_____no_output_____
###Markdown
save embedding
###Code
z = encoder.predict(X_train)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
np.save(save_folder / 'train_embedding.npy', embedding)
###Output
_____no_output_____
###Markdown
save results
###Code
import pickle
with open(save_folder / 'history.pickle', 'wb') as file_pi:
pickle.dump(history.history, file_pi)
###Output
_____no_output_____ |
notebooks/2_convert_matches.ipynb | ###Markdown
Takes downloaded HTML files and converts them to dataframes -> saves to csv.
Adds season as a column
###Code
from bs4 import BeautifulSoup
import os
import pandas as pd
pagesPath = r'C:\Users\calvin\Documents\GitHub\springboard\champions_league_luck\data\raw\fbref_pages'
tablesPath = r'C:\Users\calvin\Documents\GitHub\springboard\champions_league_luck\data\raw\fbref_tables'
# Takes downloaded HTML files and converts them to dataframes -> saves to csv.
# adds season as a column
for page in os.listdir(pagesPath):
# parse the saved HTML page and pull out the first table on it
with open(f'{pagesPath}/{page}', 'r', encoding='utf8') as f:
soup = BeautifulSoup(f.read(), 'html.parser')
table = pd.read_html(str(soup))[0]
# the season string (e.g. '2018-2019') is assumed to be encoded in the file name
season = page[-14:-5]
table['season'] = season
table.to_csv(f'{tablesPath}/season{season}.csv')
###Output
_____no_output_____ |
03_Model Selection. Decision Tree vs Support Vector Machines vs Logistic Regression/03_model-selection_session_blank.ipynb | ###Markdown
03 | Model Selection. Decision Tree vs Support Vector Machines vs Logistic Regression Python + Data Science Tutorials in ↓ <a href="https://www.youtube.com/c/PythonResolver?sub_confirmation=1" >YouTube</a > Blog GitHub Author: @jsulopz Discipline to Search Solutions in Google > Apply the following steps when **looking for solutions in Google**:>> 1. **Necessity**: How to load an Excel file in Python?> 2. **Search in Google**: by keywords> - `load excel python`> - ~~how to load excel in python~~> 3. **Solution**: What's the `function()` that loads an Excel file in Python?> - A function is to programming what the atom is to physics.> - Every time you want to do something in programming> - **you will need a `function()`** to do it.> - Therefore, you must **detect parentheses `()`**> - out of all the words that you see on a website> - because they indicate the presence of a `function()`. Load the Data Load the dataset from [CIS](https://www.cis.es/cis/opencms/ES/index.html) by executing the lines of code below:> - The goal of this dataset is> - To predict the `internet_usage` of **people** (rows)> - Based on their **socio-demographic characteristics** (columns) Build & Compare Models `DecisionTreeClassifier()` Model in Python
###Code
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/7VeUPuFGJHk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
> - Build the model `model.fit()`> - And see how good it is `model.score()` `SVC()` Model in Python
###Code
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/efR1C6CvhmE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
> - Build the model `model.fit()`> - And see how good it is `model.score()` `LogisticRegression()` Model in Python
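Since this is the blank session notebook, the model cells above are intentionally left empty. As a minimal sketch (assuming the CIS data has been loaded into a dataframe with an `internet_usage` column; the CSV file name below is hypothetical), the three classifiers compared in this session could be fitted and scored with scikit-learn like this:
###Code
# Minimal sketch: fit and score the three classifiers compared in this session.
# 'internet_usage_spain.csv' is a hypothetical file name standing in for the CIS dataset.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression

df = pd.read_csv('internet_usage_spain.csv')
X = pd.get_dummies(df.drop(columns='internet_usage'), drop_first=True)
y = df['internet_usage']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

for model in [DecisionTreeClassifier(), SVC(), LogisticRegression(max_iter=1000)]:
    model.fit(X_train, y_train)
    print(type(model).__name__, model.score(X_test, y_test))
###Output
_____no_output_____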
###Code
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/yIYKR4sgzI8" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____ |
notebooks/twikwak17_phase1.ipynb | ###Markdown
Monitoring memory in python
###Code
import psutil
# psutil.virtual_memory?
vmem = psutil.virtual_memory()
vmem
total_mem_bytes = vmem.total
total_mem_bytes
avail_mem_bytes = vmem.available
avail_mem_bytes
###Output
_____no_output_____
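###Markdown
A small convenience helper (a sketch, not part of the original notebook) for reading the same quantity in gigabytes:
###Code
# Sketch: available memory in GB, using the same psutil call as above
def avail_mem_gb():
    return psutil.virtual_memory().available / 1e9

avail_mem_gb()
###Output
_____no_output_____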
###Markdown
File probing
###Code
import gzip
def probe_file(fpath, lines_to_probe=100):
with gzip.open(fpath, 'rt') as f:
for i, line in enumerate(f):
print(line)
if i==lines_to_probe:
return
###Output
_____no_output_____
###Markdown
Reading lines
###Code
from ezenum import StringEnum
LINETYPE = StringEnum(['Time', 'User', 'Content', 'Other'])
def interpret_line(decoded_line):
try:
char1 = decoded_line[1]
if char1 == '\t':
char0 = decoded_line[0]
if char0 == 'T':
return LINETYPE.Time, decoded_line[2:-1]
elif char0 == 'U':
return LINETYPE.User, decoded_line[21:-1]
elif char0 == 'W':
return LINETYPE.Content, decoded_line[2:-1]
else:
return LINETYPE.Other, decoded_line[2:-1]
else:
return LINETYPE.Other, ''
except IndexError:
return LINETYPE.Other, ''
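# Expected behaviour on (hypothetical) lines of the twitter7 dump format, where each tweet
# is stored as a block of "T<tab>timestamp", "U<tab>profile URL" and "W<tab>text" lines:
#   interpret_line("T\t2009-06-01 00:00:00\n")            -> (LINETYPE.Time, "2009-06-01 00:00:00")
#   interpret_line("U\thttp://twitter.com/someuser\n")    -> (LINETYPE.User, "someuser")
#   interpret_line("W\tNo Post Title\n")                  -> (LINETYPE.Content, "No Post Title")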
import gzip
from time import time
NO_CONTENT = 'No Post Title'
user_to_tweets_str = {}
# user_to_tweets_count = {}
monitor_line_freq = 1000000
min_avail_mem_bytes = 500 * 1000000
report_template = (
'{:.2f} min running | {} lines processed | ~ {} tweets processed |'
' {} tpm | {} available memory'
)
def merge_user_tweets_in_file(fpath, line_limit):
most_recent_user = None
starting_available_mem = psutil.virtual_memory().available
start_time = time()
with gzip.open(fpath, 'rt') as textf:
for i, line in enumerate(textf):
try:
# print(line)
# print(interpret_line(line))
ltype, lcontent = interpret_line(line)
if ltype == LINETYPE.User:
most_recent_user = lcontent
elif ltype == LINETYPE.Content and lcontent != NO_CONTENT:
user_to_tweets_str[most_recent_user] = user_to_tweets_str.get(
most_recent_user, '') + ' ' + lcontent
# user_to_tweets_count[most_recent_user] = user_to_tweets_count.get(most_recent_user, 0) + 1
except Exception as e:
print(line)
print(interpret_line(line))
raise e
if i % monitor_line_freq == 0:
av_mem = psutil.virtual_memory().available
seconds_running = time() - start_time
report = report_template.format(
seconds_running / 60,
i,
i / 4,
(i / 4) / (seconds_running / 60),
av_mem,
)
print(report, end='\r')
if av_mem < min_avail_mem_bytes:
return
# if i >= line_limit:
# break
source1_fpath = '/home/shaypalachy/data/twitter7/tweets2009-06.txt.gz'
sample_fpath = '/home/shaypalachy/data/twitter7/twitter7_sample.txt.gz'
merge_user_tweets_in_file(source1_fpath, 10000)
# user_to_tweets_str
len(user_to_tweets_str)
user_to_tweets_str['poluakerford']
# user_to_tweets_count
import sys
sys.getsizeof(user_to_tweets_str)
# sys.getsizeof(user_to_tweets_count)  # user_to_tweets_count is commented out above
###Output
_____no_output_____
###Markdown
Run finished code
###Code
from twikwak17.sample import sample_twitter7
sample_twitter7(num_tweets=50000, source_fpath=source1_fpath)
from twikwak17.phases.phase1 import merge_user_tweets_in_file
sample_fpath = '/home/shaypalachy/data/twitter7/twitter7_sample.txt.gz'
merge_user_tweets_in_file(sample_fpath)
probe_file('/home/shaypalachy/data/twitter7/twitter7_sample_p1dump_0.txt.gz', 100)
###Output
_____no_output_____ |
Lightcurve/Analyze light curves chunk by chunk - an example.ipynb | ###Markdown
R.m.s. - intensity diagramThis diagram is used to characterize the variability of black hole binaries and AGN (see e.g. Plant et al., arXiv:1404.7498; McHardy 2010 2010LNP...794..203M for a review).In Stingray it is very easy to calculate. Setup: simulate a light curve with a variable rms and rateWe simulate a light curve with powerlaw variability, and then we rescaleit so that it has increasing flux and r.m.s. variability.
###Code
import numpy as np
import matplotlib.pyplot as plt
from stingray import Lightcurve
from stingray.simulator.simulator import Simulator
from scipy.ndimage.filters import gaussian_filter1d
from stingray.utils import baseline_als
from scipy.interpolate import interp1d
np.random.seed(1034232)
# Simulate a light curve with increasing variability and flux
length = 10000
dt = 0.1
times = np.arange(0, length, dt)
# Create a light curve with powerlaw variability (index 1),
# and smooth it to eliminate some Gaussian noise. We will simulate proper
# noise with the `np.random.poisson` function.
# Both should not be used together, because they alter the noise properties.
sim = Simulator(dt=dt, N=int(length/dt), mean=50, rms=0.4)
counts_cont = sim.simulate(1).counts
counts_cont_init = gaussian_filter1d(counts_cont, 200)
# ---------------------
# Renormalize so that the light curve has increasing flux and r.m.s.
# variability.
# ---------------------
# The baseline function cannot be used with too large arrays.
# Since it's just an approximation, we will just use one every
# ten array elements to calculate the baseline
mask = np.zeros_like(times, dtype=bool)
mask[::10] = True
print (counts_cont_init[mask])
baseline = baseline_als(times[mask], counts_cont_init[mask], 1e10, 0.001)
base_func = interp1d(times[mask], baseline, bounds_error=False, fill_value='extrapolate')
counts_cont = counts_cont_init - base_func(times)
counts_cont -= np.min(counts_cont)
counts_cont += 1
counts_cont *= times * 0.003
# counts_cont += 500
counts_cont += 500
# Finally, Poissonize it!
counts = np.random.poisson(counts_cont)
plt.plot(times, counts_cont, zorder=10, label='Continuous light curve')
plt.plot(times, counts, label='Final light curve')
plt.legend()
###Output
_____no_output_____
###Markdown
R.m.s. - intensity diagramWe use the `analyze_lc_chunks` method in `Lightcurve` to calculate two quantities: the rate and the excess variance, normalized as $F_{\rm var}$ (Vaughan et al. 2010).`analyze_lc_chunks()` requires an input function that just accepts a light curve. Therefore, we create the two functions `rate` and `excvar` that wrap the existing functionality in Stingray.Then, we plot the results.Done!
###Code
# This function can be found in stingray.utils
def excess_variance(lc, normalization='fvar'):
"""Calculate the excess variance.
Vaughan et al. 2003, MNRAS 345, 1271 give three measurements of source
intrinsic variance: the *excess variance*, defined as
.. math:: \sigma_{XS} = S^2 - \overline{\sigma_{err}^2}
the *normalized excess variance*, defined as
.. math:: \sigma_{NXS} = \sigma_{XS} / \overline{x^2}
and the *fractional mean square variability amplitude*, or
:math:`F_{var}`, defined as
.. math:: F_{var} = \sqrt{\dfrac{\sigma_{XS}}{\overline{x^2}}}
Parameters
----------
lc : a :class:`Lightcurve` object
normalization : str
if 'fvar', return the fractional mean square variability :math:`F_{var}`.
If 'none', return the unnormalized excess variance variance
:math:`\sigma_{XS}`. If 'norm_xs', return the normalized excess variance
:math:`\sigma_{XS}`
Returns
-------
var_xs : float
var_xs_err : float
"""
lc_mean_var = np.mean(lc.counts_err ** 2)
lc_actual_var = np.var(lc.counts)
var_xs = lc_actual_var - lc_mean_var
mean_lc = np.mean(lc.counts)
mean_ctvar = mean_lc ** 2
var_nxs = var_xs / mean_lc ** 2
fvar = np.sqrt(var_xs / mean_ctvar)
N = len(lc.counts)
var_nxs_err_A = np.sqrt(2 / N) * lc_mean_var / mean_lc ** 2
var_nxs_err_B = np.sqrt(mean_lc ** 2 / N) * 2 * fvar / mean_lc
var_nxs_err = np.sqrt(var_nxs_err_A ** 2 + var_nxs_err_B ** 2)
fvar_err = var_nxs_err / (2 * fvar)
if normalization == 'fvar':
return fvar, fvar_err
elif normalization == 'norm_xs':
return var_nxs, var_nxs_err
elif normalization == 'none' or normalization is None:
return var_xs, var_nxs_err * mean_lc **2
def fvar_fun(lc):
return excess_variance(lc, normalization='fvar')
def norm_exc_var_fun(lc):
return excess_variance(lc, normalization='norm_xs')
def exc_var_fun(lc):
return excess_variance(lc, normalization='none')
def rate_fun(lc):
return lc.meancounts, np.std(lc.counts)
lc = Lightcurve(times, counts, gti=[[-0.5*dt, length - 0.5*dt]], dt=dt)
start, stop, res = lc.analyze_lc_chunks(1000, np.var)
var = res
start, stop, res = lc.analyze_lc_chunks(1000, rate_fun)
rate, rate_err = res
start, stop, res = lc.analyze_lc_chunks(1000, fvar_fun)
fvar, fvar_err = res
start, stop, res = lc.analyze_lc_chunks(1000, exc_var_fun)
evar, evar_err = res
start, stop, res = lc.analyze_lc_chunks(1000, norm_exc_var_fun)
nvar, nvar_err = res
plt.errorbar(rate, fvar, xerr=rate_err, yerr=fvar_err, fmt='none')
plt.loglog()
plt.xlabel('Count rate')
plt.ylabel(r'$F_{\rm var}$')
tmean = (start + stop)/2
from matplotlib.gridspec import GridSpec
plt.figure(figsize=(15, 20))
gs = GridSpec(5, 1)
ax_lc = plt.subplot(gs[0])
ax_mean = plt.subplot(gs[1], sharex=ax_lc)
ax_evar = plt.subplot(gs[2], sharex=ax_lc)
ax_nvar = plt.subplot(gs[3], sharex=ax_lc)
ax_fvar = plt.subplot(gs[4], sharex=ax_lc)
ax_lc.plot(lc.time, lc.counts)
ax_lc.set_ylabel('Counts')
ax_mean.scatter(tmean, rate)
ax_mean.set_ylabel('Counts')
ax_evar.errorbar(tmean, evar, yerr=evar_err, fmt='o')
ax_evar.set_ylabel(r'$\sigma_{XS}$')
ax_fvar.errorbar(tmean, fvar, yerr=fvar_err, fmt='o')
ax_fvar.set_ylabel(r'$F_{var}$')
ax_nvar.errorbar(tmean, nvar, yerr=nvar_err, fmt='o')
ax_nvar.set_ylabel(r'$\sigma_{NXS}$')
###Output
_____no_output_____ |
colab_drive_01.ipynb | ###Markdown
Preface This notebook is a detailed, step-by-step version of the Python scripts from the appendix of [Google Colab/Drive Integration: Magic Commands Edition](https://ggcs.io/2020/12/04/google-colab-drive-01/). - Website: ごたごた気流調査所 https://ggcs.io - GitHub: Gota Gota Current Survey https://github.com/ggcurrs/gota2-observatory - Version 1.0.0 - Created: 2020-12-04 - Updated: 0000-00-00 Mount Your Google Drive on Google Colaboratory Your Google Drive (the Drive) will be mounted under /content/drive. *See also* [*External data: Local Files, Drive, Sheets, and Cloud Storage*](https://colab.research.google.com/notebooks/io.ipynb)
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
Using the Drive from Colab You can right-click the file/directory name in the File pane on the left and use the *Copy path* command from there.
###Code
dir_path = '/content/drive/MyDrive/my_project/world_domination_project'
###Output
_____no_output_____
###Markdown
Write on/Read from the Drive from Python/Colab
###Code
msg_to_earthlians = 'Hello, World! All your base are belong to us!'
file_path = dir_path + '/msg.txt'
with open(file_path, 'w') as f:
f.write(msg_to_earthlians)
with open(file_path, 'r') as f:
msg_read = f.read()
print(msg_read)
###Output
Hello, World! All your base are belong to us!
###Markdown
File/Directory Manipulation from Shell/Colab *See also* [*IPython Magic Commands*](https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/01.03-Magic-Commands.ipynb#scrollTo=bFD7pjapu7St)
###Code
%less /content/drive/MyDrive/my_project/world_domination_project/msg.txt
%mkdir /content/drive/MyDrive/my_project/world_domination_project/sub_dir
%mv /content/drive/MyDrive/my_project/world_domination_project/msg.txt \
/content/drive/MyDrive/my_project/world_domination_project/sub_dir
%rm /content/drive/MyDrive/my_project/world_domination_project/sub_dir/msg.txt
%rm -r /content/drive/MyDrive/my_project/world_domination_project/sub_dir
###Output
_____no_output_____ |
advanced-machine-learning-and-signal-processing/Week 3/Unsupervised Learning (Programming Assignment).ipynb | ###Markdown
This is the third assignment for the Coursera course "Advanced Machine Learning and Signal Processing". Just execute all cells one after the other and you are done - just note that in the last one you must update your email address (the one you've used for coursera) and obtain a submission token, which you get from the programming assignment directly on coursera. Please fill in the sections labelled with "YOUR_CODE_GOES_HERE". This notebook is designed to run in an IBM Watson Studio default runtime (NOT the Watson Studio Apache Spark Runtime, as the default runtime with 1 vCPU is free of charge). Therefore, we install Apache Spark in local mode for test purposes only. Please don't use it in production. In case you are facing issues, please read the following two documents first: https://github.com/IBM/skillsnetwork/wiki/Environment-Setup and https://github.com/IBM/skillsnetwork/wiki/FAQ Then, please feel free to ask: https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all Please make sure to follow the guidelines before asking a question: https://github.com/IBM/skillsnetwork/wiki/FAQ#im-feeling-lost-and-confused-please-help-me If running outside Watson Studio, this should work as well. In case you are running in an Apache Spark context outside Watson Studio, please remove the Apache Spark setup in the first notebook cells.
###Code
from IPython.display import Markdown, display
def printmd(string):
display(Markdown('# <span style="color:red">' + string + "</span>"))
if "sc" in locals() or "sc" in globals():
printmd(
"<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>"
)
!pip install pyspark==2.4.5
try:
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
except ImportError as e:
printmd(
"<<<<<!!!!! Please restart your kernel after installing Apache Spark !!!!!>>>>>"
)
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
spark = SparkSession.builder.getOrCreate()
!wget https://github.com/IBM/coursera/raw/master/coursera_ml/a2.parquet
###Output
_____no_output_____
###Markdown
Now it’s time to have a look at the recorded sensor data. You should see data similar to the one exemplified below….
###Code
df = spark.read.load("a2.parquet")
df.createOrReplaceTempView("df")
spark.sql("SELECT * from df").show()
###Output
_____no_output_____
###Markdown
Let’s check if we have balanced classes – this means that we have roughly the same number of examples for each class we want to predict. This is important for classification but also helpful for clustering
###Code
spark.sql("SELECT count(class), class from df group by class").show()
###Output
_____no_output_____
###Markdown
Let's create a VectorAssembler which consumes columns X, Y and Z and produces a column “features”
###Code
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=["X", "Y", "Z"], outputCol="features")
###Output
_____no_output_____
###Markdown
Please instantiate a clustering algorithm from the SparkML package and assign it to the clust variable. Here we don't need to take care of the "CLASS" column since we are in unsupervised learning mode – so let's pretend to not even have the "CLASS" column for now – but it will come in very handy later when assessing the clustering performance. PLEASE NOTE – IN REAL-WORLD SCENARIOS THERE IS NO CLASS COLUMN – THEREFORE YOU CAN'T ASSESS CLASSIFICATION PERFORMANCE USING THIS COLUMN
###Code
from pyspark.ml.clustering import GaussianMixture
clust = GaussianMixture(featuresCol="features").setK(2).setSeed(42)
###Output
_____no_output_____
###Markdown
Let’s train...
###Code
from pyspark.ml import Pipeline
pipeline = Pipeline(stages=[vectorAssembler, clust])
model = pipeline.fit(df)
###Output
_____no_output_____
###Markdown
...and evaluate...
###Code
prediction = model.transform(df)
prediction.show()
prediction.createOrReplaceTempView("prediction")
spark.sql(
"""
select max(correct)/max(total) as accuracy from (
select sum(correct) as correct, count(correct) as total from (
select case when class != prediction then 1 else 0 end as correct from prediction
)
union
select sum(correct) as correct, count(correct) as total from (
select case when class = prediction then 1 else 0 end as correct from prediction
)
)
"""
).rdd.map(lambda row: row.accuracy).collect()[0]
###Output
_____no_output_____
###Markdown
If you reached at least 55% accuracy you are fine to submit your predictions to the grader. Otherwise please experiment with the parameter settings of your clustering algorithm, use a different algorithm, or just re-record your data and try to obtain better accuracy. In case you are stuck, please use the Coursera Discussion Forum. Please note again – in a real-world scenario there is no way of doing this – since there is no class label in your data. Please have a look at this further reading on clustering performance evaluation: https://en.wikipedia.org/wiki/Cluster_analysis#Evaluation_and_assessment
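If you want to try a different algorithm, the sketch below swaps the Gaussian mixture for KMeans from the same SparkML package; k=2 and the "features" column follow the pipeline above, while the seed and everything else are arbitrary choices for you to experiment with.
###Code
from pyspark.ml.clustering import KMeans

# Sketch of an alternative clusterer, reusing the same VectorAssembler and Pipeline.
kmeans = KMeans(featuresCol="features", k=2, seed=1)
kmeans_model = Pipeline(stages=[vectorAssembler, kmeans]).fit(df)
kmeans_model.transform(df).show(5)
###Output
_____no_output_____
###Markdown
Now we package and submit the predictions to the grader.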
###Code
!rm -f rklib.py
!wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py
!rm -Rf a2_m3.json
prediction = prediction.repartition(1)
prediction.write.json("a2_m3.json")
import os
import zipfile
def zipdir(path, ziph):
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
zipf = zipfile.ZipFile("a2_m3.json.zip", "w", zipfile.ZIP_DEFLATED)
zipdir("a2_m3.json", zipf)
zipf.close()
!base64 a2_m3.json.zip > a2_m3.json.zip.base64
from rklib import submit
key = "pPfm62VXEeiJOBL0dhxPkA"
part = "EOTMs"
email = "[email protected]"
token = "9oxxslejUNoFaLMg"
with open("a2_m3.json.zip.base64", "r") as myfile:
data = myfile.read()
submit(email, token, key, part, [part], data)
###Output
_____no_output_____ |
Web_Mining_CSE3024/Index_Compression_Lab_5/Index_Construction.ipynb | ###Markdown
Inverted Indexing and Index CompressionBuild the inverted index for the following documents:* ID1 : Selenium is a portable framework for testing web applications* ID2 : Beautiful Soup is useful for web scraping* ID3: It is a python package for parsing the pagesPerform Index Compression for the integer values in the inverted index (duplicates to be eliminated) using Elias delta coding and variable byte scheme.
###Code
# Import statements
import math
import re
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# Documents list
documents = [
"Selenium is a portable framework for testing web applications",
"Beautiful Soup is useful for web scraping",
"It is a python package for parsing the pages"
]
###Output
_____no_output_____
###Markdown
1. Inverted Index Construction 1.1 Pre-processingRefer to this link for more details : [Text Preprocessing Reference](https://medium.com/@datamonsters/text-preprocessing-in-python-steps-tools-and-examples-bf025f872908)
###Code
def preprocess(text) :
"""
Given a text, we pre-process and return an array of the words from the text.
"""
s = text
# Convert to lower case
s = s.lower()
# Removing numbers and other numerical data
    # We substitute all the occurrences of numbers with an empty string, thereby effectively removing them.
    s = re.sub(r'\d+', '', s)
    # Remove punctuation signs
    s = s.translate(str.maketrans('', '', string.punctuation))
# Trim the leading and trailing spaces
s = s.strip()
# Tokenize the text
words = word_tokenize(s)
# Stop Word Removal
stop_words = set(stopwords.words('english'))
words = [word for word in words if word not in stop_words]
# Return the word list
return words
###Output
_____no_output_____
###Markdown
1.2 Find Occurance Function
###Code
def findOccurance(text, word) :
"""
Given a text and the word to be found, we partially pre-process the text and then return the count of occurances of the word,
and the positions in the text where they occur.
"""
# Split the text into tokens and remove the punctuation signs and convert to lower case
# This is to find the position of the words to have in the inverted index
    text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
text_words = text.strip().split()
word_count = 0
word_positions = []
for i in range(len(text_words)) :
if word == text_words[i] :
word_count += 1
word_positions.append(i)
return (word_count, word_positions)
###Output
_____no_output_____
###Markdown
1.3 Inverted Indexing
###Code
inverted_index = {}
# Process each of the documnet
for (i, doc) in enumerate(documents) :
# Pre-Processing of each individual document
words = preprocess(doc)
# Add the words into the inverted index
for word in words :
# Create an entry for the word if one does not exist
if word not in inverted_index :
inverted_index[word] = []
# Find all the occurances of the word in the doc.
occurance_count, occurance_pos_list = findOccurance(doc, word)
# Add these details into the inverted index
inverted_index[word].append(((i+1), occurance_count, occurance_pos_list))
###Output
_____no_output_____
###Markdown
Format for the inverted index is :* inverted index :```python{ word : [ (document_id, number_of_occurances_in_document, [offset_of_occurances]), ... ], ...}```
###Code
print('Inverted Index : ')
for item in inverted_index.items() :
print(item)
###Output
Inverted Index :
('selenium', [(1, 1, [0])])
('portable', [(1, 1, [3])])
('framework', [(1, 1, [4])])
('testing', [(1, 1, [6])])
('web', [(1, 1, [7]), (2, 1, [5])])
('applications', [(1, 1, [8])])
('beautiful', [(2, 1, [0])])
('soup', [(2, 1, [1])])
('useful', [(2, 1, [3])])
('scraping', [(2, 1, [6])])
('python', [(3, 1, [3])])
('package', [(3, 1, [4])])
('parsing', [(3, 1, [6])])
('pages', [(3, 1, [8])])
###Markdown
2. Index Compression For every unique number in the index, we create a map from the number to an encoded version that occupies less space, thereby achieving compression. 2.1 Binary Conversion
###Code
def binary(n) :
"""
Given an integer number returns the equivalent binary string.
"""
# Convert to binary string
num = bin(n)
# Remove the `0b` which is present in the front of the string
num = num[2:]
return num
###Output
_____no_output_____
###Markdown
2.2 Elias Gamma Encoding
###Code
def eliasGammaEncoding(n) :
"""
Given an integer number `n`, we encode the number using the `Elias Gamma Encoding` scheme, and return the compressed value as a string.
"""
# Zero is already encoded
if n == 0 :
return "0"
# Find the binary value of number
num = binary(n)
# Prepend the value with (length-1) zeros
num = ('0' * (len(num) - 1)) + num
return num
###Output
_____no_output_____
###Markdown
2.3 Elias Delta Encoding
###Code
def eliasDeltaEncoding(n) :
"""
Given an integer number `n`, we encode the number using the `Elias Delta Encoding` scheme, and return the compressed value as a string.
"""
# Zero is already encoded
if n == 0 :
return "0"
# Find the gamma code for (1 + log2(n))
num1 = 1 + int(math.log2(n))
num1 = eliasGammaEncoding(num1)
# Number in binary form after removing the MSB
num2 = binary(n)
num2 = str(num2)[1:]
# Combine the gamma code and the other code value
num = num1 + num2
return num
###Output
_____no_output_____
###Markdown
2.4 Variable Byte Encoding Scheme
###Code
def variableByteEncoding(n) :
"""
Given an integer number `n`, we encode the number using the `Variable Byte Encoding` scheme, and return the compressed value as a string.
"""
# Convert the number into binary form
s = binary(n)
result = ""
while len(s) > 0 :
# Get the term and update the binary string
if len(s) > 7 :
term = s[-7:]
s = s[:-7]
else :
term = s
s = ""
term = ("0" * (7 - len(term))) + term
if len(result) == 0 :
result = term + "0"
else :
result = term + "1" + result
return result
###Output
_____no_output_____
###Markdown
2.5 Index Compression Function
###Code
def indexCompression(inverted_index, encoding_scheme) :
"""
Given an inverted index, we perform compression for all the integers in the inverted index and return the encoding map.
"""
compression_map = {}
for word_indices in inverted_index.values() :
for word_index in word_indices :
# Prepare an array to have all the numbers involved in this
i, count, positions = word_index
arr = [i, count] + positions
# For each number compute and store the elias delta encoded value if not already present
for n in arr :
if n not in compression_map :
if encoding_scheme == 'ELIAS_DELTA' :
compression_map[n] = eliasDeltaEncoding(n)
elif encoding_scheme == 'VARIABLE_BYTE' :
compression_map[n] = variableByteEncoding(n)
return compression_map
###Output
_____no_output_____
###Markdown
2.6 Index Compression By Elias DeltaWe perform compression for all the numbers using `Elias Delta` encoding scheme in the inverted index created in `Section 1`
###Code
elias_delta_compression_map = indexCompression(inverted_index, 'ELIAS_DELTA')
print("Elias Delta Encoding Map :")
for item in elias_delta_compression_map.items() :
print(item)
###Output
Elias Delta Encoding Map :
(1, '1')
(0, '0')
(3, '0101')
(4, '01100')
(6, '01110')
(7, '01111')
(2, '0100')
(5, '01101')
(8, '00100000')
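###Markdown
The map above can now be applied to serialize a posting list. The helper below is only a sketch (`compress_posting` is not required by the lab); it simply looks up every integer of a posting (document ID, occurrence count, positions) in the encoding map.
###Code
# Sketch : encode one word's posting list using the Elias delta map computed above
def compress_posting(word, compression_map) :
    compressed = []
    for (doc_id, occ_count, positions) in inverted_index[word] :
        entry = [compression_map[doc_id], compression_map[occ_count]] + [compression_map[p] for p in positions]
        compressed.append(entry)
    return compressed

print(compress_posting('web', elias_delta_compression_map))
###Output
_____no_output_____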
###Markdown
2.7 Index Compression By Variable Byte EncodingWe perform compression for all the numbers using `Variable Byte` encoding scheme in the inverted index created in `Section 1`
###Code
variable_byte_compression_map = indexCompression(inverted_index, 'VARIABLE_BYTE')
print("Variable Byte Encoding Map :")
for item in variable_byte_compression_map.items() :
print(item)
###Output
Variable Byte Encoding Map :
(1, '00000010')
(0, '00000000')
(3, '00000110')
(4, '00001000')
(6, '00001100')
(7, '00001110')
(2, '00000100')
(5, '00001010')
(8, '00010000')
|
QuantumComputing/Qiskit/09_Discrete_Optimization_and_Ensemble_Learning.ipynb | ###Markdown
Any learning algorithm will always have strengths and weaknesses: a single model is unlikely to fit every possible scenario. Ensembles combine multiple models to achieve higher generalization performance than any of the constituent models is capable of. How do we assemble the weak learners? We can use some sequential heuristics. For instance, given the current collection of models, we can add one more based on where that particular model performs well. Alternatively, we can look at all the correlations of the predictions between all models, and optimize for the most uncorrelated predictors. Since the latter is a global approach, it naturally maps to a quantum computer. But first, let's take a closer look at loss functions and regularization, two key concepts in machine learning. Loss Functions and Regularization If you can solve a problem by a classical computer -- let that be a laptop or a massive GPU cluster -- there is little value in solving it by a quantum computer that costs ten million dollars. The interesting question in quantum machine learning is whether there are problems in machine learning and AI that fit quantum computers naturally, but are challenging on classical hardware. This, however, requires a good understanding of both machine learning and contemporary quantum computers. In this course, we primarily focus on the second aspect, since there is no shortage of educational material on classical machine learning. However, it is worth spending a few minutes going through some basics. Let us take a look at the easiest possible problem: the data points split into two, easily distinguishable sets. We randomly generate this data set:
###Code
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
c1 = np.random.rand(50, 2)/5
c2 = (-0.6, 0.5) + np.random.rand(50, 2)/5
data = np.concatenate((c1, c2))
labels = np.array([0] * 50 + [1] *50)
plt.figure(figsize=(6, 6))
plt.subplot(111, xticks=[], yticks=[])
plt.scatter(data[:50, 0], data[:50, 1], color='navy')
plt.scatter(data[50:, 0], data[50:, 1], color='c')
###Output
_____no_output_____
###Markdown
Let's shuffle the data set into a training set that we are going to optimize over (2/3 of the data), and a test set where we estimate our generalization performance.
###Code
idx = np.arange(len(labels))
np.random.shuffle(idx)
# train on a random 2/3 and test on the remaining 1/3
idx_train = idx[:2*len(idx)//3]
idx_test = idx[2*len(idx)//3:]
X_train = data[idx_train]
X_test = data[idx_test]
y_train = labels[idx_train]
y_test = labels[idx_test]
###Output
_____no_output_____
###Markdown
We will use the package `scikit-learn` to train various machine learning models.
###Code
import sklearn
import sklearn.metrics
metric = sklearn.metrics.accuracy_score
###Output
_____no_output_____
###Markdown
Let's train a perceptron, which has a linear loss function $\frac{1}{N}\sum_{i=1}^N |h(x_i)-y_i)|$:
###Code
from sklearn.linear_model import Perceptron
model_1 = Perceptron(max_iter=1000, tol=1e-3)
model_1.fit(X_train, y_train)
print('accuracy (train): %5.2f'%(metric(y_train, model_1.predict(X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, model_1.predict(X_test))))
###Output
accuracy (train): 1.00
accuracy (test): 1.00
###Markdown
It does a great job. It is a linear model, meaning its decision surface is a plane. Our dataset is separable by a plane, so let's try another linear model, but this time a support vector machine. If you eyeball our dataset, you will see that to define the separation between the two classes, actually only a few points close to the margin are relevant. These are called support vectors, and support vector machines aim to find them. The objective function measures the loss and adds a regularization term weighted by the hyperparameter $C$, which penalizes the objective for the number of support vectors:
###Code
from sklearn.svm import SVC
model_2 = SVC(kernel='linear', C=10)
model_2.fit(X_train, y_train)
print('accuracy (train): %5.2f'%(metric(y_train, model_2.predict(X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, model_2.predict(X_test))))
print('Number of support vectors:', sum(model_2.n_support_))
###Output
accuracy (train): 1.00
accuracy (test): 1.00
Number of support vectors: 2
###Markdown
It picks only two datapoints out of the hundred. Let's change the hyperparameter to reduce the penalty:
###Code
model_2 = SVC(kernel='linear', C=0.01)
model_2.fit(X_train, y_train)
print('accuracy (train): %5.2f'%(metric(y_train, model_2.predict(X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, model_2.predict(X_test))))
print('Number of support vectors:', sum(model_2.n_support_))
###Output
accuracy (train): 0.53
accuracy (test): 0.44
Number of support vectors: 62
###Markdown
You can see that the model gets confused by using too many datapoints in the final classifier. This is one example where regularization helps. Ensemble methodsEnsembles yield better results when there is considerable diversity among the base classifiers. If diversity is sufficient, base classifiers make different errors, and a strategic combination may reduce the total error, ideally improving generalization performance. A constituent model in an ensemble is also called a base classifier or weak learner, and the composite model a strong learner.The generic procedure of ensemble methods has two steps. First, develop a set of base classifiers from the training data. Second, combine them to form the ensemble. In the simplest combination, the base learners vote, and the label prediction is based on majority. More involved methods weigh the votes of the base learners. Let us import some packages and define our figure of merit as accuracy in a balanced dataset.
###Code
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.metrics
import sklearn.preprocessing  # used below for StandardScaler and Normalizer
%matplotlib inline
metric = sklearn.metrics.accuracy_score
###Output
_____no_output_____
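###Markdown
Before generating data, here is a toy illustration of the simple majority vote described above; the ±1 predictions are made up and unrelated to the models trained later.
###Code
# Three hypothetical weak learners, each predicting ±1 labels for four samples.
toy_predictions = np.array([[ 1, -1,  1, -1],
                            [ 1,  1, -1, -1],
                            [ 1, -1, -1,  1]])
# Majority vote: the sign of the column-wise sum of the votes.
print(np.sign(toy_predictions.sum(axis=0)))
###Output
_____no_output_____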
###Markdown
We generate a random dataset of two classes that form concentric circles:
###Code
np.random.seed(0)
data, labels = sklearn.datasets.make_circles()
idx = np.arange(len(labels))
np.random.shuffle(idx)
# train on a random 2/3 and test on the remaining 1/3
idx_train = idx[:2*len(idx)//3]
idx_test = idx[2*len(idx)//3:]
X_train = data[idx_train]
X_test = data[idx_test]
y_train = 2 * labels[idx_train] - 1 # binary -> spin
y_test = 2 * labels[idx_test] - 1
scaler = sklearn.preprocessing.StandardScaler()
normalizer = sklearn.preprocessing.Normalizer()
X_train = scaler.fit_transform(X_train)
X_train = normalizer.fit_transform(X_train)
# Note: re-fitting on the test set leaks test statistics; scaler.transform(X_test)
# and normalizer.transform(X_test) would be the standard choice.
X_test = scaler.fit_transform(X_test)
X_test = normalizer.fit_transform(X_test)
plt.figure(figsize=(6, 6))
plt.subplot(111, xticks=[], yticks=[])
plt.scatter(data[labels == 0, 0], data[labels == 0, 1], color='navy')
plt.scatter(data[labels == 1, 0], data[labels == 1, 1], color='c')
###Output
_____no_output_____
###Markdown
Let's train a perceptron:
###Code
from sklearn.linear_model import Perceptron
model_1 = Perceptron(max_iter=1000, tol=1e-3)
model_1.fit(X_train, y_train)
print('accuracy (train): %5.2f'%(metric(y_train, model_1.predict(X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, model_1.predict(X_test))))
###Output
accuracy (train): 0.44
accuracy (test): 0.65
###Markdown
Since its decision surface is linear, we get a poor accuracy. Would a support vector machine with a nonlinear kernel fare better?
###Code
from sklearn.svm import SVC
model_2 = SVC(kernel='rbf', gamma='auto')
model_2.fit(X_train, y_train)
print('accuracy (train): %5.2f'%(metric(y_train, model_2.predict(X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, model_2.predict(X_test))))
###Output
accuracy (train): 0.64
accuracy (test): 0.24
###Markdown
It performs better on the training set, but at the cost of extremely poor generalization. Boosting is an ensemble method that explicitly seeks models that complement one another. The variation between boosting algorithms is how they combine weak learners. Adaptive boosting (AdaBoost) is a popular method that combines the weak learners in a sequential manner based on their individual accuracies. It has a convex objective function that does not penalize for complexity: it is likely to include all available weak learners in the final ensemble. Let's train AdaBoost with a few weak learners:
###Code
from sklearn.ensemble import AdaBoostClassifier
model_3 = AdaBoostClassifier(n_estimators=3)
model_3.fit(X_train, y_train)
print('accuracy (train): %5.2f'%(metric(y_train, model_3.predict(X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, model_3.predict(X_test))))
###Output
accuracy (train): 0.65
accuracy (test): 0.29
###Markdown
Its performance is marginally better than that of the SVM. QBoost The idea of QBoost is that optimization on a quantum computer is not constrained to convex objective functions, therefore we can add arbitrary penalty terms and rephrase our objective [[1](1)]. QBoost solves the following problem:$$\mathrm{argmin}_{w} \left(\frac{1}{N}\sum_{i=1}^{N}\left(\sum_{k=1}^{K}w_kh_k(x_i)-y_i\right)^2+\lambda\|w\|_0\right),$$where $h_k(x_i)$ is the prediction of weak learner $k$ for training instance $i$. The weights in this formulation are binary, so this objective function already maps to an Ising model. The regularization in the $l_0$ norm ensures sparsity, and it is not the kind of regularization we would consider classically: it is hard to optimize with this term on a digital computer.Let us expand the quadratic part of the objective:$$\mathrm{argmin}_{w} \left(\frac{1}{N}\sum_{i=1}^{N}\left( \left(\sum_{k=1}^{K} w_k h_k(x_i)\right)^{2} -2\sum_{k=1}^{K} w_k h_k(x_i)y_i + y_i^{2}\right) + \lambda \|w\|_{0}\right).$$Since $y_i^{2}$ is just a constant offset, the optimization reduces to$$\mathrm{argmin}_{w} \left(\frac{1}{N}\sum_{k=1}^{K}\sum_{l=1}^{K} w_k w_l\left(\sum_{i=1}^{N}h_k(x_i)h_l(x_i)\right) - \frac{2}{N}\sum_{k=1}^{K}w_k\sum_{i=1}^{N} h_k(x_i)y_i +\lambda \|w\|_{0} \right).$$This form shows that we consider all correlations between the predictions of the weak learners: there is a summation over $h_k(x_i)h_l(x_i)$. Since this term has a positive sign, we penalize for correlations. On the other hand, the correlation with the true label, $h_k(x_i)y_i$, has a negative sign. The regularization term remains unchanged.To run this on an annealing machine we discretize this equation, reduce the weights to single bits, and normalize the estimator by $K$ to scale with the feature data. As the weights are single bits, the regularization term becomes a summation that allows us to turn the expression into a QUBO:$$\mathrm{argmin}_{w} \sum_{k=1}^{K} \sum_{l=1}^{K} w_kw_l \sum_{i=1}^{N}\frac{1}{K^2}h_k(x_i)h_l(x_i) + \sum_{k=1}^{K}w_k \left(\lambda-2\sum_{i=1}^{N} \frac{1}{K}h_k(x_i)y_i \right), \quad w_k \in \{0,1\}.$$We split off the diagonal coefficients ($k=l$) in the left term, and since $w_k \in \{0,1\}$ and the predictions $h_k(x_i) \in\{-1,1\}$, the following holds:$$w_kw_k = w_k,\; h_k(x_i)h_k(x_i) = 1.$$Hence:$$\sum_{k=1}^{K} w_kw_k \sum_{i=1}^{N}\frac{1}{K^2}h_k(x_i)h_k(x_i) = \sum_{k=1}^{K} w_k \frac{N}{K^2}.$$This last term is effectively a fixed offset to $\lambda$:$$\mathrm{argmin}_{w} \sum_{k\neq l} w_kw_l \left(\sum_{i=1}^{N}\frac{1}{K^2}h_k(x_i)h_l(x_i)\right) + \sum_{k=1}^{K}w_k \left(\frac{N}{K^2} +\lambda-2\sum_{i=1}^{N} \frac{1}{K}h_k(x_i)y_i \right), \quad w_k \in \{0,1\}.$$The expressions between brackets are the coefficients of the QUBO. Let us consider all three models from the previous section as weak learners.
###Code
models = [model_1, model_2, model_3]
###Output
_____no_output_____
###Markdown
We calculate their predictions and set $\lambda$ to 1.
###Code
n_models = len(models)
predictions = np.array([h.predict(X_train) for h in models], dtype=np.float64)
λ = 1
###Output
_____no_output_____
###Markdown
We create the quadratic binary optimization of the objective function as expanded above. First the off-diagonal elements (see D-Wave's documentation for the sample_qubo() method):$$q_{kl} = \sum_{i=1}^{N}\frac{1}{K^2}h_k(x_i)h_l(x_i)$$
###Code
q = predictions @ predictions.T/(n_models ** 2)
###Output
_____no_output_____
###Markdown
Then the diagonal elements:$$q_{kk} =\frac{N}{K^2}+ \lambda-2\sum_{i=1}^{N} \frac{1}{K}h_k(x_i)y_i$$
###Code
qii = len(X_train) / (n_models ** 2) + λ - 2 * predictions @ y_train/(n_models)
q[np.diag_indices_from(q)] = qii
Q = {}
for i in range(n_models):
for j in range(i, n_models):
Q[(i, j)] = q[i, j]
###Output
_____no_output_____
###Markdown
We solve the quadratic binary optimization with simulated annealing and read out the optimal weights:
###Code
import dimod
sampler = dimod.SimulatedAnnealingSampler()
response = sampler.sample_qubo(Q, num_reads=10)
weights = list(response.first.sample.values())
###Output
_____no_output_____
###Markdown
We define a prediction function to help with measuring accuracy:
###Code
def predict(models, weights, X):
n_data = len(X)
T = 0
y = np.zeros(n_data)
for i, h in enumerate(models):
y0 = weights[i] * h.predict(X) # prediction of weak classifier
y += y0
T += np.sum(y0)
y = np.sign(y - T / (n_data*len(models)))
return y
print('accuracy (train): %5.2f'%(metric(y_train, predict(models, weights, X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, predict(models, weights, X_test))))
###Output
accuracy (train): 0.65
accuracy (test): 0.29
###Markdown
The accuracy coincides with that of our strongest weak learner, the AdaBoost model. Looking at the optimal weights, this is apparent:
###Code
weights
###Output
_____no_output_____
###Markdown
Only AdaBoost made it to the final ensemble. The first two models perform poorly and their predictions are correlated. Yet, if you remove regularization by setting $\lambda=0$ above, the second model also enters the ensemble, decreasing overall performance. This shows that the regularization is in fact important. Solving by QAOASince eventually our problem is just an Ising model, we can also solve it on a gate-model quantum computer by QAOA. Let us explicitly map the binary optimization to the Ising model:
###Code
h, J, offset = dimod.qubo_to_ising(Q)
###Output
_____no_output_____
###Markdown
We have to translate the Ising couplings to be suitable for solving by the QAOA routine:
###Code
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import WeightedPauliOperator as Operator
num_nodes = q.shape[0]
pauli_list = []
for i in range(num_nodes):
wp = np.zeros(num_nodes)
vp = np.zeros(num_nodes)
vp[i] = 1
pauli_list.append([h[i], Pauli(vp, wp)])
for j in range(i+1, num_nodes):
if q[i, j] != 0:
wp = np.zeros(num_nodes)
vp = np.zeros(num_nodes)
vp[i] = 1
vp[j] = 1
pauli_list.append([J[i, j], Pauli(vp, wp)])
ising_model = Operator(paulis=pauli_list)
###Output
_____no_output_____
###Markdown
Next we run the optimization:
###Code
from qiskit.aqua import get_aer_backend, QuantumInstance
from qiskit.aqua.algorithms import QAOA
from qiskit.aqua.components.optimizers import COBYLA
p = 1
optimizer = COBYLA()
qaoa = QAOA(ising_model, optimizer, p )
backend = get_aer_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend, shots=100)
result = qaoa.run(quantum_instance)
###Output
_____no_output_____
###Markdown
Finally, we extract the most likely solution:
###Code
k = np.argmax(result['eigvecs'][0])
weights = np.zeros(num_nodes)
for i in range(num_nodes):
weights[i] = k % 2
k >>= 1
###Output
_____no_output_____
###Markdown
Let's see the weights found by QAOA:
###Code
weights
###Output
_____no_output_____
###Markdown
And the final accuracy:
###Code
print('accuracy (train): %5.2f'%(metric(y_train, predict(models, weights, X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, predict(models, weights, X_test))))
###Output
accuracy (train): 0.65
accuracy (test): 0.29
|
lab05/Untitled.ipynb | ###Markdown
Pre YES-1 Secondary structure models for the most stable conformers as computed using the partition function algorithm in the absence (OFF) or presence (ON) of a 22-nucleotide DNA effector. The effector-binding site (light blue) is joined to nucleotides 10.1 and 11.1 of the hammerhead core via eight- and six-nucleotide linkers. In the ON state, most of these linker nucleotides are predicted to form an extended stem II structure (red). To the right of each model is a dot matrix plot wherein larger points reflect greater probability of base pairing. Encircled points reflect the main differences in predicted structures between the OFF (stem IV) and ON (stem II) states. Nucleotides 1 through 79 are numbered from 5′ to 3′ across the top and right of the plots. Schematic representations of the logic states of the constructs are shown in this and subsequent figures. NOT-1 Secondary structure models for the most stable conformers predicted in the absence (ON) and presence (OFF) of a 23-nucleotide effector. Dot matrix plots for the construct are presented in Supplementary Figure 5 online. AND-1 is designed to form the active hammerhead structure and self-cleave only when presented simultaneously with its two corresponding effector DNAs (DNA-7 and DNA-8). The dot matrix plot for the ON state, showing some character of the OFF states (stem IV), is depicted. Dot matrix plots for the three OFF states are presented in Supplementary Figure 7 online. OR-1 is designed to trigger self-cleavage when either effector (DNA-9 or DNA-10) or both effectors are present. Dot matrix plots for each of the four structures are presented in Supplementary Figure 7 online.
###Code
# (1) riboswitch name, (2) start and end coordinates of OBS-1 (blue region), (3) start
# and end coordinates of OBS-2 (blue region, only applicable to AND-1 and OR-1), and (4) start and end coordinates of the two red regions.
info = [('YES-1', (26, 47), (16,21), (49,54) ), # OBS1 RED1 RED2
('NOT-1', (44,66), (40,43), (74,77)), # OBS1 RED1 RED2
('AND-1', (30,45), (49,64), (16,23), (70,77)), # OBS1 OBS2 RED1 RED2
('OR-1', (27,46), (47,66), (16,26), (67,77) ) ] # OBS1 OBS2 RED1 RED2
seq = [
'GGGCGACCCUGAUGAGCUUGAGUUUAGCUCGUCACUGUCCAGGUUCAAUCAGGCGAAACGGUGAAAGCCGUAGGUUGCCC',
'GGCAGGUACAUACAGCUGAUGAGUCCCAAAUAGGACGAAACGCGACACACACCACUAAACCGUGCAGUGUUUUGCGUCCUGUAUUCCACUGC',
# assumed typo: a stray '3' was removed from the next sequence (it is not a nucleotide)
'GGGCGACCCUGAUGAGCUUGGUUUAGUAUUUACAGCUCCAUACAUGAGGUGUUAUCCCUAUGCAAGUUCGAUCAGGCGAAACGGUGAAAGCCGUAGGUUGCCCAGAGACAAU',
'GGGCGACCCUGAUGAGCUUGGUUGAGUAUUUACAGCUCCAUACAUGAGGUGUUCUCCCUACGCAAGUUCGAUCAGGCGAAACGGUGAAAGCCGUAGGUUGCCC'
]
print(len(seq[0]))
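# Example (assumes the coordinates in `info` above are 1-based and inclusive):
# slice the OBS-1 region of YES-1 out of its sequence.
yes1_obs1_start, yes1_obs1_end = info[0][1]
print(seq[0][yes1_obs1_start - 1:yes1_obs1_end])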
###Output
_____no_output_____ |
Calculations/Subsample Calculations Diabetes.ipynb | ###Markdown
Clinical Profile Calculations on JHU Diabetes Sample Steph Howson, JHU/APL, Data Scientist. This notebook calculates fields to be generated for the Clinical Profiles model. Once the values are calculated, the results will be dynamically put into the model with the fhir.resources implementation. The Clinical Profiles Python specification was built using fhir-parser. These forked GitHub repositories can be found below (currently not much was done to add desired features for Clinical Profiles in particular, but the templating captures much of the functionality needed): https://github.com/stephanie-howson/fhir-parser and https://github.com/stephanie-howson/fhir.resources The Clinical Profile Python FHIR class definition can be found at: https://github.com/stephanie-howson/fhir.resources/blob/master/fhir/resources/clinicalprofile.py Imports
###Code
import pandas as pd
import numpy as np
import scipy.stats as ss
import math
import dask.dataframe as dd
import sys
###Output
_____no_output_____
###Markdown
Reading in data from SAFE
###Code
# Want to specify dtypes for performance
demographics_DOB = ['DOB']
demographics_dtypes = {'PatientID':np.int64, 'Gender':'category','Race':'category','Ethnicity':'category'}
labs_dates = ['Ordering_datetime','Result_datetime']
labs_dtypes = {'PatientID':np.int64, 'EncounterID':np.int64, 'Result_numeric':np.float64,'Lab_Name':'category',
'Base_Name':'category','Loinc_Code':'category','LONG_COMMON_NAME':'category',
'status':'category','Category':'category','GroupId':'category','unit':'category',
'range':'category'}
diagnoses_hpo_dates = ['Entry_Date']
diagnoses_hpo_dtypes = {'PatientID':np.int64, 'icd_10':'category','icd_name':'category',
'hpo':'category','hpo_term':'category'}
encounter_dates = ['Encounter_date']
encounter_dtypes = {'PatientID': np.int64,'EncounterID': np.int64, 'Encounter_type':'category'}
meds_dates = ['Order_datetime','Start_date','End_date']
meds_dtypes = {'PatientID': np.int64,'EncounterID': np.int64, 'Medication_Name':'category','Dose':'category',
'Route':'category', 'Frequency':'category', 'Quantity':'category', 'RXNorm':np.float64,'Therapeutic_Class':'category',
'Pharmaceutical_Class':'category', 'Pharmaceutical_Subclass':'category'}
procedure_dtypes = {'PatientID':np.int64,'EncounterID':np.int64, 'Procedure_ID':np.int64,'Procedure_Code':'category',
'Procedure_Name':'category'}
df_demographics = pd.read_csv(r'S:\NCATS\Clinical_Profiles\clean_data\Diabetes\jh_diabetes_demographics.txt',sep='|',
dtype=demographics_dtypes, parse_dates=demographics_DOB)
print(sys.getsizeof(df_demographics)*10**(-9))
df_labs = pd.read_csv(r'S:\NCATS\Clinical_Profiles\clean_data\Diabetes\jh_diabetes_labs.txt',sep='|',
dtype=labs_dtypes, parse_dates=labs_dates)
print(sys.getsizeof(df_labs)*10**(-9))
df_diagnoses_hpo = pd.read_csv(r'S:\NCATS\Clinical_Profiles\clean_data\Diabetes\jh_diabetes_diagnoses_hpo.txt',sep='|',
dtype=diagnoses_hpo_dtypes, parse_dates=diagnoses_hpo_dates)
print(sys.getsizeof(df_diagnoses_hpo)*10**(-9))
df_encounter = pd.read_csv(r'S:\NCATS\Clinical_Profiles\clean_data\Diabetes\jh_diabetes_encounter.txt',sep='|',
dtype=encounter_dtypes, parse_dates=encounter_dates)
print(sys.getsizeof(df_encounter)*10**(-9))
df_meds = pd.read_csv(r'S:\NCATS\Clinical_Profiles\clean_data\Diabetes\jh_diabetes_meds.txt',sep='|',
dtype=meds_dtypes, parse_dates=meds_dates)
print(sys.getsizeof(df_meds)*10**(-9))
df_procedures = pd.read_csv(r'S:\NCATS\Clinical_Profiles\clean_data\Diabetes\jh_diabetes_procedure.txt',sep='|',encoding='Latin-1',
dtype=procedure_dtypes)
print(sys.getsizeof(df_procedures)*10**(-9))
###Output
1.067732568
###Markdown
Calculating Lab Information Lesson learned: grab patient IDs from demographics and then drop not needed columns, not all patients will have all encounter types, e.g. labs, medications, etc.
###Code
df_labs_full = df_labs.merge(df_demographics, on='PatientID', how='right')
df_labs_full.head()
(len(df_labs_full)-len(df_labs) )
df_labs_full.drop(['Result_datetime','Base_Name','status','Category','GroupId'],axis=1,inplace=True)
print(sys.getsizeof(df_labs_full)*10**(-9))
code = df_labs_full.Loinc_Code.unique().dropna()
code[0]
count = df_labs_full.Loinc_Code.value_counts()
count.index[0]
df_labs_full['orderYear'] = pd.to_datetime(df_labs_full.Ordering_datetime).dt.year
frequencyPerYear = df_labs_full.groupby(['Loinc_Code','PatientID','orderYear']).PatientID.size().groupby(['Loinc_Code','orderYear']).aggregate(np.mean)
frequencyPerYear.head(20)
%time correlatedLabsCoefficients = df_labs_full.groupby('Loinc_Code').Result_numeric.apply(lambda x: pd.Series(x.values)).unstack().transpose().corr()
correlatedLabsCoefficients
abscorrelation = correlatedLabsCoefficients.abs()
fractionOfSubjects = df_labs_full.groupby(['Loinc_Code']).PatientID.nunique()/df_labs_full.PatientID.nunique()
fractionOfSubjects
units = df_labs_full.groupby(['Loinc_Code']).unit.unique()
minimum = df_labs_full.groupby(['Loinc_Code']).Result_numeric.min()
maximum = df_labs_full.groupby(['Loinc_Code']).Result_numeric.max()
mean = df_labs_full.groupby(['Loinc_Code']).Result_numeric.mean()
median = df_labs_full.groupby(['Loinc_Code']).Result_numeric.median()
stdDev = df_labs_full.groupby(['Loinc_Code']).Result_numeric.std()
nthDecile = df_labs_full.groupby('Loinc_Code').Result_numeric.quantile([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
def percentile(n):
def percentile_(x):
return x.quantile(n*0.01)
percentile_.__name__ = '%s' % n
return percentile_
stats = (df_labs_full.groupby(['Loinc_Code'])
.Result_numeric.agg(['min','max', 'mean','median','std',
percentile(10), percentile(20), percentile(30),
percentile(40), percentile(50), percentile(60),
percentile(70), percentile(80), percentile(90)]))
df_labs_full.info()
df_labs_full['range_high'] = (pd.to_numeric(df_labs_full.range.dropna()
.astype('str').str.split(',',expand=True)[1]).astype('float'))
df_labs_full['range_low'] = (pd.to_numeric(df_labs_full.range.dropna()
.astype('str').str.split(',',expand=True)[0]).astype('float'))
def fracsAboveBelowNormal(x):
aboveNorm = np.divide(np.sum(x.Result_numeric > x.range_high), x.Result_numeric.size)
belowNorm = np.divide(np.sum(x.Result_numeric < x.range_low), x.Result_numeric.size)
return pd.Series({'aboveNorm':aboveNorm, 'belowNorm':belowNorm})
%%time
aboveBelowNorm = (df_labs_full.groupby(['Loinc_Code'])
.apply(fracsAboveBelowNormal))
aboveBelowNorm.aboveNorm
###Output
_____no_output_____
###Markdown
**NOTE: Less than a minute to calculate all necessary lab information (~9 million rows)** Printing out first 10 results from each calculated field as an example*If you copy this file, feel free to remove .head(10) to see all results, by default pandas groupby sorts alphanumerically*
###Code
code.head(10)
count.head(10)
frequencyPerYear.head(10)
correlatedLabsCoefficients.head(10)
abscorrelation.head(10)
fractionOfSubjects.head(10)
units.head(10)
minimum.head(10)
maximum.head(10)
mean.head(10)
median.head(10)
stdDev.head(10)
nthDecile.head(20)
###Output
_____no_output_____
###Markdown
Define Correlation Functions Needed for Categorical Data
###Code
def cramers_v(df, x, y):
confusion_matrix = (df.groupby([x,y])[y].size().unstack().fillna(0).astype(int))
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2/n
r,k = confusion_matrix.shape
phi2corr = max(0, phi2-((k-1)*(r-1))/(n-1))
rcorr = r-((r-1)**2)/(n-1)
kcorr = k-((k-1)**2)/(n-1)
return np.sqrt(phi2corr/min((kcorr-1), (rcorr-1)))
def uncertainty_coefficient(df, x, y):
df2 = df[[x,y]]
total = len(df2.dropna())
    # joint distribution p(x, y) over the non-null pairs — required by the conditional entropy below
    p_xy = df2.dropna().groupby([x, y], sort=False)[y].size()/total
    p_xy = p_xy[p_xy > 0]
    p_y = (df.groupby([y], sort=False)[y].size()/total).reindex(index=p_xy.index, level=1)
s_xy = sum(p_xy * (p_y/p_xy).apply(math.log))
p_x = df.groupby([x], sort=False)[x].size()/total
s_x = ss.entropy(p_x)
if s_x == 0:
return 1
else:
return ((s_x - s_xy) / s_x)
def correlation_ratio(df, x, y):
df2 = df.groupby([x],sort=False)[y].agg([np.size,np.mean])
ybar = df[y].mean()
numerator = np.nansum(np.multiply(df2['size'],np.square(df2['mean']-ybar)))
ssd = np.square(df[y]-ybar)
#ssd = df.groupby([x,y],sort=False)[y].apply(lambda y: np.nansum(np.square(y-ybar)))
denominator = np.nansum(ssd)
if numerator == 0:
return 0.0
else:
return np.sqrt(numerator/denominator)
###Output
_____no_output_____
###Markdown
Join All DataFrames to "Correlate Everything to Everything"
###Code
df = (df_labs.merge(df_diagnoses_hpo, on='PatientID')
.merge(df_encounter, on=['PatientID','EncounterID'], how='outer')
.merge(df_meds, on=['PatientID','EncounterID'], how='outer'))
###Output
_____no_output_____
###Markdown
Define Categorical Fields
###Code
categoricals = ['Lab_Name','Base_Name','Loinc_Code','LONG_COMMON_NAME','Category','GroupId','icd_10','icd_name',
'hpo','hpo_term','Encounter_type','Medication_Name','Dose','Route','Frequency','RXNorm',
'Therapeutic_Class','Pharmaceutical_Class','Pharmaceutical_Subclass']
###Output
_____no_output_____
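###Markdown
As a sketch of the next step, the cell below applies `cramers_v` pairwise over `categoricals` on a random sample of the joined data. The sample size and the cast to `str` are arbitrary choices, made only to keep the example cheap and to restrict the contingency tables to observed categories.
###Code
# Sketch: pairwise Cramér's V association matrix over the categorical fields.
df_sample = df[categoricals].sample(n=min(100000, len(df)), random_state=0).astype(str)
cramers_matrix = pd.DataFrame(np.eye(len(categoricals)), index=categoricals, columns=categoricals)
for i, x in enumerate(categoricals):
    for y in categoricals[i+1:]:
        v = cramers_v(df_sample, x, y)
        cramers_matrix.loc[x, y] = v
        cramers_matrix.loc[y, x] = v
cramers_matrix
###Output
_____no_output_____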
###Markdown
Work in Progress... Need to Define Correlations More Precisely Will Add in Other Fields & Their Calculated Results Shortly..... Medications
###Code
df_meds_full = df_meds.merge(df_demographics, on='PatientID', how='outer')
(len(df_meds_full) - len(df_meds))
###Output
_____no_output_____
###Markdown
**Why is Medication Name nunique() > RXNorm nunique() ?**
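One way to probe this: several distinct `Medication_Name` strings (brands, doses, formulations) can share a single `RXNorm` code, and some rows carry a name but no code at all. The sketch below counts distinct names per code and the rows with a missing code.
###Code
# Sketch: distinct medication names per RXNorm code, plus rows lacking an RXNorm code.
names_per_code = (df_meds_full.dropna(subset=['RXNorm'])
                  .groupby('RXNorm').Medication_Name.nunique()
                  .sort_values(ascending=False))
print(names_per_code.head(10))
print('Rows with missing RXNorm:', df_meds_full.RXNorm.isna().sum())
###Output
_____no_output_____
###Markdown
Below, the medication-level profile fields are computed.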
###Code
medication = df_meds_full.RXNorm.unique()
uniqDropNA = lambda x: np.unique(x.dropna())
dosageInfo = df_meds_full.groupby('RXNorm').agg({'Route':uniqDropNA, 'Dose':uniqDropNA,'Quantity':uniqDropNA})#[['Route','Dose','Quantity']].apply(np.unique)
#dose = df_meds_full.groupby('RXNorm')['Dose'].unique()
#quantity = df_meds_full.groupby('RXNorm')['Quantity'].unique()
# How to calculate rateRatio?!
#treatmentDuration says need clarification in model!
df_meds_full['startYear'] = pd.to_datetime(df_meds_full.Start_date).dt.year
frequencyPerYear = df_meds_full.groupby(['RXNorm','startYear','PatientID']).PatientID.count().groupby(['RXNorm','startYear']).mean()
fractionOfSubjects = df_meds_full.groupby(['RXNorm']).PatientID.nunique()/df_meds_full.PatientID.nunique()
#correlatedLabsCoefficients = df_labs.groupby('LONG_COMMON_NAME').Result_numeric.apply(lambda x: pd.Series(x.values)).unstack().transpose().corr()
#abscorrelation = correlatedLabsCoefficients.abs()
dosageInfo
###Output
_____no_output_____
###Markdown
Diagnosis
###Code
df_diagnoses_hpo_full = df_diagnoses_hpo.merge(df_demographics, on='PatientID', how='outer')
(len(df_diagnoses_hpo_full) - len(df_diagnoses_hpo))
code = df_diagnoses_hpo_full.icd_10.unique()
df_diagnoses_hpo_full['entryYear'] = pd.to_datetime(df_diagnoses_hpo_full.Entry_Date).dt.year
frequencyPerYear = df_diagnoses_hpo_full.groupby(['icd_10','entryYear','PatientID']).PatientID.count().groupby(['icd_10','entryYear']).mean()
fractionOfSubjects = df_diagnoses_hpo_full.groupby(['icd_10']).PatientID.nunique()/df_diagnoses_hpo_full.PatientID.nunique()
frequencyPerYear
###Output
_____no_output_____
###Markdown
Procedures
###Code
df_procedures_full = df_procedures.merge(df_demographics, on='PatientID', how='right')
df_procedures_full.drop(['DOB','Gender','Race','Ethnicity'], axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
**I need the encounter table to get a date**
###Code
encounter_dtypes = {'PatientID': np.int64, 'EncounterID': np.int64, 'Encounter_type': 'category'}
encounter_date = ['Encounter_date']
df_encounter = pd.read_csv(r'S:\NCATS\Clinical_Profiles\clean_data\Diabetes\jh_diabetes_encounter.txt',sep='|',
dtype=encounter_dtypes, parse_dates=encounter_date)
print(sys.getsizeof(df_encounter)*10**(-9))
df_procedures_full = df_procedures_full.merge(df_encounter, on=['EncounterID','PatientID'], how='left')
print(sys.getsizeof(df_procedures_full)*10**(-9))
(len(df_procedures_full) - len(df_procedures))
df_procedures_full.columns
# Oops don't need extra patient column
len(df_procedures_full.PatientID_x.dropna()) - len(df_procedures_full.PatientID_y.dropna())
df_procedures_full.drop('PatientID_y',axis=1,inplace=True)
df_procedures_full.rename(columns={'PatientID_x': 'PatientID'}, inplace=True)
###Output
_____no_output_____
###Markdown
procedure_dtypes = {'PatientID':np.int64,'EncounterID':np.int64, 'Procedure_ID':np.int64,'Procedure_Code':'category', 'Procedure_Name':'category'}
###Code
code = df_procedures_full.Procedure_Code.unique()
df_procedures_full['encounterYear'] = pd.to_datetime(df_procedures_full.Encounter_date).dt.year
frequencyPerYear = (df_procedures_full.groupby(['Procedure_Code','encounterYear','PatientID']).PatientID.count()
.groupby(['Procedure_Code','encounterYear']).mean())
fractionOfSubjects = df_procedures_full.groupby(['Procedure_Code']).PatientID.nunique()/df_procedures_full.PatientID.nunique()
fractionOfSubjects
###Output
_____no_output_____ |