path (string, lengths 7–265) | concatenated_notebook (string, lengths 46–17M)
---|---|
notebooks/pyhmx_demo.ipynb | ###Markdown
PyHMx: demo of the alternative interface (non-`f90wrap`). This is currently called `pyhmx`.
###Code
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.colorbar
import camb
import pyhmx
def colorbar(colormap, ax, vmin=None, vmax=None):
cmap = plt.get_cmap(colormap)
cb_ax = matplotlib.colorbar.make_axes(ax)
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
cb = matplotlib.colorbar.ColorbarBase(cb_ax[0], cmap=cmap,
norm=norm, **cb_ax[1])
return cb, lambda x, norm=norm: cmap(norm(x))
###Output
_____no_output_____
###Markdown
Compare the HMCode implementations in HMx and in CAMB. Set cosmology and halo model parameters.
###Code
hmx = pyhmx.HMx()
h = 0.7
omc = 0.25
omb = 0.048
mnu = 0.12
w = -1.0
wa = 0.0
ns = 0.97
As = 2.1e-9
Theat = 10**7.8
halo_model_mode = pyhmx.constants.HMCode2016
A = 3.13
eta0 = 0.603
fields = np.array([pyhmx.constants.field_dmonly])
###Output
_____no_output_____
###Markdown
Run CAMB to generate the linear and non-linear matter power spectra.
###Code
# Get linear power spectrum
p = camb.CAMBparams(WantTransfer=True,
NonLinearModel=camb.nonlinear.Halofit(halofit_version="mead",
HMCode_A_baryon=A, HMCode_eta_baryon=eta0))
p.set_cosmology(H0=h*100, omch2=omc*h**2, ombh2=omb*h**2, mnu=mnu)
p.set_dark_energy(w=w)
p.set_initial_power(camb.InitialPowerLaw(As=As, ns=ns))
z_lin = np.linspace(0, 3, 128, endpoint=True)
p.set_matter_power(redshifts=z_lin, kmax=20.0, nonlinear=False)
r = camb.get_results(p)
sigma8 = r.get_sigma8()[-1]
k_lin, z_lin, pofk_lin_camb = r.get_matter_power_spectrum(minkh=1e-3, maxkh=20.0, npoints=128)
omv = r.omega_de + r.get_Omega("photon") + r.get_Omega("neutrino")
omm = p.omegam
###Output
_____no_output_____
###Markdown
Now run HMx to get the non-linear matter power spectrum (using its HMCode implementation).
###Code
cosmology = {"Omega_m" : omm,
"Omega_b" : omb,
"Omega_v" : omv,
"h" : h,
"n_s" : ns,
"sigma_8" : sigma8,
"m_nu" : mnu}
halo_model = {"eta0" : eta0,
"As" : A}
Pk_HMx_dmonly = hmx.run_HMCode(cosmology=cosmology,
halo_model=halo_model,
k=k_lin,
z=z_lin,
pk_lin=pofk_lin_camb)
p.set_matter_power(redshifts=z_lin, kmax=max(k_lin), nonlinear=True)
r = camb.get_results(p)
Pk_nl_CAMB_interpolator = r.get_matter_power_interpolator()
pofk_nonlin_camb = Pk_nl_CAMB_interpolator.P(z_lin, k_lin, grid=True)
###Output
_____no_output_____
###Markdown
Finally, plot both non-linear HMCode power spectra, from CAMB and from HMx.
###Code
fig, ax = plt.subplots(2, 1, sharex=True)
fig.subplots_adjust(hspace=0, right=0.95)
cb, cmap = colorbar("magma", ax, vmin=z_lin[0], vmax=z_lin[-1])
cb.set_label("z")
for i in range(len(z_lin)):
ax[0].loglog(k_lin, pofk_lin_camb[i], ls=":", c=cmap(z_lin[i]), label="Linear" if i == 0 else None)
ax[0].loglog(k_lin, pofk_nonlin_camb[i], ls="--", c=cmap(z_lin[i]), label="HMCode CAMB" if i == 0 else None)
ax[0].loglog(k_lin, Pk_HMx_dmonly[i], ls="-", c=cmap(z_lin[i]), label="HMCode HMx" if i == 0 else None)
ax[1].semilogx(k_lin, Pk_HMx_dmonly[i]/pofk_nonlin_camb[i]-1, c=cmap(z_lin[i]))
ax[0].legend(frameon=False)
ax[0].set_ylabel("$P(k)$ [Mpc$^3$ $h^{-3}$]")
ax[1].set_ylabel("Frac. diff. HMCode")
ax[1].set_xlabel("$k$ [$h$ Mpc$^{-1}$]")
ax[0].set_title("HMCode vs HMx")
# fig.savefig("plots/HMCode_test_CAMB_vs_HMx.png", dpi=300)
###Output
_____no_output_____
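###Markdown
To put a number on the agreement shown above, the fractional difference can also be summarised directly. A minimal sketch using the arrays already defined in this notebook (assuming `Pk_HMx_dmonly` and `pofk_nonlin_camb` have the same shape, as the plot above suggests):
###Code
# largest absolute fractional deviation between the HMx and CAMB HMCode spectra
frac_diff = Pk_HMx_dmonly / pofk_nonlin_camb - 1
print(f"max |frac. diff.|: {np.nanmax(np.abs(frac_diff)):.3e}")
###Output
_____no_output_____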
###Markdown
Matter and pressure power spectra from HMx. HMx is much slower than HMCode, so we only use 8 redshifts here.
###Code
z_lin = np.linspace(0, 2, 8, endpoint=True)
p.set_matter_power(redshifts=z_lin, kmax=20.0, nonlinear=False)
r = camb.get_results(p)
k_lin, z_lin, pofk_lin_camb = r.get_matter_power_spectrum(minkh=1e-3, maxkh=20.0, npoints=128)
log_Theat = np.linspace(7.6, 8.0, 3)
Pk_HMx_matter = {}
for T in log_Theat:
print(f"Running HMx with log Theat={T:.1f}")
halo_model={"Theat" : 10**T}
Pk_HMx_matter[T] = hmx.run_HMx(cosmology=cosmology, halo_model=halo_model,
fields=[pyhmx.constants.field_matter, pyhmx.constants.field_gas],
mode=pyhmx.constants.HMx2020_matter_pressure_with_temperature_scaling,
k=k_lin,
z=z_lin,
pk_lin=pofk_lin_camb)
fig, ax = plt.subplots(2, 1, sharex=True)
fig.subplots_adjust(hspace=0, right=0.95)
cb, cmap = colorbar("plasma", ax, vmin=min(log_Theat), vmax=max(log_Theat))
cb.set_label("log T_heat")
ax[0].loglog(k_lin, Pk_HMx_dmonly[0], c="k", ls="--", label="HMCode")
for T in log_Theat:
ax[0].loglog(k_lin, Pk_HMx_matter[T][0,0,0], c=cmap(T))
ax[1].semilogx(k_lin, Pk_HMx_matter[T][0,0,0]/Pk_HMx_dmonly[0], c=cmap(T))
ax[0].legend(frameon=False)
ax[0].set_ylabel("$P(k)$ [Mpc$^3$ $h^{-3}$]")
ax[1].set_ylabel("Frac. diff. HMCode")
ax[1].set_xlabel("$k$ [$h$ Mpc$^{-1}$]")
ax[0].set_title("HMCode vs HMx")
# fig.savefig("plots/HMCode_vs_HMx.png", dpi=300)
###Output
_____no_output_____ |
code/plots/01_06_populations.ipynb | ###Markdown
Full initial data set (all available labels from SIMBAD and TNS)
###Code
# this folder is available through zenodo:
# https://zenodo.org/record/5645609#.Yc5SpXXMJNg
dirname_input = '../../../../data/AL_data/'
flist = os.listdir(dirname_input)
simbad_alerts = []
tns_alerts = []
tns_classes = []
simbad_objects = []
tns_objects = []
simbad_classes = []
# read all TNS and SIMBAD files
for name in flist:
if 'simbad' in name:
d1 = pd.read_parquet(dirname_input + name)
simbad_alerts.append(d1.shape[0])
nobjs = np.unique(d1['objectId'].values).shape[0]
simbad_objects.append(nobjs)
d2 = d1.drop_duplicates(subset=['objectId'], keep='first')
simbad_classes = simbad_classes + list(d2['cdsxmatch'].values)
elif 'tns' in name:
d1 = pd.read_parquet(dirname_input + name)
tns_alerts.append(d1.shape[0])
nobjs = np.unique(d1['objectId'].values).shape[0]
tns_objects.append(nobjs)
d2 = d1.drop_duplicates(subset=['objectId'], keep='first')
tns_classes = tns_classes + list(d2['TNS'].values)
# number of alerts with SIMBAD classification
sum(simbad_alerts)
# number of objects with SIMBAD classification
len(simbad_classes)
# check classes of all SIMBAD objects
simbad_orig_classes, simbad_orig_numbers = \
np.unique(simbad_classes, return_counts=True)
simbad_orig_classes_perc = 100*np.round(simbad_orig_numbers/len(simbad_classes), 4)
df_simbad_orig = pd.DataFrame(np.array([simbad_orig_classes,
simbad_orig_numbers,
simbad_orig_classes_perc]).transpose(),
columns=['class', 'number', 'perc'])
df_simbad_orig.to_csv('../../../../referee/data/simbad_orig_classes.csv',
index=False)
# number of alerts with TNS classification
sum(tns_alerts)
# number of objects with TNS classification
sum(tns_objects)
# check classes of all TNS objects
tns_orig_classes, tns_orig_numbers = \
np.unique(tns_classes, return_counts=True)
tns_orig_classes_perc = 100*np.round(tns_orig_numbers/len(tns_classes), 4)
df_tns_orig = pd.DataFrame(np.array([tns_orig_classes,
tns_orig_numbers,
tns_orig_classes_perc]).transpose(),
columns=['class', 'number', 'perc'])
df_tns_orig.to_csv('../../../../referee/data/tns_orig_classes.csv',
index=False)
###Output
_____no_output_____
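###Markdown
The cells in the next section rely on `class_dict` and `big_class`, which are defined elsewhere in this repository (e.g. in an earlier cell or an imported module). A purely illustrative sketch of their assumed structure is below; the labels and groupings are made up and are not the actual taxonomy used in the analysis.
###Code
# hypothetical structure: big_class maps each broad class to a list of raw SIMBAD/TNS labels,
# and class_dict maps each raw label back to its broad class
big_class = {
    "SNIa": ["SN Ia", "SN Ia-91T-like"],
    "other_SN": ["SN II", "SN Ibc"],
    "AGN-like": ["AGN", "QSO", "Seyfert_1"],
    "multiple_object": ["EB*", "SB*"],
    "star": ["RRLyr", "Mira"],
    "other_TNS": ["CV", "Nova"],
}
class_dict = {label: broad for broad, labels in big_class.items() for label in labels}
###Output
_____no_output_____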
###Markdown
Raw vs feature extraction
###Code
fname = '../../../../referee/data/raw.csv.gz'
data = pd.read_csv(fname)
data.shape
data.shape[0] - sum(tns_alerts)
np.unique(data['objectId'].values).shape
np.unique(data['objectId'].values).shape[0] - sum(tns_objects)
data_raw = []
galaxy = []
other_sn = []
mult = []
other_tns = []
snia = []
for i in range(data.shape[0]):
objtype = data.iloc[i]['TNS']
if objtype == '-99':
objtype = data.iloc[i]['cdsxmatch']
data_raw.append(class_dict[objtype])
big = class_dict[objtype]
if big == 'AGN-like':
galaxy.append(objtype)
if big == 'other_SN':
other_sn.append(objtype)
if big == 'multiple_object':
mult.append(objtype)
if big == 'other_TNS':
other_tns.append(objtype)
if big == 'SNIa':
snia.append([objtype, data.iloc[i]['objectId']])
data_raw = np.array(data_raw)
galaxy = np.array(galaxy)
other_tns = np.array(other_tns)
mult = np.array(mult)
snia = np.array(snia)
np.unique(snia[:,1]).shape
sntype, freq = np.unique(galaxy, return_counts=True)
print('Galaxy-sub-type --- number')
for i in range(len(sntype)):
print(sntype[i], ' -- ', freq[i])
sntype, freq = np.unique(other_sn, return_counts=True)
print('SN-sub-type --- number')
for i in range(len(sntype)):
print(sntype[i], ' -- ', freq[i])
sntype, freq = np.unique(other_tns, return_counts=True)
print('Other TNS-sub-type --- number')
for i in range(len(sntype)):
print(sntype[i], ' -- ', freq[i])
sntype, freq = np.unique(mult, return_counts=True)
print('Multiple object-sub-type --- number')
for i in range(len(sntype)):
print(sntype[i], ' -- ', freq[i])
features = pd.read_csv('../../../../referee/data/features.csv', index_col=False)
features_class = []
for i in range(features.shape[0]):
objtype = features.iloc[i]['type']
features_class.append(class_dict[objtype])
features_class = np.array(features_class)
objId = []
for i in range(features.shape[0]):
candid = features.iloc[i]['id']
indx = list(data['candid'].values).index(candid)
objId.append([data.iloc[indx]['objectId'],data.iloc[indx]['TNS']])
len(objId)
np.unique(np.array(objId)[:,0]).shape
Ia_flag = np.array([item in big_class['SNIa'] for item in np.array(objId)[:,1]])
Ia_id = np.array(objId)[Ia_flag]
np.unique(Ia_id[:,0]).shape
types_raw, number_raw = np.unique(data_raw, return_counts=True)
types_features, number_features = np.unique(features_class, return_counts=True)
raw_pop = pd.DataFrame()
raw_pop['type'] = types_raw
raw_pop['sample fraction'] = number_raw.astype(float)/len(data_raw)
raw_pop['number'] = number_raw
raw_pop['sample'] = 'raw'
c1 = pd.DataFrame()
c1['type'] = np.unique(features_class, return_counts=True)[0]
c1['sample fraction'] = np.unique(features_class, return_counts=True)[1]/len(features_class)
c1['sample'] = 'after feature extraction'
c1['number'] = np.unique(features_class, return_counts=True)[1]
pop = pd.concat([raw_pop,c1], ignore_index=True)
pop
sum(pop['number'][pop['sample'] == 'after feature extraction'])
sum(pop['number'][pop['sample'] == 'raw'])
c = ['#F5622E', '#15284F']
f, ax = plt.subplots(figsize=(8, 5))
sns.set_palette('Spectral')
sns.barplot(x="sample fraction", y="type", data=pop,
hue='sample', ci=None, palette=c)
ax.set(xlim=(0, 0.9), ylabel="")
ax.set_xlabel(xlabel="fraction of full sample", fontsize=14)
ax.set_yticklabels(types_raw, fontsize=14)
sns.despine(left=True, bottom=True)
plt.tight_layout()
#plt.show()
plt.savefig('../../../../referee/plots/perc_raw_features.pdf')
pop
###Output
_____no_output_____
###Markdown
Queried sample
###Code
res_queried = {}
for strategy in ['RandomSampling', 'UncSampling']:
flist = glob.glob('../../../../referee/' + strategy + '/queries/queried_' + strategy + '_v*.dat')
res_queried[strategy] = {}
for name in big_class.keys():
res_queried[strategy][name] = []
for j in range(len(flist)):
data = pd.read_csv(flist[j], delim_whitespace=True, index_col=False)
data_class = np.array([class_dict[item] for item in data['type'].values])
sntype, freq = np.unique(data_class, return_counts=True)
for i in range(len(freq)):
res_queried[strategy][sntype[i]].append(freq[i]/data.shape[0])
for strategy in ['RandomSampling', 'UncSampling']:
print('**** ' + strategy + ' ****')
for key in res_queried[strategy].keys():
print(key, ' -- ', np.round(100* np.mean(res_queried[strategy][key]), 2),
' -- ', np.round(100*np.std(res_queried[strategy][key]),2))
print('\n')
df1 = pd.DataFrame()
df1['type'] = res_queried['RandomSampling'].keys()
df1['sample fraction'] = [np.mean(res_queried['RandomSampling'][key])
for key in res_queried['RandomSampling'].keys()]
df1['strategy'] = 'RandomSampling'
df2 = pd.DataFrame()
df2['type'] = res_queried['UncSampling'].keys()
df2['sample fraction'] = [np.mean(res_queried['UncSampling'][key])
for key in res_queried['UncSampling'].keys()]
df2['strategy'] = 'UncSampling'
df = pd.concat([df2, df1], ignore_index=True)
c = ['#F5622E', '#15284F']
types = ['multiple_objects', 'star', 'AGN-like', 'other_SN', 'other_TNS', 'SNIa']
f, ax = plt.subplots(figsize=(8, 5))
sns.set_palette('Spectral')
sns.barplot(x="sample fraction", y="type", data=df,
hue='strategy', ci=None, palette=c)
ax.set(xlim=(0, 0.9), ylabel="")
ax.set_xlabel(xlabel="fraction of full sample", fontsize=14)
ax.set_yticklabels(types, fontsize=14)
sns.despine(left=True, bottom=True)
plt.tight_layout()
#plt.show()
plt.savefig('../../../../referee/plots/queried_classes.pdf')
###Output
_____no_output_____
###Markdown
Photometrically classified Ia sample
###Code
res_photIa = {}
for strategy in ['RandomSampling', 'UncSampling']:
res_photIa[strategy] = {}
for name in big_class.keys():
res_photIa[strategy][name] = []
res_photIa[strategy]['tot'] = []
flist = glob.glob('../../../../referee/' + strategy + '/class_prob/v*/class_prob_' + \
strategy + '_loop_299.csv')
for name in flist:
data = pd.read_csv(name)
phot_Ia = data[data['prob_Ia']> 0.5]
data_class = np.array([class_dict[item] for item in phot_Ia['type'].values])
sntype, freq = np.unique(data_class, return_counts=True)
for i in range(len(freq)):
res_photIa[strategy][sntype[i]].append(freq[i]/data_class.shape[0])
res_photIa[strategy]['tot'].append(phot_Ia.shape[0])
np.mean(res_photIa['RandomSampling']['other_SN'])
for strategy in ['RandomSampling', 'UncSampling']:
print('**** ' + strategy + ' ****')
for key in res_photIa[strategy].keys():
if key != 'tot':
print(key, ' -- ', np.round(100* np.mean(res_photIa[strategy][key]), 2),
' -- ', np.round(100*np.std(res_photIa[strategy][key]),2))
print('\n')
for strategy in ['RandomSampling', 'UncSampling']:
print(strategy,' ', np.mean(res_photIa[strategy]['tot']), ' +/- ',
np.std(res_photIa[strategy]['tot']))
print('\n')
res = []
for strategy in ['RandomSampling', 'UncSampling']:
for key in big_class.keys():
mean = np.mean(res_photIa[strategy][key])
std = np.std(res_photIa[strategy][key])
line = [key, mean, std, strategy]
res.append(line)
res2 = pd.DataFrame(data=res, columns=['type', 'perc', 'std', 'strategy'])
c = ['#F5622E', '#15284F']
types = ['multiple_objects', 'star', 'AGN-like', 'other_SN', 'other_TNS', 'SNIa']
f, ax = plt.subplots(figsize=(8, 5))
sns.set_palette('Spectral')
sns.barplot(x="perc", y="type", data=res2,
hue='strategy', ci=None, palette=c)
ax.set(xlim=(0, 0.9), ylabel="")
ax.set_xlabel(xlabel="fraction of full sample", fontsize=14)
ax.set_yticklabels(types, fontsize=14)
sns.despine(left=True, bottom=True)
plt.tight_layout()
#plt.show()
plt.savefig('../../../../referee/plots/photom_classified.pdf')
res2
###Output
_____no_output_____
###Markdown
number of Ia in test sample for best model
###Code
fname = '../../../../referee/UncSampling/queries/queried_UncSampling_v68.dat'
data = pd.read_csv(fname, index_col=False, delim_whitespace=True)
sum(data['type'].values == 'Ia')
1600-132-5
###Output
_____no_output_____ |
Samples/doe-notebooks-master/Week4--Full-factorials-3-factors.ipynb | ###Markdown
Goal: regular experiments with 3 factors. Achieve a stability value of 50 days or more, for a new product. Data available: * **A**: enzyme strength [numeric factor] * **B**: feed concentration [numeric factor] * **C**: mixer type [categorical factor] * *y* = Stability [days]
###Code
from process_improve import *
from bokeh.plotting import output_notebook
output_notebook()
A = B = C = c(-1, +1)
A, B, C = expand_grid(A=A, B=B, C=C)
A.name = "Enzyme strength"
B.name = "Feed concentration"
C.name = "Mixer type"
# Response: stability value
y = c(40, 27, 35, 21, 41, 27, 31, 20, name="Stability", units="days")
# Linear model using all factors to predict the response
expt = gather(A=A, B=B, C=C, y=y, title='Experiments to determine stability value')
expt
model_stability = lm("y ~ A*B*C", expt)
summary(model_stability);
###Output
_____no_output_____
###Markdown
Interpretation of the model. The model shows: * An $R^2$ value of ____ * The factors which have the greatest influence on the product's stability are: 1. ___ 2. ___, then finally 3. ___ Use contour plots of all combinations (A and B, A and C, B and C) to verify which factor has the least influence on the stability. Does your visual conclusion in the contour plot match your conclusion from the linear regression model summary? Does it match the bar magnitudes in the Pareto plot?
###Code
# Contour plots:
# contour_plot(model_stability, "A", "B", dpi=40)
# contour_plot(model_stability, "A", "C", dpi=40)
# contour_plot(model_stability, "B", "C", dpi=40);
###Output
_____no_output_____
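###Markdown
As an aside, the formula `y ~ A*B*C` expands to the main effects plus all two- and three-factor interactions. A small sketch of that expansion in plain pandas (independent of `process_improve`, and assuming the `A`, `B`, `C`, `y` objects created above behave like numeric pandas Series; the interaction column names are illustrative):
###Code
import pandas as pd

design = pd.DataFrame({"A": A, "B": B, "C": C, "y": y})
# interaction columns are just elementwise products of the coded (-1/+1) factors
design["AB"] = design["A"] * design["B"]
design["AC"] = design["A"] * design["C"]
design["BC"] = design["B"] * design["C"]
design["ABC"] = design["A"] * design["B"] * design["C"]
design
###Output
_____no_output_____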
###Markdown
Model without the unimportant factor(s). Remove factor C from consideration. Why?
###Code
expt_no_C = gather(A=A, B=B, y=y, title='Experiments to determine stability value: A and B only')
model_stability_no_C = lm("y ~ A*B", expt_no_C)
summary(model_stability_no_C);
contour_plot(model_stability_no_C, xlabel="A", ylabel="B", dpi=80);
models.predict(model_stability_no_C, A=-2, B=-2)
###Output
_____no_output_____ |
analysis/SeparateTrafficData.ipynb | ###Markdown
Per-Month Road Selection
###Code
aRoadPerMonth = [] #All Roads per month
aRoads = [] #All aRoadPerMonth
for m in range(0, 12):
aRoadPerMonth = []
if (m+1) < 10:
FILENAME = str(YEAR) + "-0" + str(m+1) + ".csv"
else:
FILENAME = str(YEAR) + "-" + str(m+1) + ".csv"
data = pd.read_csv(DIR + "//" + FILENAME , skipinitialspace=True)
data = data.drop(['entry_id', 'guid'], axis=1)
aData = np.array(data)
print("--Organizing Month " + str(m) + "--")
for i in range(len(aData)):
d = aData[i]
if(d[2] in ROADS_1 or d[2] in ROADS_2):
aRoadPerMonth.append(d)
print("--End of Month " + str(m) + "--")
aRoads.append(aRoadPerMonth)
cols = ["location_road", "location_bound", "location_area", "traffic", "timestamp", "update_timestamp"]
toExport = []
for m in range(len(aRoads)):
if(m==0):
df = pd.DataFrame(data=aRoads[m])
else:
if(len(aRoads[m]) <= 0):
continue
else:
df = df.append(aRoads[m])
df.columns = cols
df.head()
FILENAME = "manilaroads-" + str(YEAR) + ".csv"
print("Exporting to " + DIR + "//" + FILENAME)
df.to_csv(DIR + "//" + FILENAME, encoding='utf-8')
###Output
Exporting to C://Users//Ronnie Nieva//Documents//Dydy//school//THESIS//datasets//mmda//2017//manilaroads-2017.csv
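###Markdown
A side note on the zero-padded month handling above: the same file names can be produced without the `if`/`else`, for example with `zfill` (YEAR is assumed to be defined earlier in the notebook, as in the cells above):
###Code
# e.g. YEAR = 2017 -> "2017-01.csv", "2017-02.csv", ..., "2017-12.csv"
for m in range(12):
    FILENAME = str(YEAR) + "-" + str(m + 1).zfill(2) + ".csv"
    print(FILENAME)
###Output
_____no_output_____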
###Markdown
If you want to search for the appearance of Roads
###Code
ROAD1 = ["Magsaysay Ave"]
ROAD2 = ["MAGSAYSAY_AVE"]
ROADNAME = []
ROADNAME += (ROAD1)
ROADNAME += (ROAD2)
for m in range(len(aRoads)):
inc = 0
for r in range(len(aRoads[m])):
if(aRoads[m][r][2] in ROADNAME):
inc += 1
if(inc < 3): #Just to limit printing
print(aRoads[m][r])
###Output
['COMMONWEALTH' 'NB' 'MAGSAYSAY_AVE' 'L' 'Sat, 11 Mar 2017 03:15:48 +0800'
'2017-03-10 20:38:56.995827']
['COMMONWEALTH' 'SB' 'MAGSAYSAY_AVE' 'L' 'Sat, 11 Mar 2017 03:15:47 +0800'
'2017-03-10 20:38:56.995827']
['COMMONWEALTH' 'NB' 'MAGSAYSAY_AVE' 'L' 'Mon, 10 Jul 2017 23:31:11 +0800'
'2017-07-10 15:30:03.054131']
['COMMONWEALTH' 'SB' 'MAGSAYSAY_AVE' 'L' 'Mon, 10 Jul 2017 23:31:11 +0800'
'2017-07-10 15:30:03.054131']
['COMMONWEALTH' 'NB' 'MAGSAYSAY_AVE' 'L' 'Tue, 01 Aug 2017 08:06:01 +0800'
'2017-08-01 00:00:03.211164']
['COMMONWEALTH' 'SB' 'MAGSAYSAY_AVE' 'ML' 'Tue, 01 Aug 2017 08:06:01 +0800'
'2017-08-01 00:00:03.211164']
['COMMONWEALTH' 'NB' 'MAGSAYSAY_AVE' 'L' 'Wed, 13 Sep 2017 07:52:19 +0800'
'2017-09-12 23:45:02.781803']
['COMMONWEALTH' 'SB' 'MAGSAYSAY_AVE' 'MH' 'Wed, 13 Sep 2017 07:52:19 +0800'
'2017-09-12 23:45:02.781803']
['COMMONWEALTH' 'NB' 'MAGSAYSAY_AVE' 'L' 'Sun, 01 Oct 2017 08:09:01 +0800'
'2017-10-01 00:00:03.547244']
['COMMONWEALTH' 'SB' 'MAGSAYSAY_AVE' 'L' 'Sun, 01 Oct 2017 08:09:01 +0800'
'2017-10-01 00:00:03.547244']
###Markdown
Year Road Selection
###Code
aRoadPerMonth = [] #All Roads per month
aRoads = [] #All aRoadPerMonth
FILENAME = str(YEAR) + ".csv"
data = pd.read_csv(DIR + "//" + FILENAME , skipinitialspace=True)
data = data.drop(['entry_id', 'guid'], axis=1)
aData = np.array(data)
print("--Organizing Month " + str(m) + "--")
for i in range(len(aData)):
d = aData[i]
if(d[2] in ROADS_1 or d[2] in ROADS_2):
aRoadPerMonth.append(d)
print("--End of Year " + str(YEAR) + "--")
aRoads.append(aRoadPerMonth)
cols = ["location_road", "location_bound", "location_area", "traffic", "timestamp", "update_timestamp"]
toExport = []
for m in range(len(aRoads)):
if(m==0):
df = pd.DataFrame(data=aRoads[m])
else:
if(len(aRoads[m]) <= 0):
continue
else:
df = df.append(aRoads[m])
df.columns = cols
df
FILENAME = "manilaroads-" + str(YEAR) + ".csv"
print("Exporting to " + DIR + "//" + FILENAME)
df.to_csv(DIR + "//" + FILENAME, encoding='utf-8')
###Output
Exporting to C://Users//Ronnie Nieva//Documents//Dydy//school//THESIS//datasets//mmda//2015//manilaroads-2015.csv
|
notebooks/bigquery:veglogg.standardized.situations.ipynb | ###Markdown
This query shows the number of events from the road log (vegloggen) in December 2021, per county number.
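The queries in this notebook assume a BigQuery `client` object and a `project` string set up in an earlier cell; a minimal sketch of such a setup (how the project is resolved here is an assumption):
###Code
from google.cloud import bigquery

# credentials are taken from the environment (e.g. GOOGLE_APPLICATION_CREDENTIALS)
client = bigquery.Client()
project = client.project  # assumed to be the project that hosts the veglogg tables
###Output
_____no_output_____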
###Code
query = f"""
SELECT
COALESCE(countyNumber, 'Ukjent') fylkesNr,
COUNT(*) antallHendelser
FROM `{project}.standardized.situations`
WHERE DATE(creationTime, "Europe/Oslo") BETWEEN "2021-12-01" AND "2021-12-31"
GROUP BY countyNumber
ORDER BY antallHendelser DESC
"""
print(query)
client.query(query).to_dataframe()
###Output
_____no_output_____
###Markdown
This query shows the number of events from the road log in December 2021 per road number, for events within 10 km of Trondheim city centre.
###Code
query = f"""
SELECT
roadNumber vegNr,
COUNT(*) antallHendelser,
FROM `{project}.standardized.situations`
WHERE DATE(creationTime, "Europe/Oslo") BETWEEN "2021-12-01" AND "2021-12-31"
AND roadNumber IS NOT NULL
AND st_distance(geography, st_geogfromtext('POINT (10.3939341 63.4301525)')) < 10000
GROUP BY vegNr
ORDER BY antallHendelser DESC
"""
print(query)
client.query(query).to_dataframe()
###Output
_____no_output_____ |
Jupyter_notebook/20200915-Comparison (China-Auburn).ipynb | ###Markdown
Compass heading
###Code
plt.plot(standardized_time_1, compass_heading_1, label='China')
plt.plot(standardized_time_2, compass_heading_2, label='Auburn')
plt.xlabel('Time [sec]', fontsize=16)
plt.ylabel('Heading [degree]', fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
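###Markdown
The sections below repeat the same plotting pattern for each sensor channel; a small helper like the sketch below would avoid the duplication (the time and series variables follow the naming used in this notebook and are assumed to be loaded in earlier cells):
###Code
def compare_sites(time_1, series_1, time_2, series_2, ylabel):
    # overlay the China and Auburn time series for one sensor channel
    plt.plot(time_1, series_1, label='China')
    plt.plot(time_2, series_2, label='Auburn')
    plt.xlabel('Time [sec]', fontsize=16)
    plt.ylabel(ylabel, fontsize=16)
    plt.legend()
    plt.show()

# e.g. compare_sites(standardized_time_1, temp_1, standardized_time_2, temp_2, 'Temperature [degree]')
###Output
_____no_output_____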
###Markdown
Temperature
###Code
plt.plot(standardized_time_1, temp_1, label='China')
plt.plot(standardized_time_2, temp_2, label='Auburn')
plt.xlabel('Time [sec]', fontsize=16)
plt.ylabel('Temperature [degree]', fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
PH
###Code
plt.plot(standardized_time_1, PH_1, label='China')
plt.plot(standardized_time_2, PH_2, label='Auburn')
plt.xlabel('Time [sec]', fontsize=16)
plt.ylabel('PH', fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Conductivity * around time 1000, catabot hit another boat at China
###Code
plt.plot(standardized_time_1, cond_1, label='China')
plt.plot(standardized_time_2, cond_2, label='Auburn')
plt.xlabel('Time [sec]', fontsize=16)
plt.ylabel('Conductivity', fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Chlorophyll * around time 1000, catabot hit another boat at China
###Code
plt.plot(standardized_time_1, chlorophyll_1, label='China')
plt.plot(standardized_time_2, chlorophyll_2, label='Auburn')
plt.xlabel('Time [sec]', fontsize=16)
plt.ylabel('chlorophyll [RFU]', fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
ODO
###Code
plt.plot(standardized_time_1, ODO_1, label='China')
plt.plot(standardized_time_2, ODO_2, label='Auburn')
plt.xlabel('Time [sec]', fontsize=16)
plt.ylabel('ODO [mg/L]', fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Sonar depth
###Code
plt.plot(standardized_time_1, sonar_1, label='China')
plt.plot(standardized_time_2, sonar_2, label='Auburn')
plt.xlabel('Time [sec]', fontsize=16)
plt.ylabel('sonar [m]', fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____ |
session2/Session2_Assignment_empty.ipynb | ###Markdown
Python for Psychologists - Session 2 Homework assignment **Exercise 1**. Below you can find a dictionary representing a small data base of people. Add up the ages of all three people using indexing.
###Code
people = {"Roswitha Rosenstein":{"age": 68, "gender":"female", "marital_status":"married"}, "Jürgen Jäck":{"age": 46, "gender":"male", "marital_status":"divorced"}, "Ulla Ulrich":{"age": 38, "gender":"female", "marital_status":"single"}}
###Output
_____no_output_____
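###Markdown
One possible sketch for Exercise 1 (just one of many equivalent ways, using double indexing into the nested dictionary):
###Code
# index the outer dict by name, then the inner dict by "age", and add the three values
total_age = (people["Roswitha Rosenstein"]["age"]
             + people["Jürgen Jäck"]["age"]
             + people["Ulla Ulrich"]["age"])
total_age
###Output
_____no_output_____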
###Markdown
**Exercise 2**. Below there is a list with duplicate values. Create a list that contains only the unique values, that is, a list without any of the values being duplicated. Do so without simply creating a new list and typing in the numbers manually. Also try to solve the problem without deleting or replacing any of the items of the list ;)
###Code
duplicate_list = [1,1,4,7,3,5,6,4,4,9,11,0,11]
###Output
_____no_output_____
###Markdown
**Exercise 3**. Try to change the third position in the list of the tuple below.
###Code
my_tuple = (1,["I", "am", "a", "list", "inside", "a", "tuple"])
###Output
_____no_output_____
###Markdown
**Exercise 4.**Try to execute the code below. In a markdown cell, explain why there is an error.
###Code
some_dict = {my_tuple: 3}
###Output
_____no_output_____
###Markdown
**Exercise 5**. Check if the element "inside" is part of the tuple `my_tuple`. Afterwards, check if the element `1` is part of my_tuple. Optional exercises: **Exercise 6**. Check if the item ("I am a key", "I am its value") is part of the dictionary below. If you have no clue, go back to the session script, and if that doesn't help: google it! :)
###Code
my_dict = {"blablabla": "I am a value", "I am a key": "blublublu"}
###Output
_____no_output_____
###Markdown
Python for Psychologists - Session 2 Homework assignment **Exercise 0.** Use the dictionary below to complete the sentence, i.e., fill "....." with something meaningful. Hint: think about the `type()` of the things you want to combine in the sentence and why it might (not) work
###Code
cute_animals = {"otter": 1, "dogs": 2}
"Dogs are one of my most favourite animals, acutally they are my "+ "...." +"th most favourite animals."
###Output
_____no_output_____
###Markdown
**Exercise 1**. Below you can find a dictionary representing a small data base of people. Add up the ages of all three people using indexing. Think about what *people* contains at first and index one of its keys ... and go on from there (hint: double indexing)```python len(people) = ? ```
###Code
people = {"Roswitha Rosenstein":{"age": 68, "gender":"female", "marital_status":"married"}, "Jürgen Jäck":{"age": 46, "gender":"male", "marital_status":"divorced"}, "Ulla Ulrich":{"age": 38, "gender":"female", "marital_status":"single"}}
###Output
_____no_output_____
###Markdown
**Exercise 2**. Below there is a list with duplicate values. Create a list that contains only the unique values, that is, a list without any of the values being duplicated. Do so without simply creating a new list and typing in the numbers manually. Also try to solve the problem without deleting or replacing any of the items of the list ;)
###Code
duplicate_list = [1,1,4,7,3,5,6,4,4,9,11,0,11]
###Output
_____no_output_____
###Markdown
**Exercise 3**. Try to change the third position in the list of the tuple below. Hint: double indexing
###Code
my_tuple = (1,["I", "am", "a", "list", "inside", "a", "tuple"])
###Output
_____no_output_____
###Markdown
**Exercise 4.**Try to execute the code below. In a markdown cell, explain why there is an error.
###Code
some_dict = {my_tuple: 3}
###Output
_____no_output_____
###Markdown
Explain here by double-clicking ... **Exercise 5**.Check if the element "inside" is part of the tuple `my_tuple`. Afterwards, check if the element `1` is part of my_tuple. **Exercise 6**.Check if the item ("I am a key", "I am its value") is part of the dictionary below. If you have no clue, go back to the session script, and if that doesn't help: google it! :)
###Code
my_dict = {"blablabla": "I am a value", "I am a key": "blublublu"}
###Output
_____no_output_____ |
src/14_Create_Submission_03.ipynb | ###Markdown
Introduction - ref: > https://www.kaggle.com/artgor/brute-force-feature-engineering - Following nb10 and nb12, add more features. - Focus on features that are aware of each molecule. Import everything I need :)
###Code
import glob
import multiprocessing
import gc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error
import lightgbm as lgb
###Output
_____no_output_____
###Markdown
Data Preparation
###Code
file_path = './input/champs-scalar-coupling/'
glob.glob(file_path + '*')
# train
path = file_path + 'train.csv'
train = pd.read_csv(path)
# test
path = file_path + 'test.csv'
test = pd.read_csv(path)
# structure
path = file_path + 'structures.csv'
structures = pd.read_csv(path)
print(f'There are {train.shape[0]} rows in train data.')
print(f'There are {test.shape[0]} rows in test data.')
print(f"There are {train['molecule_name'].nunique()} distinct molecules in train data.")
print(f"There are {test['molecule_name'].nunique()} distinct molecules in test data.")
print(f"There are {train['atom_index_0'].nunique()} unique atoms.")
print(f"There are {train['type'].nunique()} unique types.")
# using n_cpu
n_cpu = multiprocessing.cpu_count()-3
n_cpu
###Output
_____no_output_____
###Markdown
Feature Engineering **before**
###Code
train.head(0)
###Output
_____no_output_____
###Markdown
---**after**- add the coordinate information from `structures`
###Code
def map_atom_info(df, atom_idx):
df = pd.merge(df, structures, how = 'left',
left_on = ['molecule_name', f'atom_index_{atom_idx}'],
right_on = ['molecule_name', 'atom_index'])
df = df.drop('atom_index', axis=1)
df = df.rename(columns={'atom': f'atom_{atom_idx}',
'x': f'x_{atom_idx}',
'y': f'y_{atom_idx}',
'z': f'z_{atom_idx}'})
return df
train = map_atom_info(train, 0)
train = map_atom_info(train, 1)
test = map_atom_info(test, 0)
test = map_atom_info(test, 1)
train.head(3)
# free up memory
del structures
gc.collect()
###Output
_____no_output_____
###Markdown
---**Add distance information**
###Code
train_p_0 = train[['x_0', 'y_0', 'z_0']].values
train_p_1 = train[['x_1', 'y_1', 'z_1']].values
test_p_0 = test[['x_0', 'y_0', 'z_0']].values
test_p_1 = test[['x_1', 'y_1', 'z_1']].values
train['dist'] = np.linalg.norm(train_p_0 - train_p_1, axis=1)
test['dist'] = np.linalg.norm(test_p_0 - test_p_1, axis=1)
train['dist_x'] = (train['x_0'] - train['x_1']) ** 2
test['dist_x'] = (test['x_0'] - test['x_1']) ** 2
train['dist_y'] = (train['y_0'] - train['y_1']) ** 2
test['dist_y'] = (test['y_0'] - test['y_1']) ** 2
train['dist_z'] = (train['z_0'] - train['z_1']) ** 2
test['dist_z'] = (test['z_0'] - test['z_1']) ** 2
###Output
_____no_output_____
###Markdown
---**Decompose the type**- extract the 2 from 2JHC
###Code
train['type_0'] = train['type'].apply(lambda x: x[0])
test['type_0'] = test['type'].apply(lambda x: x[0])
# features so far
train.columns
###Output
_____no_output_____
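###Markdown
The next cell builds a large number of aggregate features with the same groupby/transform pattern; a toy illustration of that pattern (the three-row frame below is made up purely for demonstration):
###Code
toy = pd.DataFrame({'molecule_name': ['m1', 'm1', 'm2'], 'dist': [1.0, 3.0, 2.0]})
# transform broadcasts the per-molecule mean back onto every row of that molecule
toy['molecule_dist_mean'] = toy.groupby('molecule_name')['dist'].transform('mean')
toy
###Output
_____no_output_____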
###Markdown
---**Generate secondary features from the current features**
###Code
def create_features(df):
df['molecule_couples'] = df.groupby('molecule_name')['id'].transform('count')
df['molecule_dist_mean'] = df.groupby('molecule_name')['dist'].transform('mean')
df['molecule_dist_min'] = df.groupby('molecule_name')['dist'].transform('min')
df['molecule_dist_max'] = df.groupby('molecule_name')['dist'].transform('max')
df['atom_0_couples_count'] = df.groupby(['molecule_name', 'atom_index_0'])['id'].transform('count')
df['atom_1_couples_count'] = df.groupby(['molecule_name', 'atom_index_1'])['id'].transform('count')
df[f'molecule_atom_index_0_x_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['x_1'].transform('std')
df[f'molecule_atom_index_0_y_1_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('mean')
df[f'molecule_atom_index_0_y_1_mean_diff'] = df[f'molecule_atom_index_0_y_1_mean'] - df['y_1']
df[f'molecule_atom_index_0_y_1_mean_div'] = df[f'molecule_atom_index_0_y_1_mean'] / df['y_1']
df[f'molecule_atom_index_0_y_1_max'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('max')
df[f'molecule_atom_index_0_y_1_max_diff'] = df[f'molecule_atom_index_0_y_1_max'] - df['y_1']
df[f'molecule_atom_index_0_y_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('std')
df[f'molecule_atom_index_0_z_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['z_1'].transform('std')
df[f'molecule_atom_index_0_dist_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('mean')
df[f'molecule_atom_index_0_dist_mean_diff'] = df[f'molecule_atom_index_0_dist_mean'] - df['dist']
df[f'molecule_atom_index_0_dist_mean_div'] = df[f'molecule_atom_index_0_dist_mean'] / df['dist']
df[f'molecule_atom_index_0_dist_max'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('max')
df[f'molecule_atom_index_0_dist_max_diff'] = df[f'molecule_atom_index_0_dist_max'] - df['dist']
df[f'molecule_atom_index_0_dist_max_div'] = df[f'molecule_atom_index_0_dist_max'] / df['dist']
df[f'molecule_atom_index_0_dist_min'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')
df[f'molecule_atom_index_0_dist_min_diff'] = df[f'molecule_atom_index_0_dist_min'] - df['dist']
df[f'molecule_atom_index_0_dist_min_div'] = df[f'molecule_atom_index_0_dist_min'] / df['dist']
df[f'molecule_atom_index_0_dist_std'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('std')
df[f'molecule_atom_index_0_dist_std_diff'] = df[f'molecule_atom_index_0_dist_std'] - df['dist']
df[f'molecule_atom_index_0_dist_std_div'] = df[f'molecule_atom_index_0_dist_std'] / df['dist']
df[f'molecule_atom_index_1_dist_mean'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('mean')
df[f'molecule_atom_index_1_dist_mean_diff'] = df[f'molecule_atom_index_1_dist_mean'] - df['dist']
df[f'molecule_atom_index_1_dist_mean_div'] = df[f'molecule_atom_index_1_dist_mean'] / df['dist']
df[f'molecule_atom_index_1_dist_max'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('max')
df[f'molecule_atom_index_1_dist_max_diff'] = df[f'molecule_atom_index_1_dist_max'] - df['dist']
df[f'molecule_atom_index_1_dist_max_div'] = df[f'molecule_atom_index_1_dist_max'] / df['dist']
df[f'molecule_atom_index_1_dist_min'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('min')
df[f'molecule_atom_index_1_dist_min_diff'] = df[f'molecule_atom_index_1_dist_min'] - df['dist']
df[f'molecule_atom_index_1_dist_min_div'] = df[f'molecule_atom_index_1_dist_min'] / df['dist']
df[f'molecule_atom_index_1_dist_std'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('std')
df[f'molecule_atom_index_1_dist_std_diff'] = df[f'molecule_atom_index_1_dist_std'] - df['dist']
df[f'molecule_atom_index_1_dist_std_div'] = df[f'molecule_atom_index_1_dist_std'] / df['dist']
df[f'molecule_atom_1_dist_mean'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('mean')
df[f'molecule_atom_1_dist_min'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('min')
df[f'molecule_atom_1_dist_min_diff'] = df[f'molecule_atom_1_dist_min'] - df['dist']
df[f'molecule_atom_1_dist_min_div'] = df[f'molecule_atom_1_dist_min'] / df['dist']
df[f'molecule_atom_1_dist_std'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('std')
df[f'molecule_atom_1_dist_std_diff'] = df[f'molecule_atom_1_dist_std'] - df['dist']
df[f'molecule_type_0_dist_std'] = df.groupby(['molecule_name', 'type_0'])['dist'].transform('std')
df[f'molecule_type_0_dist_std_diff'] = df[f'molecule_type_0_dist_std'] - df['dist']
df[f'molecule_type_dist_mean'] = df.groupby(['molecule_name', 'type'])['dist'].transform('mean')
df[f'molecule_type_dist_mean_diff'] = df[f'molecule_type_dist_mean'] - df['dist']
df[f'molecule_type_dist_mean_div'] = df[f'molecule_type_dist_mean'] / df['dist']
df[f'molecule_type_dist_max'] = df.groupby(['molecule_name', 'type'])['dist'].transform('max')
df[f'molecule_type_dist_min'] = df.groupby(['molecule_name', 'type'])['dist'].transform('min')
df[f'molecule_type_dist_std'] = df.groupby(['molecule_name', 'type'])['dist'].transform('std')
df[f'molecule_type_dist_std_diff'] = df[f'molecule_type_dist_std'] - df['dist']
return df
train = create_features(train)
test = create_features(test)
# features so far
print(f'n_features: {len(train.columns)}')
print('-------------')
train.columns
###Output
n_features: 72
-------------
###Markdown
Preparation data for model
###Code
#
good_columns = [
'molecule_atom_index_0_dist_min',
'molecule_atom_index_0_dist_max',
'molecule_atom_index_1_dist_min',
'molecule_atom_index_0_dist_mean',
'molecule_atom_index_0_dist_std',
'dist',
'molecule_atom_index_1_dist_std',
'molecule_atom_index_1_dist_max',
'molecule_atom_index_1_dist_mean',
'molecule_atom_index_0_dist_max_diff',
'molecule_atom_index_0_dist_max_div',
'molecule_atom_index_0_dist_std_diff',
'molecule_atom_index_0_dist_std_div',
'atom_0_couples_count',
'molecule_atom_index_0_dist_min_div',
'molecule_atom_index_1_dist_std_diff',
'molecule_atom_index_0_dist_mean_div',
'atom_1_couples_count',
'molecule_atom_index_0_dist_mean_diff',
'molecule_couples',
'atom_index_1',
'molecule_dist_mean',
'molecule_atom_index_1_dist_max_diff',
'molecule_atom_index_0_y_1_std',
'molecule_atom_index_1_dist_mean_diff',
'molecule_atom_index_1_dist_std_div',
'molecule_atom_index_1_dist_mean_div',
'molecule_atom_index_1_dist_min_diff',
'molecule_atom_index_1_dist_min_div',
'molecule_atom_index_1_dist_max_div',
'molecule_atom_index_0_z_1_std',
'y_0',
'molecule_type_dist_std_diff',
'molecule_atom_1_dist_min_diff',
'molecule_atom_index_0_x_1_std',
'molecule_dist_min',
'molecule_atom_index_0_dist_min_diff',
'molecule_atom_index_0_y_1_mean_diff',
'molecule_type_dist_min',
'molecule_atom_1_dist_min_div',
'atom_index_0',
'molecule_dist_max',
'molecule_atom_1_dist_std_diff',
'molecule_type_dist_max',
'molecule_atom_index_0_y_1_max_diff',
'molecule_type_0_dist_std_diff',
'molecule_type_dist_mean_diff',
'molecule_atom_1_dist_mean',
'molecule_atom_index_0_y_1_mean_div',
'molecule_type_dist_mean_div',
'type']
print('using n_features:', len(good_columns))
for f in ['atom_1', 'type_0', 'type']:
if f in good_columns:
lbl = LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
X = train[good_columns].copy()
y = train['scalar_coupling_constant']
X_test = test[good_columns].copy()
del train, test
gc.collect()
###Output
_____no_output_____
###Markdown
Training model on selected features
###Code
# Configuration
TARGET = 'scalar_coupling_constant'
# CAT_FEATS = ['atom_0','atom_1']
N_ESTIMATORS = 10000
VERBOSE = 1000
EARLY_STOPPING_ROUNDS = 200
RANDOM_STATE = 529
%%time
lgb_params = {'num_leaves': 128,
'min_child_samples': 79,
'objective': 'regression',
'max_depth': 9,
'learning_rate': 0.2,
"boosting_type": "gbdt",
"subsample_freq": 1,
"subsample": 0.9,
"bagging_seed": 11,
"metric": 'mae',
"verbosity": -1,
'reg_alpha': 0.1,
'reg_lambda': 0.3,
'colsample_bytree': 1.0
}
n_fold = 3
folds = KFold(n_splits=n_fold, shuffle=True, random_state=RANDOM_STATE)
# Setup arrays for storing results
prediction = np.zeros(len(X_test))
scores = []
feature_importance = pd.DataFrame()
# Train the model
for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X)):
X_train, X_valid = X.iloc[train_idx], X.iloc[valid_idx]
y_train, y_valid = y.iloc[train_idx], y.iloc[valid_idx]
model = lgb.LGBMRegressor(**lgb_params, n_estimators = N_ESTIMATORS, n_jobs = n_cpu)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
eval_metric='mae',
verbose=VERBOSE,
early_stopping_rounds=EARLY_STOPPING_ROUNDS)
y_pred_valid = model.predict(X_valid)
y_pred = model.predict(X_test, num_iteration=model.best_iteration_)
# feature importance
fold_importance = pd.DataFrame()
fold_importance["feature"] = good_columns
fold_importance["importance"] = model.feature_importances_
fold_importance["fold"] = fold_n + 1
feature_importance = pd.concat([feature_importance, fold_importance], axis=0)
    prediction += y_pred
    scores.append(mean_absolute_error(y_valid, y_pred_valid))
    print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))

# average the accumulated test-set predictions over the folds (done once, after the loop)
prediction /= folds.n_splits
###Output
Training until validation scores don't improve for 200 rounds.
[1000] training's l1: 0.9727 valid_1's l1: 1.06124
[2000] training's l1: 0.831151 valid_1's l1: 0.978054
[3000] training's l1: 0.741566 valid_1's l1: 0.934866
[4000] training's l1: 0.674261 valid_1's l1: 0.906888
[5000] training's l1: 0.620014 valid_1's l1: 0.886955
[6000] training's l1: 0.574744 valid_1's l1: 0.871789
[7000] training's l1: 0.536093 valid_1's l1: 0.860693
[8000] training's l1: 0.502257 valid_1's l1: 0.851517
[9000] training's l1: 0.471715 valid_1's l1: 0.843602
[10000] training's l1: 0.444333 valid_1's l1: 0.837239
Did not meet early stopping. Best iteration is:
[10000] training's l1: 0.444333 valid_1's l1: 0.837239
CV mean score: 0.8372, std: 0.0000.
Training until validation scores don't improve for 200 rounds.
[1000] training's l1: 0.972187 valid_1's l1: 1.06192
[2000] training's l1: 0.828789 valid_1's l1: 0.975649
[3000] training's l1: 0.739158 valid_1's l1: 0.932504
[4000] training's l1: 0.672529 valid_1's l1: 0.904399
[5000] training's l1: 0.618814 valid_1's l1: 0.884633
[6000] training's l1: 0.57396 valid_1's l1: 0.869989
[7000] training's l1: 0.535202 valid_1's l1: 0.858388
[8000] training's l1: 0.501278 valid_1's l1: 0.849077
[9000] training's l1: 0.470976 valid_1's l1: 0.841485
[10000] training's l1: 0.443463 valid_1's l1: 0.834973
Did not meet early stopping. Best iteration is:
[10000] training's l1: 0.443463 valid_1's l1: 0.834973
CV mean score: 0.8365, std: 0.0011.
Training until validation scores don't improve for 200 rounds.
[1000] training's l1: 0.970222 valid_1's l1: 1.06427
[2000] training's l1: 0.828162 valid_1's l1: 0.97955
[3000] training's l1: 0.739263 valid_1's l1: 0.93657
[4000] training's l1: 0.672195 valid_1's l1: 0.908254
[5000] training's l1: 0.618816 valid_1's l1: 0.888415
[6000] training's l1: 0.573834 valid_1's l1: 0.87348
[7000] training's l1: 0.535051 valid_1's l1: 0.861849
[8000] training's l1: 0.501033 valid_1's l1: 0.852749
[9000] training's l1: 0.470553 valid_1's l1: 0.844673
[10000] training's l1: 0.443218 valid_1's l1: 0.838098
Did not meet early stopping. Best iteration is:
[10000] training's l1: 0.443218 valid_1's l1: 0.838098
CV mean score: 0.8365, std: 0.0013.
CPU times: user 4d 6h 30min 50s, sys: 50min 13s, total: 4d 7h 21min 3s
Wall time: 1h 41min 42s
###Markdown
Save Result
###Code
path_submittion = './output/' + 'nb14_submission_lgb_{}.csv'.format(np.mean(scores))
print(f'save pash: {path_submittion}')
submittion = pd.read_csv('./input/champs-scalar-coupling/sample_submission.csv')
submittion['scalar_coupling_constant'] = prediction
submittion.to_csv(path_submittion, index=False)
X.iloc[0:1000, :].to_csv('./dataframe/nb14_X_sample.csv', index=None)
###Output
_____no_output_____
###Markdown
Feature importance
###Code
feature_importance["importance"] /= folds.n_splits
cols = feature_importance[["feature", "importance"]].groupby("feature").mean().sort_values(
by="importance", ascending=False)[:50].index
best_features = feature_importance.loc[feature_importance.feature.isin(cols)]
plt.figure(figsize=(15, 20));
ax = sns.barplot(x="importance",
y="feature",
hue='fold',
data=best_features.sort_values(by="importance", ascending=False));
plt.title('LGB Features (avg over folds)');
# top 20 features
featers_fold1 = best_features[best_features['fold']==1]
featers_fold1['feature'][:20].values
# top 30 features
featers_fold1 = best_features[best_features['fold']==1]
featers_fold1['feature'][:30].values
###Output
_____no_output_____ |
00_SuperQuickStart.ipynb | ###Markdown
Welcome to Python and CoLabs
*David Noone ([email protected])*
This notebook is intended to be a super quick starter to help get going with the more complete (and useful) examples.
Files are available via github, and can be run on colab hosted by google.
Python is most useful with various libraries. For spatial data "numpy" is essential. It is almost always imported at the beginning. Similarly, we're inevitably going to want to make some plots: let's use matplotlib.
###Code
import math
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Push the "play" button above to make that cell run.
Now let's assign some data to a new array and make the simplest plot one can imagine.
Make an "x" axis between 0 and 2 pi.
print values to the screen, so we can check we did it right!
###Code
npts = 20
xvals = np.linspace(0,2*math.pi,npts)
print(xvals)
###Output
_____no_output_____
###Markdown
Let's evaluate a function: y = A sin(n x)
###Code
amplitude = 1
wavenumber = 2
yvals = amplitude*np.sin(wavenumber*xvals)
###Output
_____no_output_____
###Markdown
And now, we're all set t make a graph
###Code
p = plt.plot(xvals, yvals)
###Output
_____no_output_____
###Markdown
At this stage, we have a pretty good starting place. It does not have labels!
Try the following:
* Add an x axis label
* Add an y axis label
* Give it a title
###Code
# Add your code for x label, y label and a title.
#
###Output
_____no_output_____
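###Markdown
One possible way to fill in the cell above (a sketch only; the label text is up to you):
###Code
plt.plot(xvals, yvals)
plt.xlabel("x (radians)")
plt.ylabel("y = A sin(n x)")
plt.title("A simple sine wave")
###Output
_____no_output_____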
###Markdown
And finally let's manipulate the figure.
Try:
* Making it smoother by changing the number of points
* Add a second line with different amplitude
* Add a third line with a different wave number
* Add a legend to your plot.
###Code
# Add your code to change "smoothness", additional curves, and a legend.
#
###Output
_____no_output_____
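###Markdown
A possible sketch for the exercise above (the amplitudes and wave numbers are chosen only as an example):
###Code
xfine = np.linspace(0, 2*math.pi, 200)            # more points -> smoother curves
plt.plot(xfine, 1.0*np.sin(2*xfine), label="A=1, n=2")
plt.plot(xfine, 0.5*np.sin(2*xfine), label="A=0.5, n=2")
plt.plot(xfine, 1.0*np.sin(3*xfine), label="A=1, n=3")
plt.xlabel("x (radians)")
plt.ylabel("y")
plt.legend()
###Output
_____no_output_____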
###Markdown
Finally, this plot is not bad, but could be fancier. Explore other options from matplotlib to change the colors, linestyle etc.
The [pyplot manual](https://matplotlib.org/stable/tutorials/introductory/pyplot.html) is here, and is a handy resource.
###Code
# Add your aesthetically fancy code here
#
###Output
_____no_output_____ |
datasets/eval2try.ipynb | ###Markdown
train and test
###Code
sentences = pd.read_table('train.txt', header=None)
train = sentences[0:16000]
test = sentences[16000:]
train[1].to_csv('s1.train', sep='\n', index=False)
train[2].to_csv('s2.train', sep='\n', index=False)
train[3].to_csv('label.train', sep='\n', index=False)
test[1].to_csv('s1.dev', sep='\n', index=False)
test[2].to_csv('s2.dev', sep='\n', index=False)
test[3].to_csv('label.dev', sep='\n', index=False)
###Output
C:\Users\ASUS\Anaconda3\lib\site-packages\ipykernel_launcher.py:1: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.
"""Entry point for launching an IPython kernel.
C:\Users\ASUS\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.
This is separate from the ipykernel package so we can avoid doing imports until
C:\Users\ASUS\Anaconda3\lib\site-packages\ipykernel_launcher.py:5: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.
"""
|
03_l2data.ipynb | ###Markdown
L2 Data Interface > Helpers to retrieve and process Diviner PDS L2 data.
###Code
# export
import warnings
from pathlib import Path
import numpy as np
import pvl
from yarl import URL
from planetarypy import geotools as gt
from planetarypy.utils import url_retrieve
DIVINER_URL = URL(
"https://pds-geosciences.wustl.edu/lro/lro-l-dlre-4-rdr-v1/lrodlr_1001/data"
)
root = Path("/luna4/maye/l2_data")
# export
class L2DataManager:
"Small helper to get lists of locally stored data."
@property
def labels(self):
return sorted(list(root.glob("dgdr_*.lbl")))
@property
def images(self):
return sorted(list(root.glob("dgdr_*.tif")))
def get_l2_image_paths():
return L2DataManager().images
get_l2_image_paths()
fname = get_l2_image_paths()[0]
fname
# export
class L2Data:
GDR_L2_URL = DIVINER_URL / "gdr_l2"
def __init__(
self,
cycle=None,
datatype="ltim",
map_res=1, # pix per degrees
projection="cylindrical",
format="jp2",
year=None,
):
self.cycle = cycle
self.datatype = datatype
self.map_res = map_res
self.projection = projection
self.format = format
self.year = year
if cycle is None and year is None:
warnings.warn("Set `year` for getting a correct folder URL.")
@property
def cycle(self):
return self._cycle
@cycle.setter
def cycle(self, value):
self._cycle = value
@property
def year(self):
if self.cycle is not None:
return str(self.cycle)[:4]
else:
return self._year
@year.setter
def year(self, value):
self._year = str(value)
@property
def map_res(self):
return self._map_res
@map_res.setter
def map_res(self, value):
self._map_res = str(value).zfill(3)
@property
def datatype(self):
return self._datatype
@datatype.setter
def datatype(self, value):
"""Set datatype string.
This is for a Diviner GDR L3 datatype.
Parameters
----------
value : {"jd", "ltim", "tb3", "tbol"}
RA = Rock abundance
RMS = RMS error for RA
ST = Regolith temperature
TBOL = Average Bolometric Temperature
"""
allowed = "jd ltim".split()
allowed += [f"tb{i}" for i in range(3, 10)]
allowed += [f"vb{i}" for i in range(1, 3)]
if not value.lower() in allowed:
raise ValueError(f"Only {allowed} allowed.")
else:
self._datatype = value.lower()
@property
def second_token(self):
# TODO: deal with "err, cnt"
return "avg"
@property
def fname(self):
"Construct L3 GDR data filename."
res = 128
if self.datatype == "tbol":
res = self.map_res
return f"dgdr_{self.datatype}_{self.second_token}_cyl_{self.cycle}n_{res}_{self.format}.{self.format}"
@property
def label(self):
return str(Path(self.fname).with_suffix(".lbl"))
@property
def folder_url(self):
return self.GDR_L2_URL / self.year / self.projection / self.format
@property
def data_url(self):
return self.folder_url / self.fname
@property
def label_url(self):
return self.folder_url / self.label
def download_label(self, subfolder=""):
if not subfolder:
p = root
else:
p = Path(subfolder)
p.mkdir(exist_ok=True)
url_retrieve(self.label_url, p / self.label)
def download_data(self, subfolder="", overwrite=False):
if not subfolder:
p = root
else:
p = Path(subfolder)
p.mkdir(exist_ok=True)
savepath = p / self.fname
if savepath.exists() and not overwrite:
print("File exists, use `overwrite=True` to force download.")
return
else:
url_retrieve(self.data_url, p / self.fname)
l2 = L2Data(fname)
# export
class LocalTime:
root = Path("/luna4/maye/l2_data")
name = "lt"
@classmethod
def from_fpath(cls, fpath):
cycle = int(fpath.name.split("_")[4][:-1])
return cls(cycle)
def __init__(self, cycle):
self.cycle = cycle
self.l2data = L2Data(cycle=cycle)
self.img = gt.ImgData(str(self.fname))
def read_window(self, ul_lon=0, ul_lat=1, width_degrees=1):
ul = gt.Point.copy_geodata(self.img.center, lon=ul_lon, lat=ul_lat)
ul.lonlat_to_pixel()
lr = gt.Point.copy_geodata(
ul, lon=ul_lon + width_degrees, lat=ul_lat - width_degrees
)
lr.lonlat_to_pixel()
win = gt.Window(ulPoint=ul, lrPoint=lr)
self.img.read_window(win)
@property
def label(self):
return pvl.load(root / self.l2data.label)
@property
def fname(self):
return root / self.l2data.fname
@property
def SCALING_FACTOR(self):
return self.label["UNCOMPRESSED_FILE"]["IMAGE"]["SCALING_FACTOR"]
@property
def OFFSET(self):
return self.label["UNCOMPRESSED_FILE"]["IMAGE"]["OFFSET"]
@property
def NODATA(self):
return self.label["UNCOMPRESSED_FILE"]["IMAGE"]["MISSING_CONSTANT"]
@property
def data(self):
data = self.img.data.astype("float")
data[data == self.NODATA] = np.nan
return data
@property
def scaled_data(self):
return self.data * self.SCALING_FACTOR + self.OFFSET
def get_pixel(self, xoff, yoff):
value = np.squeeze(self.img.ds.ReadAsArray(xoff, yoff, 1, 1))
if value == self.NODATA:
return np.nan
else:
return value * self.SCALING_FACTOR + self.OFFSET
def plot_window(self):
plt.figure()
plt.imshow(self.scaled_data, cmap="plasma")
plt.colorbar()
@property
def window_mean(self):
return np.nanmean(self.scaled_data)
@property
def window_std(self):
return np.nanstd(self.scaled_data)
@property
def n_valid(self):
return np.count_nonzero(~np.isnan(self.data))
###Output
_____no_output_____ |
.ipynb_checkpoints/data_analysis_overall-checkpoint.ipynb | ###Markdown
Data Analysis
###Code
import pandas as pd
crime = pd.read_csv('data/crimeandweather.csv')
crime['OCCURRED_ON_DATE'] = pd.to_datetime(crime['OCCURRED_ON_DATE'])
crime['DATE'] = pd.to_datetime(crime['DATE'])
crime['Lat'] = pd.to_numeric(crime['Lat'])
crime['Long'] = pd.to_numeric(crime['Long'])
print("strat date:", crime['OCCURRED_ON_DATE'].min())
print("end date:", crime['OCCURRED_ON_DATE'].max())
crime.head()
crimehour = pd.DataFrame()
crimehour['HOUR'] = crime.apply(lambda x : x['TIME'][0:2], axis = 1)
crimehour.head()
crimehourcount = pd.DataFrame()
crimehourcount['COUNT'] = crimehour['HOUR'].value_counts(sort=False)
crimehourcount = crimehourcount.sort_index()
crimehourcount.head()
import matplotlib.pyplot as plt
%matplotlib inline
# avg
avg = crime['OCCURRED_ON_DATE'].count()/24
plt.figure(figsize=(20,10))
plt.axhline(y=avg, color='red')
plt.bar(crimehourcount.index.tolist(), crimehourcount['COUNT'], 0.5)
plt.show
crimedaycount = pd.DataFrame()
daylist = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
crimedaycount['COUNT'] = crime['DAY_OF_WEEK'].value_counts(sort=False)
crimedaycount['DAY'] = crimedaycount.apply(lambda x : daylist.index(x.name), axis = 1)
crimedaycount = crimedaycount.sort_values(['DAY'])
crimedaycount.head()
# avg
avg = crime['OCCURRED_ON_DATE'].count()/7
plt.figure(figsize=(20,10))
plt.axhline(y=avg, color='red')
plt.bar(crimedaycount.index.tolist(), crimedaycount['COUNT'], 0.5)
plt.show
crimedistrictcount = pd.DataFrame()
crimedistrictcount['DISTRICT'] = crime['DISTRICT'].value_counts(sort=False)
crimedistrictcount = crimedistrictcount.sort_values(['DISTRICT'], ascending=False)
crimedistrictcount.head()
# avg
avg = crime['DISTRICT'].count()/crimedistrictcount['DISTRICT'].count()
plt.figure(figsize=(20,10))
plt.axhline(y=avg, color='red')
plt.bar(crimedistrictcount.index.tolist(), crimedistrictcount['DISTRICT'], 0.5)
plt.show
###Output
_____no_output_____
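###Markdown
Before moving on to temperature, a quick aside: the hourly counts computed earlier with string slicing on `TIME` can also be obtained directly from the already-parsed datetime column; a compact sketch:
###Code
# hourly counts straight from the datetime column
crimehourcount_alt = crime['OCCURRED_ON_DATE'].dt.hour.value_counts().sort_index()
crimehourcount_alt.head()
###Output
_____no_output_____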
###Markdown
Turning to temperature
###Code
crimetempcount = pd.DataFrame()
crimetempcount['TAVG'] = crime['TAVG'].value_counts(sort=False)
crimetempcount = crimetempcount.sort_index()
crimetempcount.head()
plt.figure(figsize=(20,10))
plt.bar(crimetempcount.index.tolist(), crimetempcount['TAVG'], 0.5)
plt.show
plt.figure(figsize=(20,10))
plt.scatter(crime.groupby(['DISTRICT']).mean()['TAVG'].index, crime.groupby(['DISTRICT']).mean()['TAVG'].values, linewidths =10)
plt.show
crime['DATE'].value_counts(sort=False).index
crimedatecount = pd.DataFrame()
crimedatecount['DATE'] = crime['DATE'].value_counts(sort=False).index
crimedatecount['COUNT'] = crime['DATE'].value_counts(sort=False).values
crimedatecount = crimedatecount.sort_values(['DATE'])
crimedatecount.head()
plt.figure(figsize=(20,10))
plt.plot(crimedatecount['DATE'], crimedatecount['COUNT'])
crime[['DATE','TAVG']].head()
# merge on DATE; drop duplicate DATE rows first so each date keeps a single daily TAVG value
crimedatecountontem = pd.merge(crimedatecount, crime[['DATE','TAVG']].drop_duplicates('DATE'), how='left', on='DATE')
# crimedatecountontem = crimedatecountontem.set_index('DATE')
crimedatecountontem.head()
plt.figure(figsize=(20,10))
plt.plot(crimedatecountontem.index,crimedatecountontem['COUNT'], label="amount of crime in days")
plt.plot(crimedatecountontem.index,crimedatecountontem['TAVG'], label="temperature")
plt.legend()
crimetempma = crimedatecountontem.rolling(window=20).mean()
crimetempma = crimetempma.dropna()
crimetempma.head()
plt.figure(figsize=(20,10))
plt.plot(crimetempma.index,crimetempma['COUNT'], label="amount of crime in days")
plt.plot(crimetempma.index,crimetempma['TAVG'], label="temperature")
plt.legend()
###Output
_____no_output_____ |
Lez11/Ripasso.ipynb | ###Markdown
Boolean Functions
###Code
help('stringa'.isalpha)
'stri2222nga'.isalpha()
'10'.isdecimal()
'10'.isdecimal() and 'stringa'.isalpha() and len('snixuniecerubceiurce')>10
# comparison operators.
34>3
2<3
3>=2
3!=2 # 3 is not equal to 2
(3<2) or (3>2)
###Output
_____no_output_____
###Markdown
If-else
###Code
n=8
if(n>3):
    print('{} is greater than 3'.format(n))
elif(0<=n<=3):
    print('{} is between 0 and 3 (inclusive) '.format(n))
else:
    print('{} is negative'.format(n))
###Output
_____no_output_____
###Markdown
While
###Code
import random
x=0
while x !=2: # loop condition (to enter the loop, x must not already be 2 at the start!!)
    x=random.randint(1,10) # statement that updates the while condition
    print(x,end="")
###Output
_____no_output_____
###Markdown
The *in* operator
###Code
s = '+748'
print('4' in s) #returns True if 4 is in s. THIS HOLDS FOR ALL SEQUENTIAL STRUCTURES such as lists, tuples... but also for
#sets etc...
print(3 in ['2',3,789])
###Output
_____no_output_____
###Markdown
For Loops The *in* operator is used to build for loops: for i in sequence of elements: statements to execute
###Code
for i in 'disegno' :
print(i)
for i in range(4): # the range(n) function is used to iterate from 0 up to n-1
print(i)
for i in range(2,4): #you can also specify which value to start from
print(i)
#classic example.... print all the elements of a sequential structure (a string in this case!)
s ='ciaoo'
for i in range(len(s)) :
print(s[i])
###Output
_____no_output_____
###Markdown
Recursive Functions Functions can call themselves, recursively. The structure is the following: def funzione(n): if exit condition (e.g. n==0): do something else: funzione(n-1)
###Code
def contoallarovesciaRic(n):
if n <= 0:
        print('Go!') # THE EXIT CONDITION IS ESSENTIAL TO AVOID GETTING STUCK IN AN INFINITE RECURSION!
else:
print(n)
contoallarovesciaRic(n-1)
contoallarovesciaRic(10)
###Output
_____no_output_____
###Markdown
Lists An ordered sequence of heterogeneous and **mutable** elements!!! It can also hold elements of different types.
###Code
lista = ["ciao", 2.0, 5, [10, 20]]
#you can build a list starting from a range.
list(range(2,10,2))
#you can build a list starting from a string.
s1 = 'ciao come stai?'
print(list(s1))
#you can build a list starting from a string by applying the split function.
s2 = s1.split(' ') # equivalently s1.split(" ")
print(s2)
# Accessing an element: **x[ index ]**
# <-- Python and many other languages start counting from zero.
x = [23,3,2,65, 6,7,8,9,10]
y = x[1:6:2] #from the element at index 1 up to the element at index 6 (excluded), with step 2 .
z = x[::2] #from the beginning to the end, with step 2.
x, y, z
x = [23,3,2,65, 6,7,8,9,10]
print([1,2]+[4,5]) # concatenation
print([1,2]*3) # repeated concatenation
x[1:3]=['ciccia','casa','pluto'] #replace the elements from index 1 to index 3 (excluded)
print(x)
x[1:3]=[] #remove the elements from index 1 to index 3 (excluded)
print(x)
del x[0:2] #delete the first two elements
print(x)
###Output
_____no_output_____
###Markdown
**ord**Signature: ord(c, /)Docstring: Return the Unicode code point for a one-character string.Type: builtin_function_or_method **len**Signature: len(obj, /)Docstring: Return the number of items in a container.Type: builtin_function_or_method **in**Used to check whether an element is in a list and to loop over the list with a for
###Code
#classic for loop
for i in range(0,5,1):
print('x[{}]={}'.format(i,x[i]), end=' ')
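# A minimal sketch of the built-ins described above (ord, len and the in operator):
print(ord('a'))        # 97 -> Unicode code point of a one-character string
print(len('stringa'))  # 7  -> number of items in a container
print('a' in 'casa')   # True -> membership test works on any sequence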
#assignment with lists
list1 = ['carlo','magno']
print('before: list1='+str(list1))
list2=list1 # list2 and list1 point to the same object in memory.
list2[0]='alessandro' #every change I make on list2 is also made on list1!!!!
print('after: list1='+str(list1))
#when you make a copy
list3 = ['11',7,23]
list4 = list3.copy() # or list4=list3[:]
# list4 points to a different object than list3
print('do list3 and list4 point to the same object? '+str( id(list4)==id(list3)))
# however, inside them there are variables list3[0],list3[1],list3[2], list4[0],list4[1],list4[2]
#that point to the very same objects...
for i in range(len(list3)):
    print('do list3[{}] and list4[{}] point to the same object? {}'.format(i,i,id(list3[i])==id(list4[i])))
# you must be careful if some of these objects are mutable objects
# watch out for nested lists
# to copy nested lists you can use deepcopy
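# A minimal sketch of deepcopy on a nested list (copy is in the standard library):
import copy
nested = [[1, 2], [3, 4]]
deep = copy.deepcopy(nested)     # the inner lists are copied too
deep[0][0] = 99
print(nested[0][0], deep[0][0])  # 1 99 -> the original is untouched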
# List methods modify the original object in place; string methods, by contrast, never modify the string itself !!!!
list3 = ['11',7,23]
print(list3)
print(list3.append(1000000)) ### appends an element to list3 but returns no value (None)
print(list3)
list3.pop(0) ### removes the element at index zero from list3
print(list3)
list3.extend([6666,9999])
print(list3)
list3.sort()
print(list3)
print(list3*2) # this created a new list
###Output
_____no_output_____
###Markdown
Tuples Lists of values separated by commas!!! They are immutable!!!
###Code
tupla1 = (2,)
tupla2 = 2,
print(type(tupla1), type(tupla2))
(a,b,c) = (1,2,3)
print('a =',a, '; b =',b,'; c =',c, )
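# Tuples are immutable: trying to assign to an element raises a TypeError.
try:
    tupla1[0] = 5
except TypeError as e:
    print('TypeError:', e)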
###Output
_____no_output_____
###Markdown
Dictionaries dictionary = {key: value}; the key can be any **immutable** object, such as tuples, strings, integers, etc.!!!! It is not a positionally indexed structure (note: since Python 3.7, however, dictionaries do preserve insertion order).
###Code
diz1 = dict()
diz2 = {}
print(type(diz1),type(diz2))
Voti={'Antonio':27, 'Erica':30, 'Luca':20, 'Caterina':18 }
Voti['Antonio'] #read a value
Voti['Ludovica'] = 30 #add an element
Voti['Antonio']=Voti['Antonio']-1 #change a value
del Voti['Luca'] #delete an element
Voti
print(len(Voti))
print(list(Voti.keys())) #list of the keys
print(list(Voti.values())) #list of the values
print(list(Voti.items())) #list of the key-value pairs
print('Ludovica' in Voti.keys() )
#since Python 3.7 iterating over a dict follows insertion order (there is still no positional indexing)
for i in Voti:
print(i)
###Output
_____no_output_____
###Markdown
Sets An unordered collection of **unique** and **immutable** objects!!!
###Code
a={'a','a','a','c','b','s'} # duplicates are discarded; equivalently a=set(['a','c','b','s'])
b = set('abracadabra')
print(type(a),type(b))
a, b
a.add('pippo')
print(a)
a.remove('pippo')
print(a)
print ('a - b ', a - b) # difference
print ('a | b ', a | b) # union, logical or.
print ('a & b ', a & b) #intersection, logical and.
print('a^b ' , a^b) #symmetric difference
print('a' in a)
###Output
_____no_output_____
###Markdown
List comprehensions
###Code
pari1 = [2*n for n in range(5) ]
print(pari1)
pari2 = [n for n in range(10) if n%2==0]
print(pari2)
dadi = [(x,y) for x in range(1,7) for y in range(1,7)]
print(dadi)
#define the list [[1,2,3,..,n],[2*1,2*2,...,2*n],...,[n*1,n*2,...,n*n]]
def matrice(n):
return [[colonna*riga for colonna in range(1,n+1)] for riga in range(1,n+1)]
print(matrice(5))
###Output
[0, 2, 4, 6, 8]
[0, 2, 4, 6, 8]
[(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (4, 1), (4, 2), (4, 3), (4, 4), (4, 5), (4, 6), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (6, 1), (6, 2), (6, 3), (6, 4), (6, 5), (6, 6)]
[[1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], [4, 8, 12, 16, 20], [5, 10, 15, 20, 25]]
###Markdown
Reading Files open(file, mode=**'r'**, buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
###Code
file = open('files/provaTesto.txt','r') #open for reading
for line in file:
print(line.strip())
file.close() #after working with a file you must close it
# to avoid having to close the file every time you can use with:
with open('files/provaTesto.txt') as file:
    s=file.read() # string
print(type(s), s)
with open('files/provaTesto.txt') as file:
    s=file.readlines() # list
print(type(s), s)
###Output
_____no_output_____
###Markdown
Writing Files open(file, mode=**'w'**, buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
###Code
with open("files/example.txt", "w") as file: #file è una variabile che si riferisce al file testuale (examples.txt) che ho creato
file.write("ciao\ncome va?\n") # nella cartella files in modalità di lettura ("w")
with open('files/example.txt') as file:
s=file.read()
print(s)
###Output
_____no_output_____
###Markdown
Appending Text to a File open(file, mode=**'a'**, buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
###Code
with open("files/example.txt", "a")as file:
file.write("ciao\ncome va?\n")
with open('files/example.txt') as file:
s=file.read()
print(s)
###Output
_____no_output_____
###Markdown
lambda Expression
###Code
#anonymous functions
f = lambda x : x*2 ## f(x)=x*2
g = lambda x : x+1 ## g(x)=x+1
somma = lambda x,y: x+y ## somma(x,y) = x+y
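# Calling the anonymous functions defined above:
print(f(3))         # 6
print(g(3))         # 4
print(somma(2, 5))  # 7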
###Output
_____no_output_____
###Markdown
map, filter and reduce
###Code
#map(f, [1,2,3,4]) applies f to every element of the list and generates a new list [f(1),f(2),f(3),f(4)]
list(map(lambda x: x+1,[1,2,3,4]))
#filter(f,list) keeps the elements that satisfy the predicate f.
list(filter(lambda x: x>2,[1,2,3,4]))
#reduce lives in a specific package.
from functools import reduce # with this line we are saying: from the functools package, take the reduce function
product = reduce((lambda x, y: x * y), [1, 2, 3, 4])
print(product)
###Output
_____no_output_____ |
Amazon Augmented AI (A2I) and Amazon Translate.ipynb | ###Markdown
Amazon Augmented AI (Amazon A2I) integration with Amazon Translate [Example] IntroductionAmazon Translate is constantly learning and evolving to provide the “perfect” output. In domain sensitive applications such as legal, medical, construction, engineering, etc., customers can always improve the translation quality by using custom terminology (https://aws.amazon.com/blogs/machine-learning/introducing-amazon-translate-custom-terminology/). This is a great approach for most of the cases but there are some outliers which might require light post-editing by human teams. The post-editing process helps businesses to understand the needs of their customers better by capturing the nuances of local language that can be lost in translation.For such businesses and organizations who want to augment the output of Amazon Translate (and other Amazon AI services), Amazon Augmented AI (https://aws.amazon.com/augmented-ai/) (A2I) provides a managed approach to build human driven post-editing workflows. Amazon A2I brings human review to all developers, removing the undifferentiated heavy lifting associated with building human review systems or managing large numbers of human reviewers.In this tutorial, we will show how you can use **Amazon Augmented AI (A2I) and Amazon Translate to create a human review workflow which allows your private workforce to effectively review, correct and tag the documents translated by Amazon Translate, at scale**.To incorporate A2I in your Amazon Translate workflows, you will need the following resources:1. An **S3 Bucket** to store the files that you need to translate and process the output generated from the Human Review Workflow after the Human Loop has completed. 2. A **Worker Team** to review and improve the translations done using Amazon Translate. To learn more about Private Worker Teams, see https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-private.html3. A **Worker Task Template** to create a worker UI. The worker UI displays your input data, such as documents or images, and instructions to workers. It also provides interactive tools that the worker uses to complete your tasks. For more information, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-instructions-overview.html4. A **Human Review Workflow**, also referred to as a flow definition. You use the flow definition to configure your human workforce and provide information about how to accomplish the human review task. You can create a flow definition in the Amazon Augmented AI console or with Amazon A2I APIs. To learn more about both of these options, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html When using a custom task type, as this tutorial will show, you start a human loop using the Amazon Augmented AI Runtime API. When you call `start_human_loop()` in your custom application, a task is sent to human reviewers. Prerequisite Setup
###Code
# First, let's get the latest installations of our dependencies
!pip install --upgrade pip
!pip install boto3 --upgrade
!pip install -U botocore
###Output
_____no_output_____
###Markdown
Environment SetupWe need to set up the following data:* `REGION` - Region to call A2I.* `BUCKET_NAME` - An S3 bucket accessible by the given role * Used to store the input files and output results * Must be within the same region A2I is called from* `WORKTEAM_ARN` - To create your **Private Workteam**, visit the instructions here: https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-private.html After you have created your workteam, replace `<YOUR-WORKTEAM-ARN>` below* `ROLE` - The IAM role used as part of StartHumanLoop. By default, this notebook will use the execution role. You can learn more about IAM Policies here https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
###Code
REGION = '<REGION-ID>'
BUCKET_NAME = '<BUCKET-NAME>'
WORKTEAM_ARN= "<YOUR-WORKTEAM-ARN>"
###Output
_____no_output_____
###Markdown
Role and PermissionsThe AWS IAM Role used to execute the notebook needs to have the following policies attached:* SagemakerFullAccess* TranslateFullAccess
###Code
from sagemaker import get_execution_role
import sagemaker
# Setting Role to the default SageMaker Execution Role
ROLE = get_execution_role()
display(ROLE)
###Output
_____no_output_____
###Markdown
Setup Bucket and Paths
###Code
import os
import boto3
import botocore
sess = sagemaker.Session()
###Output
_____no_output_____
###Markdown
Client Setup Let's set up the clients for Amazon S3, Amazon SageMaker A2I Runtime, and Amazon Translate.
###Code
import boto3
import io
import json
import uuid
import botocore
import time
# Amazon SageMaker client
sagemaker = boto3.client('sagemaker', REGION)
# Amazon Translate client
translate = boto3.client('translate', REGION)
# S3 client
s3 = boto3.client('s3', REGION)
# A2I Runtime client
a2i_runtime_client = boto3.client('sagemaker-a2i-runtime', REGION)
###Output
_____no_output_____
###Markdown
Set up a pretty printer for the AWS SDK responses
###Code
import pprint
# Pretty print setup
pp = pprint.PrettyPrinter(indent=2)
# Function to pretty-print AWS SDK responses
def print_response(response):
if 'ResponseMetadata' in response:
del response['ResponseMetadata']
pp.pprint(response)
###Output
_____no_output_____
###Markdown
Sample DataLet's create some sample text that we would test our translation with and store it in S3.
###Code
translation_text = """
Just then another visitor entered the drawing room: Prince Andrew Bolkónski, the little princess’ husband. He was a very handsome young man, of medium height, with firm, clearcut features. Everything about him, from his weary, bored expression to his quiet, measured step, offered a most striking contrast to his quiet, little wife. It was evident that he not only knew everyone in the drawing room, but had found them to be so tiresome that it wearied him to look at or listen to them. And among all these faces that he found so tedious, none seemed to bore him so much as that of his pretty wife. He turned away from her with a grimace that distorted his handsome face, kissed Anna Pávlovna’s hand, and screwing up his eyes scanned the whole company.
"""
key = "input/test.txt"
s3.put_object(Bucket=BUCKET_NAME, Key=key, Body=translation_text)
###Output
_____no_output_____
###Markdown
Create Control Plane Resources Create a Worker Task TemplateCreate a human task UI resource, giving a UI template in liquid html. This template will be rendered to the human workers whenever a human loop is required.For over 70 pre-built UIs, check: https://github.com/aws-samples/amazon-a2i-sample-task-uis.We will be taking [translation review and correction UI](https://github.com/aws-samples/amazon-a2i-sample-task-uis/blob/master/text/translation-review-and-correction.liquid.html) and filling in the object categories in the labels variable in the template.
###Code
template = """
<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<style>
table, tr, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
<crowd-form>
<div>
<h1>Instructions</h1>
<p>Please review the below translations and make corrections and improvements.</p>
<p>Your corrections should:
<ol>
<li>Make the translated text more accurately express the meaning of the original text</li>
<li>Make the translated text read more like something a person would write rather than an automated translation</li>
</ol>
</p>
</div>
<table>
<tr>
<th>Original</th>
<th>Translation</th>
<th style="width: 70px">Rating</th>
</tr>
{% for pair in task.input.translationPairs %}
<tr>
<td>{{ pair.originalText }}</td>
<td><crowd-text-area name="translation{{ forloop.index }}" value="{{ pair.translation }}"></crowd-text-area></td>
<td>
<p>
<input type="radio" id="good{{ forloop.index }}" name="rating{{ forloop.index }}" value="good" required>
<label for="good{{ forloop.index }}">Good</label>
</p>
<p>
<input type="radio" id="bad{{ forloop.index }}" name="rating{{ forloop.index }}" value="bad" required>
<label for="bad{{ forloop.index }}">Bad</label>
</p>
</td>
</tr>
{% endfor %}
</table>
</crowd-form>
"""
###Output
_____no_output_____
###Markdown
Create a Worker Task Template Creator FunctionThis function is a higher-level abstraction over the SageMaker client's method to create the Worker Task Template, which we will use in the next step to create a human review workflow.
###Code
def create_task_ui(task_ui_name, template):
'''
Creates a Human Task UI resource.
Returns:
struct: HumanTaskUiArn
'''
response = sagemaker.create_human_task_ui(
HumanTaskUiName=task_ui_name,
UiTemplate={'Content': template})
return response
# Task UI name - this value is unique per account and region. You can also provide your own value here.
taskUIName = 'a2i-translate-test-01-ue-1'
# Create task UI
humanTaskUiResponse = create_task_ui(taskUIName, template)
humanTaskUiArn = humanTaskUiResponse['HumanTaskUiArn']
print(humanTaskUiArn)
###Output
_____no_output_____
###Markdown
Creating the Flow Definition In this section, we're going to create a flow definition. Flow Definitions allow us to specify:* The workforce that your tasks will be sent to.* The instructions that your workforce will receive. This is called a worker task template.* Where your output data will be stored.This demo is going to use the API, but you can optionally create this workflow definition in the console as well. For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html.
###Code
def create_flow_definition(flow_definition_name):
'''
Creates a Flow Definition resource
Returns:
struct: FlowDefinitionArn
'''
response = sagemaker.create_flow_definition(
FlowDefinitionName= flow_definition_name,
RoleArn= ROLE,
HumanLoopConfig= {
"WorkteamArn": WORKTEAM_ARN,
"HumanTaskUiArn": humanTaskUiArn,
"TaskCount": 1,
"TaskDescription": "Please review the translations done using Amazon Translate and make corrections and improvements.",
"TaskTitle": "Review and Improve translations."
},
OutputConfig={
"S3OutputPath" : "s3://"+BUCKET_NAME+"/"
}
)
return response['FlowDefinitionArn']
###Output
_____no_output_____
###Markdown
Now we are ready to create our flow definition
###Code
# Flow definition name - this value is unique per account and region. You can also provide your own value here.
uniqueId = str(uuid.uuid4())
flowDefinitionName = f'translate-a2i-{uniqueId}'
flowDefinitionArn = create_flow_definition(flowDefinitionName)
print(flowDefinitionArn)
###Output
_____no_output_____
###Markdown
Translate DocumentsNow that we have the Human Review Workflow set up, we can translate our documents and pass them over to a Human Loop for review.
###Code
# Get file from S3 and load it into a variable
file_contents = s3.get_object(Bucket=BUCKET_NAME, Key=key)['Body'].read().decode("utf-8", 'ignore')
# Get just the filename without prefix or suffix
fileName = key[key.rindex('/')+1:key.rindex('.')]
# Create the human loop input JSON object
humanLoopInput = {
'SourceLanguage' : 'English',
'TargetLanguage' : 'Spanish',
'sourceLanguageCode':'en',
'targetLanguageCode' : 'es',
'translationPairs' : [],
'rowCount': 0,
'bucketName': BUCKET_NAME,
'keyName': key
}
translatedText = ''
rowCount = 0
print('Splitting file and performing translation')
# split the body by period to get individual sentences
for sentence in file_contents.split('.'):
if len(sentence.lstrip()) > 0:
# call translation
translate_response = translate.translate_text(
Text=sentence + '.',
SourceLanguageCode='en',
TargetLanguageCode='es'
)
translatedSentence = translate_response['TranslatedText']
translationPair = {
'originalText': sentence + '.',
'translation': translatedSentence
}
humanLoopInput['translationPairs'].append(translationPair)
rowCount+=1
translatedText = translatedText + translatedSentence + ' '
humanLoopInput['rowCount'] = rowCount
humanLoopName = 'Translate-A2I-Text' + str(int(round(time.time() * 1000)))
print('Starting human loop - ' + humanLoopName)
response = a2i_runtime_client.start_human_loop(
HumanLoopName=humanLoopName,
FlowDefinitionArn= flowDefinitionArn,
HumanLoopInput={
'InputContent': json.dumps(humanLoopInput)
}
)
# write the machine translated file to S3 bucket.
targetKey = ('machine_output/MO-{0}.txt').format(fileName)
print ('Writing translated text to '+ BUCKET_NAME + '/' + targetKey)
s3.put_object(Bucket=BUCKET_NAME, Key=targetKey, Body=translatedText.encode('utf-8'))
###Output
_____no_output_____
###Markdown
Check Status of Human LoopLet's check the status of the human loop's progress.
###Code
resp = a2i_runtime_client.describe_human_loop(HumanLoopName=humanLoopName)
print(f'HumanLoop Name: {humanLoopName}')
print(f'HumanLoop Status: {resp["HumanLoopStatus"]}')
print(f'HumanLoop Output Destination: {resp["HumanLoopOutput"]}')
print('\n')
humanLoopStatus = resp["HumanLoopStatus"]
outputFilePath = resp["HumanLoopOutput"]
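# Optional helper (a sketch, not part of the original flow): poll describe_human_loop
# until the human loop reaches a terminal status. The name `wait_for_human_loop` is ours.
def wait_for_human_loop(human_loop_name, delay_seconds=30):
    while True:
        status = a2i_runtime_client.describe_human_loop(HumanLoopName=human_loop_name)["HumanLoopStatus"]
        if status in ("Completed", "Failed", "Stopped"):
            return status
        time.sleep(delay_seconds)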
###Output
_____no_output_____
###Markdown
Wait For Work Team to Complete Task
###Code
workteamName = WORKTEAM_ARN[WORKTEAM_ARN.rfind('/') + 1:]
print("Navigate to the private worker portal and do the tasks. Make sure you've invited yourself to your workteam!")
print('https://' + sagemaker.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain'])
###Output
_____no_output_____
###Markdown
Check Status of Human Loop Again and process Task ResultsOnce the Human Loop Status has changed to completed, you can post process the results to build the final file, with Human Reviewed corrections, for future use.
###Code
resp = a2i_runtime_client.describe_human_loop(HumanLoopName=humanLoopName)
humanLoopStatus = resp["HumanLoopStatus"]
outputFilePath = resp["HumanLoopOutput"]['OutputS3Uri']
if humanLoopStatus == "Completed":
# Remove s3:// from S3 File Path
outputFilePath = outputFilePath.replace("s3://", "")
# recreate the output text document, including post edits.
tmsFile = s3.get_object(Bucket=outputFilePath.split('/')[0],
Key="/".join(outputFilePath.split('/')[1:]))['Body'].read()
tmsFile = json.loads(tmsFile.decode('utf-8'))
inputContent = tmsFile['inputContent']
rowcount = inputContent['rowCount']
answerContent = tmsFile['humanAnswers'][0]['answerContent']
editedContent = ''
for index in range(1, rowcount):
editedContent += (answerContent['translation'+str(index)] + " ")
# extract the file name
targetKeyName = inputContent['keyName']
targetKeyName = targetKeyName[targetKeyName.index('/') + 1: len(targetKeyName)]
# save the file.
s3.put_object(Bucket=BUCKET_NAME,
Key='post_edits/PO-{0}'.format(targetKeyName),
Body=editedContent.encode('utf-8'))
print("Output File successfully stored in s3://{0}/post_edits/PO-{1}".format(BUCKET_NAME,targetKeyName))
elif humanLoopStatus == "InProgress":
print("Navigate to the private worker portal and do the tasks. Make sure you've invited yourself to your workteam!")
print('https://' + sagemaker.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain'])
###Output
_____no_output_____ |
ai_thyroid_detection_faster_rcnn/demo.ipynb | ###Markdown
This project was built on top of the following repo: https://github.com/jwyang/faster-rcnn.pytorch Modified for thyroid nodule detection -------------------------------------------------------- Pytorch multi-GPU Faster R-CNN Licensed under The MIT License [see LICENSE for details] Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick --------------------------------------------------------
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import sys
import argparse
import torch.optim as optim
import torchvision.transforms as transforms
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.faster_rcnn.vgg16 import vgg16
# passed
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import time
import os
import pprint
from matplotlib import pyplot as plt
import PIL.Image
import pdb
import glob
from torch.utils.cpp_extension import CUDA_HOME
db_cache = glob.glob('/home/martin/JupyterLab/data/cache/*.pkl')
if len(db_cache):
os.remove(db_cache[0])
# Configs
from easydict import EasyDict
args = EasyDict()
args.dataset = 'pascal_voc'
args.net = 'res101'
args.large_scale = False
args.cuda = True
args.batch_size = 2
args.save_dir = '/home/martin/JupyterLab/output/faster_rcnn'
args.num_workers = 0
args.class_agnostic = True
args.lr = 0.001
args.optimizer = 'sgd'
args.resume = False
args.mGPUs = False
args.use_tfboard = False
args.start_epoch = 0
args.max_epochs = 1
args.lr_decay_step = 8
args.lr_decay_gamma = 0.333
args.disp_interval = 100
args.session = 1
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '5']
print('Called with args:')
pprint.pprint(args)
# mute large_scale variable
args.cfg_file = 'cfgs/{}.yml'.format(args.net)
from model.utils.config import cfg, cfg_from_file
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with cuda")
cfg.CUDA = args.cuda
cfg.USE_GPU_NMS = args.cuda
# train set
# -- Note: Use validation set and disable the flipped to enable faster loading.
cfg.TRAIN.USE_FLIPPED = True
cfg.POOLING_MODE = 'align'
# output dir
output_dir = args.save_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print(output_dir)
# DB helper
def prepare_roidb(imdb):
"""Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function precomputes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
roidb = imdb.roidb
if not (imdb.name.startswith('coco')):
sizes = [PIL.Image.open(imdb.image_path_at(i)).size
for i in range(imdb.num_images)]
for i in range(len(imdb.image_index)):
roidb[i]['img_id'] = imdb.image_id_at(i)
roidb[i]['image'] = imdb.image_path_at(i)
if not (imdb.name.startswith('coco')):
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
# need gt_overlaps as a dense array for argmax
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
prepare_roidb(imdb)
#ratio_index = rank_roidb_ratio(imdb)
print('done')
return imdb.roidb
# USE THYROID VOC
from datasets.thyroid_voc import pascal_voc
imdb = pascal_voc('trainval', '2007', '/home/martin/JupyterLab/data/VOCThyroid')
print('Loaded dataset `{:s}` for training'.format(imdb.name))
# data augmentation: 1x data -> 4x data
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
roidb = get_training_roidb(imdb)
train_size = len(roidb)
print('{:d} roidb entries'.format(len(roidb)))
def filter_roidb(roidb):
# filter the image without bounding box.
print('before filtering, there are %d images...' % (len(roidb)))
i = 0
while i < len(roidb):
if len(roidb[i]['boxes']) == 0:
del roidb[i]
i -= 1
i += 1
print('after filtering, there are %d images...' % (len(roidb)))
return roidb
def rank_roidb_ratio(roidb):
# rank roidb based on the ratio between width and height.
ratio_large = 2 # largest ratio to preserve.
ratio_small = 0.5 # smallest ratio to preserve.
ratio_list = []
for i in range(len(roidb)):
width = roidb[i]['width']
height = roidb[i]['height']
ratio = width / float(height)
if ratio > ratio_large:
roidb[i]['need_crop'] = 1
ratio = ratio_large
elif ratio < ratio_small:
roidb[i]['need_crop'] = 1
ratio = ratio_small
else:
roidb[i]['need_crop'] = 0
ratio_list.append(ratio)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
return ratio_list[ratio_index], ratio_index
from torch.utils.data.sampler import Sampler
class sampler(Sampler):
def __init__(self, train_size, batch_size):
self.num_data = train_size
self.num_per_batch = int(train_size / batch_size)
self.batch_size = batch_size
self.range = torch.arange(0,batch_size).view(1, batch_size).long()
self.leftover_flag = False
if train_size % batch_size:
self.leftover = torch.arange(self.num_per_batch*batch_size, train_size).long()
self.leftover_flag = True
def __iter__(self):
rand_num = torch.randperm(self.num_per_batch).view(-1,1) * self.batch_size
self.rand_num = rand_num.expand(self.num_per_batch, self.batch_size) + self.range
self.rand_num_view = self.rand_num.view(-1)
if self.leftover_flag:
self.rand_num_view = torch.cat((self.rand_num_view, self.leftover),0)
return iter(self.rand_num_view)
def __len__(self):
return self.num_data
sampler_batch = sampler(train_size, args.batch_size)
roidb = filter_roidb(roidb)
ratio_list, ratio_index = rank_roidb_ratio(roidb)
from roi_data_layer.roibatchLoader import roibatchLoader
from importlib import reload
import sys
roibatchLoader=reload(sys.modules['roi_data_layer.roibatchLoader']).roibatchLoader
dataset = roibatchLoader(
roidb,
ratio_list,
ratio_index,
args.batch_size,
imdb.num_classes,
training=True
)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=sampler_batch,
num_workers=args.num_workers
)
from model.faster_rcnn.resnet import resnet
# initilize the network here.
fasterRCNN = resnet(imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic)
fasterRCNN.create_architecture()
###########################################################################
##### Load from checkpoint
###########################################################################
input_dir = '/home/martin/JupyterLab/output/faster_rcnn/res101/pascal_voc/'
load_name = input_dir + 'faster_rcnn_1_73_2776.pth'
print("load checkpoint %s" % (load_name))
if args.cuda > 0:
checkpoint = torch.load(load_name)
args.session = checkpoint['session']
fasterRCNN.load_state_dict(checkpoint['model'])
# NOTE: this notebook only runs inference from the checkpoint, so no optimizer is created here;
# restoring the optimizer state (and its learning rate) is only needed when resuming training.
# optimizer.load_state_dict(checkpoint['optimizer'])
# lr = optimizer.param_groups[0]['lr']
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
fasterRCNN.cuda()
print('Ship faster RCNN to cuda')
# Specify class information
pascal_classes = np.asarray(['__background__', 'lesion'])
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda > 0:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data, volatile=True)
im_info = Variable(im_info, volatile=True)
num_boxes = Variable(num_boxes, volatile=True)
gt_boxes = Variable(gt_boxes, volatile=True)
fasterRCNN.eval()
start = time.time()
max_per_image = 100
thresh = 0.05
vis = True
import cv2
from model.utils.blob import im_list_to_blob
from model.rpn.bbox_transform import clip_boxes
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from matplotlib.pyplot import imread
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def vis_detections(im, class_name, dets, thresh=0.8):
"""Visual debugging of detections."""
for i in range(np.minimum(10, dets.shape[0])):
bbox = tuple(int(np.round(x)) for x in dets[i, :4])
score = dets[i, -1]
if score > thresh:
cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)
cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
return im
def get_gt_bbox(img_filename):
# e.g. '117.png'
annotation_path = '/home/martin/JupyterLab/data/VOCThyroid/VOC2007/Annotations/'
annotation_file = annotation_path + img_filename + '.xml'
import xml.etree.ElementTree as ET
tree = ET.parse(annotation_file)
objs = tree.findall('object')
bbox_list = []
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = int(bbox.find('xmin').text)
y1 = int(bbox.find('ymin').text)
x2 = int(bbox.find('xmax').text)
y2 = int(bbox.find('ymax').text)
bbox_list.append({
'bbox': (x1,y1,x2,y2),
'text': 'gt'
})
return(bbox_list)
def get_pred_bbox(class_name, dets, thresh=0.8):
# class_name = pascal_classes[j]
# dets = cls_dets.cpu().numpy()
# thresh = 0.3
bbox_list = []
# Get bbox for score > threshold
for i in range(np.minimum(10, dets.shape[0])):
bbox = tuple(int(np.round(x)) for x in dets[i, :4])
score = dets[i, -1]
if score > thresh:
bbox_list.append({
'bbox': bbox,
'score': score,
'text': '%s: %.3f' % (class_name, score)
})
# If no score > threshold, get bbox for highest score
if len(bbox_list) == 0:
dets = dets[dets[:,-1].argsort()]
bbox = tuple(int(np.round(x)) for x in dets[-1, :4])
score = dets[-1, -1]
bbox_list.append({
'bbox': bbox,
'score': score,
'text': '%s: %.3f' % (class_name, score)
})
return(bbox_list)
def vis_bbox(im, bbox_list, color = (255, 0, 0)):
for bbox_obj in bbox_list:
bbox = bbox_obj['bbox']
text = bbox_obj['text']
cv2.rectangle(im, bbox[0:2], bbox[2:4], color, 2)
cv2.putText(im, text, (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, color, thickness=1)
return im
def get_iou(bb1_raw, bb2_raw):
bb1 = {
'x1': bb1_raw[0],
'y1': bb1_raw[1],
'x2': bb1_raw[2],
'y2': bb1_raw[3],
}
bb2 = {
'x1': bb2_raw[0],
'y1': bb2_raw[1],
'x2': bb2_raw[2],
'y2': bb2_raw[3],
}
assert bb1['x1'] < bb1['x2']
assert bb1['y1'] < bb1['y2']
assert bb2['x1'] < bb2['x2']
assert bb2['y1'] < bb2['y2']
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
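# Quick sanity check of the IoU helper on hypothetical boxes (not from the dataset):
# identical boxes give IoU = 1.0; a 5x5 box overlapping a 10x10 box gives 25 / 100 = 0.25.
print(get_iou((0, 0, 10, 10), (0, 0, 10, 10)))  # 1.0
print(get_iou((0, 0, 10, 10), (5, 5, 10, 10)))  # 0.25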
# DEMO MODE: LOAD IMG FROM DIR
args.image_dir = '/home/martin/JupyterLab/data/VOCThyroid/VOC2007/JPEGImages/'
# Load test.txt and combine into '[ID].png'
f_path = '/home/martin/JupyterLab/data/VOCThyroid/VOC2007/ImageSets/Main/'
f = open(f_path + 'test.txt', 'r')
imglist = f.readlines()
f.close()
imglist = list(map(lambda x: str(int(x)) + '.png', imglist))
imglist = imglist
######################### Loop code ###########################
iou_list = []
vis = 1
num_images = len(imglist)
print('Loaded Photo: {} images.'.format(num_images))
for i in range(num_images):
total_tic = time.time()
num_images -= 1
im_file = os.path.join(args.image_dir, imglist[num_images])
# im = cv2.imread(im_file)
im_in = np.array(imread(im_file))
# rgb -> bgr
im = im_in[:,:,::-1]
# im_scales is scale ratio
blobs, im_scales = _get_image_blob(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs
# im_info_np is height, width, scale ratio
im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
# convert to pytorch and permute color channel
im_data_pt = torch.from_numpy(im_blob)
im_data_pt = im_data_pt.permute(0, 3, 1, 2)
im_info_pt = torch.from_numpy(im_info_np)
with torch.no_grad():
im_data.resize_(im_data_pt.size()).copy_(im_data_pt)
im_info.resize_(im_info_pt.size()).copy_(im_info_pt)
gt_boxes.resize_(1, 1, 5).zero_()
num_boxes.resize_(1).zero_()
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
# Here: box = drop first column of rois
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
if args.cuda > 0:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
box_deltas = box_deltas.view(1, -1, 4)
else:
if args.cuda > 0:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
pred_boxes /= im_scales[0]
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis:
im2show = np.copy(im)
# This pixel*255 operation is replaced by cv2.convertScaleAbs(im2show, alpha=(255.0))
#im2show = im2show*255
####
## bbox gen
####
# Loop through pascal classes and inference
for j in range(1, len(pascal_classes)):
# get all indices of score of j class exceeding threshold
inds = torch.nonzero(scores[:,j]>thresh).view(-1)
# visuzalize gt box
img_filename = (imglist[num_images]).replace('.png', '')
gt_bbox_list = get_gt_bbox(img_filename)
if vis:
im2show = vis_bbox(im2show, gt_bbox_list, (0,255,0))
# if there is det
if inds.numel() > 0:
cls_scores = scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
# keep = nms(cls_dets, cfg.TEST.NMS, force_cpu=not cfg.USE_GPU_NMS)
keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
# bbox pred / gt and iou calculation
pred_bbox_list = get_pred_bbox(pascal_classes[j], cls_dets.cpu().numpy(), 0.9)
# IoU calculation
if len(pred_bbox_list):
for bbox_obj in pred_bbox_list:
iou_list.append(get_iou(gt_bbox_list[0]['bbox'], bbox_obj['bbox']))
else:
iou_list.append(0.0)
if vis:
im2show = vis_bbox(im2show, gt_bbox_list, (0,255,0))
im2show = vis_bbox(im2show, pred_bbox_list, (0,0,255))
misc_toc = time.time()
nms_time = misc_toc - misc_tic
#if webcam_num == -1:
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \n' \
.format(num_images + 1, len(imglist), detect_time, nms_time))
sys.stdout.flush()
if vis:
img_output_path = '/home/martin/JupyterLab/output/faster_rcnn_img'
result_path = os.path.join(img_output_path, imglist[num_images][:-4] + "_det.jpg")
# PIXEL value conversion
im2show = cv2.convertScaleAbs(im2show, alpha=(255.0))
cv2.imwrite(result_path, im2show)
###Output
Loaded Photo: 299 images.
im_detect: 299/299 0.095s 0.001s
im_detect: 298/299 0.101s 0.002s
im_detect: 297/299 0.101s 0.004s
im_detect: 296/299 0.098s 0.001s
im_detect: 295/299 0.100s 0.003s
im_detect: 294/299 0.103s 0.004s
im_detect: 293/299 0.098s 0.007s
im_detect: 292/299 0.108s 0.002s
im_detect: 291/299 0.103s 0.005s
im_detect: 290/299 0.104s 0.001s
im_detect: 289/299 0.099s 0.002s
im_detect: 288/299 0.099s 0.003s
im_detect: 287/299 0.098s 0.004s
im_detect: 286/299 0.100s 0.004s
im_detect: 285/299 0.099s 0.007s
im_detect: 284/299 0.098s 0.004s
im_detect: 283/299 0.099s 0.004s
im_detect: 282/299 0.100s 0.004s
im_detect: 281/299 0.099s 0.004s
im_detect: 280/299 0.099s 0.004s
im_detect: 279/299 0.097s 0.005s
im_detect: 278/299 0.099s 0.008s
im_detect: 277/299 0.104s 0.002s
im_detect: 276/299 0.098s 0.004s
im_detect: 275/299 0.104s 0.001s
im_detect: 274/299 0.100s 0.002s
im_detect: 273/299 0.097s 0.007s
im_detect: 272/299 0.099s 0.004s
im_detect: 271/299 0.099s 0.003s
im_detect: 270/299 0.098s 0.003s
im_detect: 269/299 0.099s 0.004s
im_detect: 268/299 0.098s 0.003s
im_detect: 267/299 0.098s 0.004s
im_detect: 266/299 0.097s 0.004s
im_detect: 265/299 0.097s 0.003s
im_detect: 264/299 0.106s 0.007s
im_detect: 263/299 0.099s 0.004s
im_detect: 262/299 0.105s 0.002s
im_detect: 261/299 0.099s 0.004s
im_detect: 260/299 0.096s 0.007s
im_detect: 259/299 0.101s 0.002s
im_detect: 258/299 0.098s 0.004s
im_detect: 257/299 0.098s 0.004s
im_detect: 256/299 0.097s 0.002s
im_detect: 255/299 0.099s 0.004s
im_detect: 254/299 0.100s 0.004s
im_detect: 253/299 0.102s 0.002s
im_detect: 252/299 0.103s 0.001s
im_detect: 251/299 0.100s 0.004s
im_detect: 250/299 0.101s 0.002s
im_detect: 249/299 0.098s 0.007s
im_detect: 248/299 0.099s 0.004s
im_detect: 247/299 0.102s 0.002s
im_detect: 246/299 0.098s 0.004s
im_detect: 245/299 0.101s 0.001s
im_detect: 244/299 0.101s 0.003s
im_detect: 243/299 0.099s 0.004s
im_detect: 242/299 0.101s 0.001s
im_detect: 241/299 0.096s 0.003s
im_detect: 240/299 0.102s 0.001s
im_detect: 239/299 0.104s 0.001s
im_detect: 238/299 0.096s 0.002s
im_detect: 237/299 0.100s 0.004s
im_detect: 236/299 0.095s 0.004s
im_detect: 235/299 0.099s 0.003s
im_detect: 234/299 0.099s 0.003s
im_detect: 233/299 0.100s 0.004s
im_detect: 232/299 0.101s 0.003s
im_detect: 231/299 0.099s 0.005s
im_detect: 230/299 0.098s 0.004s
im_detect: 229/299 0.099s 0.003s
im_detect: 228/299 0.099s 0.004s
im_detect: 227/299 0.097s 0.004s
im_detect: 226/299 0.098s 0.003s
im_detect: 225/299 0.102s 0.004s
im_detect: 224/299 0.101s 0.004s
im_detect: 223/299 0.097s 0.002s
im_detect: 222/299 0.099s 0.004s
im_detect: 221/299 0.100s 0.002s
im_detect: 220/299 0.105s 0.002s
im_detect: 219/299 0.099s 0.004s
im_detect: 218/299 0.098s 0.005s
im_detect: 217/299 0.100s 0.002s
im_detect: 216/299 0.101s 0.008s
im_detect: 215/299 0.099s 0.004s
im_detect: 214/299 0.100s 0.004s
im_detect: 213/299 0.096s 0.004s
im_detect: 212/299 0.100s 0.005s
im_detect: 211/299 0.102s 0.003s
im_detect: 210/299 0.101s 0.004s
im_detect: 209/299 0.101s 0.004s
im_detect: 208/299 0.096s 0.004s
im_detect: 207/299 0.097s 0.004s
im_detect: 206/299 0.098s 0.003s
im_detect: 205/299 0.104s 0.002s
im_detect: 204/299 0.100s 0.004s
im_detect: 203/299 0.096s 0.003s
im_detect: 202/299 0.099s 0.002s
im_detect: 201/299 0.100s 0.003s
im_detect: 200/299 0.098s 0.004s
im_detect: 199/299 0.101s 0.002s
im_detect: 198/299 0.097s 0.004s
im_detect: 197/299 0.100s 0.004s
im_detect: 196/299 0.098s 0.004s
im_detect: 195/299 0.100s 0.004s
im_detect: 194/299 0.096s 0.004s
im_detect: 193/299 0.102s 0.003s
im_detect: 192/299 0.099s 0.004s
im_detect: 191/299 0.102s 0.001s
im_detect: 190/299 0.098s 0.002s
im_detect: 189/299 0.097s 0.004s
im_detect: 188/299 0.098s 0.004s
im_detect: 187/299 0.099s 0.003s
im_detect: 186/299 0.099s 0.005s
im_detect: 185/299 0.097s 0.004s
im_detect: 184/299 0.103s 0.002s
im_detect: 183/299 0.097s 0.003s
im_detect: 182/299 0.102s 0.004s
im_detect: 181/299 0.102s 0.001s
im_detect: 180/299 0.097s 0.004s
im_detect: 179/299 0.097s 0.005s
im_detect: 178/299 0.099s 0.002s
im_detect: 177/299 0.099s 0.004s
im_detect: 176/299 0.099s 0.001s
im_detect: 175/299 0.102s 0.003s
im_detect: 174/299 0.099s 0.003s
im_detect: 173/299 0.100s 0.004s
im_detect: 172/299 0.100s 0.008s
im_detect: 171/299 0.097s 0.004s
im_detect: 170/299 0.097s 0.005s
im_detect: 169/299 0.100s 0.003s
im_detect: 168/299 0.100s 0.004s
im_detect: 167/299 0.099s 0.007s
im_detect: 166/299 0.100s 0.002s
im_detect: 165/299 0.103s 0.001s
im_detect: 164/299 0.098s 0.004s
im_detect: 163/299 0.100s 0.003s
im_detect: 162/299 0.099s 0.004s
im_detect: 161/299 0.099s 0.004s
im_detect: 160/299 0.101s 0.003s
im_detect: 159/299 0.101s 0.004s
im_detect: 158/299 0.104s 0.002s
im_detect: 157/299 0.098s 0.007s
im_detect: 156/299 0.095s 0.009s
im_detect: 155/299 0.098s 0.004s
im_detect: 154/299 0.111s 0.002s
im_detect: 153/299 0.100s 0.002s
im_detect: 152/299 0.102s 0.004s
im_detect: 151/299 0.099s 0.002s
im_detect: 150/299 0.099s 0.004s
im_detect: 149/299 0.100s 0.005s
im_detect: 148/299 0.099s 0.004s
im_detect: 147/299 0.099s 0.004s
im_detect: 146/299 0.098s 0.002s
im_detect: 145/299 0.105s 0.002s
im_detect: 144/299 0.100s 0.009s
im_detect: 143/299 0.100s 0.002s
im_detect: 142/299 0.098s 0.003s
im_detect: 141/299 0.101s 0.008s
im_detect: 140/299 0.099s 0.004s
im_detect: 139/299 0.098s 0.005s
im_detect: 138/299 0.101s 0.006s
im_detect: 137/299 0.098s 0.005s
im_detect: 136/299 0.098s 0.002s
im_detect: 135/299 0.098s 0.004s
im_detect: 134/299 0.100s 0.004s
im_detect: 133/299 0.100s 0.005s
im_detect: 132/299 0.097s 0.004s
im_detect: 131/299 0.100s 0.004s
im_detect: 130/299 0.100s 0.002s
im_detect: 129/299 0.099s 0.004s
im_detect: 128/299 0.102s 0.005s
im_detect: 127/299 0.097s 0.002s
im_detect: 126/299 0.103s 0.004s
im_detect: 125/299 0.100s 0.008s
im_detect: 124/299 0.106s 0.002s
im_detect: 123/299 0.101s 0.004s
im_detect: 122/299 0.100s 0.005s
im_detect: 121/299 0.097s 0.002s
im_detect: 120/299 0.099s 0.004s
im_detect: 119/299 0.099s 0.005s
im_detect: 118/299 0.101s 0.002s
im_detect: 117/299 0.096s 0.004s
im_detect: 116/299 0.097s 0.001s
im_detect: 115/299 0.103s 0.003s
im_detect: 114/299 0.099s 0.012s
im_detect: 113/299 0.100s 0.005s
im_detect: 112/299 0.102s 0.003s
im_detect: 111/299 0.099s 0.004s
im_detect: 110/299 0.099s 0.004s
im_detect: 109/299 0.099s 0.004s
im_detect: 108/299 0.098s 0.004s
im_detect: 107/299 0.097s 0.004s
im_detect: 106/299 0.098s 0.004s
im_detect: 105/299 0.099s 0.004s
im_detect: 104/299 0.101s 0.004s
im_detect: 103/299 0.099s 0.001s
im_detect: 102/299 0.099s 0.004s
im_detect: 101/299 0.101s 0.004s
im_detect: 100/299 0.102s 0.003s
im_detect: 99/299 0.099s 0.004s
im_detect: 98/299 0.097s 0.004s
im_detect: 97/299 0.103s 0.004s
im_detect: 96/299 0.098s 0.004s
im_detect: 95/299 0.101s 0.004s
im_detect: 94/299 0.099s 0.004s
im_detect: 93/299 0.097s 0.004s
im_detect: 92/299 0.099s 0.007s
im_detect: 91/299 0.107s 0.002s
im_detect: 90/299 0.097s 0.003s
im_detect: 89/299 0.097s 0.005s
im_detect: 88/299 0.098s 0.003s
im_detect: 87/299 0.099s 0.004s
im_detect: 86/299 0.099s 0.005s
im_detect: 85/299 0.100s 0.002s
im_detect: 84/299 0.098s 0.001s
im_detect: 83/299 0.096s 0.004s
im_detect: 82/299 0.102s 0.002s
im_detect: 81/299 0.102s 0.009s
im_detect: 80/299 0.100s 0.004s
im_detect: 79/299 0.097s 0.002s
im_detect: 78/299 0.100s 0.004s
im_detect: 77/299 0.100s 0.004s
im_detect: 76/299 0.100s 0.006s
im_detect: 75/299 0.102s 0.003s
im_detect: 74/299 0.095s 0.001s
im_detect: 73/299 0.108s 0.002s
im_detect: 72/299 0.097s 0.004s
im_detect: 71/299 0.102s 0.004s
im_detect: 70/299 0.103s 0.002s
im_detect: 69/299 0.097s 0.004s
im_detect: 68/299 0.099s 0.004s
im_detect: 67/299 0.100s 0.004s
im_detect: 66/299 0.102s 0.004s
im_detect: 65/299 0.100s 0.004s
im_detect: 64/299 0.095s 0.005s
im_detect: 63/299 0.099s 0.004s
im_detect: 62/299 0.099s 0.004s
im_detect: 61/299 0.102s 0.003s
im_detect: 60/299 0.098s 0.004s
im_detect: 59/299 0.097s 0.001s
im_detect: 58/299 0.100s 0.003s
im_detect: 57/299 0.098s 0.005s
im_detect: 56/299 0.100s 0.004s
im_detect: 55/299 0.099s 0.002s
im_detect: 54/299 0.099s 0.005s
im_detect: 53/299 0.102s 0.004s
im_detect: 52/299 0.102s 0.002s
im_detect: 51/299 0.101s 0.004s
im_detect: 50/299 0.100s 0.004s
im_detect: 49/299 0.098s 0.005s
im_detect: 48/299 0.099s 0.005s
im_detect: 47/299 0.099s 0.004s
im_detect: 46/299 0.100s 0.004s
im_detect: 45/299 0.095s 0.005s
im_detect: 44/299 0.100s 0.004s
im_detect: 43/299 0.100s 0.008s
im_detect: 42/299 0.100s 0.004s
im_detect: 41/299 0.100s 0.004s
im_detect: 40/299 0.098s 0.002s
im_detect: 39/299 0.098s 0.007s
im_detect: 38/299 0.103s 0.004s
im_detect: 37/299 0.100s 0.003s
im_detect: 36/299 0.101s 0.004s
im_detect: 35/299 0.104s 0.001s
im_detect: 34/299 0.099s 0.005s
im_detect: 33/299 0.099s 0.007s
im_detect: 32/299 0.104s 0.001s
im_detect: 31/299 0.104s 0.002s
im_detect: 30/299 0.099s 0.001s
im_detect: 29/299 0.102s 0.005s
im_detect: 28/299 0.106s 0.004s
im_detect: 27/299 0.102s 0.004s
im_detect: 26/299 0.099s 0.001s
im_detect: 25/299 0.096s 0.002s
im_detect: 24/299 0.108s 0.004s
im_detect: 23/299 0.099s 0.004s
im_detect: 22/299 0.098s 0.003s
im_detect: 21/299 0.099s 0.005s
im_detect: 20/299 0.096s 0.004s
im_detect: 19/299 0.100s 0.004s
im_detect: 18/299 0.100s 0.004s
im_detect: 17/299 0.098s 0.005s
im_detect: 16/299 0.100s 0.002s
im_detect: 15/299 0.095s 0.004s
im_detect: 14/299 0.100s 0.003s
im_detect: 13/299 0.099s 0.003s
im_detect: 12/299 0.100s 0.004s
im_detect: 11/299 0.097s 0.006s
im_detect: 10/299 0.106s 0.002s
im_detect: 9/299 0.099s 0.004s
im_detect: 8/299 0.100s 0.005s
im_detect: 7/299 0.101s 0.002s
im_detect: 6/299 0.096s 0.004s
im_detect: 5/299 0.097s 0.008s
im_detect: 4/299 0.102s 0.003s
im_detect: 3/299 0.111s 0.002s
im_detect: 2/299 0.099s 0.004s
im_detect: 1/299 0.095s 0.004s
|
my_folder/tf_food_vision_Projekt.ipynb | ###Markdown
I am going to use Daniel's repository with the TensorFlow course as my AI-programming resource from now on; I cloned it so that I can edit it and also add my own notebooks etc.; e.g. this tf_food_vision project will probably end up with a pile of notes. -> File -> Save a copy in GitHub -> my_folder/tf_food_vision_Projekt — that way I can save it there; annoyingly I can only edit it from Colab and not from Git. (Don't forget to select the repository & branch.)
###Code
!wget https://raw.githubusercontent.com/mrdbourke/tensorflow-deep-learning/main/extras/helper_functions.py
#!wget https://raw.githubusercontent.com/Carmondaii/tensorflow-deep-learning/main/extras/helper_functions_adapted.py
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator # this is how we loaded data at the start of the course
from tensorflow.keras.preprocessing import image_dataset_from_directory # used later on because it was a bit better
import tensorflow_datasets as tfds # for the project we got the data this way & continued with the tf.data API because it apparently gives the best performance
# "To do this in an effective way, we're going to be leveraging a number of methods from the tf.data API."
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
import zipfile
import os
import pathlib as pat
import helper_functions_adapted as hf  # note: the wget cell above fetches helper_functions.py; helper_functions_adapted.py must already be available (e.g. from the cloned repo)
###Output
_____no_output_____
###Markdown
###Code
def reset_seeds(s=42):
np.random.seed(s)
tf.random.set_seed(s)
seed = 42
reset_seeds(seed)
def view_random_image(target_dir, target_classes, fig_size=(20, 10)):
'''
Function takes a directory & plots 1 random image of each class given in
target_classes.
'''
plt.figure(figsize=fig_size)
for i, clas in enumerate(target_classes):
plt.subplot(2, 5, i+1)
target_folder = target_dir+clas
random_image = random.sample(os.listdir(target_folder), 1)
img = mpimg.imread(target_folder + "/" + random_image[0])
plt.imshow(img)
plt.title(f" {clas}\n{img.shape}")
plt.axis("off")
return img
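# Hypothetical usage (the directory layout <target_dir>/<class>/<image> is an assumption,
# matching how the course's food datasets are organised):
# img = view_random_image("10_food_classes_all_data/train/",
#                         ["pizza", "steak", "sushi"])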
hf.yoloo()
###Output
_____no_output_____ |
Machine Learning & Data Science Masterclass - JP/02-Numpy/00-NumPy-Arrays.ipynb | ###Markdown
------
###Code
arr = np.arange(0,25)
arr
arr.reshape(5,5)
arr.shape
# max, min
random_array = np.random.randint(0, 101, 10)
random_array
random_array.max()
random_array.min()
# if we want index of min, max
random_array.argmax()
random_array.argmin()
random_array.dtype
random_array.shape
random_array = random_array.reshape(2,5)
random_array.shape
###Output
_____no_output_____
###Markdown
------------ Exercise
###Code
# TASK: Create a numpy array called myarray which consists of 101 evenly linearly spaced points between 0 and 10.
# MAKE SURE TO READ THE FULL INSTRUCTIONS ABOVE CAREFULLY, AS THE EVALUATION SCRIPT IS VERY STRICT.
# import ?
# myarray = ?
import numpy as np
myarray = np.linspace(0, 10, 101)
print(myarray)
###Output
[ 0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1. 1.1 1.2 1.3
1.4 1.5 1.6 1.7 1.8 1.9 2. 2.1 2.2 2.3 2.4 2.5 2.6 2.7
2.8 2.9 3. 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 4. 4.1
4.2 4.3 4.4 4.5 4.6 4.7 4.8 4.9 5. 5.1 5.2 5.3 5.4 5.5
5.6 5.7 5.8 5.9 6. 6.1 6.2 6.3 6.4 6.5 6.6 6.7 6.8 6.9
7. 7.1 7.2 7.3 7.4 7.5 7.6 7.7 7.8 7.9 8. 8.1 8.2 8.3
8.4 8.5 8.6 8.7 8.8 8.9 9. 9.1 9.2 9.3 9.4 9.5 9.6 9.7
9.8 9.9 10. ]
|
tex/proofs/J.ipynb | ###Markdown
Recursion formulae for $\mathbb{j}_v$ In this notebook we validate our recursion formulae for the integral $\mathbb{j}$.
###Code
%matplotlib inline
%run notebook_setup.py
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
from mpmath import ellipf, ellipe
from tqdm.notebook import tqdm
from scipy.special import binom
import warnings
warnings.simplefilter("ignore")
###Output
_____no_output_____
###Markdown
Here is the definition of the $\mathbb{j}$ integral, which we compute numerically:
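Explicitly, transcribing the integrand used in `jexact` below (this is a restatement, not a new definition), the quantity being computed is
$$\mathbb{j}_v(k^2; \boldsymbol{\alpha}) = \sum_{i} \int_{\alpha_{2i-1}}^{\alpha_{2i}} \sin^{2v}\varphi \left(1 - \frac{\sin^{2}\varphi}{k^2}\right)^{3/2} \mathrm{d}\varphi,$$
where the sum runs over consecutive pairs of integration limits in $\boldsymbol{\alpha}$.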
###Code
def jexact(v, k2, alpha):
"""
The function j_v evaluated by direct numerical integration.
"""
res = 0
for i in range(0, len(alpha), 2):
func = (
lambda x: np.sin(x) ** (2 * v)
* (1 - np.sin(x + 0j) ** 2 / k2) ** 1.5
)
res += quad(
func, alpha[i], alpha[i + 1], epsabs=1e-12, epsrel=1e-12,
)[0]
return res
###Output
_____no_output_____
###Markdown
And here is our analytic expression, computed from two lower bounds and a three-term upward recurrence relation:
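Written out, the upward recurrence coded below reads, for $v \ge 2$,
$$(2v+3)\,\mathbb{j}_v = 2\left(v + 1 + (v-1)k^2\right)\mathbb{j}_{v-1} - (2v-3)\,k^2\,\mathbb{j}_{v-2} + \Delta\!\left[k^2 \sin^{2v-3}\!\alpha \, \cos\alpha \left(1 - \frac{\sin^2\alpha}{k^2}\right)^{5/2}\right],$$
where $\Delta[\cdot]$ denotes the sum of pairwise differences over the limits in $\boldsymbol{\alpha}$ (the `Delta` helper), and the $v=0,1$ cases are the elliptic-integral boundary terms.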
###Code
@np.vectorize
def F(phi, k2):
"""Incomplete elliptic integral of the first kind."""
return float(ellipf(phi, k2).real)
@np.vectorize
def E(phi, k2):
"""Incomplete elliptic integral of the second kind."""
return float(ellipe(phi, k2).real)
def Delta(x):
"""The sum over the pairwise differences of an array."""
return sum(-np.array(x)[::2] + np.array(x)[1::2])
def j(v, k2, alpha):
"""
The function j_v computed from two lower boundary conditions
and a three-term upward recurrence relation.
"""
q = np.sqrt(np.maximum(0, 1 - np.sin(alpha) ** 2 / k2))
z0 = np.sin(alpha) * np.cos(alpha) * q / k2
z1 = ((3 * np.sin(alpha) ** 2 + 4) - 6 * k2) * z0
zv = q ** 5 * np.sin(alpha) ** (2 * v - 3) * np.cos(alpha) * k2
if v == 0:
return (1 / 3) * (
2 * (2 - 1 / k2) * Delta(E(alpha, 1 / k2))
+ (1 / k2 - 1) * Delta(F(alpha, 1 / k2))
+ Delta(z0)
)
elif v == 1:
return (1 / 15) * (
(-3 * k2 + 13 - 8 / k2) * Delta(E(alpha, 1 / k2))
+ (3 * k2 - 7 + 4 / k2) * Delta(F(alpha, 1 / k2))
+ Delta(z1)
)
else:
return (
2 * (v + (v - 1) * k2 + 1) * j(v - 1, k2, alpha)
- (2 * v - 3) * k2 * j(v - 2, k2, alpha)
+ Delta(zv)
) / (2 * v + 3)
###Output
_____no_output_____
###Markdown
We can verify that the two expressions agree for all $\{\alpha_1, \alpha_2 \}$ and for any $N$ (we choose 3 here for definiteness):
###Code
# Validation
k2 = 0.90
for alpha1 in np.array([5, 50, 95, 160, 190]) * np.pi / 180:
arr = np.linspace(alpha1, 220 * np.pi / 180, 50)
plt.plot(
arr, [jexact(3, k2, np.array([alpha1, alpha2])) for alpha2 in arr], "k--", lw=3
)
plt.plot(arr, [j(3, k2, np.array([alpha1, alpha2])) for alpha2 in arr])
plt.plot(np.nan, np.nan, "k--", lw=3, label="numerical")
plt.plot(np.nan, np.nan, "k-", label="analytic")
plt.legend()
plt.xlabel(r"$\alpha_2$", fontsize=22)
plt.ylabel(r"$\mathbb{j}_3$", fontsize=22);
###Output
_____no_output_____
###Markdown
where different colors correspond to different values of $\alpha_1$. Tridiagonal solve for J The upward recurrence relation presented above has poor stability in general. We can attain much higher stability by instead solving a tridiagonal system with a lower and an upper boundary condition.
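Concretely, rearranging the recurrence above gives, for each $2 \le v \le N$, one row of the tridiagonal system assembled below:
$$
\mathbb{j}_v - \frac{2\big(v+1+(v-1)k^2\big)}{2v+3}\,\mathbb{j}_{v-1} + \frac{(2v-3)\,k^2}{2v+3}\,\mathbb{j}_{v-2} = \frac{\Delta\big(k^2 q^5 \sin^{2v-3}\alpha\,\cos\alpha\big)}{2v+3},
$$
with $\mathbb{j}_0$ and $\mathbb{j}_N$ moved to the right-hand side as boundary conditions.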
###Code
def solve_numerical(N, k2, alpha):
"""Numerical solution for all j_v, 0 <= v <= N."""
return np.array([jexact(v, k2, alpha) for v in range(N + 1)])
def solve(N, k2, alpha):
"""
Return the array j[0 .. N], computed recursively using
a tridiagonal solver and a lower boundary condition
(analytic in terms of elliptic integrals) and an upper
boundary condition (computed as a series solution).
"""
# Useful quantities
sin = np.sin(alpha)
cos = np.cos(alpha)
q = np.sqrt(np.maximum(0, 1 - sin ** 2 / k2))
# Boundary conditions
j0 = jexact(0, k2, alpha)
jN = jexact(N, k2, alpha)
# Set up the tridiagonal problem
a = np.empty(N - 1)
b = np.empty(N - 1)
c = np.empty(N - 1)
for i, v in enumerate(range(2, N + 1)):
a[i] = -2 * (v + 1 + (v - 1) * k2) / (2 * v + 3)
b[i] = (2 * v - 3) * k2 / (2 * v + 3)
c[i] = Delta(k2 * sin ** (2 * v - 3) * cos * q ** 5) / (2 * v + 3)
# Add the boundary conditions
c[0] -= b[0] * j0
c[-1] -= jN
# Construct the tridiagonal matrix
A = np.diag(a, 0) + np.diag(b[1:], -1) + np.diag(np.ones(N - 2), 1)
# Solve
soln = np.linalg.solve(A, c)
return np.concatenate(([j0], soln, [jN]))
###Output
_____no_output_____
###Markdown
Here's a simple check that the tridiagonal solver agrees with direct numerical integration for a specific combination of input parameters:
###Code
k2 = 0.90
alpha = np.array([0.5, 1.0])
N = 10
plt.plot(solve_numerical(N, k2, alpha), lw=3, label="numerical")
plt.plot(solve(N, k2, alpha), lw=1.5, label="tridiagonal")
plt.xlabel("v")
plt.ylabel("$J_v$")
plt.legend();
###Output
_____no_output_____
###Markdown
Solver stability Let's randomize values of $k^2$ and $\alpha$ and compute the log of the difference between the tridiagonal solution and the numerical solution:
###Code
def random_k2():
"""Random k2 in the range [0, 1] or [1, 20]."""
if np.random.random() < 0.5:
return np.random.random()
else:
return 1 + 19 * np.random.random()
def random_alpha():
"""Random (alpha1, alpha2) in the range [0, pi] with alpha2 > alpha1."""
alpha1 = np.pi * np.random.random()
alpha2 = alpha1 + (np.pi - alpha1) * np.random.random()
return np.array([alpha1, alpha2])
ntimes = 1000
N = 20
logdiff = np.zeros((ntimes, N + 1))
for i in tqdm(range(ntimes)):
k2 = random_k2()
alpha = random_alpha()
logdiff[i] = np.log10(
np.maximum(1e-16, np.abs(solve(N, k2, alpha) - solve_numerical(N, k2, alpha)))
)
fig, ax = plt.subplots(1, N, sharex=True, sharey=True)
ax[0].set_ylim(-16, 1)
ax[0].set_xscale("log")
ax[0].set_xticklabels([])
for i, axis in enumerate(ax):
axis.set_title(i)
axis.hist(logdiff[:, i], orientation="horizontal", bins=np.arange(-16.5, 0))
###Output
_____no_output_____ |
FLEX/plot_FLEX_timeseries.ipynb | ###Markdown
FLEX This notebook plots the time series of the surface forcing and simulated sea surface temperature and mixed layer depth in the [FLEX](https://gotm.net/cases/flex/) test case.
###Code
import sys
import numpy as np
import string
import matplotlib.pyplot as plt
# add the path of gotmtool
sys.path.append("../gotmtool")
from gotmtool import *
from gotmtool.diags import get_mld_deltaT
###Output
_____no_output_____
###Markdown
Load data First, store the information of the five simulations into lists and dictionaries.
###Code
casenames = [
'GLS-C01A_tidal',
'GLS-C01A',
'KPP-CVMix',
'KPPLT-VR12',
'KPPLT-LF17',
]
colors = {
'GLS-C01A_tidal': 'tab:blue',
'GLS-C01A': 'tab:blue',
'KPP-CVMix': 'tab:olive',
'KPPLT-VR12': 'tab:orange',
'KPPLT-LF17': 'tab:red',
}
linestyles = {
'GLS-C01A_tidal': '--',
'GLS-C01A': '-',
'KPP-CVMix': '-',
'KPPLT-VR12': '-',
'KPPLT-LF17': '-',
}
casedir = '../gotm/run/FLEX'
save_fig = True
###Output
_____no_output_____
###Markdown
Load data into a dictionary.
###Code
sims = {}
dataset = {}
for case in casenames:
sim = Simulation(path=casedir+'/'+case)
    sims[case] = sim
dataset[case] = sim.load_data()
###Output
_____no_output_____
###Markdown
Figure Time series of (a) net surface heat flux (W m$^{-2}$), (b) surface friction velocity (m s$^{-1}$), (c) La$_t^{-2}$ where La$_t$ is the turbulent Langmuir number, (d) sea surface temperature (SST; $^\circ$C), and (e) mixed layer depth (MLD; m) defined by a 0.2 $^\circ$C temperature threshold referenced to the surface.
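For reference, the turbulent Langmuir number in panel (c) is conventionally defined as $\mathrm{La}_t = \sqrt{u_*/u_s}$, with $u_*$ the surface friction velocity and $u_s$ the surface Stokes drift; the quantity plotted here is taken directly from the `La_Turb` variable of the GOTM output.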
###Code
fig, axarr = plt.subplots(5,sharex='col')
fig.set_size_inches([8,9])
rho_w = 1000.
data = dataset['KPP-CVMix']
ustar = data.u_taus
laturb = data.La_Turb
laturb = laturb.where(laturb < 1.e3, drop=True)
heat = data.heat
I0 = data.I_0
tflux = heat + I0
time = data.time
time_start = time[0]
time_end = time[-1]
tflux.plot(ax=axarr[0], color='k', linewidth=1)
# tflux.rolling(time=8, center=True).mean().plot(
# ax=axarr[0], color='k', linewidth=1.5)
ustar.plot(ax=axarr[1], color='k', linewidth=1)
# ustar.rolling(time=8, center=True).mean().plot(
# ax=axarr[1], color='k', linewidth=1.5)
(laturb**(-2)).plot(ax=axarr[2], color='k', linewidth=1)
# (laturb**(-2)).rolling(time=8, center=True).mean().plot(
# ax=axarr[2], color='k', linewidth=1.5)
data.sst_obs[:,0,0].rolling(time=1, center=True).mean().plot(
color='k', linestyle='-', linewidth=1, ax=axarr[3], label='Obs')
mld_obs = -get_mld_deltaT(data.temp_obs[:,:,0,0], zRef=0.)
mld_obs.rolling(time=1, center=True).mean().plot(
ax=axarr[4], color='k', linewidth=1, label='Obs')
for case in casenames:
temp = dataset[case].temp
temp[-1,:,0,0].rolling(time=1, center=True).mean().plot(
ax=axarr[3], color=colors[case], linestyle=linestyles[case],
linewidth=1, alpha=0.8, label=case)
mld = -get_mld_deltaT(temp[:,:,0,0], zRef=0.)
mld = mld.where(mld.values>-140.)
mld.rolling(time=1, center=True).mean().plot(
ax=axarr[4], color=colors[case], linestyle=linestyles[case],
linewidth=1, alpha=0.8, label=case)
axarr[3].legend(loc = 'upper center', fontsize=10, ncol=3,
bbox_to_anchor=(0.4, 0.95))
axarr[0].set_ylabel('$Q_0$ (W m$^{-2}$)', fontsize=12)
axarr[0].axhline(0, color='k', linewidth=0.75)
axarr[0].set_ylim([-200, 500])
axarr[1].set_ylabel('$u_*$ (m s$^{-1}$)', fontsize=12)
axarr[1].set_ylim([0, 0.03])
axarr[2].axhline(0.3**(-2), color='k', linestyle=':', linewidth=0.75)
axarr[2].axhline(0.4**(-2), color='k', linestyle=':', linewidth=0.75)
axarr[2].set_ylim([0, 12])
axarr[2].set_ylabel('La$_t^{-2}$', fontsize=12)
axarr[3].set_ylabel('SST ($^\circ$C)', fontsize=12)
axarr[3].set_ylim([6, 10])
axarr[4].set_ylim([-100, 0])
axarr[4].set_ylabel('MLD (m)', fontsize=12)
for i, ax in enumerate(axarr):
ax.set_title('')
ax.set_xlabel('')
ax.set_xlim([time_start,time_end])
ax.set_xticks([np.datetime64('1976-04-08'),
np.datetime64('1976-04-15'),
np.datetime64('1976-04-22'),
np.datetime64('1976-04-29'),
np.datetime64('1976-05-06'),
np.datetime64('1976-05-13'),
np.datetime64('1976-05-20'),
np.datetime64('1976-05-27'),
np.datetime64('1976-06-03'),
])
if i == 1 or i == 2:
ax.text(0.015, 0.08, '('+string.ascii_lowercase[i]+')', transform=ax.transAxes,
fontsize=12, va='bottom')
else:
ax.text(0.015, 0.92, '('+string.ascii_lowercase[i]+')', transform=ax.transAxes,
fontsize=12, va='top')
if save_fig:
plt.subplots_adjust(top=0.97, bottom=0.1, right=0.98, hspace=0.13)
figname = 'flex_ts.pdf'
fig.savefig(figname, dpi=300)
###Output
_____no_output_____ |
CL results.ipynb | ###Markdown
Read the filenames ------ Specify here the filenames to read the predictions, along with their abstraction level.
###Code
# Imports used throughout this notebook
import os
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import ttest_rel
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support

basedir_cl = '../results/echr/cl/evaluated_wordvectors_wiki/'
layers_string = '2000'
features = 'WordVectors'
outdir = '../results/echr/cl/evaluated_wordvectors_wiki/'
results_files = [
("NER",
"",
os.path.join(basedir_cl, "test_predictions_NER_%s.csv" % layers_string)
),
("ENTITY",
"",
os.path.join(basedir_cl, "test_predictions_NER_ENTITY_%s.csv" % layers_string)
# os.path.join(basedir_cl, "test_predictions_ENTITY_%s.csv" % layers_string)
),
("LKIF",
"",
os.path.join(basedir_cl, "test_predictions_NER_ENTITY_LKIF_%s.csv" % layers_string)
# os.path.join(basedir_cl, "test_predictions_LKIF_%s.csv" % layers_string)
),
("YAGO",
"",
os.path.join(basedir_cl, "test_predictions_NER_ENTITY_LKIF_YAGO_%s.csv" % layers_string)
# os.path.join(basedir_cl, "test_predictions_YAGO_%s.csv" % layers_string)
)
]
accuracy_results = []
for iteration, batch, cl in results_files:
batch_accuracy = 0
cl_accuracy = 0
pvalue = 0
if batch != "":
batch_results = pd.read_csv(batch)
batch_accuracy = accuracy_score(batch_results.true, batch_results.prediction)
if cl != "": # and iteration != "NER":
cl_results = pd.read_csv(cl)
cl_accuracy = accuracy_score(cl_results.true, cl_results.prediction)
if batch != "":
_, pvalue = ttest_rel(batch_results.prediction, cl_results.prediction)
accuracy_results.append({
'iteration': iteration,
'batch_accuracy': batch_accuracy,
'cl_accuracy': cl_accuracy,
'pvalue': pvalue
})
accuracy_results = pd.DataFrame(accuracy_results).rename_axis("Index", axis="columns")
accuracy_results
def read_predictions(filename, classes):
results = pd.read_csv(filename)
return results
results_df = []
for iteration, batch, cl in results_files:
if batch != "":
results = read_predictions(batch, classes[iteration])
if iteration == 'NER':
precision, recall, fscore, _ = precision_recall_fscore_support(
results.true, results.prediction,
labels=np.arange(classes[iteration][0].shape[0]),
warn_for=()
)
prec_rec_fscore = pd.DataFrame()
prec_rec_fscore['Class'] = classes[iteration][0]
prec_rec_fscore['ClassCount'] = classes[iteration][1]
prec_rec_fscore['Precision'] = precision
prec_rec_fscore['Recall'] = recall
prec_rec_fscore['Fscore'] = fscore
prec_rec_fscore['Iteration'] = iteration
prec_rec_fscore['Method'] = 'Batch Learning'
results_df.append(prec_rec_fscore)
continue
precision, recall, fscore, _ = precision_recall_fscore_support(
results.true, results.prediction,
labels=np.arange(classes[iteration][0].shape[0] - 1),
warn_for=()
)
prec_rec_fscore = pd.DataFrame()
prec_rec_fscore['Class'] = classes[iteration][0][:-1]
prec_rec_fscore['ClassCount'] = classes[iteration][1][:-1]
prec_rec_fscore['Precision'] = precision
prec_rec_fscore['Recall'] = recall
prec_rec_fscore['Fscore'] = fscore
prec_rec_fscore['Iteration'] = iteration
prec_rec_fscore['Method'] = 'Batch Learning'
results_df.append(prec_rec_fscore)
if cl != "":
results = read_predictions(cl, classes[iteration])
precision, recall, fscore, _ = precision_recall_fscore_support(
results.true, results.prediction,
labels=np.arange(classes[iteration][0].shape[0] - 1),
warn_for=()
)
prec_rec_fscore = pd.DataFrame()
prec_rec_fscore['Class'] = classes[iteration][0][:-1]
prec_rec_fscore['ClassCount'] = classes[iteration][1][:-1]
prec_rec_fscore['Precision'] = precision
prec_rec_fscore['Recall'] = recall
prec_rec_fscore['Fscore'] = fscore
prec_rec_fscore['Iteration'] = iteration
prec_rec_fscore['Method'] = 'Curriculum Learning'
results_df.append(prec_rec_fscore)
results_df = pd.concat(results_df)
results_df = pd.melt(results_df, id_vars=["Class", "ClassCount", "Iteration", "Method"],
value_vars=["Precision", "Fscore", "Recall"],
var_name="Metric", value_name="Value")
metric_map = {"Precision": 0, "Recall": 1, "Fscore": 2}
results_df['MetricRank'] = results_df['Metric'].map(metric_map)
results_df = results_df.sort_values(["Iteration", "Method", "MetricRank", "Class"]).reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Print results as a latex table
###Code
pivot_df = results_df[(results_df.Iteration == 'YAGO')]
pivot_df = pivot_df.pivot_table(index=['Class', 'Method'],
columns='Metric', values='Value')[['Precision', 'Recall', 'Fscore']].reset_index()
pivot_df = pivot_df.append(pivot_df[pivot_df.Method == 'Batch Learning'].mean(), ignore_index=True)
pivot_df = pivot_df.append(pivot_df[pivot_df.Method == 'Curriculum Learning'].mean(), ignore_index=True)
pivot_df = pivot_df[['Precision', 'Recall', 'Fscore']]
print(pivot_df.to_latex(index=False, float_format=lambda x: '%.2f' % x))
for iteration, idf in results_df.groupby("Iteration"):
if iteration == "NER":
continue
plt.clf()
ax = sns.boxplot(x='Metric', y='Value', hue='Method',
data=idf)
    ax.set_title('Precision/Recall/F1-Score for Iteration %s\n(without considering O class) using %s'
             % (iteration, features))
ax.title.set_y(1.05)
ax.figure.set_size_inches(7, 5)
ax.figure.tight_layout(pad=1)
ax.figure.savefig(os.path.join(outdir, 'prec_rec_fscore/%s_prec_rec_fscore.png' % (iteration, )))
plt.show()
rdf_top_bottom = []
for (iteration, method), rdf in results_df.groupby(["Iteration", "Method"]):
if iteration == 'NER':
continue
prec_rdf = rdf[rdf.Metric == 'Precision']
rec_rdf = rdf[rdf.Metric == 'Recall']
fscore_rdf = rdf[rdf.Metric == 'Fscore']
to_take = np.ceil(prec_rdf.shape[0] * 0.2).astype(np.int32)
bottom_prec = prec_rdf.sort_values('ClassCount', ascending=False)[:to_take].Value.mean()
top_prec = prec_rdf.sort_values('ClassCount', ascending=True)[:to_take].Value.mean()
bottom_rec = rec_rdf.sort_values('ClassCount', ascending=False)[:to_take].Value.mean()
top_rec = rec_rdf.sort_values('ClassCount', ascending=True)[:to_take].Value.mean()
bottom_fscore = fscore_rdf.sort_values('ClassCount', ascending=False)[:to_take].Value.mean()
top_fscore = fscore_rdf.sort_values('ClassCount', ascending=True)[:to_take].Value.mean()
rdf_top_bottom.append({
'Iteration': iteration,
# 'Method': method,
'Top 20% Precision Mean': top_prec,
'Bottom 20% Precision Mean': bottom_prec,
'Top 20% Recall Mean': top_rec,
'Bottom 20% Recall Mean': bottom_rec,
'Top 20% Fscore Mean': top_fscore,
'Bottom 20% Fscore Mean': bottom_fscore,
})
rdf_top_bottom = pd.DataFrame(rdf_top_bottom,
columns=['Iteration', 'Top 20% Precision Mean', 'Bottom 20% Precision Mean',
'Top 20% Recall Mean', 'Bottom 20% Recall Mean', 'Top 20% Fscore Mean',
'Bottom 20% Fscore Mean'])
rdf_top_bottom.rename_axis("Index", axis="columns")
print(rdf_top_bottom.to_latex(index=False, float_format='%.2f'))
confusion_matrices = {}
columns = {}
for iteration, batch, cl in results_files:
if iteration == 'NER' or iteration == 'YAGO':
continue
classes_ = np.array([re.sub('^I-', '', cls) for cls in classes[iteration][0]])
if batch != "":
batch_df = read_predictions(batch, classes[iteration])
confusion_matrices[('batch', iteration)] =\
confusion_matrix(batch_df.true, batch_df.prediction, labels=np.arange(classes_.shape[0]))
columns[('batch', iteration)] = classes_
if cl != "":
cl_df = read_predictions(cl, classes[iteration])
confusion_matrices[('cl', iteration)] =\
confusion_matrix(cl_df.true, cl_df.prediction, labels=np.arange(classes_.shape[0]))
columns[('cl', iteration)] = classes_
for (method, iteration), cm in confusion_matrices.items():
plt.clf()
normalized_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
normalized_cm = pd.DataFrame(normalized_cm, columns=columns[(method, iteration)],
index=columns[(method, iteration)])
ax = sns.heatmap(normalized_cm.reindex(index=normalized_cm.index[::-1]), vmin=0.0, vmax=1.0, annot=False,
fmt=".2f", linewidths=.5, cmap="Blues", cbar=False)
method_name = 'Batch Learning' if method == 'batch' else 'Curriculum Learning'
# ax.set_title("Confusion Matrix Normalized Heatmap\nfor %s and Iteration %s using %s"
# % (method_name, iteration, features))
ax.tick_params(labelsize=17)
plt.xticks(rotation=90)
if iteration == 'ENTITY':
plt.yticks(rotation=0)
ax.title.set_y(1.05)
if iteration == 'LKIF':
ax.figure.set_size_inches(12, 12)
else:
ax.figure.set_size_inches(8, 8)
ax.figure.tight_layout(pad=1.5)
ax.figure.savefig(
'%s/heatmaps/%s_%s_heatmaps.png' % (outdir, method, iteration))
plt.show()
def mapping(label):
if counts[label] < 5:
return "< 5"
elif counts[label] < 15:
return "< 15"
elif counts[label] < 30:
return "< 30"
elif counts[label] < 50:
return "< 50"
elif counts[label] < 100:
return "< 100"
else:
return "O"
vmapping = np.vectorize(mapping)
# bins = ["< 5", "< 15", "< 30", "< 50", "< 100", "< 200", "< 500",
# "< 1000", "< 2000", "< 5000", "< 10000", "< 15000",
# "< 30000", "< 50000", "< 1000000", "O"]
bins = ["< 5", "< 15", "< 30", "< 50", "< 100"]
_, batch, cl = results_files[-1]
for method, method_file in [('batch', batch), ('cl', cl)]:
if method_file != "":
df = pd.read_csv(method_file)
counts = {}
for label_index in range(classes['YAGO'][0].shape[0]):
counts[label_index] = (df.true == label_index).sum()
true_mapped = vmapping(df.true.values)
prediction_mapped = vmapping(df.prediction.values)
cm = confusion_matrix(true_mapped, prediction_mapped, labels=bins)
plt.clf()
normalized_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
normalized_cm = pd.DataFrame(normalized_cm, columns=bins,
index=bins)
ax = sns.heatmap(normalized_cm.reindex(index=normalized_cm.index[::-1]), vmin=0.0, vmax=1.0, annot=False,
fmt=".2f", linewidths=.5, cmap="Blues", cbar=False)
method_name = 'Batch Learning' if method == 'batch' else 'Curriculum Learning'
# ax.set_title("Confusion Matrix Normalized Heatmap\nfor %s and Iteration YAGO using %s"
# % (method_name, features))
ax.tick_params(labelsize=17)
plt.xticks(rotation=90)
ax.title.set_y(1.05)
ax.set_xlabel('Bins (number of occurrences)')
ax.figure.set_size_inches(8, 8)
ax.figure.tight_layout(pad=1.5)
ax.figure.savefig(
'%s/heatmaps/%s_YAGO_heatmaps.png' % (outdir, method))
###Output
/home/mteruel/anaconda2/envs/env35/lib/python3.5/site-packages/ipykernel/__main__.py:38: RuntimeWarning: invalid value encountered in true_divide
###Markdown
Get average precision and recall
###Code
averages = ['micro', 'macro', 'weighted']
levels = [(iteration[0], average) for iteration in results_files for average in averages]
index = pd.MultiIndex.from_tuples(levels, names=['Task', 'Average type'])
batch_metrics = pd.DataFrame(0, index=index, columns=['Precision', 'Recall', 'F1 Score'])
cl_metrics = pd.DataFrame(0, index=index, columns=['Precision', 'Recall', 'F1 Score'])
for iteration, batch, cl in results_files:
if batch != "":
results = read_predictions(batch, classes[iteration])
for average in averages:
values = precision_recall_fscore_support(
results.true, results.prediction,
average=average, warn_for=()
)[:3]
print(values)
batch_metrics.loc[iteration, average] = values
if cl != "":
results = read_predictions(cl, classes[iteration])
for average in averages:
values = precision_recall_fscore_support(
results.true, results.prediction,
average=average, warn_for=()
)[:3]
cl_metrics.loc[iteration, average] = values
cl_metrics.rename_axis("Index", axis="columns")
###Output
_____no_output_____ |
docs/tutorials/06_User_defined_functionalities.ipynb | ###Markdown
>> MaaSSim tutorial >> External functionalities > ----- example of simulations with various functionalities included
###Code
%load_ext autoreload
%autoreload 2
import os, sys # add MaaSSim to path (not needed if MaaSSim is already in path)
module_path = os.path.abspath(os.path.join('../..'))
if module_path not in sys.path:
sys.path.append(module_path)
import random
from MaaSSim.utils import get_config, load_G, prep_supply_and_demand # simulator
from MaaSSim.traveller import travellerEvent
from MaaSSim.driver import driverEvent
from MaaSSim.data_structures import structures as inData
from MaaSSim.simulators import simulate
from MaaSSim.decisions import dummy_False
import pandas as pd
import logging
params = get_config('../../data/config/default.json') # load configuration
params.times.patience = 200 # traveller patience
params.simTime = 1 # 1 hour of simulation
params.nP = 10 # requests (and passengers)
params.nV = 10 # vehicles
params.nD = 1
###Output
_____no_output_____
###Markdown
User-defined functions to reject with probability of 20% and 80% respectively, passed to MaaSSim by reference and called from the simulator
###Code
def rand_reject8(**kwargs):
return random.random()>=0.8
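# Note: the markdown above mentions rejection probabilities of both 20% and 80%, yet only
# rand_reject8 survives in this copy. A hypothetical companion (name and form assumed,
# mirroring rand_reject8) could look like:
def rand_reject2(**kwargs):
    return random.random() >= 0.2  # True (reject) with probability ~0.8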
params = get_config('../../data/config/delft.json') # load configuration
params.times.patience = 300 # traveller patience
params.simTime = 1 # 1 hour of simulation
params.nP = 10 # requests (and passengers)
params.nV = 10 # vehicles
params.nD = 1
# no functions
sim2 = simulate(params=params, f_trav_mode=dummy_False,f_driver_decline=dummy_False,f_platform_choice=dummy_False, logger_level = logging.CRITICAL)
# driver request decline behaviour
sim2.make_and_run(f_trav_mode=dummy_False,
f_driver_decline=rand_reject8)
# traveller rejects offers
sim2.make_and_run(f_trav_mode=rand_reject8,
f_driver_decline=dummy_False)
# both reject
sim2.make_and_run(f_trav_mode=rand_reject8,
f_driver_decline=rand_reject8)
import matplotlib.pyplot as plt
titles = 'no functions','driver request decline behaviour','traveller rejects offers', 'both reject'
fig, ax = plt.subplots(4,2, figsize = (20,10))
for i in range(4):
df = sim2.runs[i].trips.groupby('event').size().to_frame()
df.plot(kind='barh', ax = ax[i][0], title = titles[i]+ " travellers")
df = sim2.runs[i].rides.groupby('event').size().to_frame()
df.plot(kind='barh', ax = ax[i][1], title = " drivers")
fig.tight_layout()
###Output
_____no_output_____
###Markdown
> driver repositioning
###Code
from MaaSSim.driver import VehicleAgent, driverEvent
from MaaSSim.decisions import f_repos, f_dummy_repos
sim = simulate(inData, params = params, _print = False, f_driver_repos = f_repos, f_trav_mode=dummy_False, f_driver_decline=dummy_False,f_platform_choice=dummy_False, logger_level = logging.WARNING)
pd.DataFrame(sim.runs[0].rides.event.unique())
###Output
13-10-20 16:13:07-WARNING-Setting up 1h simulation at 2020-10-13 15:52:41 for 10 vehicles and 10 passengers in Delft, Netherlands
13-10-20 16:13:08-WARNING-simulation time 0.4 s
13-10-20 16:13:08-WARNING-assertion tests for simulation results - passed
|
Woof_MaxBlurPool_ResnetTrick_s192bs32_e200_9035.ipynb | ###Markdown
ResnetTrick_s192bs32_e200 > size 192, bs 32, 200-epoch runs. Setup and imports
###Code
# pip install git+https://github.com/ayasyrev/model_constructor
# pip install git+https://github.com/kornia/kornia
from kornia.contrib import MaxBlurPool2d
from fastai.basic_train import *
from fastai.vision import *
from fastai.script import *
from model_constructor.net import *
from model_constructor.layers import SimpleSelfAttention, ConvLayer
import math
import torch
from torch.optim.optimizer import Optimizer, required
import itertools as it
###Output
_____no_output_____
###Markdown
utils
###Code
class Mish(nn.Module):
def __init__(self):
super().__init__()
print("Mish activation loaded...")
def forward(self, x):
#save 1 second per epoch with no x= x*() and then return x...just inline it.
return x *( torch.tanh(F.softplus(x)))
#Ranger deep learning optimizer - RAdam + Lookahead combined.
#https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
#Ranger has now been used to capture 12 records on the FastAI leaderboard.
#This version = 9.3.19
#Credits:
#RAdam --> https://github.com/LiyuanLucasLiu/RAdam
#Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code.
#Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610
#summary of changes:
#full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights),
#supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues.
#changes 8/31/19 - fix references to *self*.N_sma_threshold;
#changed eps to 1e-5 as better default than 1e-8.
class Ranger(Optimizer):
def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95,0.999), eps=1e-5, weight_decay=0):
#parameter checks
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
if not lr > 0:
raise ValueError(f'Invalid Learning Rate: {lr}')
if not eps > 0:
raise ValueError(f'Invalid eps: {eps}')
#parameter comments:
# beta1 (momentum) of .95 seems to work better than .90...
#N_sma_threshold of 5 seems better in testing than 4.
#In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
#prep defaults and init torch.optim base
defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
super().__init__(params,defaults)
#adjustable threshold
self.N_sma_threshhold = N_sma_threshhold
#now we can get to work...
#removed as we now use step from RAdam...no need for duplicate step counting
#for group in self.param_groups:
# group["step_counter"] = 0
#print("group step counter init")
#look ahead params
self.alpha = alpha
self.k = k
#radam buffer for state
self.radam_buffer = [[None,None,None] for ind in range(10)]
#self.first_run_check=0
#lookahead weights
#9/2/19 - lookahead param tensors have been moved to state storage.
#This should resolve issues with load/save where weights were left in GPU memory from first load, slowing down future runs.
#self.slow_weights = [[p.clone().detach() for p in group['params']]
# for group in self.param_groups]
#don't use grad for lookahead weights
#for w in it.chain(*self.slow_weights):
# w.requires_grad = False
def __setstate__(self, state):
print("set state called")
super(Ranger, self).__setstate__(state)
def step(self, closure=None):
loss = None
#note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.
#Uncomment if you need to use the actual closure...
#if closure is not None:
#loss = closure()
#Evaluate averages and grad, update param tensors
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ranger optimizer does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p] #get state dict for this param
if len(state) == 0: #if first time to run...init dictionary with our desired entries
#if self.first_run_check==0:
#self.first_run_check=1
#print("Initializing slow buffer...should not see this at load from saved model!")
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
#look ahead weight storage now in state dict
state['slow_buffer'] = torch.empty_like(p.data)
state['slow_buffer'].copy_(p.data)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
#begin computations
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
#compute variance mov avg
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
#compute mean moving avg
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.radam_buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma > self.N_sma_threshhold:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
if N_sma > self.N_sma_threshhold:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
#integrated look ahead...
#we do it at the param level instead of group level
if state['step'] % group['k'] == 0:
slow_p = state['slow_buffer'] #get access to slow param tensor
slow_p.add_(self.alpha, p.data - slow_p) #(fast weights - slow weights) * alpha
p.data.copy_(slow_p) #copy interpolated weights to RAdam param tensor
return loss
def get_data(size=128, woof=1, bs=64, workers=None, **kwargs):
if woof:
path = URLs.IMAGEWOOF # if woof
else:
path = URLs.IMAGENETTE
path = untar_data(path)
print('data path ', path)
n_gpus = num_distrib() or 1
if workers is None: workers = min(8, num_cpus()//n_gpus)
return (ImageList.from_folder(path).split_by_folder(valid='val')
.label_from_folder().transform(([flip_lr(p=0.5)], []), size=size)
.databunch(bs=bs, num_workers=workers)
.presize(size, scale=(0.35,1))
.normalize(imagenet_stats))
def get_learn(
gpu:Param("GPU to run on", str)=None,
woof: Param("Use imagewoof (otherwise imagenette)", int)=1,
size: Param("Size (px: 128,192,224)", int)=128,
alpha: Param("Alpha", float)=0.99,
mom: Param("Momentum", float)=0.95, #? 0.9
eps: Param("epsilon", float)=1e-6,
bs: Param("Batch size", int)=64,
mixup: Param("Mixup", float)=0.,
opt: Param("Optimizer (adam,rms,sgd)", str)='ranger',
sa: Param("Self-attention", int)=0,
sym: Param("Symmetry for self-attention", int)=0,
model: Param('model as partial', callable) = xresnet50
):
if opt=='adam' : opt_func = partial(optim.Adam, betas=(mom,alpha), eps=eps)
elif opt=='ranger' : opt_func = partial(Ranger, betas=(mom,alpha), eps=eps)
data = get_data(size, woof, bs)
learn = (Learner(data, model(), wd=1e-2, opt_func=opt_func,
metrics=[accuracy,top_k_accuracy],
bn_wd=False, true_wd=True,
loss_func = LabelSmoothingCrossEntropy(),))
print('Learn path', learn.path)
if mixup: learn = learn.mixup(alpha=mixup)
return learn
###Output
_____no_output_____
###Markdown
ResBlock
###Code
class NewResBlock(Module):
def __init__(self, expansion, ni, nh, stride=1,
conv_layer=ConvLayer, act_fn=act_fn,
# pool=nn.AvgPool2d(2, ceil_mode=True), sa=False,sym=False):
pool=nn.AvgPool2d(2, ceil_mode=True), sa=False,sym=False, zero_bn=True):
nf,ni = nh*expansion,ni*expansion
self.reduce = noop if stride==1 else pool
layers = [(f"conv_0", conv_layer(ni, nh, 3, stride=stride, act_fn=act_fn)),
(f"conv_1", conv_layer(nh, nf, 3, zero_bn=zero_bn, act=False))
] if expansion == 1 else [
(f"conv_0",conv_layer(ni, nh, 1, act_fn=act_fn)),
(f"conv_1",conv_layer(nh, nh, 3, stride=1, act_fn=act_fn)), #!!!
(f"conv_2",conv_layer(nh, nf, 1, zero_bn=zero_bn, act=False))
]
if sa: layers.append(('sa', SimpleSelfAttention(nf,ks=1,sym=sym)))
self.convs = nn.Sequential(OrderedDict(layers))
self.idconv = noop if ni==nf else conv_layer(ni, nf, 1, act=False)
self.merge =act_fn
def forward(self, x):
o = self.reduce(x)
return self.merge(self.convs(o) + self.idconv(o))
###Output
_____no_output_____
###Markdown
Parameters
###Code
lr = 0.004
epochs = 200
moms = (0.95,0.95)
start_pct = 0.2
size=192
bs=32
###Output
_____no_output_____
###Markdown
Model Constructor
###Code
model = xresnet50(c_out=10)
model.block = NewResBlock
pool = MaxBlurPool2d(3, True)
model.pool = pool
model.stem_pool = pool
# model.stem_sizes = [3,32,32,64]
model.stem_sizes = [3,32,64,64]
model.act_fn= Mish()
model.sa = True
###Output
Mish activation loaded...
###Markdown
repr model
###Code
model()
model.stem
model.body
model.head
###Output
_____no_output_____
###Markdown
Lr find
###Code
learn = get_learn(model=model,size=size,bs=bs)
learn.lr_find()
learn.recorder.plot()
###Output
_____no_output_____
###Markdown
epochs 200 9035
###Code
epochs = 200
mixup = 0.2
start_pct
###Output
_____no_output_____
###Markdown
1
###Code
learn = get_learn(model=model,size=size,bs=bs,mixup=mixup)
learn.fit_fc(epochs, lr, moms,start_pct)
learn.recorder.metrics[-1]
learn.recorder.metrics[79]
learn.recorder.plot_losses()
learn.recorder.plot_metrics()
learn.recorder.metrics
res = ''
for num, i in enumerate(learn.recorder.metrics):
res += f"{num}, {i[0].item()}, {i[1].item()} \n"
with open('log_s192_e200_1.txt','w') as f:
f.writelines(res)
###Output
_____no_output_____
###Markdown
2
###Code
learn = get_learn(model=model,size=size,bs=bs,mixup=mixup)
learn.fit_fc(epochs, lr, moms,start_pct)
learn.recorder.plot_losses()
learn.recorder.plot_metrics()
res = ''
for num, i in enumerate(learn.recorder.metrics):
res += f"{num}, {i[0].item()}, {i[1].item()} \n"
with open('log_s192_e200_2.txt','w') as f:
f.writelines(res)
loss = ''
for num, i in enumerate(learn.recorder.losses):
loss += f"{num}, {i.item()} \n"
with open('log_loss_s192_e200_2.txt','w') as f:
f.writelines(loss)
val_loss = ''
for num, i in enumerate(learn.recorder.val_losses):
val_loss += f"{num}, {i.item()}\n"
with open('log_val_loss_s192_e200_2.txt','w') as f:
f.writelines(val_loss)
###Output
_____no_output_____
###Markdown
epochs 200 results
###Code
acc = np.array([0.9007381200790405, 0.9063374996185303])
acc.mean(), acc.std()
###Output
_____no_output_____ |
Chapter 4/Exersize 3. Identifying outliers and unexpected values in bivariate relationships .ipynb | ###Markdown
###Code
# import pandas, numpy, and matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.width', 75)
pd.set_option('display.max_columns', 7)
pd.set_option('display.max_rows', 20)
pd.options.display.float_format = '{:,.2f}'.format
covidtotals = pd.read_csv("https://raw.githubusercontent.com/sandeep92134/PYTHON-Data-Cleaning/master/Chapter%204/datasets/covidtotals.csv")
covidtotals.set_index("iso_code", inplace=True)
# set up the cumulative and demographic columns
totvars = ['location','total_cases','total_deaths','total_cases_pm',
'total_deaths_pm']
demovars = ['population','pop_density','median_age','gdp_per_capita',
'hosp_beds']
# generate a correlation matrix of the cumulative and demographic data
covidtotals.corr(method="pearson")
# get descriptive statistics on the cumulative values
covidtotalsonly = covidtotals.loc[:, totvars]
# see if some countries have unexpectedly low or high death rates given the number of cases
covidtotalsonly['total_cases_q'] = pd.\
qcut(covidtotalsonly['total_cases'],
labels=['very low','low','medium',
'high','very high'], q=5, precision=0)
covidtotalsonly['total_deaths_q'] = pd.\
qcut(covidtotalsonly['total_deaths'],
labels=['very low','low','medium',
'high','very high'], q=5, precision=0)
pd.crosstab(covidtotalsonly.total_cases_q,
covidtotalsonly.total_deaths_q)
covidtotals.loc[(covidtotals.total_cases<300000) & (covidtotals.total_deaths>20000)].T
covidtotals.loc[(covidtotals.total_cases>300000) & (covidtotals.total_deaths<10000)].T
# do a scatterplot of total_cases by total_deaths
ax = sns.regplot(x="total_cases_pm", y="total_deaths_pm", data=covidtotals)
ax.set(xlabel="Cases Per Million", ylabel="Deaths Per Million", title="Total Covid Cases per Million and Deaths per Million by Country")
plt.show()
covidtotals.loc[(covidtotals.total_cases_pm<7500) \
& (covidtotals.total_deaths_pm>250),\
['location','total_cases_pm','total_deaths_pm']]
covidtotals.loc[(covidtotals.total_cases_pm>5000) \
& (covidtotals.total_deaths_pm<=50), \
['location','total_cases_pm','total_deaths_pm']]
covidtotals.loc[(covidtotals.total_cases>300000) & (covidtotals.total_deaths<10000)].T
covidtotals.loc[(covidtotals.total_cases<300000) & (covidtotals.total_deaths>20000)].T
###Output
_____no_output_____ |
notebooks/parametric/1.0_Demo_GaussFlows.ipynb | ###Markdown
Gaussianization Flows
###Code
#@title Install Packages
# %%capture
try:
import sys, os
from pyprojroot import here
# spyder up to find the root
root = here(project_files=[".here"])
# append to path
sys.path.append(str(here()))
except ModuleNotFoundError:
%%capture
import os
os.system("pip install objax wandb chex")
os.system("pip install git+https://github.com/IPL-UV/rbig_jax.git#egg=rbig_jax")
# jax packages
import jax
import jax.numpy as np
from jax.config import config
# import chex
config.update("jax_enable_x64", False)
import numpy as onp
from functools import partial
import objax
import chex
# library functions
from rbig_jax.data import get_classic
from rbig_jax.plots import plot_joint, plot_joint_prob, plot_info_loss
from rbig_jax.transforms.mixture import MixtureGaussianCDF
from rbig_jax.transforms.logit import Logit
from rbig_jax.transforms.inversecdf import InverseGaussCDF
from rbig_jax.transforms.linear import HouseHolder
from rbig_jax.transforms.base import CompositeTransform
from rbig_jax.models.gaussflow import GaussianizationFlow
# logging
import tqdm
import wandb
# plot methods
import matplotlib.pyplot as plt
import seaborn as sns
sns.reset_defaults()
sns.set_context(context="talk", font_scale=0.7)
%matplotlib inline
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Demo Data
###Code
# %%wandb
# get data
n_samples = 10_000
n_features = 2
data = get_classic(n_samples)
# plot data
sns.jointplot(data[:, 0], data[:, 1], s=5, color='blue')
###Output
_____no_output_____
###Markdown
Model
###Code
# model hyperparameters
n_components = 20
n_reflections = 2
generator = objax.random.Generator(123)
learn_temperature = False
n_features = data.shape[1]
# initialize model
bijections = CompositeTransform([
# Layer I
MixtureGaussianCDF(n_features=n_features, n_components=n_components),
Logit(learn_temperature=learn_temperature),
HouseHolder(n_features=n_features, n_reflections=n_reflections, generator=generator),
# Layer II
MixtureGaussianCDF(n_features=n_features, n_components=n_components),
Logit(learn_temperature=learn_temperature),
HouseHolder(n_features=n_features, n_reflections=n_reflections, generator=generator),
# Layer III
MixtureGaussianCDF(n_features=n_features, n_components=n_components),
Logit(learn_temperature=learn_temperature),
HouseHolder(n_features=n_features, n_reflections=n_reflections, generator=generator),
# Layer IV
MixtureGaussianCDF(n_features=n_features, n_components=n_components),
Logit(learn_temperature=learn_temperature),
HouseHolder(n_features=n_features, n_reflections=n_reflections, generator=generator),
])
# initialize base distribution
base_dist = jax.scipy.stats.norm
# initialize Model
gf_model = GaussianizationFlow(n_features=n_features, bijections=bijections, base_dist=base_dist)
from pprint import pprint
# pprint(gf_model.vars())
###Output
_____no_output_____
###Markdown
Loss Function
###Code
# vectorized the model to allow for batches
# model_vectorized = objax.Vectorize(model, vc=model.vars())
# model_jitted = objax.Jit(model, vc=model.vars())
@objax.Function.with_vars(gf_model.vars())
def nll_loss(x):
return gf_model.score(x)
nll_loss(data)
###Output
_____no_output_____
###Markdown
Optimizer
###Code
# define the optimizer
opt = objax.optimizer.Adam(gf_model.vars())
# get grad values
gv = objax.GradValues(nll_loss, gf_model.vars())
lr = 0.01
epochs = 500
batch_size = 128
@objax.Function.with_vars(gf_model.vars() + opt.vars())
def train_op(x):
g, v = gv(x) # returns gradients, loss
opt(lr, g)
return v
# This line is optional: it is compiling the code to make it faster.
train_op = objax.Jit(train_op)
###Output
_____no_output_____
###Markdown
Testing
###Code
import itertools
from jax import device_put
import tqdm
from jax import random
# initialize parameters
key = random.PRNGKey(123)
itercount = itertools.count()
permute_rng, rng = random.split(key)
train_data = np.array(data)
losses = list()
pbar = tqdm.trange(epochs)
with pbar:
for i in pbar:
# batch processing
permute_rng, rng = random.split(rng)
# randomly shuffle the data
train_data = random.permutation(permute_rng, train_data)
# Train
avg_loss = []
for batch_index in range(0, n_samples, batch_size):
# compute loss
loss = float(train_op( train_data[batch_index:batch_index+batch_size])[0])
# append batch
avg_loss.append(loss)
# average loss
batch_loss = np.mean(np.stack(avg_loss))
# Log losses
losses.append(batch_loss)
pbar.set_postfix({"loss": f"{batch_loss:.4f}"})
###Output
100%|██████████| 500/500 [03:34<00:00, 2.33it/s, loss=1.4646]
###Markdown
Losses
###Code
plt.plot(losses)
###Output
_____no_output_____
###Markdown
Forward Transformation
###Code
# forward transformation
z = gf_model.transform(np.array(data))
sns.jointplot(z[:, 0], z[:, 1], s=5, color='red')
###Output
_____no_output_____
###Markdown
Inverse Transformation
###Code
# forward transformation
X_approx = gf_model.inverse_transform(z)
sns.jointplot(X_approx[:, 0], X_approx[:, 1], s=5, color='green')
###Output
_____no_output_____
###Markdown
Application I - Generating Samples
###Code
%%time
# generate samples in the latent domain
n_gen_samples = 10_000
# inverse transformation
X_samples = gf_model.sample(n_gen_samples)
sns.jointplot(X_samples[:, 0], X_samples[:, 1], s=5, color='purple')
###Output
_____no_output_____
###Markdown
Probability Density Estimation
###Code
%%time
# log probability
X_log_prob = gf_model.score_samples(np.array(data))
cmap = "Reds"
probs = X_log_prob
# probs = np.clip(probs, 0.0, 1.0)
probs = np.clip(probs, None, 0.0)
title = "Log Probability"
fig, ax = plt.subplots()
h = ax.scatter(data[:, 0], data[:, 1], s=1, c=probs, cmap=cmap, vmax=0.0)
plt.xlabel("X")
plt.ylabel("Y")
plt.colorbar(h)
ax.set_title(title)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Probability
###Code
ngrid = 1_000
buffer = 0.1
xline = np.linspace(data[:,0].min()-buffer, data[:,0].max()+buffer, ngrid)
yline = np.linspace(data[:,1].min()-buffer, data[:,1].max()+buffer, ngrid)
xgrid, ygrid = np.meshgrid(xline, yline)
xyinput = np.concatenate([xgrid.reshape(-1, 1), ygrid.reshape(-1, 1)], axis=1)
%%time
# log probability
log_prob = gf_model.score_samples(np.array(xyinput))
cmap = "Reds"
probs = np.exp(log_prob)
# probs = np.clip(probs, 0.0, 1.0)
title = "Probability"
fig, ax = plt.subplots()
h = ax.scatter(xyinput[:, 0], xyinput[:, 1], s=1, c=probs, cmap=cmap, vmin=0.0, vmax=1.0)
plt.xlabel("X")
plt.ylabel("Y")
plt.colorbar(h)
ax.set_title(title)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Negative Log-Likelihood
###Code
nll = gf_model.score(data)
print("Negative Log-Likelihood:", nll)
###Output
Negative Log-Likelihood: 1.4600139
|
awkward-numba/notebooks/PerformanceTestsTemplate.ipynb | ###Markdown
Performance Analysis of Awkward-array vs Numba-optimized Awkward-array Content: - [Awkward package performance on large arrays](#Awkward-package-performance-on-large-arrays) - [Profiling of Awkward package](#Profiling-of-Awkward-package) - [Using %%timeit](#Awkward-Array-Using-%%timeit) - [Using cProfile](#Awkward-Array-Using-cProfile) - [Awkward Numba package](#Awkward-Numba-package-performance-on-large-arrays) - [Profiling of Awkward Numba package](#Profiling-of-Awkward-Numba-package) - [Using %%timeit](#Awkward-Array-Numba-Using-%%timeit) - [Using cProfile](#Awkward-Array-Numba-Using-cProfile) - [Speed Difference at a glance](#Speed-Difference-at-a-glance) Awkward Array Awkward-array is a pure Python+Numpy library for manipulating complex data structures as you would Numpy arrays, even if your data structures are nested and contain variable-length (jagged) contents.
###Code
import awkward
from awkward import JaggedArray
array = awkward.fromiter([[1.1, 2.2, None, 3.3, None],
[4.4, [5.5]],
[{"x": 6, "y": {"z": 7}}, None, {"x": 8, "y": {"z": 9}}]
])
print("array = ",array, "\ntype: ",type(array))
array.tolist()
array[:, :2]
###Output
_____no_output_____
###Markdown
Awkward Array package performance on large arrays - [Using %%timeit](#Awkward-Array-Using-%%timeit) - [Using cProfile](#Awkward-Array-Using-cProfile) Awkward-Array-Using-%%timeit
###Code
%%timeit
benchmark_no_numba()
time_no_numba = %timeit -po benchmark_no_numba()
time_no_numba = []
for _ in range(10):
time = %timeit -o benchmark_no_numba(data)
time_no_numba.append(time.best)
time_no_numba
###Output
578 ms ± 17 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
490 ms ± 8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
491 ms ± 16.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
575 ms ± 36.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
549 ms ± 28.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
499 ms ± 43.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
578 ms ± 61.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
483 ms ± 8.13 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
480 ms ± 4.69 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
532 ms ± 69.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Awkward Array Numba - A Numba-jitted Awkward Array Awkward-array Numba uses jitted loops to help optimise the Awkward-array package. All function calls etc. remain the same as that of the base package.
###Code
import awkward_numba
from awkward_numba import JaggedArray
array = JaggedArray.fromiter([[1.1, 2.2, None, 3.3, None],
[4.4, [5.5]],
[{"x": 6, "y": {"z": 7}}, None, {"x": 8, "y": {"z": 9}}]
])
print("array = ",array, "\ntype: ",type(array))
array.tolist()
###Output
_____no_output_____
###Markdown
Profiling of Awkward Numba package - [Using %%timeit](#Awkward-Array-Numba-Using-%%timeit) - [Using cProfile](#Awkward-Array-Numba-Using-cProfile) Awkward Array Numba Using %%timeit
###Code
%%timeit
benchmark_numba()
time_numba = %timeit -o benchmark_numba()
time_numba = []
for _ in range(10):
time = %timeit -o benchmark_numba(data)
time_numba.append(time.best)
time_numba
###Output
732 ms ± 94.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
707 ms ± 32 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
713 ms ± 70.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
788 ms ± 56.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
627 ms ± 33.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
595 ms ± 18.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
620 ms ± 37 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
644 ms ± 51 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
611 ms ± 31 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
831 ms ± 94.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Speed Difference at a glance
###Code
# time_numba = [1,2,3,4,5,6,7]
# time_no_numba = [2*i for i in time_numba]
import matplotlib.pyplot as plt
time_numba_plot = plt.scatter([i for i in range(len(time_numba))],time_numba, marker = "o")
time_no_numba_plot = plt.scatter([i for i in range(len(time_numba))],time_no_numba, marker = "x")
plt.rcParams['axes.facecolor'] = "#dbe6f5"
plt.title("Numba vs No Numba")
plt.xlabel('Iteration')
plt.ylabel('Time Taken')
plt.legend((time_numba_plot , time_no_numba_plot),("Numba","No Numba"))
plt.rcParams["figure.figsize"] = [16,6]
plt.show()
time_numba_plot = plt.plot([i for i in range(len(time_numba))],time_numba, marker = "o")
time_no_numba_plot = plt.plot([i for i in range(len(time_numba))],time_no_numba, marker = "x")
plt.rcParams['axes.facecolor'] = "#dbe6f5"
plt.title("Numba vs No Numba")
plt.xlabel('Iteration')
plt.ylabel('Time Taken')
plt.legend((time_numba_plot , time_no_numba_plot),("Numba","No Numba"))
plt.rcParams["figure.figsize"] = [16,6]
plt.show()
objects = ('Numba','No Numba')
y_pos = np.arange(len(objects))
performance = [min(time_numba), min(time_no_numba)]  # order matches the 'objects' labels
plt.bar(y_pos, performance, alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Best loop time [s]')
plt.title('Numba vs No Numba (best %timeit loop time)')
plt.show()
import numpy as np
import numba
from awkward_numba import JaggedArray
import random
import h5py
import awkward
#Error occurs when we use awkward_numba.JaggedArray
for i in range(3):
print("jagged_array_{}".format(i))
jagged_array = awkward.JaggedArray.fromiter([[random.randint(1,1000) for _ in range(random.randint(-1,10))]for _ in range(random.randint(-1,8))])
akdh5['jagged_array____{}'.format(i)] = jagged_array
for key in akdh5.keys():
print(akdh5[(key)])
for key in akdh5.keys():
print(key)
import random
import awkward_numba
from awkward_numba import JaggedArray
def dataset_generator():
h5file = h5py.File("dataset{}.hdf5".format(random.randint(1000,10000000)),"w")
awkwd = awkward.hdf5(h5file)
for i in range(10):
jagged_array = awkward_numba.JaggedArray.fromiter([[random.randint(1,1000) for _ in range(random.randint(-1,1000))]for _ in range(random.randint(-1,10000))])
awkwd['jagged_array_{}'.format(i)] = jagged_array
if(i%1==0):
print(i)
# print('jagged_array_{}'.format(i),jagged_array)
return h5file
def dataset_reader(h5file,numba=True):
awkwd = awkward_numba.hdf5(h5file)
for key in h5file.keys():
if numba:
min = awkward.JaggedArray._argminmax_general_numba(awkwd[(key)], True)
else:
min = awkward.JaggedArray._argminmax_general(awkwd[(key)], True)
# print("\nArray is: ",awkwd[key],"\n argminmax: ",min)
# import random
# import awkward_numba
# from awkward_numba import JaggedArray
# def dataset_generator():
# dataset = []
# for i in range(0,100):
# jagged_array = awkward_numba.JaggedArray.fromiter([[random.randint(1,1000) for _ in range(random.randint(-1,10))]for _ in range(random.randint(-1,600))])
# dataset.append(jagged_array)
# return dataset
# def dataset_reader(dataset,Numba=True):
# for data in dataset:
# if Numba:
# min = awkward_numba.JaggedArray._argminmax_general(data, True)
# else:
# min = awkward_numba.JaggedArray._argminmax_general_native(data, True)
# # print(key,": ",awkwd[(key)]," type: ",type(awkwd[(key)]))
# # print("/n argminmax",awkwd[(key)]._argminmax_general(True))
def benchmark_no_numba(data):
# data = dataset_generator()
dataset_reader(data,numba=False)
def benchmark_numba(data):
# data = dataset_generator()
dataset_reader(data,numba=True)
data = dataset_generator()
len(data)
%%timeit
dataset_reader(data,False)
%%timeit
dataset_reader(data)
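# The notebook outline also lists "Using cProfile" sections, but no cProfile code appears
# in this copy. A minimal, assumed sketch of how the same comparison could be profiled:
import cProfile
cProfile.run('dataset_reader(data, numba=False)', sort='cumtime')
cProfile.run('dataset_reader(data, numba=True)', sort='cumtime')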
arr = JaggedArray.fromiter([[1.1, 2.2, 3.3], [], [5.5, 4.4]])
def data_creator(size):
if size=="Large":
for i in range(100):
jagged_array = JaggedArray.fromiter([[random.randint(1,1000) for _ in range(random.randint(-1,10000))]for _ in range(random.randint(-1,16000))])
with open('dataset-awkd/jagged_array{}.txt'.format(i), 'w+') as hf:
hf.write(str(jagged_array))
return "dataset created successfully"
for i in range(3):
jagged_array = JaggedArray.fromiter([[random.randint(1,1000) for _ in range(random.randint(-1,10))]for _ in range(random.randint(-1,8))])
with open('dataset-awkd/jagged_array{}.txt'.format(i), 'w+') as hf:
hf.write(str(jagged_array))
###Output
_____no_output_____ |
scikit-learn/scipy-2018-sklearn/notebooks/.ipynb_checkpoints/08.Unsupervised_Learning-Clustering-checkpoint.ipynb | ###Markdown
Unsupervised Learning Part 2 -- Clustering Clustering is the task of gathering samples into groups of similar samples according to some predefined similarity or distance (dissimilarity) measure, such as the Euclidean distance. In this section we will explore a basic clustering task on some synthetic and real-world datasets. Here are some common applications of clustering algorithms: - Compression for data reduction - Summarizing data as a preprocessing step for recommender systems - Similarly: - grouping related web news (e.g. Google News) and web search results - grouping related stock quotes for investment portfolio management - building customer profiles for market analysis - Building a code book of prototype samples for unsupervised feature extraction Let's start by creating a simple, 2-dimensional, synthetic dataset:
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=42)
X.shape
plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1])
###Output
_____no_output_____
###Markdown
In the scatter plot above, we can see three separate groups of data points and we would like to recover them using clustering -- think of "discovering" the class labels that we already take for granted in a classification task. Even if the groups are obvious in the data, it is hard to find them when the data lives in a high-dimensional space, which we can't visualize in a single histogram or scatterplot. Now we will use one of the simplest clustering algorithms, K-means. This is an iterative algorithm which searches for three cluster centers such that the distance from each point to its cluster is minimized. The standard implementation of K-means uses the Euclidean distance, which is why we want to make sure that all our variables are measured on the same scale if we are working with real-world datasets. In the previous notebook, we talked about one technique to achieve this, namely, standardization. Question: what would you expect the output to look like?
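As a quick aside (not needed for the synthetic blobs below, which are already on comparable scales), a minimal sketch of that standardization step for a hypothetical real-world feature matrix `X_real`:
```python
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_real_scaled = scaler.fit_transform(X_real)  # zero mean, unit variance per feature
```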
###Code
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3, random_state=42)
###Output
_____no_output_____
###Markdown
We can get the cluster labels either by calling fit and then accessing the ``labels_`` attribute of the K means estimator, or by calling ``fit_predict``. Either way, the result contains the ID of the cluster that each point is assigned to.
###Code
labels = kmeans.fit_predict(X)
labels
np.all(y == labels)
###Output
_____no_output_____
###Markdown
Let's visualize the assignments that have been found
###Code
plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1], c=labels)
###Output
_____no_output_____
###Markdown
Compared to the true labels:
###Code
plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1], c=y)
###Output
_____no_output_____
###Markdown
Here, we are probably satisfied with the clustering results. But in general we might want to have a more quantitative evaluation. How about comparing our cluster labels with the ground truth we got when generating the blobs?
###Code
from sklearn.metrics import confusion_matrix, accuracy_score
print('Accuracy score:', accuracy_score(y, labels))
print(confusion_matrix(y, labels))
np.mean(y == labels)
###Output
_____no_output_____
###Markdown
EXERCISE: After looking at the "True" label array y, and the scatterplot and `labels` above, can you figure out why our computed accuracy is 0.0, not 1.0, and can you fix it? Even though we recovered the partitioning of the data into clusters perfectly, the cluster IDs we assigned were arbitrary, and we cannot hope to recover them. Therefore, we must use a different scoring metric, such as ``adjusted_rand_score``, which is invariant to permutations of the labels:
###Code
from sklearn.metrics import adjusted_rand_score
adjusted_rand_score(y, labels)
###Output
_____no_output_____
###Markdown
One of the "short-comings" of K-means is that we have to specify the number of clusters, which we often don't know *a priori*. For example, let's have a look at what happens if we set the number of clusters to 2 in our synthetic 3-blob dataset:
###Code
kmeans = KMeans(n_clusters=2, random_state=42)
labels = kmeans.fit_predict(X)
plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1], c=labels)
kmeans.cluster_centers_
###Output
_____no_output_____
###Markdown
The Elbow Method The Elbow method is a "rule-of-thumb" approach to finding the optimal number of clusters. Here, we look at the cluster dispersion for different values of k:
###Code
distortions = []
for i in range(1, 11):
km = KMeans(n_clusters=i,
random_state=0)
km.fit(X)
distortions.append(km.inertia_)
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.show()
###Output
_____no_output_____
###Markdown
Then, we pick the value that resembles the "pit of an elbow." As we can see, this would be k=3 in this case, which makes sense given our visual expection of the dataset previously. **Clustering comes with assumptions**: A clustering algorithm finds clusters by making assumptions with samples should be grouped together. Each algorithm makes different assumptions and the quality and interpretability of your results will depend on whether the assumptions are satisfied for your goal. For K-means clustering, the model is that all clusters have equal, spherical variance.**In general, there is no guarantee that structure found by a clustering algorithm has anything to do with what you were interested in**. We can easily create a dataset that has non-isotropic clusters, on which kmeans will fail:
###Code
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3,
random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
###Output
_____no_output_____
###Markdown
Some Notable Clustering Routines The following are some well-known clustering algorithms. - `sklearn.cluster.KMeans`: The simplest, yet effective clustering algorithm. Needs to be provided with the number of clusters in advance, and assumes that the data is normalized as input (you can use a PCA model as a preprocessor).- `sklearn.cluster.MeanShift`: Can find better looking clusters than KMeans but is not scalable to a high number of samples.- `sklearn.cluster.DBSCAN`: Can detect irregularly shaped clusters based on density, i.e. sparse regions in the input space are likely to become inter-cluster boundaries. Can also detect outliers (samples that are not part of a cluster).- `sklearn.cluster.AffinityPropagation`: Clustering algorithm based on message passing between data points.- `sklearn.cluster.SpectralClustering`: KMeans applied to a projection of the normalized graph Laplacian: finds normalized graph cuts if the affinity matrix is interpreted as an adjacency matrix of a graph.- `sklearn.cluster.Ward`: Ward implements hierarchical clustering based on the Ward algorithm, a variance-minimizing approach. At each step, it minimizes the sum of squared differences within all clusters (inertia criterion).Of these, Ward, SpectralClustering, DBSCAN and Affinity propagation can also work with precomputed similarity matrices. EXERCISE: digits clustering: Perform K-means clustering on the digits data, searching for ten clusters. Visualize the cluster centers as images (i.e. reshape each to 8x8 and use ``plt.imshow``). Do the clusters seem to be correlated with particular digits? What is the ``adjusted_rand_score``? Visualize the projected digits as in the last notebook, but this time use the cluster labels as the color. What do you notice?
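A minimal sketch of one possible approach to this exercise (the loadable solution in the cell below may differ; only the standard scikit-learn API is assumed):

```python
from sklearn.datasets import load_digits
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
import matplotlib.pyplot as plt

digits = load_digits()
km = KMeans(n_clusters=10, random_state=42)
clusters = km.fit_predict(digits.data)

# Visualize the ten cluster centers as 8x8 images
fig, axes = plt.subplots(2, 5, figsize=(8, 3))
for ax, center in zip(axes.ravel(), km.cluster_centers_):
    ax.imshow(center.reshape(8, 8), cmap='gray')
    ax.axis('off')
plt.show()

print('ARI:', adjusted_rand_score(digits.target, clusters))
```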
###Code
from sklearn.datasets import load_digits
digits = load_digits()
# ...
# %load solutions/08B_digits_clustering.py
###Output
_____no_output_____ |
MA477 - Theory and Applications of Data Science/Lessons/Lesson 5 - Seaborn/Lesson 5 - Seaborn.ipynb | ###Markdown
====================================================== MA477 - Theory and Applications of Data Science Lesson 5: Visualizing Data with Seaborn Dr. Valmir Bucaj United States Military Academy, West Point AY20-2====================================================== Lecture Outline Quick Overview and Installation Distribution Plots Categorical Plots Matrix Plots Regression Plots Overview and InstallationSeaborn is a Python visualization library built on top of Matplotlib and is especially designed for statistical plotting. In addition to having a wide range of beautiful styles and graphs, it also works really well with Pandas DataFrames. If you don't already have Seaborn installed on your computer, then you may install it either from your command line using the `pip` install command or from the anaconda terminal using the `conda` command: ```pythonpip install seabornconda install seaborn```For a more detailed and complete tutorial of Seaborn you may visit the official website by clicking on the: Seaborn WebsiteLet's begin by importing seaborn, pandas, and numpy.
###Code
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Data Set`Seaborn` comes with a few pre-loaded data sets, so for the sake of illustration, we will use one of these sets.
###Code
diamonds=sns.load_dataset('diamonds', cache=True)
diamonds.shape
[np.random.randint(50000) for i in range(10)]
[0 for i in range(10)]
diamonds=diamonds.iloc[[np.random.randint(50000) for i in range(10000)]]
diamonds.head()
###Output
_____no_output_____
###Markdown
Description of DatasetThis classic dataset contains the prices and other attributes of almost 54,000 diamonds. It's a great dataset for beginners learning to work with data analysis and visualization.Contentprice price in US dollars (\$326--\$18,823)carat weight of the diamond (0.2--5.01)cut quality of the cut (Fair, Good, Very Good, Premium, Ideal)color diamond colour, from J (worst) to D (best)clarity a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best))x length in mm (0--10.74)y width in mm (0--58.9)z depth in mm (0--31.8)depth total depth percentage = $z / mean(x, y) = 2 * z / (x + y)$ (43--79)Source: Kaggle If you want to check all the different pre-loaded datasets in `seaborn`, we may do so as follows:
###Code
sns.get_dataset_names()
###Output
C:\Users\Valmir.Bucaj\AppData\Local\Programs\Python\Python37\lib\site-packages\seaborn\utils.py:376: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("html.parser"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
The code that caused this warning is on line 376 of the file C:\Users\Valmir.Bucaj\AppData\Local\Programs\Python\Python37\lib\site-packages\seaborn\utils.py. To get rid of this warning, pass the additional argument 'features="html.parser"' to the BeautifulSoup constructor.
gh_list = BeautifulSoup(http)
###Markdown
Distribution Plots Say we wanted to know how the `price` is distributed:
###Code
sns.set_style('whitegrid')
plt.figure(figsize=(8,6))
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('price',fontsize=14)
sns.distplot(diamonds['price'],bins=30,kde=False)
plt.show()
###Output
_____no_output_____
###Markdown
Observe above that we get a `smoothing` of the distribution. This is called the Kernel Density Estimate (KDE) and is an empirical estimate of the underlying distribution of our data. We may turn this off, if we want to, by setting `kde=False`, in which case we just have a histogram. You can experiment with all the different parameters such as `bins, color` etc.What are some quick observations regarding the prices of the diamonds?Comparing Distributions of Two Features In case we are interested in comparing (visually) the distributions of two different features, say for example we are interested in knowing how the `carat` distribution compares to the `price`, then we may do so by using the `.jointplot()` method:
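For reference, a sketch of the same distribution plot with the KDE smoothing left on (same imports and `diamonds` DataFrame as above):

```python
plt.figure(figsize=(8, 6))
sns.distplot(diamonds['price'], bins=30, kde=True)
plt.show()
```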
###Code
sns.set_style('whitegrid')
sns.jointplot(x='carat',y='price',data=diamonds,kind='kde')
plt.show()
###Output
_____no_output_____
###Markdown
PracticeGo ahead and experiment with the different types of parameters; specifically, try setting `kind=` to `hex`, `kde` and `reg` and see what you get. Question: What are some quick observations you can draw by looking at this scatterplot? In case we wanted to compare distributions for every single combination of the (numerical) features in our data, instead of using the `.jointplot()` repeatedly, we can do it all at once via the `.pairplot()` method:
###Code
sns.pairplot(data=diamonds,hue='cut',palette='coolwarm')
###Output
_____no_output_____
###Markdown
In the pairplot we can pass a categorical column for the `hue` parameter. For example, if we pass the `color` or `cut` column for `hue`, then the scatter plots will be colored according to the color or cut of the diamond. Practice: Experiment with the different parameters. Categorical PlotsWhat if, for example, we wanted to visualize a certain statistic of a numerical feature, such as `price, carat, depth`, etc., based off a categorical variable such as `color, cut`, etc.? In what follows we will talk about a few of the ways we can do this. BarplotA barplot takes in a categorical variable for x and a numerical variable for y, and it computes and visualizes different statistics (e.g. mean, standard deviation, etc.) of the numerical feature for each of the categories. This should remind you of the `.groupby()` method we learned when we talked about Pandas. We choose which statistic to compute by setting `estimator=` to `np.mean, np.std, np.median`, etc.
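Since the barplot is essentially visualizing a groupby-aggregate, the same numbers can be computed directly with pandas — a one-line sketch matching the `estimator=np.std` barplot in the next cell:

```python
# Standard deviation of price for each cut -- the values the barplot below displays
diamonds.groupby('cut')['price'].std()
```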
###Code
diamonds.head()
plt.figure(figsize=(8,6))
sns.barplot(x='cut',y='price',data=diamonds,estimator=np.std)
plt.show()
###Output
_____no_output_____
###Markdown
PracticeTake a few moments and experiment with the different categorical and numerical features and different aggregate functions. Try to see if you observe anything interesting! CountplotSometimes we may simply be interested in knowing how many members of each category we have in our data. For this purpose we may use a `countplot`. It is essentially a `barplot` with the `estimator` set to be a count function. It also has a `hue` parameter, where you may specifically see how many of the elements in the selected `hue` are in each of the categories you are counting.
###Code
plt.figure(figsize=(8,6))
sns.countplot(x='color',data=diamonds,hue='clarity')
plt.show()
###Output
_____no_output_____
###Markdown
BoxplotsWe already discussed Boxplots when we talked about Matplotlib. We just want to briefly mention here that we can build them via seaborn as well. As we have already seen, Boxplots are great if we want to gain a good understanding of the distributions of different variables, as well as the distribution of a certain numerical value for the different categories. For example, if we were interested in knowing how the `price` of the diamonds is distributed among the diamonds of different `cut, color` etc., then a boxplot is a great way to gain a visual representation.It also has a `hue` parameter, which may be useful if we want to gain a better understanding of the distribution of the values of another category within each of the first selected categories. Let's discuss and illustrate all of these features below:
###Code
plt.figure(figsize=(12,6))
sns.boxplot(x='cut',y='price',data=diamonds,hue='color')
###Output
_____no_output_____
###Markdown
Practice: Take a few moments and experiment with all the different parameters and features and see if you notice anything interesting in the data. ViolinplotsViolinplots are very similar to boxplots, in the sense that they also show the distributions of the categorical data, but in a slightly different way: they show the kernel density estimates for each of the categories. Their inner workings and parameters, however, are very similar to those of the boxplot, including the `hue` parameter.
###Code
plt.figure(figsize=(12,6))
sns.violinplot(x='cut',y='price',data=diamonds)
plt.show()
###Output
_____no_output_____
###Markdown
Violinplots are especially aesthetically pleasing if we have a binary categorical variable to set for `hue`. ExerciseIn this exercise you are to replace all the colors `E, G, I, J, F` with the color `D`. So, at the end of the day you should end up with only colors `D` and `H`. Once you do this, go ahead and set `hue` to `color`.
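One possible way to do the replacement (a sketch — it assumes the `diamonds` DataFrame defined above and overwrites its `color` column in place):

```python
# Collapse every color except 'H' into 'D'
diamonds['color'] = diamonds['color'].astype(str).replace(
    {c: 'D' for c in ['E', 'G', 'I', 'J', 'F']}
)
diamonds['color'].unique()  # should now contain only 'D' and 'H'
```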
###Code
#Enter Code here
diamonds.head()
###Output
_____no_output_____
###Markdown
###Code
plt.figure(figsize=(12,6))
sns.violinplot(x='cut',y='carat',data=diamonds,hue='color',split=True)
###Output
_____no_output_____
###Markdown
Matrix Plots - Heatmaps & ClustermapsOne situation where we will often use Heatmaps is to display the correlation between the different (numerical) features in our data. Recall that the correlation coefficient $\rho_{X,Y}$ between two random variables $X$ and $Y$ is given by:$$\rho_{X,Y}=\frac{Cov(X,Y)}{\sigma_X\sigma_Y},$$where $Cov(X,Y)=E\left[XY\right]-E\left[X\right]E\left[Y\right]$ and $\sigma_X,\sigma_Y$ are the respective standard deviations.Recall from Linear Algebra that if our data is normalized to have mean zero and standard deviation one, then the correlation matrix for our data is simply given by $$Corr(A)=\frac{1}{n-1}A^TA$$where by $A$ we have denoted our data matrix (for example, the dataframe `diamonds`, after normalization, in our case)For example, if we were interested in understanding the correlation between all the different features in the `diamonds` dataset, we can represent it nicely via a heatmap:
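As a quick numerical sanity check of the formula above (a sketch using only the numerical columns of `diamonds`):

```python
num = diamonds.select_dtypes(include=np.number)
A = (num - num.mean()) / num.std()      # normalize to mean 0, standard deviation 1
corr_manual = (A.T @ A) / (len(A) - 1)  # (1/(n-1)) A^T A
np.allclose(corr_manual, num.corr())    # should evaluate to True
```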
###Code
diamonds.corr()
plt.figure(figsize=(12,8))
sns.heatmap(diamonds.corr(),cmap='coolwarm',annot=True,linecolor='white',lw='1')
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
plt.show()
###Output
_____no_output_____
###Markdown
ClustermapsClustermaps are similar in flavor to Heatmaps; however, instead of plotting correlations between the variables, under the hood first Hierarchical Clustering is run (we will learn about this when we discuss the unsupervised learning methods later in the semester, so don't worry too much about understanding the mechanics behind it for now) and then variables that are more similar to one another are placed closer to each other.
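If you are curious what happens under the hood, here is a rough sketch of the row-reordering step using SciPy's hierarchical clustering (seaborn's internals may differ slightly):

```python
from scipy.cluster.hierarchy import linkage, leaves_list

corr = diamonds.corr()
Z = linkage(corr, method='average')  # hierarchical clustering of the rows
leaves_list(Z)                       # the order in which the clustermap arranges the rows
```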
###Code
plt.figure(figsize=(12,8))
sns.clustermap(diamonds.corr(),cmap='coolwarm',standard_scale=1)
plt.show()
###Output
_____no_output_____ |
World/LSTM_prediction.ipynb | ###Markdown
Fetch the data and construct the data frames
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# CONFIRMED
df_confirmed = pd.read_csv('https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
df_confirmed.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
df_confirmed = df_confirmed.groupby(['Country/Region']).sum()
df_confirmed.columns = pd.to_datetime(df_confirmed.columns)
df_confirmed_daily = df_confirmed - df_confirmed.shift(1, axis=1, fill_value=0)
df_confirmed_daily_moving = df_confirmed_daily.rolling(window=7, axis=1).mean()
# DEATHS
df_deaths = pd.read_csv('https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
df_deaths.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
df_deaths = df_deaths.groupby(['Country/Region']).sum()
df_deaths.columns = pd.to_datetime(df_deaths.columns)
df_deaths_daily = df_deaths - df_deaths.shift(1, axis=1, fill_value=0)
df_deaths_daily_moving = df_deaths_daily.rolling(window=7, axis=1).mean()
# RECOVERED
df_recovered = pd.read_csv('https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
df_recovered.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
df_recovered = df_recovered.groupby(['Country/Region']).sum()
df_recovered.columns = pd.to_datetime(df_recovered.columns)
df_recovered_daily = df_recovered - df_recovered.shift(1, axis=1, fill_value=0)
df_recovered_daily_moving = df_recovered_daily.rolling(window=7, axis=1).mean()
###Output
_____no_output_____
###Markdown
Let's plot the data
###Code
plt.figure(figsize=(16, 6))
plt.xlabel('Date', fontsize=16)
plt.ylabel('Cases', fontsize=16)
plt.title('Covid-19 confirmed cases (US, India, Brazil)', fontsize=16)
plt.plot(df_confirmed_daily_moving.loc['US'])
plt.plot(df_confirmed_daily_moving.loc['India'])
plt.plot(df_confirmed_daily_moving.loc['Brazil'])
plt.legend(['US', 'India', 'Brazil'])
plt.show()
###Output
_____no_output_____
###Markdown
Preprocessing Stage
###Code
country = 'US'
nfeatures = 1
nsteps = 7
feature_1 = df_confirmed_daily.loc[country]
dataset = np.column_stack([feature_1])
data_len = len(dataset[:, 0])
train_len = int(0.8 * data_len)
test_len = data_len - train_len
train_data = dataset[:train_len, :]
test_data = dataset[train_len:, :]
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
train_data = scaler.fit_transform(train_data)
test_data = scaler.transform(test_data)
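# Build sliding windows: each sample holds the previous `nsteps` days and the target is the value on the day that follows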
train_x = np.array([train_data[i - nsteps:i, :] for i in range(nsteps, train_len)])
train_y = np.array([train_data[i, 0] for i in range(nsteps, train_len)])
test_x = np.array([test_data[i - nsteps:i, :] for i in range(nsteps, test_len)])
test_y = np.array([test_data[i, 0] for i in range(nsteps, test_len)])
###Output
_____no_output_____
###Markdown
Training Stage
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
model = Sequential([
LSTM(units=50, input_shape=(nsteps, nfeatures), return_sequences=True),
LSTM(units=50),
Dense(units=25),
Dense(units=nfeatures)
])
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x=train_x, y=train_y, batch_size=1, epochs=5)
predictions = model.predict(test_x)
predictions = scaler.inverse_transform(predictions)
###Output
Epoch 1/5
405/405 [==============================] - 4s 5ms/step - loss: 0.0087
Epoch 2/5
405/405 [==============================] - 2s 5ms/step - loss: 0.0047
Epoch 3/5
405/405 [==============================] - 2s 5ms/step - loss: 0.0049
Epoch 4/5
405/405 [==============================] - 2s 5ms/step - loss: 0.0044
Epoch 5/5
405/405 [==============================] - 2s 5ms/step - loss: 0.0049
###Markdown
Plotting of the predictions
###Code
plt.figure(figsize=(16, 8))
plt.title(f'Covid-19 confirmed cases of {country}', fontsize=18)
time_series = feature_1
train_time_series = time_series.iloc[0:train_len]
test_time_series = time_series.iloc[train_len:]
pred_time_series = pd.Series(data=predictions[:, 0], index=test_time_series.index[nsteps:])
plt.plot(train_time_series)
plt.plot(test_time_series)
plt.plot(pred_time_series)
plt.legend(['train', 'test', 'pred'])
plt.show()
###Output
_____no_output_____
###Markdown
Prediction for tomorrow
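Note that `predictions[-1]` printed in the cell below is the estimate for the last day of the *test* window. To forecast the day after the last observed date, one could feed the most recent `nsteps` days through the same scaler and model — a sketch, assuming the objects defined above:

```python
last_window = scaler.transform(dataset[-nsteps:, :])        # most recent nsteps days, scaled
next_scaled = model.predict(last_window[np.newaxis, :, :])  # (1, nsteps, nfeatures) -> (1, nfeatures)
next_day = scaler.inverse_transform(next_scaled)
print(f'Forecast for the next day: {int(next_day[0, 0])}')
```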
###Code
print(f'Prediction for tomorrow is {int(predictions[-1, 0])}')
###Output
_____no_output_____ |
docs/assets/demo/freia_image_multiscale.ipynb | ###Markdown
PyTorch PlayGroundThis is my notebook where I play around with all things PyTorch. I use the following packages:* PyTorch* Pyro* GPyTorch* PyTorch Lightning
###Code
# @title Install Packages
# %%capture
try:
import sys, os
from pyprojroot import here
# spyder up to find the root
root = here(project_files=[".here"])
# append to path
sys.path.append(str(root))
except ModuleNotFoundError:
import os
!pip install --upgrade pyro-ppl gpytorch pytorch-lightning tqdm wandb corner nflows
!pip install git+https://github.com/VLL-HD/FrEIA.git
#@title Load Packages
# TYPE HINTS
from typing import Tuple, Optional, Dict, Callable, Union
# PyTorch Settings
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
import torch.distributions as dist
# PyTorch Lightning Settings
import pytorch_lightning as pl
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import WandbLogger
# NUMPY SETTINGS
import numpy as np
np.set_printoptions(precision=3, suppress=True)
# MATPLOTLIB Settings
import corner
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
# SEABORN SETTINGS
import seaborn as sns
sns.set_context(context='talk',font_scale=0.7)
# sns.set(rc={'figure.figsize': (12, 9.)})
# sns.set_style("whitegrid")
# PANDAS SETTINGS
import pandas as pd
pd.set_option("display.max_rows", 120)
pd.set_option("display.max_columns", 120)
# LOGGING SETTINGS
import sys
import logging
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format='%(asctime)s:%(levelname)s:%(message)s'
)
logger = logging.getLogger()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
logging.info("Using device: {}".format(device))
#logger.setLevel(logging.INFO)
%load_ext autoreload
%autoreload 2
###Output
/home/emmanuel/.conda/envs/gaussflow-gpu/lib/python3.8/site-packages/pytorch_lightning/metrics/__init__.py:43: LightningDeprecationWarning: `pytorch_lightning.metrics.*` module has been renamed to `torchmetrics.*` and split off to its own package (https://github.com/PyTorchLightning/metrics) since v1.3 and will be removed in v1.5
rank_zero_deprecation(
###Markdown
Demo Datasets
###Code
# FrEIA imports
import FrEIA.framework as Ff
import FrEIA.modules as Fm
from src.models.layers.multiscale import SplitPrior, GeneralizedSplitPrior
###Output
_____no_output_____
###Markdown
MNIST (sklearn) Simple (No Splits)
###Code
# a simple chain of operations is collected by ReversibleSequential
batch = 100
n_channels = 1
height = 8
width = 8
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape)
for ilayer in range(3):
print(f"Level: {ilayer+1}")
# DownSampling (Easy CheckerBoard Mask)
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
###Output
Input:
torch.Size([100, 1, 8, 8])
Level: 1
DownSample
torch.Size([100, 4, 4, 4]) 6400
Level: 2
DownSample
torch.Size([100, 16, 2, 2]) 6400
Level: 3
DownSample
torch.Size([100, 64, 1, 1]) 6400
###Markdown
Splits (Half)
###Code
# a simple chain of operations is collected by ReversibleSequential
batch = 100
n_channels = 1
height = 8
width = 8
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape)
for ilayer in range(3):
print(f"Level: {ilayer+1}")
# DownSampling (Easy CheckerBoard Mask)
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# SplitPrior
print("Split")
inn.append(
SplitPrior,
# split = 2,
# split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
###Output
Input:
torch.Size([100, 1, 8, 8])
Level: 1
DownSample
torch.Size([100, 4, 4, 4]) 6400
Split
torch.Size([100, 2, 4, 4]) 3200
Level: 2
DownSample
torch.Size([100, 8, 2, 2]) 3200
Split
torch.Size([100, 4, 2, 2]) 1600
Level: 3
DownSample
torch.Size([100, 16, 1, 1]) 1600
Split
torch.Size([100, 8, 1, 1]) 800
###Markdown
MNIST Simple (No Splits)
###Code
# a simple chain of operations is collected by ReversibleSequential
batch = 100
n_channels = 1
height = 28
width = 28
# initialize sequential network
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape)
for ilayer in range(2):
print(f"Level: {ilayer+1}")
# DownSampling (Easy CheckerBoard Mask)
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
inn.append(Fm.Flatten)
print("Final")
print(inn(X_init)[0].shape)
###Output
Input:
torch.Size([100, 1, 28, 28])
Level: 1
DownSample
torch.Size([100, 4, 14, 14]) 78400
Level: 2
DownSample
torch.Size([100, 16, 7, 7]) 78400
Final
torch.Size([100, 784])
###Markdown
Splits (Half)
###Code
# a simple chain of operations is collected by ReversibleSequential
batch = 100
n_channels = 1
height = 28
width = 28
# initialize sequential network
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape)
for ilayer in range(2):
print(f"Level: {ilayer+1}")
# DownSampling (Easy CheckerBoard Mask)
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# SplitPrior
print("Half Split")
inn.append(
SplitPrior,
prior = dist.Normal(0.0, 1.0)#
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
inn.append(Fm.Flatten)
print("Final")
print(inn(X_init)[0].shape)
###Output
Input:
torch.Size([100, 1, 28, 28])
Level: 1
DownSample
torch.Size([100, 4, 14, 14]) 78400
Half Split
torch.Size([100, 2, 14, 14]) 39200
Level: 2
DownSample
torch.Size([100, 8, 7, 7]) 39200
Half Split
torch.Size([100, 4, 7, 7]) 19600
Final
torch.Size([100, 196])
###Markdown
Splits (Custom)
###Code
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape, np.prod(inn(X_init)[0].shape))
print(f"Resolution: I")
# DownSampling
print("DownSample I")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# DownSampling
print(f"Resolution: II")
print("DownSample II")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 4,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
inn.append(Fm.Flatten)
print("Final")
print(inn(X_init)[0].shape)
###Output
Input:
torch.Size([100, 1, 28, 28]) 78400
Resolution: I
DownSample I
torch.Size([100, 4, 14, 14]) 78400
Resolution: II
DownSample II
torch.Size([100, 16, 7, 7]) 78400
Split
torch.Size([100, 4, 7, 7]) 19600
Final
torch.Size([100, 196])
###Markdown
Splits (Custom + Cheap)
###Code
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape, np.prod(inn(X_init)[0].shape))
print(f"Resolution: I")
# DownSampling
print("DownSample I")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 1,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# DownSampling
print(f"Resolution: II")
print("DownSample II")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 1,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
inn.append(Fm.Flatten)
print("Final")
print(inn(X_init)[0].shape)
###Output
Input:
torch.Size([100, 1, 28, 28]) 78400
Resolution: I
DownSample I
torch.Size([100, 4, 14, 14]) 78400
Split
torch.Size([100, 1, 14, 14]) 19600
Resolution: II
DownSample II
torch.Size([100, 4, 7, 7]) 19600
Split
torch.Size([100, 1, 7, 7]) 4900
Final
torch.Size([100, 49])
###Markdown
CIFAR10 Splits (Half)
###Code
# a simple chain of operations is collected by ReversibleSequential
batch = 100
n_channels = 3
height = 32
width = 32
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape)
for ilayer in range(4):
print(f"Level: {ilayer+1}")
# DownSampling (Easy CheckerBoard Mask)
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# SplitPrior
print("Split")
inn.append(
SplitPrior,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
inn.append(Fm.Flatten)
print("Final")
print(inn(X_init)[0].shape)
###Output
Input:
torch.Size([100, 3, 32, 32])
Level: 1
DownSample
torch.Size([100, 12, 16, 16]) 307200
Split
torch.Size([100, 6, 16, 16]) 153600
Level: 2
DownSample
torch.Size([100, 24, 8, 8]) 153600
Split
torch.Size([100, 12, 8, 8]) 76800
Level: 3
DownSample
torch.Size([100, 48, 4, 4]) 76800
Split
torch.Size([100, 24, 4, 4]) 38400
Level: 4
DownSample
torch.Size([100, 96, 2, 2]) 38400
Split
torch.Size([100, 48, 2, 2]) 19200
Final
torch.Size([100, 192])
###Markdown
Splits (Custom)
###Code
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print(f"---------\nResolution: I")
print("Input:")
print(X_init.shape)
print(f"---------\nResolution: II")
# DownSampling
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# DownSampling
print(f"---------\nResolution: III")
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 12,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# DownSampling
print(f"---------\nResolution: IV")
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 12,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape, np.prod(inn(X_init)[0].shape))
# # DownSampling
# print(f"---------\nResolution: V")
# print("DownSample")
# inn.append(
# Fm.IRevNetDownsampling,
# )
# print(inn(X_init)[0].shape)
inn.append(Fm.Flatten)
print(f"---------\nFully Connected")
print(inn(X_init)[0].shape)
###Output
---------
Resolution: I
Input:
torch.Size([100, 3, 32, 32])
---------
Resolution: II
DownSample
torch.Size([100, 12, 16, 16]) 307200
---------
Resolution: III
DownSample
torch.Size([100, 48, 8, 8]) 307200
Split
torch.Size([100, 12, 8, 8]) 76800
---------
Resolution: IV
DownSample
torch.Size([100, 48, 4, 4]) 76800
Split
torch.Size([100, 12, 4, 4]) 19200
---------
Fully Connected
torch.Size([100, 192])
###Markdown
EuroSat Splits (Half)
###Code
# a simple chain of operations is collected by ReversibleSequential
batch = 100
n_channels = 3
height = 64
width = 64
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape)
for ilayer in range(6):
print(f"Level: {ilayer+1}")
# DownSampling (Easy CheckerBoard Mask)
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
# print(inn(X_init)[0].shape)
# SplitPrior
print("Split")
inn.append(
SplitPrior,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape)
###Output
Input:
torch.Size([100, 3, 64, 64])
Level: 1
DownSample
Split
torch.Size([100, 6, 32, 32])
Level: 2
DownSample
Split
torch.Size([100, 12, 16, 16])
Level: 3
DownSample
Split
torch.Size([100, 24, 8, 8])
Level: 4
DownSample
Split
torch.Size([100, 48, 4, 4])
Level: 5
DownSample
Split
torch.Size([100, 96, 2, 2])
Level: 6
DownSample
Split
torch.Size([100, 192, 1, 1])
###Markdown
Splits (Custom)
###Code
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print(f"---------\nResolution: I")
print("Input:")
print(X_init.shape)
print(f"---------\nResolution: II")
# DownSampling
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape)
# DownSampling
print(f"---------\nResolution: III")
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape)
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 12,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape)
# DownSampling
print(f"---------\nResolution: IV")
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape)
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 12,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape)
# DownSampling
print(f"---------\nResolution: V")
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape)
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 12,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape)
inn.append(Fm.Flatten)
print(f"---------\nFully Connected")
print(inn(X_init)[0].shape)
###Output
---------
Resolution: I
Input:
torch.Size([100, 3, 64, 64])
---------
Resolution: II
DownSample
torch.Size([100, 12, 32, 32])
---------
Resolution: III
DownSample
torch.Size([100, 48, 16, 16])
Split
torch.Size([100, 12, 16, 16])
---------
Resolution: IV
DownSample
torch.Size([100, 48, 8, 8])
Split
torch.Size([100, 12, 8, 8])
---------
Resolution: V
DownSample
torch.Size([100, 48, 4, 4])
Split
torch.Size([100, 12, 4, 4])
---------
Fully Connected
torch.Size([100, 192])
###Markdown
BigEarthNet Split (Half)
###Code
# a simple chain of operations is collected by ReversibleSequential
batch = 100
n_channels = 3
height = 120
width = 120
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print("Input:")
print(X_init.shape)
for ilayer in range(3):
print(f"Level: {ilayer+1}")
# DownSampling (Easy CheckerBoard Mask)
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
# print(inn(X_init)[0].shape)
# SplitPrior
print("Split")
inn.append(
SplitPrior,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape)
inn.append(Fm.Flatten)
print(f"---------\nFully Connected")
print(inn(X_init)[0].shape)
###Output
Input:
torch.Size([100, 3, 120, 120])
Level: 1
DownSample
Split
torch.Size([100, 6, 60, 60])
Level: 2
DownSample
Split
torch.Size([100, 12, 30, 30])
Level: 3
DownSample
Split
torch.Size([100, 24, 15, 15])
---------
Fully Connected
torch.Size([100, 5400])
###Markdown
Split (Custom)
###Code
inn = Ff.SequenceINN(n_channels, height, width)
X_init = torch.randn((batch, n_channels, height, width))
print(f"---------\nResolution: I")
print("Input:")
print(X_init.shape)
print(f"---------\nResolution: II")
# DownSampling
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape)
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 3,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape)
# DownSampling
print(f"---------\nResolution: III")
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape)
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 3,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape)
# DownSampling
print(f"---------\nResolution: IV")
print("DownSample")
inn.append(
Fm.IRevNetDownsampling,
)
print(inn(X_init)[0].shape)
# SplitPrior
print("Split")
inn.append(
GeneralizedSplitPrior,
split = 3,
split_dim=0,
prior = dist.Normal(0.0, 1.0)
)
print(inn(X_init)[0].shape)
# # DownSampling
# print(f"---------\nResolution: V")
# print("DownSample")
# inn.append(
# Fm.IRevNetDownsampling,
# )
# print(inn(X_init)[0].shape)
# # SplitPrior
# print("Split")
# inn.append(
# GeneralizedSplitPrior,
# split = 12,
# split_dim=0,
# prior = dist.Normal(0.0, 1.0)
# )
# print(inn(X_init)[0].shape)
inn.append(Fm.Flatten)
print(f"---------\nFully Connected:")
print(inn(X_init)[0].shape)
###Output
---------
Resolution: I
Input:
torch.Size([100, 3, 120, 120])
---------
Resolution: II
DownSample
torch.Size([100, 12, 60, 60])
Split
torch.Size([100, 3, 60, 60])
---------
Resolution: III
DownSample
torch.Size([100, 12, 30, 30])
Split
torch.Size([100, 3, 30, 30])
---------
Resolution: IV
DownSample
torch.Size([100, 12, 15, 15])
Split
torch.Size([100, 3, 15, 15])
---------
Fully Connected:
torch.Size([100, 675])
###Markdown
Data
###Code
from sklearn.datasets import load_digits
from torch.utils.data import Dataset, DataLoader
class Digits(Dataset):
"""Scikit-Learn Digits dataset."""
def __init__(self, mode='train', transforms=None):
digits = load_digits()
if mode == 'train':
self.data = digits.data[:1000].astype(np.float32)
elif mode == 'val':
self.data = digits.data[1000:1350].astype(np.float32)
else:
self.data = digits.data[1350:].astype(np.float32)
self.transforms = transforms
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
sample = self.data[idx]
if self.transforms:
sample = self.transforms(sample)
return sample
num_x = 4
num_y = 4
digits = load_digits()
data = digits.data[:1000]
images = data.reshape((1000, 1, 8, 8))
fig, ax = plt.subplots(num_x, num_y)
for i, ax in enumerate(ax.flatten()):
plottable_image = np.reshape(data[i], (8, 8))
ax.imshow(plottable_image, cmap='gray')
ax.axis('off')
def plot_digits(data, num_x, num_y):
fig, ax = plt.subplots(num_x, num_y)
for i, ax in enumerate(ax.flatten()):
plottable_image = data[i]
ax.imshow(plottable_image, cmap='gray')
ax.axis('off')
plt.show()
return None
n_samples = 100
plot_digits(images[:n_samples].squeeze(), 5, 5)
###Output
_____no_output_____
###Markdown
Transformations
###Code
def plot_digits_channels(data, n_samples, n_channels):
fig, ax = plt.subplots(n_samples, n_channels)
for i in range(n_samples):
for j in range(n_channels):
plottable_image = data[i, j]
ax[i, j].imshow(plottable_image, cmap='gray')
ax[i, j].axis('off')
plt.show()
return None
# FrEIA imports
import FrEIA.framework as Ff
import FrEIA.modules as Fm
###Output
_____no_output_____
###Markdown
CheckerBoard Sampling
###Code
layer1 = Fm.IRevNetDownsampling([(channels, h, w,)])
layer2 = Fm.Split(dims_in=[(12, 16, 16,)])
layer3 = Fm.IRevNetDownsampling([(8, 14, 14,)])
layer4 = Fm.Split(dims_in=[(16, 8, 8)])
layer5 = Fm.IRevNetDownsampling([(24, 4, 4,)])
channels = 3
h = 64
w = 64
layer1 = Fm.IRevNetDownsampling([(channels, h, w,)])
layer2 = Fm.Split(dims_in=[(12, 32, 32,)])
layer3 = Fm.IRevNetDownsampling([(6, 32, 32,)])
layer4 = Fm.Split(dims_in=[(24, 16, 16)])
layer5 = Fm.IRevNetDownsampling([(12, 16, 16,)])
layer6 = Fm.Split(dims_in=[(48, 8, 8)])
layer7 = Fm.IRevNetDownsampling([(24, 8, 8,)])
layer8 = Fm.Split(dims_in=[(96, 8, 8)])
layer9 = Fm.IRevNetDownsampling([(48, 8, 8,)])
layer10 = Fm.Split(dims_in=[(192, 2, 2,)])
layer11 = Fm.IRevNetDownsampling([(96, 2, 2,)])
# forward transformations
input_image = (torch.ones((100, channels, h, w)),)
z1, _ = layer1.forward(x=input_image)
print("Layer I (DownSample)")
print(z1[0].shape)
z2, _ = layer2.forward(x=z1)
print("Layer II")
print(z2[0].shape)
z3, _ = layer3.forward(x=z2)
print("Layer III (DownSample)")
print(z3[0].shape)
z4, _ = layer4.forward(x=z3)
print("Layer IV")
print(z4[0].shape)
z5, _ = layer5.forward(x=z4)
print("Layer V (DownSample)")
print(z5[0].shape)
z6, _ = layer6.forward(x=z5)
print("Layer VI")
print(z6[0].shape)
z7, _ = layer7.forward(x=z6)
print("Layer VII (DownSampling)")
print(z7[0].shape)
z8, _ = layer8.forward(x=z7)
print("Layer VIII")
print(z8[0].shape)
z9, _ = layer9.forward(x=z8)
print("Layer IX")
print(z9[0].shape)
z10, _ = layer10.forward(x=z9)
print("Layer X")
print(z10[0].shape)
z11, _ = layer11.forward(x=z10)
print("Layer XI")
print(z11[0].shape)
print("Layer III (DownSample)")
print(z3[0].shape)
print("Layer VII (DownSampling)")
print(z7[0].shape)
print("Layer VIII")
print(z8[0].shape)
print("Layer IX (DownSampling)")
print(z9[0].shape)
plot_digits(images.squeeze(), 5, 1)
plot_digits_channels(z1[0], 5, 4)
plot_digits_channels(z2[0], 5, 16)
plot_digits_channels(z3[0], 5, 64)
###Output
_____no_output_____
###Markdown
Haar Sampling
###Code
layer1 = Fm.HaarDownsampling([(channels, h, w,)])
layer2 = Fm.HaarDownsampling([(4, 4, 4,)])
layer3 = Fm.HaarDownsampling([(16, 2, 2,)])
# forward transformations (convert the numpy image array to a float tensor)
input_image = (torch.from_numpy(images).float(),)
z1, _ = layer1.forward(x=input_image)
z2, _ = layer2.forward(x=z1)
z3, _ = layer3.forward(x=z2)
z1[0].shape, z2[0].shape, z3[0].shape
plot_digits(images.squeeze(), 5, 1)
plot_digits_channels(z1[0], 5, 4)
plot_digits_channels(z2[0], 5, 16)
plot_digits_channels(z3[0], 5, 64)
###Output
_____no_output_____
###Markdown
MultiScale I - Pure DownSampling SubNet
###Code
# we define a subnet for use inside an affine coupling block
# for more detailed information see the full tutorial
def subnet_fc(dims_in, dims_out):
return nn.Sequential(
nn.Linear(dims_in, 64),
nn.ReLU(),
nn.Linear(64, dims_out)
)
def sub_conv(ch_hidden, kernel):
pad = kernel // 2
return lambda ch_in, ch_out: nn.Sequential(
nn.Conv2d(ch_in, ch_hidden, kernel, padding=pad),
nn.ReLU(),
nn.Conv2d(ch_hidden, ch_out, kernel, padding=pad)
)
DIMS_IN = [1, 8, 8]
nodes = []
# input node
nodes.append(Ff.InputNode(3, 32, 32))
# Block Transforms
for k in range(2):
nodes.append(
Ff.Node(
nodes[-1],
Fm.GLOWCouplingBlock,
{'subnet_constructor':sub_conv(32, 3), 'clamp': 1.0},
)
)
# Downscaling
nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {}, name='downsample_1'))
# Block Transforms
for k in range(2):
nodes.append(
Ff.Node(
nodes[-1],
Fm.GLOWCouplingBlock,
{'subnet_constructor':sub_conv(64, 3), 'clamp': 1.0},
)
)
# Downscaling
nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {}, name='downsample_2'))
# Block Transforms
for k in range(2):
nodes.append(
Ff.Node(
nodes[-1],
Fm.GLOWCouplingBlock,
{'subnet_constructor':sub_conv(128, 3), 'clamp': 1.0},
)
)
# Downscaling
nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {}, name='downsample_3'))
# Block Transforms
for k in range(2):
nodes.append(
Ff.Node(
nodes[-1],
Fm.GLOWCouplingBlock,
{'subnet_constructor':sub_conv(128, 3), 'clamp': 1.0},
)
)
# Downscaling
nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {}, name='downsample_4'))
# Block Transforms
for k in range(2):
nodes.append(
Ff.Node(
nodes[-1],
Fm.GLOWCouplingBlock,
{'subnet_constructor':sub_conv(256, 3), 'clamp': 1.0},
)
)
# Downscaling
nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {}, name='downsample_4'))
# Downscaling
nodes.append(Ff.Node(nodes[-1], Fm.Flatten, {}, name='flatten'))
# Block Transforms
for k in range(2):
nodes.append(
Ff.Node(
nodes[-1],
Fm.GLOWCouplingBlock,
{'subnet_constructor':subnet_fc, 'clamp': 1.0},
)
)
# Base Distribution
nodes.append(Ff.OutputNode(nodes[-1]))
inn_model = Ff.ReversibleGraphNet(nodes, verbose=False)
test_data = torch.ones((100, 3, 32, 32))
z, ldj = inn_model.forward((test_data,))
z.shape
###Output
_____no_output_____
###Markdown
DataLoader
###Code
n_train = 5_000
n_valid = 1_000
data_init = digits.data[:100]
data_train = digits.data[:1000]
data_valid = digits.data[1000:1350]
data_test = digits.data[1350:]
X_train = torch.FloatTensor(data_train)
X_valid = torch.FloatTensor(data_valid)
X_test = torch.FloatTensor(data_test)
# make into dataset
train_ds = TensorDataset(X_train)
valid_ds = TensorDataset(X_valid)
test_ds = TensorDataset(X_test)
# make dataloader
batch_size = 16
num_workers = 4
shuffle = True
train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers)
test_dl = DataLoader(test_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Model Lightning Module
###Code
from src.lit_plane import FlowLearnerPlane
import ml_collections
cfg = ml_collections.ConfigDict()
cfg.loss_fn = "inn"
cfg.num_epochs = 200
cfg.learning_rate = 1e-2
cfg.batch_size = 100
cfg.weight_decay = 0.0
cfg.beta1 = 0.9
cfg.beta2 = 0.999
cfg.betas = (cfg.beta1, cfg.beta2)
###Output
_____no_output_____
###Markdown
Standard RVP
###Code
# FrEIA imports
import FrEIA.framework as Ff
import FrEIA.modules as Fm
n_features = 64
# we define a subnet for use inside an affine coupling block
# for more detailed information see the full tutorial
def subnet_fc(dims_in, dims_out):
return nn.Sequential(nn.Linear(dims_in, 512), nn.ReLU(),
nn.Linear(512, dims_out))
# a simple chain of operations is collected by ReversibleSequential
N_DIM = n_features  # dimensionality of the flattened 8x8 digit images
inn = Ff.SequenceINN(N_DIM)
for k in range(8):
inn.append(
Fm.AllInOneBlock,
subnet_constructor=subnet_fc,
permute_soft=True,
gin_block=False,
learned_householder_permutation=0
)
base_dist = dist.MultivariateNormal(torch.zeros(n_features), torch.eye(n_features))
###Output
_____no_output_____
###Markdown
Initial Transformation
###Code
x = torch.Tensor(data)
z, log_jac_det = inn(x)
plot_digits(z.detach().numpy(), 4, 4)
###Output
_____no_output_____
###Markdown
Inverse Transformation
###Code
x_ori, _ = inn(z, rev=True)
plot_digits(x_ori.detach().numpy(), 4, 4)
###Output
_____no_output_____
###Markdown
Training
###Code
%%time
learn = FlowLearnerPlane(inn, base_dist, cfg)
trainer = pl.Trainer(
# epochs
min_epochs=5,
max_epochs=cfg.num_epochs,
# progress bar
progress_bar_refresh_rate=100,
# device
gpus=0,
# gradient norm
gradient_clip_val=1.0,
gradient_clip_algorithm='norm'
)
trainer.fit(learn, train_dataloader=train_dl, val_dataloaders=None)
###Output
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
/home/emmanuel/.conda/envs/gaussflow-gpu/lib/python3.8/site-packages/pytorch_lightning/trainer/configuration_validator.py:101: UserWarning: you defined a validation_step but have no val_dataloader. Skipping val loop
rank_zero_warn(f'you defined a {step_name} but have no {loader_name}. Skipping {stage} loop')
Set SLURM handle signals.
| Name | Type | Params
--------------------------------------
0 | model | SequenceINN | 464 K
--------------------------------------
398 K Trainable params
65.5 K Non-trainable params
464 K Total params
1.858 Total estimated model params size (MB)
###Markdown
Latent Space
###Code
x = torch.Tensor(data_init)
z, log_jac_det = learn.model(x)
plot_digits(z.detach().numpy(), 4, 4)
fig = corner.corner(z.detach().numpy()[:, :5], hist_factor=2, color="red")
###Output
2021-06-27 20:52:09,091:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,123:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,144:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,173:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,194:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,215:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,246:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,267:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,288:WARNING:Too few points to create valid contours
2021-06-27 20:52:09,310:WARNING:Too few points to create valid contours
###Markdown
Inverse Transform
###Code
x_ori, _ = learn.model(z, rev=True)
plot_digits(x_ori.detach().numpy(), 4, 4)
fig = corner.corner(x_ori.detach().numpy()[:, :5], hist_factor=2, color="green")
###Output
2021-06-27 20:53:32,156:WARNING:Too few points to create valid contours
2021-06-27 20:53:32,180:WARNING:Too few points to create valid contours
2021-06-27 20:53:32,213:WARNING:Too few points to create valid contours
2021-06-27 20:53:32,257:WARNING:Too few points to create valid contours
2021-06-27 20:53:32,290:WARNING:Too few points to create valid contours
2021-06-27 20:53:32,312:WARNING:Too few points to create valid contours
2021-06-27 20:53:32,335:WARNING:Too few points to create valid contours
2021-06-27 20:53:32,358:WARNING:Too few points to create valid contours
###Markdown
Sampling
###Code
# sample from the INN by sampling from a standard normal and transforming
# it in the reverse direction
n_samples = 100
z = torch.randn(n_samples, N_DIM)
samples, _ = learn.model(z, rev=True)
plot_digits(samples.detach().numpy(), 4, 4)
fig = corner.corner(samples.detach().numpy(), hist_factor=2, color="red")
###Output
_____no_output_____ |
preprocessing/trs_analysis/vol_trs_analysis.ipynb | ###Markdown
IntroductionNotebook to analyse the PyBossa taskruns from the Volunteers App. The analysis in this notebook is needed because the number of volunteers participating in the empirical study changed during the execution period, making it necessary to rerun some tasks so that the tasks are distributed more equally among the volunteers.**The final amount should be around 10 tasks/volunteer.** Load Libraries and Data
###Code
from mod_finder_util import mod_finder_util
mod_finder_util.add_modules_origin_search_path()
import pandas as pd
import numpy as np
import seaborn as sns
import modules.utils.firefox_dataset_p2 as fd
from sklearn.metrics import cohen_kappa_score
taskruns_1 = fd.TaskRuns.read_volunteers_taskruns_1_df()
taskruns_2 = fd.TaskRuns.read_volunteers_taskruns_2_df()
taskruns = pd.concat([taskruns_1, taskruns_2])
print('TaskRuns shape: {}'.format(taskruns.shape))
###Output
TaskRuns_1 shape: (113, 11)
TaskRuns_2 shape: (10, 11)
TaskRuns shape: (123, 11)
###Markdown
Defective Taskruns The taskruns detailed below must be excluded from the generation of the oracles and from the analysis in general, because we had technical problems recording the correct answers for them during the execution of the empirical study.
###Code
taskruns_1.iloc[0,:]
taskruns_2.iloc[0,:]
###Output
_____no_output_____
###Markdown
Volunteers Taskruns
###Code
taskruns['created'] = pd.to_datetime(taskruns['created'], yearfirst=True)
taskruns['created_month'] = taskruns.apply(lambda row: row['created'].month, axis=1)
taskruns['created_day'] = taskruns.apply(lambda row: row['created'].day, axis=1)
taskruns['created_hour'] = taskruns.apply(lambda row: row['created'].hour, axis=1)
grouped_trs = taskruns.groupby(by=['created_month','created_day','created_hour']).count()
grouped_trs
###Output
_____no_output_____
###Markdown
Volunteers Contributions
###Code
trs_p1 = taskruns[(taskruns.created_day == 22) & (taskruns.created_hour==13)].loc[:,'task_id']
trs_p2 = taskruns[(taskruns.created_day == 22) & ((taskruns.created_hour==14) | (taskruns.created_hour == 15))].loc[:,'task_id']
trs_p3 = taskruns[(taskruns.created_day == 25) & (taskruns.created_hour==12)].loc[:,'task_id']
trs_p4 = taskruns[(taskruns.created_day == 25) & ((taskruns.created_hour==18) | (taskruns.created_hour == 19))].loc[:,'task_id']
trs_p5 = taskruns[(taskruns.created_day == 26) & (taskruns.created_hour == 17)].loc[:,'task_id']
trs_p6 = taskruns[(taskruns.created_day == 26) & (taskruns.created_hour == 19)].loc[:,'task_id']
trs_p7 = taskruns[(taskruns.created_day == 27) & (taskruns.created_hour == 19)].loc[:,'task_id']
trs_p8 = taskruns[(taskruns.created_day == 1) & (taskruns.created_hour == 14)].loc[:,'task_id']
trs_p9 = taskruns[(taskruns.created_day == 1) & (taskruns.created_hour == 18)].loc[:,'task_id']
contri_df = pd.DataFrame(columns=['contributor','answers'])
contri_df['contributor'] = ['p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9']
contri_df['answers'] = [trs_p1.values, trs_p2.values, trs_p3.values, trs_p4.values, trs_p5.values, trs_p6.values, trs_p7.values, trs_p8.values, trs_p9.values]
contri_df['amount_answered'] = contri_df.apply(lambda row : len(row['answers']), axis=1)
contri_df
sns.barplot(data=contri_df, y=contri_df['amount_answered'], x=contri_df['contributor'])
###Output
_____no_output_____
###Markdown
Expected composition of resolution of tasks:* Lucas - P1: 11 tasks* Marcos - P2: 11 tasks* Diego - P3: 10 tasks* Isabelly - P4: 11 tasks* Anderson - P5: 10 tasks* Wesley - P6: 10 tasks* Jaziel - P7: 10 tasks * MatheusB - P8: 10 tasks* MatheusG - P9: 10 tasks**Total: 93 tasks**
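A one-line sanity check of the totals listed above:

```python
# 3 volunteers with 11 tasks + 6 volunteers with 10 tasks
assert 3 * 11 + 6 * 10 == 93
```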
###Code
contrib_10 = ['p3','p5','p7','p8','p6','p9']
contrib_11 = ['p1','p2','p4']
def tasks_to_rerun(row):
if row['contributor'] in contrib_10:
if len(row['answers']) > 10:
return row['answers'][10:]
else:
return []
else:
if row['contributor'] in contrib_11:
if len(row['answers']) > 11:
return row['answers'][11:]
else:
return []
contri_df['tasks_to_rerun'] = contri_df.apply(lambda row : tasks_to_rerun(row), axis=1)
contri_df['solved_tasks_range'] = contri_df.apply(lambda row : (row['answers'][0], row['answers'][-1]), axis=1)
contri_df
###Output
_____no_output_____
###Markdown
Tasks To Rerun
###Code
for arr in contri_df.tasks_to_rerun.values:
print('Tasks to Rerun: {}'.format(arr))
###Output
Tasks to Rerun: [1639 1640 1641 1642]
Tasks to Rerun: [1654 1655 1656 1657]
Tasks to Rerun: [1668 1669]
Tasks to Rerun: [1681 1682 1683 1684 1685 1686 1687 1688 1689 1690]
Tasks to Rerun: []
Tasks to Rerun: [1691 1692]
Tasks to Rerun: [1703]
Tasks to Rerun: [1714 1715 1716 1717 1718 1719 1720]
Tasks to Rerun: []
###Markdown
Count TaskRuns by Task
###Code
cols = ['task_id','id']
df = taskruns[cols].groupby(by='task_id').count()
df.rename(columns={"id" :'count_trs'}, inplace=True)
df.head(10)
###Output
_____no_output_____
###Markdown
Check All Tasks Have At Least One Answer
###Code
print(set(df.index) - set(range(1628,1720+1,1)))
###Output
{1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1918, 1919}
###Markdown
Check Answers of Rerun Tasks
###Code
rerunned_tasks = []
for set_rerunned_tasks in contri_df.tasks_to_rerun.values:
for rr_task in set_rerunned_tasks:
rerunned_tasks.append(rr_task)
remaining_tasks = df[(df.index.isin(rerunned_tasks)) & (df.count_trs < 2)]
print(remaining_tasks.shape)
remaining_tasks
###Output
(10, 1)
###Markdown
Bug Reports with Missing TaskRuns
###Code
taskruns[taskruns.task_id.isin(remaining_tasks.index)].bug_id
###Output
_____no_output_____
###Markdown
Compare Order of TaskrunsThe order of the taskruns must be the same, whether ordered by the finish_time field or by the bug_id field.
###Code
taskruns_ordered_by_finish_time = taskruns.sort_values(by='finish_time')
taskruns_ordered_by_bug_id = taskruns.copy() ## already ordered by bug_id
diffs = taskruns_ordered_by_finish_time.bug_id - taskruns_ordered_by_bug_id.bug_id
d = np.sum(diffs)
assert d == 0
###Output
_____no_output_____ |
notebooks/Dataset C - Obesity Level Estimation/Synthetic data generation/SDV Dataset C - Obesity.ipynb | ###Markdown
1. Read data
###Code
#read real dataset
real_data = pd.read_csv(HOME_PATH + TRAIN_FILE)
categorical_columns = ['Gender','family_history_with_overweight','FAVC','CAEC','SMOKE','SCC','CALC','MTRANS','Obesity_level']
for col in categorical_columns :
real_data[col] = real_data[col].astype('category')
data_train = real_data
real_data
real_data.dtypes
###Output
_____no_output_____
###Markdown
1.1. Create metadata
###Code
metadata = Metadata()
metadata.add_table(name='data', data=real_data, primary_key='')
metadata
tables = dict()
tables['data'] = real_data
tables
###Output
_____no_output_____
###Markdown
2. Train the model and generate data
###Code
sdv = SDV()
sdv.fit(metadata, tables)
samples = sdv.sample()
synthetic_data = samples['data']
synthetic_data
float_2 = ['Height','Weight']
for col in float_2 :
synthetic_data[col] = abs(np.round(synthetic_data[col],2))
synthetic_data
real_data.describe()
synthetic_data.describe()
columns = real_data.columns
fig, axs = plt.subplots(nrows=6, ncols=3, figsize=(20,20))
idx = {0:[0,0], 1:[0,1], 2:[0,2], 3:[1,0], 4:[1,1], 5:[1,2], 6:[2,0], 7:[2,1], 8:[2,2], 9:[3,0], 10:[3,1], 11:[3,2], 12:[4,0],
13:[4,1], 14:[4,2], 15:[5,0], 16:[5,1]}
for i in range(0,len(columns)) :
data = np.column_stack((real_data[columns[i]], synthetic_data[columns[i]]))
axs[idx[i][0], idx[i][1]].hist(data, density=False, histtype='bar', label=['Real','Synthetic (SDV)'])
axs[idx[i][0], idx[i][1]].set_title(columns[i])
axs[idx[i][0], idx[i][1]].legend()
fig.delaxes(axs[5,2])
fig.tight_layout(pad=1.1)
synthetic_data.to_csv(HOME_PATH + SYNTHETIC_FILE, index = False)
###Output
_____no_output_____ |
01_advanced_topics_machine_learning/assignments/notebooks/4_Sequence/Lab08_Sol_Machine_Translation.ipynb | ###Markdown
Lab 08 Setup First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
###Code
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
!pip install -q -U tensorflow-addons
IS_COLAB = True
except Exception:
IS_COLAB = False
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
if not tf.config.list_physical_devices('GPU'):
print("No GPU was detected. LSTMs and CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
LECTURE_ID = "08"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", LECTURE_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
###Output
No GPU was detected. LSTMs and CNNs can be very slow without a GPU.
###Markdown
Exercise 1Train an Encoder–Decoder model that can convert a date string from one format to another (e.g., from "April 22, 2019" to "2019-04-22"). 1. Create the dataset. 2. Define the model as an Encoder–Decoder: feed in the input sequence, which first goes through the encoder (an embedding layer followed by a single LSTM layer), which outputs a vector; the vector then goes through a decoder (a single LSTM layer, followed by a dense output layer), which outputs a sequence of vectors, each representing the estimated probabilities for all possible output characters. Dataset Let's start by creating the dataset. We will use random days between 1000-01-01 and 9999-12-31:
###Code
from datetime import date
MONTHS = ["January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"]
def random_dates(n_dates):
min_date = date(1000, 1, 1).toordinal()
max_date = date(9999, 12, 31).toordinal()
ordinals = np.random.randint(max_date - min_date, size=n_dates) + min_date
dates = [date.fromordinal(ordinal) for ordinal in ordinals]
x = [MONTHS[dt.month - 1] + " " + dt.strftime("%d, %Y") for dt in dates]
y = [dt.isoformat() for dt in dates]
return x, y
np.random.seed(42)
n_dates = 3
x_example, y_example = random_dates(n_dates)
print("{:25s}{:25s}".format("Input", "Target"))
print("-" * 50)
for idx in range(n_dates):
print("{:25s}{:25s}".format(x_example[idx], y_example[idx]))
INPUT_CHARS = "".join(sorted(set("".join(MONTHS)))) + "0123456789, "  # digits listed once (the original had a duplicate '0')
INPUT_CHARS
OUTPUT_CHARS = "0123456789-"
def date_str_to_ids(date_str, chars=INPUT_CHARS):
return [chars.index(c) for c in date_str]
date_str_to_ids(x_example[0], INPUT_CHARS)
def prepare_date_strs(date_strs, chars=INPUT_CHARS):
X_ids = [date_str_to_ids(dt, chars) for dt in date_strs]
X = tf.ragged.constant(X_ids, ragged_rank=1)
return (X + 1).to_tensor() # using 0 as the padding token ID
def create_dataset(n_dates):
x, y = random_dates(n_dates)
return prepare_date_strs(x, INPUT_CHARS), prepare_date_strs(y, OUTPUT_CHARS)
np.random.seed(42)
X_train, Y_train = create_dataset(10000)
X_valid, Y_valid = create_dataset(2000)
X_test, Y_test = create_dataset(2000)
Y_train[0]
###Output
_____no_output_____
###Markdown
First version: a very basic seq2seq model
Let's first try the simplest possible model: we feed in the input sequence, which first goes through the encoder (an embedding layer followed by a single LSTM layer), which outputs a vector, then it goes through a decoder (a single LSTM layer, followed by a dense output layer), which outputs a sequence of vectors, each representing the estimated probabilities for all possible output characters.
Since the decoder expects a sequence as input, we repeat the vector (which is output by the encoder) as many times as the longest possible output sequence.
###Code
embedding_size = 32
max_output_length = Y_train.shape[1]
np.random.seed(42)
tf.random.set_seed(42)
encoder = keras.models.Sequential([
keras.layers.Embedding(input_dim=len(INPUT_CHARS) + 1,
output_dim=embedding_size,
input_shape=[None]),
keras.layers.LSTM(128)
])
decoder = keras.models.Sequential([
keras.layers.LSTM(128, return_sequences=True),
keras.layers.Dense(len(OUTPUT_CHARS) + 1, activation="softmax")
])
model = keras.models.Sequential([
encoder,
keras.layers.RepeatVector(max_output_length),
decoder
])
optimizer = keras.optimizers.Nadam()
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer,
metrics=["accuracy"])
history = model.fit(X_train, Y_train, epochs=20,
validation_data=(X_valid, Y_valid))
def ids_to_date_strs(ids, chars=OUTPUT_CHARS):
return ["".join([("?" + chars)[index] for index in sequence])
for sequence in ids]
X_new = prepare_date_strs(["September 17, 2009", "July 14, 1789"])
ids = model.predict_classes(X_new)
for date_str in ids_to_date_strs(ids):
print(date_str)
###Output
2009-09-17
1789-07-14
###Markdown
However, since the model was only trained on input strings of length 18 (which is the length of the longest date), it does not perform well if we try to use it to make predictions on shorter sequences:
###Code
X_new = prepare_date_strs(["May 02, 2020", "July 14, 1789"])
ids = model.predict_classes(X_new)
for date_str in ids_to_date_strs(ids):
print(date_str)
###Output
2020-02-02
1789-02-14
###Markdown
Oops! We need to ensure that we always pass sequences of the same length as during training, using padding if necessary. Let's write a little helper function for that:
###Code
max_input_length = X_train.shape[1]
def prepare_date_strs_padded(date_strs):
X = prepare_date_strs(date_strs)
if X.shape[1] < max_input_length:
X = tf.pad(X, [[0, 0], [0, max_input_length - X.shape[1]]])
return X
def convert_date_strs(date_strs):
X = prepare_date_strs_padded(date_strs)
ids = model.predict_classes(X)
return ids_to_date_strs(ids)
convert_date_strs(["May 02, 2020", "July 14, 1789"])
###Output
_____no_output_____ |
Src/scene/scene.ipynb | ###Markdown
ngrams
###Code
import re
from nltk.util import ngrams
s = 'Red Viper vs. The Mountain.'
s = s.lower()
s = re.sub(r'[^a-zA-Z0-9\s]', ' ', s)  # raw string avoids the invalid escape-sequence warning
tokens = [token for token in s.split(" ") if token != ""]
output = list(ngrams(tokens, 2))
output
###Output
_____no_output_____
###Markdown
Synonyms
###Code
from nltk.corpus import wordnet
s1 = wordnet.synsets('wedding')
# chAttr is assumed to be defined in an earlier cell (a list of attribute strings to compare against 'wedding')
for c in chAttr:
res = 0
s2 = wordnet.synsets(c)
if s2 == []:
continue
x = s1[0]
y = s2[0]
res = x.wup_similarity(y)
print(c, res)
import nltk
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
document = 'old Frey kill'
from nltk import ne_chunk, pos_tag, word_tokenize
from nltk.tree import Tree
def extractNames(sentence):
tagged_sentence = nltk.pos_tag(nltk.word_tokenize(sentence))
names = []
for chunk in nltk.ne_chunk(tagged_sentence):
if type(chunk) == nltk.tree.Tree:
if chunk.label() in ['GPE', 'PERSON']:
names.append(' '.join([c[0] for c in chunk]))
elif type(chunk) == tuple and chunk[1] in ['NN', 'NNP', 'NNS', 'NNPS']:
names.append(chunk[0])
return names
extractNames('red viper is Oberyn Martell')
import json
from fuzzywuzzy import fuzz, process
def synSubstitution(query):
with open('../syn/wordcount-synonyms.json', 'r') as f:
syns = json.load(f)['synonyms']
syns = [json.dumps(syn) for syn in syns]
# syns = [i + for alt in syn['alt'] for i, syn in enumerate(syns)]
names = extractNames(query)
for name in names:
best = process.extract(name, syns, scorer=fuzz.token_set_ratio, limit=1)[0]
if best[1] > 80:
query += ' ' + json.loads(best[0])['accepted']
return query
synSubstitution('Aegon')
###Output
_____no_output_____ |
Animal Crossing Villager Insights/Data/.ipynb_checkpoints/Animal Crossing Cleaning-checkpoint.ipynb | ###Markdown
Animal Crossing Cleaning Script
###Code
# import libraries
import numpy as np
import pandas as pd
# set options
# changing the display settings
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)  # None (not -1) is the supported way to remove the column width limit
###Output
_____no_output_____
###Markdown
1. General Data
###Code
general_data = pd.read_csv("Villager_Complete.csv", index_col= 0)
general_data = general_data.reset_index().drop("index", axis=1)
###Output
_____no_output_____
###Markdown
Name
###Code
general_data["Name"].value_counts()
general_data["Name"] = general_data["Name"].str.title()
general_data["Name"] = general_data["Name"].replace("Spork", "Crackle")
###Output
_____no_output_____
###Markdown
Gender
###Code
general_data["Gender"].value_counts()
general_data["Gender"] = general_data["Gender"].str.title()
###Output
_____no_output_____
###Markdown
Personality
###Code
general_data["Personality"].value_counts()
general_data["Personality"] = general_data["Personality"].str.title()
###Output
_____no_output_____
###Markdown
Species
###Code
general_data["Species"].value_counts()
general_data["Species"] = general_data["Species"].replace("Ostrich, appearance based on Red-Crowned Crane", "Ostrich")
general_data["Species"] = general_data["Species"].str.title()
general_data["Species"].value_counts()
###Output
_____no_output_____
###Markdown
Horoscope
###Code
general_data["Horoscope"].value_counts()
general_data["Horoscope"] = general_data["Horoscope"].replace("N", "Unknown")
general_data["Horoscope"] = general_data["Horoscope"].apply(lambda x: x.replace("(", ""))
general_data["Horoscope"] = general_data["Horoscope"].apply(lambda x: x.replace(")", ""))
general_data["Horoscope"] = general_data["Horoscope"].str.strip()
general_data["Horoscope"] = general_data["Horoscope"].str.title()
general_data["Horoscope"].value_counts()
###Output
_____no_output_____
###Markdown
Style
###Code
general_data["Style"].value_counts()
general_data["Style"] = general_data["Style"].apply(lambda x: x.replace("Rock n' roll", "Rock 'n' roll"))
general_data["Style"] = general_data["Style"].apply(lambda x: x.replace("Rock'n'roll", "Rock 'n' roll"))
general_data["Style"] = general_data["Style"].apply(lambda x: x.replace("Rock 'n' Roll", "Rock 'n' roll"))
general_data["Style"] = general_data["Style"].str.title()
general_data["Style"].value_counts()
###Output
_____no_output_____
###Markdown
Favorite Song
###Code
general_data["Favorite_Song"].value_counts()
general_data["Favorite_Song"] = general_data["Favorite_Song"].replace("[[K.K. Ragtime Neopolitan (NH)]]", "K.K. Ragtime Neopolitan")
general_data["Favorite_Song"] = general_data["Favorite_Song"].replace("Two Days Ago (Neapolitan in New Horizons)", "Neapolitan")
general_data["Favorite_Song"] = general_data["Favorite_Song"].replace("[[K.K. D & B ACPondering NL]]", "K.K. D & B")
general_data["Favorite_Song"] = general_data["Favorite_Song"].replace("K.K. D&B", "K.K. D & B")
general_data["Favorite_Song"] = general_data["Favorite_Song"].replace("[[Space K.K.(NL) K.K. Bossa(NH)]]", "K.K. Bossa")
general_data["Favorite_Song"] = general_data["Favorite_Song"].replace("[[K.K. Bossa (Animal Crossing)K.K. Synth (New Leaf)]]", "K.K. Synth")
general_data["Favorite_Song"] = general_data["Favorite_Song"].replace("Aloha K.K. (AC)K.K. Lament (NL)]]", "K.K. Lament")
general_data["Favorite_Song"] = general_data["Favorite_Song"].replace("[[K.K. SynthK.K. Disco (NH)]]", "K.K. Disco")
general_data["Favorite_Song"] = general_data["Favorite_Song"].str.strip()
general_data["Favorite_Song"] = general_data["Favorite_Song"].str.title()
general_data["Favorite_Song"].value_counts()
###Output
_____no_output_____
###Markdown
Skills
###Code
general_data["Skill"] = general_data["Skill"].str.title()
###Output
_____no_output_____
###Markdown
Goal
###Code
general_data["Goal"] = general_data["Goal"].str.title()
###Output
_____no_output_____
###Markdown
Style
###Code
# write to file
general_data.to_csv("Clean_Animal_Crossing_General.csv")
###Output
_____no_output_____
###Markdown
2. Tier Data
###Code
# read the completed clean data
general_data = pd.read_csv("Animal_Crossing_Full_Data.csv")
tier_data = pd.read_csv("Villager_Data_16_May_2020.csv")
tier_data = tier_data.rename(columns={"Unnamed: 0":"Rank_Overall"})
tier_data.head()
tier_data["Name"].value_counts()
tier_data["Name"] = tier_data["Name"].replace("Crackle(Spork)", "Crackle")
tier_data["Name"] = tier_data["Name"].replace("Buck(Brows)", "Buck")
###Output
_____no_output_____
###Markdown
Merge Rank with General Data
###Code
full_data = general_data.merge(tier_data, left_on="Name", right_on="Name", how="outer")
full_data
full_data.to_csv("Animal_Crossing_Full_Data.csv")
###Output
_____no_output_____ |
d2l-en/mxnet/chapter_attention-mechanisms/attention-scoring-functions.ipynb | ###Markdown
Attention Scoring Functions
:label:`sec_attention-scoring-functions`

In :numref:`sec_nadaraya-waston`, we used a Gaussian kernel to model interactions between queries and keys. Treating the exponent of the Gaussian kernel in :eqref:`eq_nadaraya-waston-gaussian` as an *attention scoring function* (or *scoring function* for short), the results of this function were essentially fed into a softmax operation. As a result, we obtained a probability distribution (attention weights) over values that are paired with keys. In the end, the output of the attention pooling is simply a weighted sum of the values based on these attention weights.

At a high level, we can use the above algorithm to instantiate the framework of attention mechanisms in :numref:`fig_qkv`. Denoting an attention scoring function by $a$, :numref:`fig_attention_output` illustrates how the output of attention pooling can be computed as a weighted sum of values. Since attention weights are a probability distribution, the weighted sum is essentially a weighted average.

(Figure: computing the output of attention pooling as a weighted average of values.)
:label:`fig_attention_output`

Mathematically, suppose that we have a query $\mathbf{q} \in \mathbb{R}^q$ and $m$ key-value pairs $(\mathbf{k}_1, \mathbf{v}_1), \ldots, (\mathbf{k}_m, \mathbf{v}_m)$, where any $\mathbf{k}_i \in \mathbb{R}^k$ and any $\mathbf{v}_i \in \mathbb{R}^v$. The attention pooling $f$ is instantiated as a weighted sum of the values:

$$f(\mathbf{q}, (\mathbf{k}_1, \mathbf{v}_1), \ldots, (\mathbf{k}_m, \mathbf{v}_m)) = \sum_{i=1}^m \alpha(\mathbf{q}, \mathbf{k}_i) \mathbf{v}_i \in \mathbb{R}^v,$$
:eqlabel:`eq_attn-pooling`

where the attention weight (scalar) for the query $\mathbf{q}$ and key $\mathbf{k}_i$ is computed by the softmax operation of an attention scoring function $a$ that maps two vectors to a scalar:

$$\alpha(\mathbf{q}, \mathbf{k}_i) = \mathrm{softmax}(a(\mathbf{q}, \mathbf{k}_i)) = \frac{\exp(a(\mathbf{q}, \mathbf{k}_i))}{\sum_{j=1}^m \exp(a(\mathbf{q}, \mathbf{k}_j))} \in \mathbb{R}.$$
:eqlabel:`eq_attn-scoring-alpha`

As we can see, different choices of the attention scoring function $a$ lead to different behaviors of attention pooling. In this section, we introduce two popular scoring functions that we will use to develop more sophisticated attention mechanisms later.
###Code
import math
from d2l import mxnet as d2l
from mxnet import np, npx
from mxnet.gluon import nn
npx.set_np()
###Output
_____no_output_____
###Markdown
Masked Softmax Operation

As we just mentioned, a softmax operation is used to output a probability distribution as attention weights. In some cases, not all the values should be fed into attention pooling. For instance, for efficient minibatch processing in :numref:`sec_machine_translation`, some text sequences are padded with special tokens that do not carry meaning. To get an attention pooling over only meaningful tokens as values, we can specify a valid sequence length (in number of tokens) to filter out those beyond this specified range when computing softmax. In this way, we can implement such a *masked softmax operation* in the following `masked_softmax` function, where any value beyond the valid length is masked as zero.
###Code
#@save
def masked_softmax(X, valid_lens):
"""Perform softmax operation by masking elements on the last axis."""
# `X`: 3D tensor, `valid_lens`: 1D or 2D tensor
if valid_lens is None:
return npx.softmax(X)
else:
shape = X.shape
if valid_lens.ndim == 1:
valid_lens = valid_lens.repeat(shape[1])
else:
valid_lens = valid_lens.reshape(-1)
# On the last axis, replace masked elements with a very large negative
# value, whose exponentiation outputs 0
X = npx.sequence_mask(X.reshape(-1, shape[-1]), valid_lens, True,
value=-1e6, axis=1)
return npx.softmax(X).reshape(shape)
###Output
_____no_output_____
###Markdown
To demonstrate how this function works, consider a minibatch of two $2 \times 4$ matrix examples, where the valid lengths for these two examples are two and three, respectively. As a result of the masked softmax operation, values beyond the valid lengths are all masked as zero.
###Code
masked_softmax(np.random.uniform(size=(2, 2, 4)), np.array([2, 3]))
###Output
_____no_output_____
###Markdown
Similarly, we can also use a two-dimensional tensor to specify valid lengths for every row in each matrix example.
###Code
masked_softmax(np.random.uniform(size=(2, 2, 4)),
np.array([[1, 3], [2, 4]]))
###Output
_____no_output_____
###Markdown
Additive Attention
:label:`subsec_additive-attention`

In general, when queries and keys are vectors of different lengths, we can use additive attention as the scoring function. Given a query $\mathbf{q} \in \mathbb{R}^q$ and a key $\mathbf{k} \in \mathbb{R}^k$, the *additive attention* scoring function

$$a(\mathbf q, \mathbf k) = \mathbf w_v^\top \text{tanh}(\mathbf W_q\mathbf q + \mathbf W_k \mathbf k) \in \mathbb{R},$$
:eqlabel:`eq_additive-attn`

where learnable parameters $\mathbf W_q\in\mathbb R^{h\times q}$, $\mathbf W_k\in\mathbb R^{h\times k}$, and $\mathbf w_v\in\mathbb R^{h}$. Equivalent to :eqref:`eq_additive-attn`, the query and the key are concatenated and fed into an MLP with a single hidden layer whose number of hidden units is $h$, a hyperparameter. By using $\tanh$ as the activation function and disabling bias terms, we implement additive attention in the following.
###Code
#@save
class AdditiveAttention(nn.Block):
"""Additive attention."""
def __init__(self, num_hiddens, dropout, **kwargs):
super(AdditiveAttention, self).__init__(**kwargs)
# Use `flatten=False` to only transform the last axis so that the
# shapes for the other axes are kept the same
self.W_k = nn.Dense(num_hiddens, use_bias=False, flatten=False)
self.W_q = nn.Dense(num_hiddens, use_bias=False, flatten=False)
self.w_v = nn.Dense(1, use_bias=False, flatten=False)
self.dropout = nn.Dropout(dropout)
def forward(self, queries, keys, values, valid_lens):
queries, keys = self.W_q(queries), self.W_k(keys)
# After dimension expansion, shape of `queries`: (`batch_size`, no. of
# queries, 1, `num_hiddens`) and shape of `keys`: (`batch_size`, 1,
# no. of key-value pairs, `num_hiddens`). Sum them up with
# broadcasting
features = np.expand_dims(queries, axis=2) + np.expand_dims(
keys, axis=1)
features = np.tanh(features)
# There is only one output of `self.w_v`, so we remove the last
# one-dimensional entry from the shape. Shape of `scores`:
# (`batch_size`, no. of queries, no. of key-value pairs)
scores = np.squeeze(self.w_v(features), axis=-1)
self.attention_weights = masked_softmax(scores, valid_lens)
# Shape of `values`: (`batch_size`, no. of key-value pairs, value
# dimension)
return npx.batch_dot(self.dropout(self.attention_weights), values)
###Output
_____no_output_____
###Markdown
Let us demonstrate the above `AdditiveAttention` class with a toy example, where shapes (batch size, number of steps or sequence length in tokens, feature size) of queries, keys, and values are ($2$, $1$, $20$), ($2$, $10$, $2$), and ($2$, $10$, $4$), respectively. The attention pooling output has a shape of (batch size, number of steps for queries, feature size for values).
###Code
queries, keys = np.random.normal(0, 1, (2, 1, 20)), np.ones((2, 10, 2))
# The two value matrices in the `values` minibatch are identical
values = np.arange(40).reshape(1, 10, 4).repeat(2, axis=0)
valid_lens = np.array([2, 6])
attention = AdditiveAttention(num_hiddens=8, dropout=0.1)
attention.initialize()
attention(queries, keys, values, valid_lens)
###Output
_____no_output_____
###Markdown
Although additive attention contains learnable parameters, since every key is the same in this example, the attention weights are uniform, determined by the specified valid lengths.
###Code
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
xlabel='Keys',
ylabel='Queries')
###Output
_____no_output_____
###Markdown
Scaled Dot-Product Attention

A more computationally efficient design for the scoring function can be simply dot product. However, the dot product operation requires that both the query and the key have the same vector length, say $d$. Assume that all the elements of the query and the key are independent random variables with zero mean and unit variance. The dot product of both vectors has zero mean and a variance of $d$. To ensure that the variance of the dot product still remains one regardless of vector length, the *scaled dot-product attention* scoring function

$$a(\mathbf q, \mathbf k) = \mathbf{q}^\top \mathbf{k}  /\sqrt{d}$$

divides the dot product by $\sqrt{d}$. In practice, we often think in minibatches for efficiency, such as computing attention for $n$ queries and $m$ key-value pairs, where queries and keys are of length $d$ and values are of length $v$. The scaled dot-product attention of queries $\mathbf Q\in\mathbb R^{n\times d}$, keys $\mathbf K\in\mathbb R^{m\times d}$, and values $\mathbf V\in\mathbb R^{m\times v}$ is

$$ \mathrm{softmax}\left(\frac{\mathbf Q \mathbf K^\top }{\sqrt{d}}\right) \mathbf V \in \mathbb{R}^{n\times v}.$$
:eqlabel:`eq_softmax_QK_V`

In the following implementation of the scaled dot product attention, we use dropout for model regularization.
###Code
#@save
class DotProductAttention(nn.Block):
"""Scaled dot product attention."""
def __init__(self, dropout, **kwargs):
super(DotProductAttention, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
# Shape of `queries`: (`batch_size`, no. of queries, `d`)
# Shape of `keys`: (`batch_size`, no. of key-value pairs, `d`)
# Shape of `values`: (`batch_size`, no. of key-value pairs, value
# dimension)
# Shape of `valid_lens`: (`batch_size`,) or (`batch_size`, no. of queries)
def forward(self, queries, keys, values, valid_lens=None):
d = queries.shape[-1]
# Set `transpose_b=True` to swap the last two dimensions of `keys`
scores = npx.batch_dot(queries, keys, transpose_b=True) / math.sqrt(d)
self.attention_weights = masked_softmax(scores, valid_lens)
return npx.batch_dot(self.dropout(self.attention_weights), values)
###Output
_____no_output_____
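###Markdown
As a quick sanity check of the $\sqrt{d}$ scaling argument above, we can draw random unit-variance queries and keys of length $d$ and compare variances: the raw dot products have variance close to $d$, while the scaled ones have variance close to one. This is only an illustrative sketch; the value $d=64$ and the sample size are arbitrary choices.
###Code
d = 64
q = np.random.normal(0, 1, (10000, d))
k = np.random.normal(0, 1, (10000, d))
raw_scores = (q * k).sum(axis=1)           # one dot product per row
scaled_scores = raw_scores / math.sqrt(d)  # scaled dot products

def empirical_variance(x):
    # compute the variance by hand to stay within the mxnet numpy interface
    return ((x - x.mean()) ** 2).mean()

empirical_variance(raw_scores), empirical_variance(scaled_scores)  # roughly d and roughly 1
###Output
_____no_output_____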
###Markdown
To demonstrate the above `DotProductAttention` class, we use the same keys, values, and valid lengths from the earlier toy example for additive attention. For the dot product operation, we make the feature size of queries the same as that of keys.
###Code
queries = np.random.normal(0, 1, (2, 1, 2))
attention = DotProductAttention(dropout=0.5)
attention.initialize()
attention(queries, keys, values, valid_lens)
###Output
_____no_output_____
###Markdown
Same as in the additive attention demonstration, since `keys` contains the same element that cannot be differentiated by any query, uniform attention weights are obtained.
###Code
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
xlabel='Keys',
ylabel='Queries')
###Output
_____no_output_____ |
phys-5bl-lab-t2-sw.ipynb | ###Markdown
Standing Waves
This notebook is used for analysis of the provided data for Lab T2-SW, PHYSICS 5BL.
###Code
import numpy as np
import scipy.optimize as opt
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams.update({'font.size': '16'})
###Output
_____no_output_____
###Markdown
Expt 3: Fixed Tension and Frequency, Varying Length
Data input
###Code
def ssd(sample):
var = np.var(sample)
return np.sqrt(var * len(sample) / (len(sample) - 1))
def sem(sample):
return ssd(sample) / np.sqrt(len(sample))
def paste(arr):
# print('mean:', np.mean(arr),
# 'sd:', ssd(arr),
# 'sem:', sem(arr))
print('mean±sem: {m:.3f} ± {e:.3f}'.format(m=np.mean(arr), e=sem(arr)))
return np.mean(arr), sem(arr)
mean_lengths, length_errors = [], []
n_nodes = [6, 5, 4, 3, 2, 1]
l6 = [127.5, 128.0]
l, e = paste(l6)
mean_lengths += [l]
length_errors += [e]
l5 = [106.2, 107.0, 106.5]
l, e = paste(l5)
mean_lengths += [l]
length_errors += [e]
mean_lengths += [85]
length_errors += [0.2]
l3 = [65, 64.5, 64.5]
l, e = paste(l3)
mean_lengths += [l]
length_errors += [e]
l2 = [43.5, 43.2]
l, e = paste(l2)
mean_lengths += [l]
length_errors += [e]
l1 = [21.8, 22.0]
l, e = paste(l1)
mean_lengths += [l]
length_errors += [e]
print(mean_lengths, length_errors)
x_data, y_data = n_nodes, mean_lengths
y_er = length_errors
y_data = [y / 100 for y in y_data]
y_er = [y / 100 for y in y_er]
print(y_data, y_er)
###Output
[1.2775, 1.0656666666666665, 0.85, 0.6466666666666667, 0.4335, 0.21899999999999997] [0.0025, 0.0023333333333333257, 0.002, 0.0016666666666666668, 0.0014999999999999855, 0.0009999999999999966]
###Markdown
Nonlinear fits
From the theory, we have $n=2fL\sqrt{\frac{\mu}{T}}$; rearranging, we have that $L=\frac{n}{2f}\sqrt{\frac{T}{\mu}}$. Define the model:
###Code
FREQ = 60 # Hz
T = (0.249 + 0.009) * 9.8 # kg m/s^2
def length_model(nodes, mu):
"""Parameters is mu;
independent variable is num nodes.
Returns the length."""
return nodes / (2 * FREQ) * np.sqrt(T / mu)
###Output
_____no_output_____
###Markdown
Choose the model and initial fit parameter values then graph:
###Code
plt.figure(figsize=(10, 8))
# Initial guess values of fit parameters
muinit = 0.005
# Graph
space = np.linspace(1, 6, 1000)
plt.plot(space, length_model(space, muinit), color='orange')
plt.scatter(x_data, y_data)
plt.errorbar(x_data, y_data, yerr=y_er, fmt='none', capsize=10)
plt.xlabel('Num Nodes')
plt.ylabel('Length (m)')
plt.title('String Length v. Num Nodes')
plt.show()
###Output
_____no_output_____
###Markdown
Perform the fit then plot
###Code
# Perform the fit
start_pars = [muinit]
pars, cov = opt.curve_fit(length_model, x_data, y_data, p0=start_pars, sigma=y_er, absolute_sigma=True)
[MU] = pars
std_errs = np.sqrt(np.diag(cov))
print(np.transpose([pars, std_errs]))
# Values predicted from fit
ypred = length_model(space, MU)
# Calculating the chisq (*not reduced*)
def chisq(predicted, observed, errors):
res = np.array(observed)-np.array(predicted)
print(res, errors)
norm_res = res / errors
return np.sum(norm_res**2)
y_pred = [length_model(n, MU) for n in range(6, 0, -1)]
print(y_pred)
print(y_data)
chi2_prop = chisq(y_pred, y_data, y_er)
print('χ2 ={0:.3f}'.format (chi2_prop))
red_chi2 = chi2_prop / 5  # reduced chi-squared: 5 degrees of freedom = 6 data points - 1 fitted parameter (mu)
## Best-fit plots
# Sets figure size etc.
fig1=plt.figure(figsize=(12, 8))
# Data and fit
plt.scatter(x_data, y_data)
plt.errorbar(x_data, y_data, yerr=y_er, fmt='none', capsize=10, label='Measured Lengths (m)')
plt.plot(space, ypred, color='orange', label='Best-fit line')
# Axes label and title
plt.legend()
plt.xlabel('Harmonic Number')
plt.ylabel('Length (m)')
plt.title('String Length v. Num Nodes with Line of Best Fit (Weighted Least Squares)')
# Text
plt.text(1,1.1, "$\\mu$ = %5.4f \u00b1 %5.4f kg/m" % (pars[0], std_errs[0]))
plt.text(1,1, "Fit line equation: $L=\\frac{n}{2f}\\sqrt{\\frac{T}{%5.4f}}, f=%2.0f Hz, T=%5.3f N$" %
(pars[0], FREQ, T))
plt.text(1, 0.9, "$\\tilde{\\chi}^2$ = %5.3f" % red_chi2)
plt.savefig('fig-wlsq-with-string.png')
# Residuals plot
# Residuals
r_lin = (np.array(y_data) - np.array(y_pred)) / np.array(y_er)
# Sets figure size etc.
plt.figure(figsize=(12, 8))
# Residuals
plt.errorbar(x_data, r_lin, fmt='o')
plt.axhline(color='r') # 0 line for reference
# Axes label and title
plt.title("Linear fit residuals")
plt.xlabel("Harmonic Number")
plt.ylabel("Residuals: y(observed) - y (predicted) (m)")
# plt.show()
plt.savefig('fig-resid.png')
###Output
_____no_output_____ |
amazon-sagemaker-clarify-a2i-sample.ipynb | ###Markdown
Using SageMaker Clarify and A2I to create transparent and reliable ML solutions

1. [Overview](Overview)
2. [Prerequisites and Data](Prerequisites-and-Data)
    1. [Initialize SageMaker](Initialize-SageMaker)
    1. [Download data](Download-data)
    1. [Loading the data: Adult Dataset](Loading-the-data:-Adult-Dataset)
    1. [Data inspection](Data-inspection)
    1. [Data encoding and upload to S3](Encode-and-Upload-Training-Data)
3. [Train and Deploy XGBoost Model](Train-XGBoost-Model)
    1. [Train Model](Train-Model)
    1. [Deploy Model to Endpoint](Deploy-Model)
4. [Amazon SageMaker Clarify](Amazon-SageMaker-Clarify)
    1. [Explaining Predictions](Explaining-Predictions)
    1. [Viewing the Explainability Report](Viewing-the-Explainability-Report)
5. [Create Control Plane Resources for A2I](Create-Control-Plane-Resources)
    1. [Create Human Task UI](Create-Human-Task-UI)
    2. [Create Flow Definition](Create-Flow-Definition)
6. [Starting Human Loops](Scenario-1-:-When-Activation-Conditions-are-met-,-and-HumanLoop-is-created)
    1. [Wait For Workers to Complete Task](Wait-For-Workers-to-Complete-Task)
    2. [Check Status of Human Loop](Check-Status-of-Human-Loop)
    3. [View Task Results](View-Task-Results)
7. [Preparing new groundtruth data based on the reviewed results](Merge-the-A2I-prediction-results-with-the-test-data-to-generate-GroundTruth)
8. [Clean Up](Clean-Up)

Overview
There are two major challenges faced by customers looking to implement machine learning solutions in their line of business.
1. Machine learning models are getting more and more complex and opaque, which makes it harder to explain the predictions of such models.
2. Machine learning decisions lack human understanding and collaboration.

These challenges prevent a lot of customers in the financial and healthcare industries from implementing machine learning solutions in their business-critical functions. Amazon SageMaker Clarify and Amazon Augmented AI (A2I) try to solve both of these challenges from different perspectives.

Amazon SageMaker Clarify helps improve your machine learning models by detecting potential bias and helping explain how these models make predictions. The fairness and explainability functionality provided by SageMaker Clarify takes a step towards enabling AWS customers to build trustworthy and understandable machine learning models.

At the same time, Amazon A2I provides a way to introduce a human review loop step in the machine learning inference pipeline. This greatly improves the trust and reliability in the machine learning process.

Based on this understanding, in this notebook we will look at an example of how we can use both SageMaker Clarify and Amazon A2I at the same time in a single machine learning pipeline to improve transparency and introduce reliability in the inference workflows.

We will use the adult population dataset located at https://archive.ics.uci.edu/ml/machine-learning-databases/adult/ to determine if a person's salary is greater than $50,000 or less than $50,000.

Below are the steps we will perform as part of this notebook:
1. Train and deploy an XGBoost model on the Adult population dataset predicting if the person's salary is greater than $50,000.
1. Run batch inference on the model endpoint along with also running explainability analysis on the batch of records.
1. Filter the negative predictions, as we are interested in knowing why the model predicted a person's salary to be less than $50,000 and which features had the most impact in that process.
1. Plot the SHAP values computed by SageMaker Clarify for those negative outcomes, to see which features contributed the most in predicting the negative outcome.
1. Use an A2I Human Review Workflow providing the prediction score and SHAP plot for the human reviewer to analyze the outcome and verify the feature attributions in the model.
1. Use the reviewed data as ground truth to be used for re-training purposes.

Prerequisites and Data
Setup Amazon SageMaker Studio Notebook
1. Onboard to Amazon SageMaker Studio using the quick start (https://docs.aws.amazon.com/sagemaker/latest/dg/onboard-quick-start.html). Please attach the [AmazonAugmentedAIFullAccess](https://console.aws.amazon.com/iam/home/policies/arn%3Aaws%3Aiam%3A%3Aaws%3Apolicy%2FAmazonAugmentedAIFullAccess) permissions policy to the IAM role you create during Studio onboarding to run this notebook.
1. When the user is created and is active, click Open Studio.
1. In the Studio landing page, choose File --> New --> Terminal.
1. In the terminal, enter the following code:
    * git clone https://github.com/aws-samples/amazon-sagemaker-clarify-a2i-demo
1. Open the notebook by choosing "sagemaker-clarify-a2i.ipynb" in the amazon-sagemaker-clarify-a2i-demo folder in the left pane of the Studio landing page.

Install open source SHAP library
First of all, we will need to install the [open source SHAP library](https://shap.readthedocs.io/en/latest/index.html), as we will be using this library to plot the SHAP values computed by SageMaker Clarify further in this notebook. There are two ways of installing the SHAP library:
1. If you are using SageMaker Notebook instances, then run `pip install shap`
2. If you are using SageMaker Studio Notebooks, then run `conda install -c conda-forge shap`

If using a SageMaker Studio notebook, execute the below cell, or else skip to the next cell.
###Code
conda install -c conda-forge shap
###Output
_____no_output_____
###Markdown
If using SageMaker Notebook Instances, execute the below cell.
###Code
pip install shap
###Output
_____no_output_____
###Markdown
NOTE
__You need to restart the kernel after installing the library for the changes to take effect.__
Initialize SageMaker
###Code
from sagemaker import Session
from sagemaker import get_execution_role
import pandas as pd
import numpy as np
import urllib
import os
# Define IAM role
role = get_execution_role()
session = Session()
bucket = session.default_bucket()
prefix = 'sagemaker/clarify-a2i-demo'
region = session.boto_region_name
###Output
_____no_output_____
###Markdown
Download data
Data Source: [https://archive.ics.uci.edu/ml/machine-learning-databases/adult/](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/)
Let's __download__ the data and save it in the local folder with the names adult.data and adult.test from the UCI repository$^{[2]}$.
$^{[2]}$Dua Dheeru, and Efi Karra Taniskidou. "[UCI Machine Learning Repository](http://archive.ics.uci.edu/ml)". Irvine, CA: University of California, School of Information and Computer Science (2017).
###Code
adult_columns = ["Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Marital Status",
"Occupation", "Relationship", "Ethnic group", "Sex", "Capital Gain", "Capital Loss",
"Hours per week", "Country", "Target"]
if not os.path.isfile('adult.data'):
urllib.request.urlretrieve('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
'adult.data')
print('adult.data saved!')
else:
print('adult.data already on disk.')
if not os.path.isfile('adult.test'):
urllib.request.urlretrieve('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test',
'adult.test')
print('adult.test saved!')
else:
print('adult.test already on disk.')
###Output
_____no_output_____
###Markdown
Loading the data: Adult Dataset
From the UCI repository of machine learning datasets, this database contains 14 features concerning demographic characteristics of 45,222 rows (32,561 for training and 12,661 for testing). The task is to predict whether a person has a yearly income that is more or less than $50,000.
Here are the features and their possible values:
1. **Age**: continuous.
1. **Workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
1. **Fnlwgt**: continuous (the number of people the census takers believe that observation represents).
1. **Education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
1. **Education-num**: continuous.
1. **Marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
1. **Occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
1. **Relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
1. **Ethnic group**: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
1. **Sex**: Female, Male.
    * **Note**: this data is extracted from the 1994 Census and enforces a binary option on Sex
1. **Capital-gain**: continuous.
1. **Capital-loss**: continuous.
1. **Hours-per-week**: continuous.
1. **Native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.

Next we specify our binary prediction task:
15. **Target**: <=$50,000 or >$50,000.
###Code
training_data = pd.read_csv("adult.data",
names=adult_columns,
sep=r'\s*,\s*',
engine='python',
na_values="?").dropna()
testing_data = pd.read_csv("adult.test",
names=adult_columns,
sep=r'\s*,\s*',
engine='python',
na_values="?",
skiprows=1).dropna()
training_data.head()
###Output
_____no_output_____
###Markdown
Data inspection
Plotting histograms for the distribution of the different features is a good way to visualize the data. Let's plot a few of the features that can be considered _sensitive_.
Let's take a look specifically at the Sex feature of a census respondent. In the first plot we see that there are fewer Female respondents as a whole but especially in the positive outcomes, where they form ~$\frac{1}{7}$th of respondents.
###Code
training_data['Sex'].value_counts().sort_values().plot(kind='bar', title='Counts of Sex', rot=0)
training_data['Sex'].where(training_data['Target']=='>50K').value_counts().sort_values().plot(kind='bar', title='Counts of Sex earning >$50K', rot=0)
###Output
_____no_output_____
###Markdown
Encode and Upload Training Data
Here we encode the training and test data. Encoding input data is not necessary for SageMaker Clarify, but is necessary for XGBoost models.
The below cell does the following:
- Prepare the training data for SageMaker training
- Prepare the test data
- Define the batch size, which we will use to create batch predictions
- Prepare the explainability config data to be used for running the explainability analysis using SageMaker Clarify
- Perform label encoding

To make this notebook run faster, we will be sending a batch of 100 records from the test dataset for prediction and using the same batch for generating explanations powered by SageMaker Clarify. Based on your use-case, you may increase the batch size or send the whole CSV to the endpoint. Generally, for a production grade setup, you will not need to create batches as batch transform has the ability to break a large CSV into multiple small CSVs. But just to make this notebook run faster, we are using a small batch of records for demonstration purposes and quick execution.
###Code
from sklearn import preprocessing
def number_encode_features(df):
result = df.copy()
encoders = {}
for column in result.columns:
        if result.dtypes[column] == object:  # 'object' dtype marks string/categorical columns (np.object is deprecated)
encoders[column] = preprocessing.LabelEncoder()
result[column] = encoders[column].fit_transform(result[column].fillna('None'))
return result, encoders
#preparing the training data with no headers and target columns being the first
training_data = pd.concat([training_data['Target'], training_data.drop(['Target'], axis=1)], axis=1)
training_data, _ = number_encode_features(training_data)
training_data.to_csv('train_data.csv', index=False, header=False)
#preparing the baseline dataset to be used by SageMaker Clarify for explainability analysis
baseline_data = training_data.drop(['Target'], axis = 1)
baseline_data.to_csv('baseline_data.csv', index=False, header=False)
# now preparing the testing data
testing_data, _ = number_encode_features(testing_data)
# defining the batch of records to be used for doing batch predictions and calculating SHAP values.
# You can change this number based on your use-case
batch_size=100
# preparing the explanability data config csv having the batch of records from the testing_data, having target column being the first
explanability_data_config = pd.concat([testing_data['Target'], testing_data.drop(['Target'], axis=1)], axis=1)
explanability_data_config = explanability_data_config[:batch_size]
explanability_data_config.to_csv('explanability_data_config.csv', index=False, header=False)
# setting up the entire test dataset to csv
test_features = testing_data.drop(['Target'], axis = 1)
test_features.to_csv('test_features.csv', index=False, header=False)
# prepare the batch of records for performing inference
test_features_mini_batch = test_features[:batch_size]
test_features_mini_batch.to_csv('test_features_mini_batch.csv', index=False, header=False)
###Output
_____no_output_____
###Markdown
A quick note about our encoding: the "Female" Sex value has been encoded as 0 and "Male" as 1.
Lastly, let's upload the train, test, and explainability config data to S3.
###Code
from sagemaker.s3 import S3Uploader
from sagemaker.inputs import TrainingInput
train_uri = S3Uploader.upload('train_data.csv', 's3://{}/{}'.format(bucket, prefix))
train_input = TrainingInput(train_uri, content_type='csv')
test_mini_batch_uri = S3Uploader.upload('test_features_mini_batch.csv', 's3://{}/{}'.format(bucket, prefix))
explanability_data_config_uri = S3Uploader.upload('explanability_data_config.csv', 's3://{}/{}'.format(bucket, prefix))
###Output
_____no_output_____
###Markdown
Train XGBoost Model
Train Model
Since our focus is on understanding how to use SageMaker Clarify, we keep it simple by using a standard XGBoost model.
###Code
from sagemaker.image_uris import retrieve
from sagemaker.estimator import Estimator
container = retrieve('xgboost', region, version='1.2-1')
xgb = Estimator(container,
role,
instance_count=1,
instance_type='ml.m4.xlarge',
disable_profiler=True,
sagemaker_session=session)
xgb.set_hyperparameters(max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
objective='binary:logistic',
num_round=800)
xgb.fit({'train': train_input}, logs='None', wait='True')
###Output
_____no_output_____
###Markdown
Deploy Model
Now, let us deploy the model. For this use case and others where model explainability is required, it is generally a backend team running nightly jobs to get the predictions and their explanations and send them to their workforce for review. Hence, for such cases, a SageMaker Batch Transform job is more practical than a real-time endpoint, and we will set up a Batch Transform job for a small set of records from the test dataset to replicate this scenario. For setting up the batch transform, we need to specify the following:
- instance_count – Number of EC2 instances to use.
- instance_type – Type of EC2 instance to use, for example, 'ml.c5.xlarge'.
- strategy – The strategy used to decide how to batch records in a single request (default: None). Valid values: 'MultiRecord' and 'SingleRecord'.
- assemble_with – How the output is assembled (default: None). Valid values: 'Line' or 'None'.
- output_path – S3 location for saving the transform result. If not specified, results are stored to a default bucket. Note that file(s) will be named with '.out' suffixed to the input file(s) names, and that running batch transform again will overwrite existing output values unless you provide a different path each time.

You can also set up a CloudWatch event to trigger a batch prediction at a particular time of the day/week/month.
###Code
transformer_s3_output_path ='s3://{}/{}/predictions'.format(bucket, prefix)
xgb_transformer = xgb.transformer(instance_count=1,
instance_type='ml.c5.xlarge',
strategy='MultiRecord',
assemble_with='Line',
output_path=transformer_s3_output_path)
###Output
_____no_output_____
###Markdown
Run the Batch Predictions
Now it's time to run the batch predictions. Since the Transformer does not provide an API to check when the batch transform job is completed, one of the following options can be chosen:
- Set up a CloudWatch event to send an SNS notification that the job is completed (recommended for any customer-facing project in production); a minimal sketch of this option is shown after the cell below.
- Call the wait() method on the transformer so that the notebook execution will wait for the transform job to complete.

For demonstration purposes, we are using the second option.
###Code
xgb_transformer.transform(test_mini_batch_uri, content_type='text/csv', split_type='Line')
xgb_transformer.wait()
###Output
_____no_output_____
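###Markdown
For reference, option 1 above could be wired up roughly as follows: an EventBridge (CloudWatch Events) rule that publishes to an SNS topic when the transform job reaches a terminal state. This is only a sketch; the SNS topic ARN is a placeholder you would replace with your own, and the topic's access policy must allow events.amazonaws.com to publish to it.
###Code
import json
import boto3

events = boto3.client('events')

# placeholder ARN - replace with an SNS topic you own
sns_topic_arn = 'arn:aws:sns:<region>:<account-id>:batch-transform-notifications'

# rule matching SageMaker batch transform jobs that reach a terminal state
events.put_rule(
    Name='clarify-a2i-transform-state-change',
    EventPattern=json.dumps({
        'source': ['aws.sagemaker'],
        'detail-type': ['SageMaker Transform Job State Change'],
        'detail': {'TransformJobStatus': ['Completed', 'Failed', 'Stopped']}
    })
)

# send matching events to the SNS topic
events.put_targets(
    Rule='clarify-a2i-transform-state-change',
    Targets=[{'Id': 'notify-sns', 'Arn': sns_topic_arn}]
)
###Output
_____no_output_____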
###Markdown
**_NOTE_**: **The output of the model is a prediction score between 0 and 1, where the prediction score will denote the probability of the person's salary being greater than $50,000.**
**For example:** if the model gives a prediction score of 0.3, it means that the model sees a 30% probability that the salary of the person would be greater than \\$50,000, which is quite a low probability. Similarly, if the prediction score is 0.9, it means the model finds a probability of 90% that the person's salary would be greater than $50,000.

Amazon SageMaker Clarify
Now that the predictions have been made, let's set up a processor definition for SageMaker Clarify. For running the explainability analysis on the model, SageMaker Clarify uses SageMaker Processing jobs under the hood.
The first step is to set up a `SageMakerClarifyProcessor`.
###Code
from sagemaker import clarify
clarify_processor = clarify.SageMakerClarifyProcessor(role=role,
instance_count=1,
instance_type='ml.c5.xlarge',
sagemaker_session=session)
###Output
_____no_output_____
###Markdown
Writing ModelConfig
Now, you set up the `ModelConfig` object. This object communicates information about your trained model.
**Note**: To avoid additional traffic to your production models, SageMaker Clarify sets up and tears down a temporary endpoint when processing. `ModelConfig` specifies your preferred instance type and instance count used to run your model on during Clarify's processing.
###Code
from sagemaker import clarify
model_config = clarify.ModelConfig(model_name=xgb_transformer.model_name,
instance_type='ml.c5.xlarge',
instance_count=1,
accept_type='text/csv')
###Output
_____no_output_____
###Markdown
Explaining Predictions
There are expanding business needs and legislative regulations that require explanations of _why_ a model made the decision it did. SageMaker Clarify uses the [KernelSHAP](https://arxiv.org/abs/1705.07874) algorithm to explain the contribution that each input feature makes to the final decision.
To do this, you need to provide some details in terms of setting up the SHAP-related configuration, an S3 output path where the explainability results will be stored, and data configuration related to running the explainability analysis. Note that we are supplying the same `test_mini_batch_uri` which we used for predictions.
The below cell does the following:
- Calculates the baseline to be used in `shap_config`. Here the complete training dataset is supplied to calculate a good baseline. The `baseline_data.csv` is basically the training dataset without the target column in it.
- Treats the whole training dataset as the baseline for `SHAPConfig`
- Sets up `DataConfig`, providing details on where the input data is located and where to store the results, along with more details.

__NOTE__: The value for `num_samples` is given for demonstration purpose only. To increase the fidelity of SHAP values, use a larger value for `num_samples`.
###Code
# Here use the mean value of training dataset as SHAP baseline
shap_baseline_df = pd.read_csv("baseline_data.csv", header=None)
shap_baseline = [list(shap_baseline_df.mean())]
# create the SHAPConfig
shap_config = clarify.SHAPConfig(baseline=shap_baseline,
num_samples=15,
agg_method='mean_abs',
use_logit=True)
explainability_output_path = 's3://{}/{}/explainability'.format(bucket, prefix)
# create the DataConfig
explainability_data_config = clarify.DataConfig(s3_data_input_path=explanability_data_config_uri,
s3_output_path=explainability_output_path,
label='Target',
headers=training_data.columns.to_list(),
dataset_type='text/csv')
###Output
_____no_output_____
###Markdown
Run the explainability analysis
Now we are all set. Let us trigger the explainability analysis job. Once the job is finished, the result will be uploaded to the S3 output path set in the previous cell.
###Code
clarify_processor.run_explainability(data_config=explainability_data_config,
model_config=model_config,
explainability_config=shap_config)
###Output
_____no_output_____
###Markdown
Download the explainability results and batch predictions
Now, download the explainability result data and also the batch prediction data to start preparing it for A2I. The below cell will do the following:
- Download the CSV containing the SHAP values for individual rows passed as part of `data_config` in the `run_explainability` method
- Download the `analysis.json` from the explainability results, containing the global SHAP values and the expected `base value`
- Download the batch transform prediction results
- Create a single pandas dataframe containing predictions and the SHAP values corresponding to them
- Create a new column in the same dataframe, named `Prediction`, by keeping the value as `0` for all prediction scores `less than 0.5` and value `1` for prediction scores `greater than 0.5` to `1`, where `0` denotes the person's salary to be `less than $50,000` and `1` denotes the salary to be `greater than $50,000`
###Code
from sagemaker.s3 import S3Downloader
import json
# read the shap values
S3Downloader.download(s3_uri=explainability_output_path+"/explanations_shap", local_path="output")
shap_values_df = pd.read_csv("output/out.csv")
# read the inference results
S3Downloader.download(s3_uri=transformer_s3_output_path, local_path="output")
predictions_df = pd.read_csv("output/test_features_mini_batch.csv.out", header=None)
predictions_df = predictions_df.round(5)
# get the base expected value to be used to plot SHAP values
S3Downloader.download(s3_uri=explainability_output_path+"/analysis.json", local_path="output")
with open('output/analysis.json') as json_file:
data = json.load(json_file)
base_value = data['explanations']['kernel_shap']['label0']['expected_value']
print("base value: ", base_value)
predictions_df.columns = ['Probability_Score']
# join the probability score and shap values together in a single data frame
prediction_shap_df = pd.concat([predictions_df,shap_values_df],axis=1)
#create a new column as 'Prediction' converting the prediction to either 1 or 0
prediction_shap_df.insert(0,'Prediction', (prediction_shap_df['Probability_Score'] > 0.5).astype(int))
#adding an index column based on the batch size;to be used for merging the A2I predictions with the groundtruth.
prediction_shap_df['row_num'] = test_features_mini_batch.index
###Output
_____no_output_____
###Markdown
Step 5 - Set up a human review loop using Amazon A2I
Amazon Augmented AI (Amazon A2I) makes it easy to build the workflows required for human review of ML predictions. Amazon A2I brings human review to all developers, removing the undifferentiated heavy lifting associated with building human review systems or managing large numbers of human reviewers.
To incorporate Amazon A2I into your human review workflows you need:
- A worker task template to create a worker UI. The worker UI displays your input data, such as documents or images, and instructions to workers. It also provides interactive tools that the worker uses to complete your tasks. For more information, see [A2I instructions overview](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-worker-template-console.html)
- A human review workflow, also referred to as a flow definition. You use the flow definition to configure your human workforce and provide information about how to accomplish the human review task. To learn more, see [create flow definition](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html)

When using a custom task type, you start a human loop using the Amazon Augmented AI Runtime API. When you call StartHumanLoop in your custom application, a task is sent to human reviewers. In this section, you set up a human review loop in Amazon A2I for the predictions with a negative outcome. It includes the following steps:
* Create or choose your workforce
* Create a human task UI
* Create the flow definition
* Trigger conditions for human loop activation
* Check the human loop status and wait for reviewers to complete the task

Let's now initialize some variables that we need in the subsequent steps.
###Code
import io
import uuid
import time
import boto3
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
# Amazon SageMaker client
sagemaker_client = boto3.client('sagemaker')
# Amazon Augment AI (A2I) client
a2i = boto3.client('sagemaker-a2i-runtime')
# Amazon S3 client
s3 = boto3.client('s3')
# Flow definition name - this value is unique per account and region. You can also provide your own value here.
flow_definition_name = 'flow-def-clarify-a2i-' + timestamp
# Task UI name - this value is unique per account and region. You can also provide your own value here.
task_UI_name = 'task-ui-clarify-a2i-' + timestamp
# Flow definition outputs
flow_definition_output_path = f's3://{bucket}/{prefix}/clarify-a2i-results'
###Output
_____no_output_____
###Markdown
Create your workforce
This step requires you to use the AWS Console. You will create a private workteam and add only one user (you) to it. To create a private team:
1. Go to AWS Console > Amazon SageMaker > Labeling workforces
1. Click "Private" and then "Create private team".
1. Enter the desired name for your private workteam.
1. Enter your own email address in the "Email addresses" section.
1. Enter the name of your organization and a contact email to administer the private workteam.
1. Click "Create Private Team".
1. The AWS Console should now return to AWS Console > Amazon SageMaker > Labeling workforces. Your newly created team should be visible under "Private teams". Next to it you will see an ARN which is a long string that looks like arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name. **Please enter this ARN in the cell below**
1. You should get an email from [email protected] that contains your workforce username and password.
1. In AWS Console > Amazon SageMaker > Labeling workforces, click on the URL in Labeling portal sign-in URL. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password).
1. This is your private worker's interface. When you create an A2I task in Verify your task using a private team below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button.
###Code
workteam_arn = "<enter the ARN of your private labeling workforce>"
###Output
_____no_output_____
###Markdown
Create the human task UI
Create a human task UI resource, giving a UI template in Liquid HTML. This template will be rendered to the human workers whenever a human loop is required. For over 70 pre-built UIs, check: https://github.com/aws-samples/amazon-a2i-sample-task-uis
###Code
template = r"""
<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<style>
table, tr, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
<crowd-form>
<div>
<h1>Instructions</h1>
<p>Please review the predictions in the Predictions table based on the input data table below, and make corrections where appropriate. </p>
<p> Here are the labels: </p>
<p> 0: Salary is less than $50K </p>
<p> 1: Salary is greater than $50K </p>
<p> NOTE: There is also a column showing the probability score,
which tells you how confident the model is that the person's salary would be greater than $50,000.
Currently every row with probability score greater than 0.5 shows the prediction as 1
and for rows with probability less than 0.5, the prediction is marked as 0</p>
<p>Your task is to look at the prediction, probability score and the SHAP plot to understand which features contributed most to the model's prediction
and the probability of the model suggesting a positive outcome</p>
</div>
<div>
<h3> Adult Population dataset </h3>
</div>
<br>
<h1> Predictions Table </h1>
<table>
<tr>
<th>ROW NUMBER</th>
<th>MODEL PREDICTION</th>
<th>PROBABILITY SCORE</th>
<th>SHAP VALUES</th>
<th>AGREE/DISAGREE WITH ML RATING?</th>
<th>YOUR PREDICTION</th>
<th>CHANGE REASON </th>
</tr>
{% for pair in task.input.Pairs %}
<tr>
<td>{{ pair.row }}</td>
<td><crowd-text-area name="predicted{{ forloop.index }}" value="{{ pair.prediction }}"></crowd-text-area></td>
<td><crowd-text-area name="confidence{{ forloop.index }}" value="{{ pair.probability_score }}"></crowd-text-area></td>
<td><img src="{{ pair.shap_image_s3_uri | grant_read_access }}" alt="shap value plot" style="width:auto; height:auto;"></td>
<td>
<p>
<input type="radio" id="agree{{ forloop.index }}" name="rating{{ forloop.index }}" value="agree" required>
<label for="agree{{ forloop.index }}">Agree</label>
</p>
<p>
<input type="radio" id="disagree{{ forloop.index }}" name="rating{{ forloop.index }}" value="disagree" required>
<label for="disagree{{ forloop.index }}">Disagree</label>
</p>
</td>
<td>
<p>
<input type="text" name="True Prediction{{ forloop.index }}" placeholder="Enter your Prediction" />
</p>
</td>
<td>
<p>
<input type="text" name="Change Reason{{ forloop.index }}" placeholder="Explain why you changed the prediction" />
</p>
</td>
</tr>
{% endfor %}
</table>
</crowd-form>
"""
def create_task_ui():
'''
Creates a Human Task UI resource.
Returns:
struct: HumanTaskUiArn
'''
response = sagemaker_client.create_human_task_ui(
HumanTaskUiName=task_UI_name,
UiTemplate={'Content': template})
return response
# Create task UI
human_task_UI_response = create_task_ui()
human_task_Ui_arn = human_task_UI_response['HumanTaskUiArn']
print(human_task_Ui_arn)
###Output
_____no_output_____
###Markdown
Create the Flow Definition
In this section, we're going to create a flow definition. Flow definitions allow us to specify:
- The workforce that your tasks will be sent to.
- The instructions that your workforce will receive. This is called a worker task template.
- Where your output data will be stored.

This demo is going to use the API, but you can optionally create this workflow definition in the console as well. For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html.
###Code
create_workflow_definition_response = sagemaker_client.create_flow_definition(
FlowDefinitionName= flow_definition_name,
RoleArn= role,
HumanLoopConfig= {
"WorkteamArn": workteam_arn,
"HumanTaskUiArn": human_task_Ui_arn,
"TaskCount": 1,
"TaskDescription": "Review the model predictions and SHAP values and determine if you agree or disagree. Assign a label of 1 to indicate positive result or 0 to indicate a negative result based on your review of the prediction, probability and SHAP values",
"TaskTitle": "Using Clarify and A2I"
},
OutputConfig={
"S3OutputPath" : flow_definition_output_path
}
)
flow_definition_arn = create_workflow_definition_response['FlowDefinitionArn']
# Describe flow definition - status should be active
for x in range(60):
describe_flow_definition_response = sagemaker_client.describe_flow_definition(FlowDefinitionName=flow_definition_name)
print(describe_flow_definition_response['FlowDefinitionStatus'])
if (describe_flow_definition_response['FlowDefinitionStatus'] == 'Active'):
print("Flow Definition is active")
break
time.sleep(2)
###Output
_____no_output_____
###Markdown
Trigger a human loop for all predictions with a negative outcome. We would like to send all the predictions with a negative outcome to an Amazon A2I human loop, and to check which features contributed to the model's prediction when it predicted a person's salary to be less than \\$50,000. This can help identify whether the model is only giving a negative outcome for people belonging to a certain gender or ethnicity group. We will also show the probability scores along with the predictions and SHAP plots, to give the reviewer complete visibility into how confident the model was when making a certain prediction.
###Code
negative_outcomes_df = prediction_shap_df[prediction_shap_df.iloc[:, 0] == 0]
###Output
_____no_output_____
###Markdown
Plot the SHAP values computed by SageMaker Clarify for the negative outcomes. Now, plot the SHAP values for each of the negative outcomes, export the plots as images and upload them to an S3 location. These images will be rendered in the task review template along with the predictions. Also, to make it easy to access the S3 path of the image corresponding to each prediction, append the S3 URIs of the images to the same dataframe that holds the predictions and SHAP values.
###Code
import shap
import matplotlib.pyplot as plt
column_list = list(test_features_mini_batch.columns)
s3_uris =[]
for i in range(len(negative_outcomes_df)):
explanation_obj = shap._explanation.Explanation(values=negative_outcomes_df.iloc[i,2:-1].to_numpy(), base_values=base_value, data=test_features_mini_batch.iloc[i].to_numpy(), feature_names=column_list)
shap.plots.waterfall(shap_values=explanation_obj, max_display=4, show=False)
img_name = 'shap-' + str(i) + '.png'
plt.savefig('shap_images/'+img_name, bbox_inches='tight')
plt.close()
s3_uri = S3Uploader.upload('shap_images/'+img_name, 's3://{}/{}/shap_images'.format(bucket, prefix))
s3_uris.append(s3_uri)
negative_outcomes_df['shap_image_s3_uri'] = s3_uris
print(f"{len(negative_outcomes_df)} out of {len(predictions_df)} samples or " +
'{:.1%} of the predictions will be sent to review.'.format(len(negative_outcomes_df)/len(predictions_df)))
###Output
_____no_output_____
###Markdown
Trigger the Human Review Loop. Now, all is set to trigger the human review loop. The cell below will: - Pick a set of negative outcome records (for example, 3 records) - Create a human review loop for them, showing all three records in a single template - Wait until the reviewers have completed their tasks - Append the details of every completed human review loop to a list
###Code
import json
import time
# Note that the prediction is in terms of a probability from 0 to 1 for a discrete label of 1, indicating the person has a salary > $50K
prediction_list = negative_outcomes_df.iloc[:,:1].values.flatten().tolist()
probability_score_list = negative_outcomes_df.iloc[:,1:2].values.flatten().tolist()
probability_score_list
row_num_list = negative_outcomes_df.iloc[:,-2:-1].values.flatten().tolist()
NUM_TO_REVIEW = len(negative_outcomes_df) # You can change this number as desired
completed_human_loops = []
step_size = 3
for i in range(0, NUM_TO_REVIEW, step_size):
if i+step_size <= NUM_TO_REVIEW-1:
start_idx = i
end_idx = i+step_size
else:
start_idx = i
end_idx = NUM_TO_REVIEW
item_list = [{'row': "{}".format(row_num_list[j]), 'prediction': prediction_list[j], 'probability_score': probability_score_list[j], 'shap_image_s3_uri': s3_uris[j]} for j in range(start_idx, end_idx)]
ip_content = {'Pairs': item_list}
humanLoopName = str(uuid.uuid4())
start_loop_response = a2i.start_human_loop(
HumanLoopName=humanLoopName,
FlowDefinitionArn=flow_definition_arn,
HumanLoopInput={
"InputContent": json.dumps(ip_content)
}
)
print("Task - " + str(i) + " submitted, Now, Navigate to the private worker portal and perform the tasks. Make sure you've invited yourself to your workteam!")
response = a2i.describe_human_loop(HumanLoopName=humanLoopName)
status = response["HumanLoopStatus"]
while status != "Completed":
print("Task still in-progress, wait for 10 more seconds for reviewers to complete the task...")
time.sleep(10)
response = a2i.describe_human_loop(HumanLoopName=humanLoopName)
status = response["HumanLoopStatus"]
print("Human Review Loop for the Task - " + str(i) + " completed")
completed_human_loops.append(response)
###Output
_____no_output_____
###Markdown
Let's inspect the results of the human review tasks. We will also start preparing the groundtruth labels
###Code
import re
import pprint
pp = pprint.PrettyPrinter(indent=4)
groundtruth_labels = {}
for resp in completed_human_loops:
splitted_string = re.split('s3://' + bucket + '/', resp['HumanLoopOutput']['OutputS3Uri'])
output_bucket_key = splitted_string[1]
response = s3.get_object(Bucket=bucket, Key=output_bucket_key)
content = response["Body"].read()
json_output = json.loads(content)
j=1
for i in range(0, step_size):
if json_output['humanAnswers'][0]['answerContent']['rating{}'.format(j)]['agree'] == True:
groundtruth_labels[json_output['inputContent']['Pairs'][i]['row']] = 0
else:
groundtruth_labels[json_output['inputContent']['Pairs'][i]['row']] = 1
j = j +1
json_output
###Output
_____no_output_____
###Markdown
Merge the A2I prediction results with the test data to generate ground truth. Since the predictions have been reviewed by human reviewers with the analysis provided by SageMaker Clarify, we can treat these predictions as ground-truth data for further re-training purposes. So, let us merge the A2I predictions with the batch of test data used earlier.
###Code
new_training_data = testing_data[:batch_size].copy()  # copy so the original test data is not modified below
new_training_data['row_num'] = test_features_mini_batch.index
for row in groundtruth_labels:
new_training_data.loc[(new_training_data.row_num == int(row)), 'Target'] = groundtruth_labels[row]
new_training_data.to_csv('new_training_data.csv', index=False, header=True)
S3Uploader.upload('new_training_data.csv', 's3://{}/{}'.format(bucket, prefix))
###Output
_____no_output_____
###Markdown
Clean Up. Finally, don't forget to clean up the resources we set up and used for this demo!
###Code
session.delete_model(model_name)
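# The two calls below are an addition (not in the original notebook): the human review
# resources created above can also be removed once you are done, using the names defined earlier.
sagemaker_client.delete_flow_definition(FlowDefinitionName=flow_definition_name)
sagemaker_client.delete_human_task_ui(HumanTaskUiName=task_UI_name)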
###Output
_____no_output_____ |
get_reading.ipynb | ###Markdown
Get liquid display readings from video using OpenCV-Python. Written by Lei Lei, Faculty of Engineering, University of Nottingham. Import packages
###Code
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['image.cmap'] = 'gray'
###Output
_____no_output_____
###Markdown
The reading updates once per second. We only need to extract one frame per second from the video file.
###Code
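# Helper to shrink each frame before saving; cv2.INTER_AREA is the recommended interpolation when downscaling.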
def rescaleFrame(frame, scale = 0.3):
width = int(frame.shape[1]*scale)
height = int(frame.shape[0]*scale)
dimensions = (width, height)
return cv2.resize(frame, dimensions, interpolation = cv2.INTER_AREA)
# Reading Video
capture=cv2.VideoCapture('/mnt/c/users/stcik/scire/papers/muon/80C.MOV')
count = 0
success = True
fps = int(capture.get(cv2.CAP_PROP_FPS))
out_folder='/mnt/c/users/stcik/scire/papers/muon/80C/frames'
os.makedirs(out_folder,exist_ok=True)
while success:
success, image = capture.read()
# print('read a new frame:', success)
if count%(1*fps) == 0 :
image = rescaleFrame(image)
cv2.imwrite(os.path.join(out_folder,'frame%d.jpg'%count),image)
# print('Successfully written frame%d!'%count)
count+=1
###Output
_____no_output_____
###Markdown
We need to crop the image to focus on the liquid crystal display area; this will save computational work. Define a crop function that reshapes and crops the image to save computation effort.
###Code
def crop(img, y=200, h=400, x=160, w=300):
new_image = img[y:h, x:w]
cv2.imwrite("Images/new_image.jpg", new_image)
return new_image
###Output
_____no_output_____
###Markdown
Then, we define a plot function to save a little bit of input work.
###Code
def plot(image, cmap=None):
plt.axis('off')
plt.imshow(image)
###Output
_____no_output_____
###Markdown
We can first use the first image in the frame directory to get an idea of what parameters we should use.
###Code
image_path=r'/mnt/c/users/stcik/scire/papers/muon/80C/frames/frame6438.jpg'
image=cv2.imread(image_path)
new_image=crop(image, y=70, h=280, x=30, w=270)
plot(new_image)
###Output
_____no_output_____
###Markdown
After we get proper parameters, we can batch crop the frames.
###Code
import re
rDir = r'/mnt/c/users/stcik/scire/papers/muon/80C/frames'
out_folder='/mnt/c/users/stcik/scire/papers/muon/80C/cropped'
os.makedirs(out_folder,exist_ok=True)
savs=[]
for file in os.listdir(rDir):
if file.endswith(".jpg"):
savs.append(os.path.join(rDir, file))
for sav in savs:
image_path=sav
image=cv2.imread(image_path)
new_image=crop(image, y=70, h=280, x=30, w=270)
name = image_path.split('/')[-1]
cv2.imwrite(os.path.join(out_folder, f'80C_{name}.jpg'), new_image)
###Output
_____no_output_____
###Markdown
We need to have a look at the cropped images. For frames where the camera moved a lot, we may need to manually crop them.
###Code
image_path=r'/mnt/c/users/stcik/scire/papers/muon/80C/frames/frame4350.jpg'
image=cv2.imread(image_path)
new_image = crop(image, y=70, h=280, x=36, w=276)
name = image_path.split('/')[-1]
cv2.imwrite(os.path.join('/mnt/c/users/stcik/scire/papers/muon/80C/frames/', f'80C_{name}'), new_image)
###Output
_____no_output_____
###Markdown
We now need to make a binary image from the cropped image.
###Code
def make_bin(img, gmin=150, gmax=255):
"""Make a binary image from the cropped image."""
# Drop the color
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Thresholding the image
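    # Note: because cv2.THRESH_OTSU is set, the gmin value is ignored and Otsu's method chooses the threshold automatically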
thresh, img_bin = cv2.threshold(img, gmin, gmax, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# Invert the image
img_bin = ~img_bin
cv2.imwrite("Images/Image_bin.jpg", img_bin)
return img_bin
img_bin=make_bin(new_image)
plot(img_bin)
###Output
_____no_output_____
###Markdown
We now need to make the final binary image for cv2 to find contours. First, we define parameters to resolve vertical and horizontal lines.
###Code
line_min_width = 8
kernal_h = np.ones((4,line_min_width), np.uint8)
kernal_v = np.ones((line_min_width,4), np.uint8)
img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernal_h)
plot(img_bin_h)
img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernal_v)
plot(img_bin_v)
img_bin_final=img_bin_h|img_bin_v
plot(img_bin_final)
###Output
_____no_output_____
###Markdown
We can make the lines thicker so that the contours are easier to resolve.
###Code
final_kernel = np.ones((3,3), np.uint8)
img_bin_final=cv2.dilate(img_bin_final,final_kernel,iterations=1)
plot(img_bin_final)
###Output
_____no_output_____
###Markdown
Now we can find the contours and visualise the contours found.
###Code
#Retrieve contours
contours, hierarchy = cv2.findContours(img_bin_final, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#Create box-list
box = []
# Get position (x,y), width and height for every contour
for c in contours:
x, y, w, h = cv2.boundingRect(c)
box.append([x,y,w,h])
print(box)
img_box = new_image.copy()
for n in box:
img_box = cv2.rectangle(img_box, (n[0],n[1]), (n[0]+n[2],n[1]+n[3]), (255,0,0), 2)
plt.axis('off')
plt.imshow(img_box)
###Output
_____no_output_____
###Markdown
We can now set up parameters to filter the contours found so that we keep only our readings. Then, let's sort the contours so that the boxes are well ordered.
###Code
# We will use the size and position of the contourbox to filter the boxes.
def get_reading(img, contours):
# Get the width and height of the image
width, height,_ = img.shape
textcont=[]
for c in contours:
# Returns the location and width, height for every contour
x, y, w, h = cv2.boundingRect(c)
# print(h/height, y/(height-y-h))
# We will only use the height of the numbers, because all numbers have about the same height but 1 is much narrower than other numbers.
        # We will only use the y positions, because the x positions have a larger distribution.
if 0.5 > h/height > 0.1 and 1.4 > y/(height-y-h) > 0.4:
textcont.append(c)
return textcont
textcont = get_reading(new_image, contours)
textbox = []
for c in textcont:
x, y, w, h = cv2.boundingRect(c)
textbox.append([x,y,w,h])
print(textbox)
img_box = new_image.copy()
for n in textbox:
img_box = cv2.rectangle(img_box, (n[0],n[1]), (n[0]+n[2],n[1]+n[3]), (255,0,0), 2)
plot(img_box)
###Output
_____no_output_____
###Markdown
Now we can crop the image according to the textboxes.
###Code
def sort_contours(cnts, method="left-to-right"):
# initialize the reverse flag and sort index
reverse = False
i = 0
# handle if we need to sort in reverse
if method == "right-to-left" or method == "bottom-to-top":
reverse = True
# handle if we are sorting against the y-coordinate rather than
# the x-coordinate of the bounding box
if method == "top-to-bottom" or method == "bottom-to-top":
i = 1
# construct the list of bounding boxes and sort them from top to
# bottom
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
key=lambda b:b[1][i], reverse=reverse))
# return the list of sorted contours and bounding boxes
return (cnts, boundingBoxes)
textcont, textboxes = sort_contours(textcont, method="left-to-right")
print(textboxes)
new_img = new_image[textboxes[0][1]:textboxes[0][1]+textboxes[0][3], textboxes[0][0]:textboxes[-1][0]+textboxes[-1][2]]
plot(new_img)
cv2.imwrite('outs/80C_frame6438.jpg.jpg', new_img)
###Output
_____no_output_____
###Markdown
To batch process the cropped frames, we need to make functions.
###Code
def make_bin(img, gmin=150, gmax=255):
"""Make a binary image from the cropped image."""
# Drop the color
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Thresholding the image
thresh, img_bin = cv2.threshold(img, gmin, gmax, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# Invert the image
img_bin = ~img_bin
cv2.imwrite("Images/Image_bin.jpg", img_bin)
return img_bin
def get_lines(img, line_min_width=8):
    # Kernels used to resolve horizontal and vertical line segments of the digits
    kernal_h = np.ones((4, line_min_width), np.uint8)
    kernal_v = np.ones((line_min_width, 4), np.uint8)
    # Detect vertical lines from the binary image passed in
    img_bin_v = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernal_v)
    # Detect horizontal lines
    img_bin_h = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernal_h)
    img_bin_final = img_bin_h | img_bin_v
    final_kernel = np.ones((4, 4), np.uint8)
    img_bin_final = cv2.dilate(img_bin_final, final_kernel, iterations=1)
    return img_bin_final
def sort_contours(cnts, method="left-to-right"):
# initialize the reverse flag and sort index
reverse = False
i = 0
# handle if we need to sort in reverse
if method == "right-to-left" or method == "bottom-to-top":
reverse = True
# handle if we are sorting against the y-coordinate rather than
# the x-coordinate of the bounding box
if method == "top-to-bottom" or method == "bottom-to-top":
i = 1
# construct the list of bounding boxes and sort them from top to
# bottom
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
key=lambda b:b[1][i], reverse=reverse))
# return the list of sorted contours and bounding boxes
return (cnts, boundingBoxes)
def get_reading(img, img_path, contours, folder):
# Get the width and height of the image
width, height,_ = img.shape
textcont=[]
for c in contours:
# Returns the location and width, height for every contour
x, y, w, h = cv2.boundingRect(c)
# print(h/height, y/(height-y-h))
# We will only use the height of the numbers, because all numbers have about the same height but 1 is much narrower than other numbers.
        # We will only use the y positions, because the x positions have a larger distribution.
if 0.50 > h/height > 0.15 and 1.2 > y/(height-y-h) > 0.12:
textcont.append(c)
textcont, textboxes = sort_contours(textcont, method="left-to-right")
new_img = img[textboxes[0][1]:textboxes[0][1]+textboxes[0][3], textboxes[0][0]:textboxes[-1][0]+textboxes[-1][2]]
name = img_path.split('/')[-1]
cv2.imwrite(os.path.join(folder, f'{name}'), new_img)
###Output
_____no_output_____
###Markdown
Finally, we can use the above-defined functions to batch process the frames and save the detected regions into the ./outs directory.
###Code
savs=[]
rDir = r'/mnt/c/users/stcik/scire/papers/muon/80C/cropped'
out_folder=r'/mnt/c/users/stcik/scire/papers/muon/80C/outs'
os.makedirs(out_folder,exist_ok=True)
for file in os.listdir(rDir):
if file.endswith(".jpg"):
savs.append(os.path.join(rDir, file))
for sav in savs:
image=cv2.imread(sav)
img_bin = make_bin(image)
img_bin = get_lines(img_bin)
# Find contours for image, which will detect all the boxes
contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
get_reading(image, img_path=sav, contours = contours, folder = out_folder)
###Output
_____no_output_____
###Markdown
Now go to the ./outs directory and check the outputs; change parameters and try again if needed. I have 250 frames and only 4 of them are not correctly detected, a success rate of 98.4%. If you have problems with certain images, you can go back to the manual mode and see what happened. I had a problem with one file, which turned out to be because the reading in that image is not clear: the number '3' is broken, which makes its contour box split into two small ones that are then filtered out by the conditions set in the get_reading function. If we set the lower height bound smaller, it will work. It is useful to make the detected readings binary so that they are easier to recognise with OCR packages.
###Code
rDir = r'/mnt/c/users/stcik/scire/papers/muon/90C/outs'
out_folder='/mnt/c/users/stcik/scire/papers/muon/90C/binary'
os.makedirs(out_folder, exist_ok=True)
savs=[]
for file in os.listdir(rDir):
if file.endswith(".jpg"):
savs.append(os.path.join(rDir,file))
for sav in savs:
image_path=sav
image=cv2.imread(sav)
img_bin=make_bin(image, gmin=150, gmax=255)
name = image_path.split('/')[-1]
cv2.imwrite(os.path.join(out_folder, f'{name}'), img_bin)
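# Optional follow-up (not part of the original notebook): feed a binarised crop to an OCR
# package such as pytesseract to turn it into a numeric reading. This assumes the pytesseract
# package and the Tesseract engine are installed; it reads the last crop produced above.
import pytesseract
reading = pytesseract.image_to_string(img_bin, config='--psm 7 -c tessedit_char_whitelist=0123456789.')
print(name, '->', reading.strip())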
###Output
_____no_output_____ |
Sarka/Project.ipynb | ###Markdown
Reads data (does not include molecules with an empty field for activity, activity units or canonical SMILES)
###Code
# Imports needed by this notebook (they were missing from the file as given)
import math
import numpy as np
import matplotlib.pyplot as plt
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Draw, Descriptors, rdMolDescriptors
from rdkit.Chem.Scaffolds import MurckoScaffold
from sklearn import ensemble
from sklearn import model_selection as ms
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error
from scipy.cluster.hierarchy import complete, dendrogram, fcluster

mols = []
empty = 0
with open("bioactivity-CB1_receptor.txt") as file:
for line in file:
cols = line.strip().split("\t")
tup = [cols[0],Chem.MolFromSmiles(cols[10]),cols[14]]
if not tup[0]:
empty += 1
else:
if not tup[1]:
empty += 1
else:
if not tup[2]:
empty += 1
else:
mols.append(tup)
print(empty)
print(len(mols))
###Output
1091
3990
###Markdown
Filter: Exclude inorganic compounds and molecules with Ki higher than 10 microM
###Code
idx = []
activities = []
filtered_mols = []
for i,mol in enumerate(mols):
if float(mol[2]) < 10000:
if not mol[1].HasSubstructMatch(Chem.MolFromSmarts("[!#6;!#7;!#8;!#9;!#17;!#35;!#53;!#16]")):
idx.append(i)
for i in idx:
filtered_mols.append(mols[i][1])
activities.append(float(mols[i][2]))
print(len(filtered_mols))
pvals = [-math.log10(val) for val in activities]
###Output
3306
###Markdown
Calculates attributes of molecules for modeling
###Code
fps = [AllChem.GetMorganFingerprintAsBitVect(m, 2, nBits=512) for m in filtered_mols]
mqns = [rdMolDescriptors.MQNs_(m) for m in filtered_mols]
MACCs = [rdMolDescriptors.GetMACCSKeysFingerprint(m) for m in filtered_mols]
all_des = np.column_stack((fps,mqns,MACCs))
trainset, testset = ms.train_test_split(list(zip(all_des,pvals)))
print(len(trainset))
print(len(testset))
good_model = ensemble.RandomForestRegressor()
good_model.fit([all_des for all_des,pvals in trainset],[pvals for all_des,pvals in trainset])
test_predictions = good_model.predict([all_des for all_des,pvals in testset])
def plot(X, *args, highlights=set()):
fig = plt.figure()
#ax = plt.subplot(111)
for i, point in enumerate(X):
x,y = point
if i in highlights:
plt.plot([x],[y],"r.", markersize=4)
else:
plt.plot([x],[y],"k.", markersize=1)
plt.show()
plot(zip(test_predictions,[pvals for all_des,pvals in testset]))
print("R^2: %0.2f" % good_model.score([all_des for all_des,pvals in testset], [pvals for all_des,pvals in testset]))
print("R: %0.2f" % np.sqrt(good_model.score([all_des for all_des,pvals in testset], [pvals for all_des,pvals in testset])))
print("MSE: %0.2f" % mean_squared_error(good_model.predict([all_des for all_des,pvals in testset]), [pvals for all_des,pvals in testset]))
###Output
R^2: 0.65
R: 0.81
MSE: 0.45
###Markdown
Analysis
###Code
def make_graph_scaffold(mol):
Chem.rdmolops.RemoveStereochemistry(mol)
atomic_scaffold = MurckoScaffold.GetScaffoldForMol(mol)
try:
Chem.SanitizeMol(atomic_scaffold)
graph_scaffold = MurckoScaffold.MakeScaffoldGeneric(atomic_scaffold)
except ValueError:
return None
return graph_scaffold
graph_scaffolds = [make_graph_scaffold(m) for m in filtered_mols]
Draw.MolsToGridImage(graph_scaffolds[20:30])
len(graph_scaffolds), len([x for x in graph_scaffolds if x])
dist_matrix = []
for i,n in enumerate(fps):
row = []
for j,m in enumerate(fps):
row.append(DataStructs.FingerprintSimilarity(fps[i],fps[j], metric=DataStructs.TanimotoSimilarity))
dist_matrix.append(row)
d = complete(dist_matrix)
fig = plt.figure(figsize=(25, 10))
dn = dendrogram(d)
plt.show()
clusters = fcluster(d, 5, criterion='maxclust')
hbas = [rdMolDescriptors.CalcNumLipinskiHBA(m) for m in filtered_mols]
hbds = [rdMolDescriptors.CalcNumLipinskiHBD(m) for m in filtered_mols]
logPs = [Chem.Descriptors.MolLogP(m) for m in filtered_mols]
X = np.column_stack((fps,mqns,MACCs,hbas,hbds,logPs))
pca = PCA(n_components = 2)
pca.fit(X)
print(pca.explained_variance_ratio_)
projected = pca.fit_transform(X)
###Output
_____no_output_____
###Markdown
PCA, color according to activity
###Code
plt.scatter(projected[:, 0], projected[:, 1],
c=activities,edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('coolwarm', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar();
###Output
_____no_output_____
###Markdown
PCA, color according to clusters based on Morgan fingerprints
###Code
plt.scatter(projected[:, 0], projected[:, 1],
c=clusters,edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('viridis', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar();
###Output
_____no_output_____ |
code/1_Numpy.ipynb | ###Markdown
Introduction to Numpy
###Code
import numpy as np
np.array([10,20,24,5,50,10,15])
a = np.array([11,12,13,14,15,50,52,54,77,88,43,100])
a[4]
a[3:]
a[3:7]
a[1::4]
np.zeros(5)
np.ones(5)
ones=np.ones((3,5))
ones
type(ones)
type(ones[1])
type(ones[2][2])
np.linspace(3, 10, 100)
b = np.array([['x', 'y', 'z'], ['a', 'b', 'c']])
print(b)
print(type(b))
print(f'Forma: {b.shape}')
print(f'Dimensión: {b.ndim}')
c = [12,4,5,6,3,1,56,67,3,23456,678,43,2,1,5,6]
np.sort(c)
cabeceras = [('nombre', 'S10'), ('edad', int)]
datos = [('Juan', 10), ('Maria', 70), ('Javier', 42), ('Lisney', 20)]
usuarios = np.array(datos, dtype = cabeceras)
np.sort(usuarios, order = 'edad')
np.arange(25)
np.arange(3,10)
np.arange(5,50,5)
np.full((3,5), 10)
np.diag([0,3,9,10])
###Output
_____no_output_____
###Markdown
Challenge: Create a 3-dimensional array and sort it by Name, Age, Country
###Code
cabeceras = [('nombre', 'S10'), ('edad', int), ('pais', 'S15')]
datos = [('Juan', 10, 'Colombia'), ('Maria', 70, 'Chile'), ('Javier', 42,'Ecuador'), ('Lisney', 20, 'Peru')]
usuarios = np.array(datos, dtype = cabeceras)
np.sort(usuarios, order = 'pais')
###Output
_____no_output_____ |
cidds_experiments.ipynb | ###Markdown
Load original data
###Code
# Imports needed by this notebook (they were missing from the file as given)
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from pandarallel import pandarallel
pandarallel.initialize()  # enables the parallel_apply calls used below

#Open CIDDS-001
dirpath = "/mnt/h/CIDDS/CIDDS-001/traffic/"
filepaths = [dirpath+f for f in os.listdir(dirpath) if f.endswith('.csv')]
print(filepaths)
dtypes = {'Date first seen': object,'Duration': np.float64, 'Proto': object, 'Src IP Addr': object, 'Src Pt': np.int32, \
'Dst IP Addr': object, 'Dst Pt': np.float32, 'Packets': np.int64, 'Bytes': object, 'Flows': np.int32, \
'class': object, 'attackType': object, 'attackID': object}
#Open as external server dataframe and internal (openstack) server to maintain traffic distributions
df_ext = pd.concat([pd.read_csv(f, sep=',', header=0, dtype=dtypes, skipinitialspace=True) for f in list(filepaths[i] for i in range(0,4))], ignore_index= True)
df_int = pd.concat([pd.read_csv(f, sep=',', header=0, dtype=dtypes, skipinitialspace=True) for f in list(filepaths[i] for i in range(4,8))], ignore_index= True)
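# The 'Bytes' column mixes plain integers with strings such as '1.2 M'; convert both to an integer byte count (-1 if unparsable).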
def fixBytesString(bytesString: str):
splitString = bytesString.split(' ')
if len(splitString) == 1:
return int(splitString[0])
elif len(splitString) == 2:
if splitString[1] == 'M':
return int(float(splitString[0])*1000000)
return -1
def fixColumns(df: pd.DataFrame):
df['Dst Pt'] = df['Dst Pt'].astype(int)
df['Proto'] = df['Proto'].str.strip()
df['Bytes'] = df['Bytes'].astype(str)
df['BytesAsInt'] = df['Bytes'].parallel_apply(fixBytesString)
return df
df_ext = fixColumns(df_ext)
df_int = fixColumns(df_int)
# Only include normal and attacker traffic as benign vs. malicious
df_ext = df_ext.loc[(df_ext['class'] == 'normal') | (df_ext['class'] == 'attacker')]
df_int = df_int.loc[(df_int['class'] == 'normal') | (df_int['class'] == 'attacker')]
# Rename normal instances for multiclass classification
df_ext.loc[df_ext['attackType'] == '---','attackType'] = 'normal'
df_int.loc[df_int['attackType'] == '---','attackType'] = 'normal'
print(df_ext['attackType'].value_counts())
print(df_int['attackType'].value_counts())
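# The helper functions below are NOT in the original notebook: objectToFile, objectFromFile,
# abbrv_num and add_value_labels are called further down but never defined in this file,
# so these are minimal stand-in sketches of what they are assumed to do.
import pickle

def objectToFile(obj, filename):
    # Persist an arbitrary Python object (e.g. the per-fold predictions) with pickle
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)

def objectFromFile(filename):
    # Load an object previously stored with objectToFile
    with open(filename, 'rb') as f:
        return pickle.load(f)

def abbrv_num(value):
    # Abbreviate a large count for plot annotations, e.g. 1500000 -> '1.5M'
    for unit in ['', 'K', 'M', 'B']:
        if abs(value) < 1000:
            return '{:.1f}{}'.format(value, unit)
        value /= 1000
    return '{:.1f}T'.format(value)

def add_value_labels(ax, fmt='{:.3f}'):
    # Write each bar's height just above the bar in a matplotlib bar chart
    for p in ax.patches:
        ax.annotate(fmt.format(p.get_height()),
                    (p.get_x() + p.get_width() / 2, p.get_height()),
                    ha='center', va='bottom', fontsize=9)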
###Output
normal 134240
portScan 11036
bruteForce 1224
Name: attackType, dtype: int64
normal 28051906
dos 1480217
portScan 168177
pingScan 4134
bruteForce 4077
Name: attackType, dtype: int64
###Markdown
Data composition: total number of instances = 29,855,011. Internal: 29,708,611 instances (5.58% malicious). External: 146,500 instances (8.37% malicious). One-hot encoding and mean_bytes column
###Code
# One hot encoding for protocol
ohe_df = pd.get_dummies(df_ext['Proto'])
df_ext = df_ext.join(ohe_df)
ohe_df = pd.get_dummies(df_int['Proto'])
df_int = df_int.join(ohe_df)
#Generate mean bytes column
df_ext['mean_bytes'] = df_ext['BytesAsInt']/df_ext['Packets']
df_int['mean_bytes'] = df_int['BytesAsInt']/df_int['Packets']
print(df_ext.head())
#Get input columns and corresponding label vector
#Use duration, proto OHE, packets, bytesasint, average bytes
features = ['Duration','Packets','mean_bytes','BytesAsInt','ICMP','TCP','UDP']
#Try adding ports
features.extend(['Src Pt', 'Dst Pt'])
label = 'class' #'class' or 'attackType'
skf = StratifiedKFold(n_splits=5)
print(df_int.loc[df_int['class'] == 'normal'].shape)
df_int.drop(index=df_int.loc[df_int['class'] == 'normal'].sample(frac=0.5).index, inplace=True)
print(df_int.loc[df_int['class'] == 'normal'].shape)
print(df_ext.loc[df_ext['class'] == 'normal'].shape)
df_ext.drop(index=df_ext.loc[df_ext['class'] == 'normal'].sample(frac=0.5).index, inplace=True)
print(df_ext.loc[df_ext['class'] == 'normal'].shape)
import math
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
cfs = []
preds = list()
for (train_ext_idx, test_ext_idx), (train_int_idx, test_int_idx) in \
zip(skf.split(df_ext[features],df_ext['attackType']),skf.split(df_int[features], df_int['attackType'])):
print("TRAIN:", (len(train_int_idx)+len(train_ext_idx)), "TEST:", (len(test_int_idx)+len(test_ext_idx)))
X_train = pd.concat([df_ext[features].iloc[train_ext_idx,:],df_int[features].iloc[train_int_idx,:]], ignore_index=True)
X_test = pd.concat([df_ext[features].iloc[test_ext_idx,:],df_int[features].iloc[test_int_idx,:]], ignore_index=True)
y_train = pd.concat([df_ext[label].iloc[train_ext_idx],df_int[label].iloc[train_int_idx]], ignore_index=True)
y_test = pd.concat([df_ext[label].iloc[test_ext_idx],df_int[label].iloc[test_int_idx]], ignore_index=True)
#Apply Random Forest
rf_clf = RandomForestClassifier(n_estimators=100,min_samples_split=10,min_samples_leaf=5,max_samples=0.8,criterion='gini',n_jobs=5, verbose=5)
rf_clf.fit(X_train,y_train)
y_pred = rf_clf.predict(X_test)
cfs.append(confusion_matrix(y_test, y_pred))
preds.append([y_test, y_pred])
print(cfs)
print(np.shape(cfs))
cf = np.mean(cfs,axis=(0))
print(cf)
print(np.std(cfs,axis=(0)))
#objectToFile(preds, "cidds_preds_"+label)
#Load object from file
from sklearn.metrics import confusion_matrix
label='class'
preds_mem = objectFromFile("cidds_preds_"+label)
cfs = []
for pred_tuple in preds_mem:
cfs.append(confusion_matrix(pred_tuple[0], pred_tuple[1]))
cf = np.mean(cfs,axis=(0))
print(cf)
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
paper1_acc = 0.9990
paper1_rec = 0.999
paper1_spec = 1-0.0001
paper2_acc = 0.9705
paper2_spec = 1-0.0021
tp, fn, fp, tn = np.mean(cfs,axis=0).ravel()
print(tn,fp,fn,tp)
acc_scores = [accuracy_score(pred_tuple[0], pred_tuple[1]) for pred_tuple in preds]
rec_score = tp / (tp+fn)
spec_score = tn / (tn+fp)
print(np.mean(acc_scores), "\n")
print(rec_score, "\n")
print(spec_score)
print(fp / (fp+tn))
import matplotlib.patches as mpatches
#Colors
clr_acc = 'royalblue'
clr_rec = 'salmon'
clr_spec = 'lightgreen'
acc_patch = mpatches.Patch(color=clr_acc, label='accuracy')
rec_patch = mpatches.Patch(color=clr_rec, label='recall')
spec_patch = mpatches.Patch(color=clr_spec, label='specificity')
labels = ['Abdulhammed et al.\nRF (10 features)', 'Idhammad et al.\nNB+RF (10 features)', 'Our work\nRF (7 features)']
x = np.arange(len(labels))*10
width = 2.5 # the width of the bars
pad_width = 3
scores = [paper1_acc,paper1_rec,paper1_spec,paper2_acc,paper2_spec,np.mean(acc_scores),rec_score,spec_score]
fig, ax = plt.subplots(figsize=(7,6))
#Spawn bar(s) of group 1
plt.bar(x[0]-pad_width, height=scores[0], width=width, color=clr_acc)
plt.bar(x[0], height=scores[1], width=width, color=clr_rec)
plt.bar(x[0]+pad_width, height=scores[2], width=width, color=clr_spec)
#Spawn bar(s) of group 2
plt.bar(x[1]-pad_width/2, height=scores[3], width=width, color=clr_acc)
plt.bar(x[1]+pad_width/2, height=scores[4], width=width, color=clr_spec)
#Spawn bar(s) of group 3
plt.bar(x[2]-pad_width, height=scores[5], width=width, color=clr_acc)
plt.bar(x[2], height=scores[6], width=width, color=clr_rec)
plt.bar(x[2]+pad_width, height=scores[7], width=width, color=clr_spec)
#Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.tick_params(left = False)
#Set plot details
plt.rc('font', size=13)
ax.set_xticklabels(labels)
#plt.ylabel('Metric score')
#plt.yticks(size='14')
plt.ylim([0.8, 1])
plt.xticks(size='14')
ax.set_yticklabels([])
plt.title("CIDDS-001 results comparison", fontweight='bold', pad=25)
ax.set_xticks(x)
ax.set_xticklabels(labels)
add_value_labels(ax)
#ax.legend(handles=[acc_patch,rec_patch,spec_patch],bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax.set_axisbelow(True)
plt.grid(axis='y')
fig.tight_layout()
plt.savefig('CIDDS_binaryclass_bars.png',bbox_inches='tight')
plt.show()
for cf in cfs:
print(cf)
np.set_printoptions(suppress=True)
print('mean\n', np.mean(cfs,axis=0))
print('std. dev\n', np.std(cfs,axis=0))
print('std. dev %\n', np.divide(np.std(cfs,axis=0),np.mean(cfs,axis=0))*100)
#Plot confusion matrix
import seaborn as sns
#labels = ['Benign','Malicious']
#Standard heatmap
cf_norm = cf/cf.sum(axis=1)[:,None]
cf_percentages = ["{0:.2%}".format(value) for value in cf_norm.flatten()]
cf_numbers = [abbrv_num(value) for value in cf.flatten()]
cf_labels = ['{v1}\n({v2})'.format(v1=v1, v2=v2) for v1,v2 in zip(cf_percentages,cf_numbers)]
cf_labels = np.asarray(cf_labels).reshape(cf.shape)
fig, ax = plt.subplots(figsize=(6,6))
plt.rc('font', size=14)
#plot_confusion_matrix(rf_clf, df_test, y_test, ax=ax, normalize='true',xticks_rotation=30)
column_labels = sorted(y_test.unique())
#column_labels = ['Malicious', 'Benign']
column_labels = ['BruteForce', 'DoS', 'Benign', 'PingScan', 'PortScan']
sns.heatmap(cf_norm, annot=cf_labels, fmt='',cmap='Blues',cbar=False, vmin=0.0, vmax=1.0, ax=ax, \
xticklabels=column_labels, yticklabels=column_labels)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.yticks(rotation='0',size=12)
plt.xticks(rotation='65',size=12)
plt.title("CIDDS-001 mean multiclass classification matrix")
plt.savefig('CIDDS_multiclass_cf.png',bbox_inches='tight')
plt.show()
importance = rf_clf.feature_importances_
# summarize feature importance
for i,v in sorted(enumerate(importance),key=lambda x: x[1], reverse=True):
print('Feature: %s, Score: %.5f' % (features[i],v))
###Output
Feature: BytesAsInt, Score: 0.32722
Feature: Duration, Score: 0.26789
Feature: mean_bytes, Score: 0.24856
Feature: Packets, Score: 0.14413
Feature: UDP, Score: 0.00498
Feature: TCP, Score: 0.00486
Feature: ICMP, Score: 0.00237
###Markdown
Binary feature importance: mean_bytes 0.33674; BytesAsInt 0.27816; Duration 0.18937; Packets 0.17305; TCP 0.01338; UDP 0.00510; ICMP 0.00420. Per-fold confusion matrices: [[331886, 1887], [4746, 5632484]], [[332979, 794], [3745, 5633484]], [[331478, 2295], [4764, 5632465]], [[331925, 1848], [4751, 5632478]], [[330436, 3337], [4010, 5633219]]. Mean over the 5 folds: [[331740.8, 2032.2], [4403.2, 5632826.0]]. Std. dev.: [[819.72, 819.72], [437.38, 437.22]].
###Code
import math
import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import balanced_accuracy_score
fold_train_bacc = []
fold_val_bacc = []
for (train_ext_idx, test_ext_idx), (train_int_idx, test_int_idx) in \
zip(skf.split(df_ext[features],df_ext['attackType']),skf.split(df_int[features], df_int['attackType'])):
#Split train into train/val
extsplit_idx = math.floor(len(train_ext_idx)*0.75)
intsplit_idx = math.floor(len(train_int_idx)*0.75)
val_ext_idx = train_ext_idx[extsplit_idx:]
val_int_idx = train_int_idx[intsplit_idx:]
train_ext_idx = train_ext_idx[:extsplit_idx]
train_int_idx = train_int_idx[:intsplit_idx]
print("TRAIN:", (len(train_int_idx)+len(train_ext_idx)), "VAL:", (len(val_int_idx)+len(val_ext_idx)), "TEST:", (len(test_int_idx)+len(test_ext_idx)))
#Get train / val / test split data
X_train = pd.concat([df_ext[features].iloc[train_ext_idx,:],df_int[features].iloc[train_int_idx,:]], ignore_index=True)
X_val = pd.concat([df_ext[features].iloc[val_ext_idx,:],df_int[features].iloc[val_int_idx,:]], ignore_index=True)
X_test = pd.concat([df_ext[features].iloc[test_ext_idx,:],df_int[features].iloc[test_int_idx,:]], ignore_index=True)
y_train = pd.concat([df_ext[label].iloc[train_ext_idx],df_int[label].iloc[train_int_idx]], ignore_index=True)
y_val = pd.concat([df_ext[label].iloc[val_ext_idx],df_int[label].iloc[val_int_idx]], ignore_index=True)
y_test = pd.concat([df_ext[label].iloc[test_ext_idx],df_int[label].iloc[test_int_idx]], ignore_index=True)
#Apply Random Forest
measure_search = ['gini','entropy']
size_search = [1,5]
msamples_search = [0.7, 0.8, 0.9]
train_bacc_scores, val_bacc_scores = list(), list()
for (c, n, maxs) in [(c, n, maxs) for c in measure_search for n in size_search for maxs in msamples_search]:
print("Working on grid: ", c, n, maxs)
clf = RandomForestClassifier(n_estimators=n,min_samples_split=10,min_samples_leaf=5,max_samples=maxs,criterion=c,n_jobs=5)
clf.fit(X_train,y_train)
print("Finished building random forest")
#evaluate on train
#ytrain_pred = clf.predict(X_train)
#train_bacc = balanced_accuracy_score(y_train,ytrain_pred)
#train_bacc_scores.append(train_bacc)
#print("Finished evaluating train set")
#evaluate on val
yval_pred = clf.predict(X_val)
val_bacc = balanced_accuracy_score(y_val,yval_pred)
val_bacc_scores.append(val_bacc)
        #Print progress (the train-set evaluation above is commented out, so only validation is reported)
        print('{%s,%d,%.1f} val: %.3f' % (c, n, maxs, val_bacc))
#fold_train_bacc.append(train_bacc_scores)
fold_val_bacc.append(val_bacc_scores)
print(len(fold_train_bacc))
print(len(fold_val_bacc))
a = fold_train_bacc
a = np.mean(a,axis=0)
b = fold_val_bacc
b = np.mean(b, axis=0)
print(np.mean(fold_val_bacc, axis=0).reshape(2,2,4))
print(fold_train_bacc)
print("\n\n", fold_val_bacc)
print(a.reshape(2,4,4))
print("\n\n",b.reshape(2,4,4))
a_resh = a.reshape(2,4,4)
b_resh = b.reshape(2,4,4)
#print(a.reshape(2,16))
###Output
[[[0.99659652 0.99659533 0.99660712 0.5 ]
[0.99660044 0.99660265 0.99660291 0.5 ]
[0.99660129 0.9966051 0.99660564 0.5 ]
[0.99660248 0.99660353 0.99660654 0.5 ]]
[[0.99660211 0.99660263 0.99660704 0.5 ]
[0.9966013 0.9966074 0.99660926 0.5 ]
[0.9966057 0.99660663 0.99660893 0.5 ]
[0.99660543 0.99660925 0.99661008 0.5 ]]]
[[[0.98744506 0.98533491 0.98695768 0.5 ]
[0.98712065 0.98858129 0.98906852 0.5 ]
[0.98923099 0.98647087 0.98890612 0.5 ]
[0.9890686 0.99004275 0.98695806 0.5 ]]
[[0.99166569 0.98371129 0.98988018 0.5 ]
[0.9864713 0.9876073 0.99069186 0.5 ]
[0.98988029 0.98874388 0.98939318 0.5 ]
[0.98939325 0.98988023 0.98955557 0.5 ]]]
###Markdown
N-grams experiment. Show the number of source IPs with more than 'threshold' flows
###Code
print(df_int.columns)
threshold = 2
vc_int = df_int['Src IP Addr'].value_counts()
res_int = df_int[df_int['Src IP Addr'].isin(vc_int[vc_int>threshold].index)]['Src IP Addr'].value_counts()
print(res_int)
vc_ext = df_ext['Src IP Addr'].value_counts()
res_ext = df_ext[df_ext['Src IP Addr'].isin(vc_ext[vc_ext>threshold].index)]['Src IP Addr'].value_counts()
print(res_ext)
###Output
192.168.220.15 2689683
DNS 1959126
192.168.220.16 1540082
192.168.210.5 1215934
192.168.200.8 1147514
...
15538_237 3
11116_6 3
16862_24 3
16326_230 3
10250_224 3
Name: Src IP Addr, Length: 15034, dtype: int64
OPENSTACK_NET 67120
EXT_SERVER 67120
ATTACKER1 6150
ATTACKER2 5494
ATTACKER3 616
Name: Src IP Addr, dtype: int64
###Markdown
Transform interesting features to N-gram representation df_int:
###Code
#Per source IP, grab N-gram and transform numerical features into new
#Done for bigrams and trigrams
features = ['Duration', 'Packets', 'BytesAsInt', 'mean_bytes']
#Create/reset columns for n_gram features
for feature in features:
column_mean = 'ngram_' + feature + '_mean'
column_std = 'ngram_' + feature + '_std'
if column_mean not in df_int.columns:
df_int[column_mean] = np.nan
if column_std not in df_int.columns:
df_int[column_std] = np.nan
#List of ngram features
featurelist = df_int.filter(regex='^ngram', axis=1).columns
#Window size 2 = bigrams, 3 = trigrams
winsize = 3
#Window type
indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=winsize)
for itr, (srcIP, _) in enumerate(res_int.iteritems()):
sub_df = df_int[df_int['Src IP Addr'] == srcIP]
for feature in features:
column_mean = 'ngram_' + feature + '_mean'
column_std = 'ngram_' + feature + '_std'
sub_df.loc[:,column_mean] = sub_df[feature].rolling(window=indexer, min_periods=winsize).mean()
sub_df.loc[:,column_std] = sub_df[feature].rolling(window=indexer, min_periods=winsize).std()
df_int.loc[:,featurelist] = df_int[featurelist].combine_first(sub_df[featurelist])
print('Progress: ' + str(itr+1) + '/' + str(len(res_int)), end='\r')
df_int_feather_path = "/mnt/h/CIDDS/CIDDS-001/feather/trigram_feather"
df_int.reset_index().to_feather(df_int_feather_path)
df_int_feather_path = "/mnt/h/CIDDS/CIDDS-001/feather/trigram_feather"
df_int = pd.read_feather(df_int_feather_path)
#Drop rows without ngram features
print(df_int.shape)
df_int.dropna(subset=df_int.filter(regex='^ngram', axis=1).columns, axis=0, how='any', inplace=True)
print(df_int.shape)
df_int
###Output
_____no_output_____
###Markdown
df_ext:
###Code
#Per source IP, grab N-gram and transform numerical features into new
#Done for bigrams and trigrams
features = ['Duration', 'Packets', 'BytesAsInt', 'mean_bytes']
#Create/reset columns for n_gram features
for feature in features:
column_mean = 'ngram_' + feature + '_mean'
column_std = 'ngram_' + feature + '_std'
if column_mean not in df_ext.columns:
df_ext[column_mean] = np.nan
if column_std not in df_ext.columns:
df_ext[column_std] = np.nan
#List of ngram features
featurelist = df_ext.filter(regex='^ngram', axis=1).columns
#Window size 2 = bigrams, 3 = trigrams
winsize = 3
#Window type
indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=winsize)
for itr, (srcIP, _) in enumerate(res_ext.iteritems()):
sub_df = df_ext[df_ext['Src IP Addr'] == srcIP]
for feature in features:
column_mean = 'ngram_' + feature + '_mean'
column_std = 'ngram_' + feature + '_std'
sub_df.loc[:,column_mean] = sub_df[feature].rolling(window=indexer, min_periods=winsize).mean()
sub_df.loc[:,column_std] = sub_df[feature].rolling(window=indexer, min_periods=winsize).std()
df_ext.loc[:,featurelist] = df_ext[featurelist].combine_first(sub_df[featurelist])
print('Progress: ' + str(itr+1) + '/' + str(len(res_ext)), end='\r')
#Drop rows without ngram features
print(df_ext.shape)
df_ext.dropna(subset=df_ext.filter(regex='^ngram', axis=1).columns, axis=0, how='any', inplace=True)
print(df_ext.shape)
###Output
(146500, 29)
(146490, 29)
###Markdown
Build Random Forest with ngram features
###Code
#Compare ngram feature set with non-ngram feature set by uncommenting feature set to be tested
features = df_int.filter(regex='^ngram', axis=1).columns
#features = ['Duration','Packets','mean_bytes','BytesAsInt','ICMP','TCP','UDP']
label = 'class' #'class' or 'attackType'
skf = StratifiedKFold(n_splits=5)
import math
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
cfs = []
preds = list()
for (train_ext_idx, test_ext_idx), (train_int_idx, test_int_idx) in \
zip(skf.split(df_ext[features],df_ext['attackType']),skf.split(df_int[features], df_int['attackType'])):
print("TRAIN:", (len(train_int_idx)+len(train_ext_idx)), "TEST:", (len(test_int_idx)+len(test_ext_idx)))
X_train = pd.concat([df_ext[features].iloc[train_ext_idx,:],df_int[features].iloc[train_int_idx,:]], ignore_index=True)
X_test = pd.concat([df_ext[features].iloc[test_ext_idx,:],df_int[features].iloc[test_int_idx,:]], ignore_index=True)
y_train = pd.concat([df_ext[label].iloc[train_ext_idx],df_int[label].iloc[train_int_idx]], ignore_index=True)
y_test = pd.concat([df_ext[label].iloc[test_ext_idx],df_int[label].iloc[test_int_idx]], ignore_index=True)
#Apply Random Forest
rf_clf = RandomForestClassifier(n_estimators=100,min_samples_split=10,min_samples_leaf=5,max_samples=0.8,criterion='gini',n_jobs=5, verbose=5)
rf_clf.fit(X_train,y_train)
y_pred = rf_clf.predict(X_test)
cfs.append(confusion_matrix(y_test, y_pred))
preds.append([y_test, y_pred])
importance = rf_clf.feature_importances_
# summarize feature importance
for i,v in sorted(enumerate(importance),key=lambda x: x[1], reverse=True):
print('Feature: %s, Score: %.5f' % (features[i],v))
###Output
Feature: ngram_BytesAsInt_mean, Score: 0.20894
Feature: ngram_Packets_mean, Score: 0.19248
Feature: ngram_mean_bytes_mean, Score: 0.16542
Feature: ngram_Duration_mean, Score: 0.11323
Feature: ngram_mean_bytes_std, Score: 0.10154
Feature: ngram_BytesAsInt_std, Score: 0.10119
Feature: ngram_Duration_std, Score: 0.09975
Feature: ngram_Packets_std, Score: 0.01746
###Markdown
Ngram feature set results
###Code
print(cfs)
print(np.shape(cfs))
cf = np.mean(cfs,axis=(0))
print(cf)
print(np.std(cfs,axis=(0)))
#objectToFile(preds, "cidds_trigrams_preds_"+label)
from sklearn.metrics import classification_report, confusion_matrix
label = 'class'
preds_mem = objectFromFile("cidds_trigrams_preds_"+label)
cfs = []
for pred_tuple in preds_mem:
cfs.append(confusion_matrix(pred_tuple[0], pred_tuple[1]))
#Plot confusion matrix
import seaborn as sns
#labels = ['Benign','Malicious']
#Standard heatmap
cf_norm = cf/cf.sum(axis=1)[:,None]
cf_percentages = ["{0:.2%}".format(value) for value in cf_norm.flatten()]
cf_numbers = [abbrv_num(value) for value in cf.flatten()]
cf_labels = ['{v1}\n({v2})'.format(v1=v1, v2=v2) for v1,v2 in zip(cf_percentages,cf_numbers)]
cf_labels = np.asarray(cf_labels).reshape(cf.shape)
fig, ax = plt.subplots(figsize=(6,6))
plt.rc('font', size=14)
#plot_confusion_matrix(rf_clf, df_test, y_test, ax=ax, normalize='true',xticks_rotation=30)
column_labels = sorted(y_test.unique())
column_labels = ['Malicious', 'Benign']
#column_labels = ['BruteForce', 'DoS', 'Benign', 'PingScan', 'PortScan']
sns.heatmap(cf_norm, annot=cf_labels, fmt='',cmap='Blues',cbar=False, vmin=0.0, vmax=1.0, ax=ax, \
xticklabels=column_labels, yticklabels=column_labels)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.yticks(rotation='0',size=12)
plt.xticks(rotation='65',size=12)
plt.title("CIDDS-001 mean binary classification matrix - Trigrams")
plt.savefig('CIDDS_binaryclass_trigrams_cf.png',bbox_inches='tight')
plt.show()
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
tn, fp, fn, tp = np.mean(cfs,axis=0).ravel()
print(tn,fp,fn,tp)
acc_scores = [accuracy_score(pred_tuple[0], pred_tuple[1]) for pred_tuple in preds]
rec_score = tp / (tp+fn)
spec_score = tn / (tn+fp)
print('Accuracy: ' + str(np.mean(acc_scores)), "\n")
print('Recall: ' + str(rec_score), "\n")
print('Specificity: ' + str(spec_score))
###Output
321091.4 12680.4 11297.6 2803957.0
Accuracy: 0.9923855829409579
Recall: 0.995987005935449
Specificity: 0.9620087736591287
###Markdown
Normal feature set results
###Code
print(cfs)
print(np.shape(cfs))
cf = np.mean(cfs,axis=(0))
print(cf)
print(np.std(cfs,axis=(0)))
objectToFile(preds, "cidds_trigrams_normal_preds_"+label)
from sklearn.metrics import classification_report, confusion_matrix
label = 'class'
preds_mem = objectFromFile("cidds_trigrams_normal_preds_"+label)
cfs = []
for pred_tuple in preds_mem:
cfs.append(confusion_matrix(pred_tuple[0], pred_tuple[1]))
#Plot confusion matrix
import seaborn as sns
#labels = ['Benign','Malicious']
#Standard heatmap
cf_norm = cf/cf.sum(axis=1)[:,None]
cf_percentages = ["{0:.2%}".format(value) for value in cf_norm.flatten()]
cf_numbers = [abbrv_num(value) for value in cf.flatten()]
cf_labels = ['{v1}\n({v2})'.format(v1=v1, v2=v2) for v1,v2 in zip(cf_percentages,cf_numbers)]
cf_labels = np.asarray(cf_labels).reshape(cf.shape)
fig, ax = plt.subplots(figsize=(6,6))
plt.rc('font', size=14)
#plot_confusion_matrix(rf_clf, df_test, y_test, ax=ax, normalize='true',xticks_rotation=30)
column_labels = sorted(y_test.unique())
column_labels = ['Malicious', 'Benign']
#column_labels = ['BruteForce', 'DoS', 'Benign', 'PingScan', 'PortScan']
sns.heatmap(cf_norm, annot=cf_labels, fmt='',cmap='Blues',cbar=False, vmin=0.0, vmax=1.0, ax=ax, \
xticklabels=column_labels, yticklabels=column_labels)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.yticks(rotation='0',size=12)
plt.xticks(rotation='65',size=12)
plt.title("CIDDS-001 mean binary classification matrix - Normal")
plt.savefig('CIDDS_binaryclass_trigrams_normal_cf.png',bbox_inches='tight')
plt.show()
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
tn, fp, fn, tp = np.mean(cfs,axis=0).ravel()
print(tn,fp,fn,tp)
acc_scores = [accuracy_score(pred_tuple[0], pred_tuple[1]) for pred_tuple in preds_mem]
rec_score = tp / (tp+fn)
spec_score = tn / (tn+fp)
print('Accuracy: ' + str(np.mean(acc_scores)), "\n")
print('Recall: ' + str(rec_score), "\n")
print('Specificity: ' + str(spec_score))
###Output
331742.4 2029.4 4400.6 5626108.6
Accuracy: 0.998921915315526
Recall: 0.999218436584741
Specificity: 0.9939197978978451
###Markdown
Feature importance: BytesAsInt 0.30886; mean_bytes 0.27141; Duration 0.23921; Packets 0.16979; TCP 0.00435; UDP 0.00421; ICMP 0.00217. Barplot of own feature sets
###Code
import matplotlib.patches as mpatches
#Scores
genset_acc = 0.999
genset_rec = 0.994
genset_spec = 0.999
trigramset_acc = 0.993
trigramset_rec = 0.995
trigramset_spec = 0.960
altset_acc = 0.998
altset_rec = 0.999
altset_spec = 0.994
#Colors
clr_acc = 'royalblue'
clr_rec = 'salmon'
clr_spec = 'lightgreen'
acc_patch = mpatches.Patch(color=clr_acc, label='accuracy')
rec_patch = mpatches.Patch(color=clr_rec, label='recall')
spec_patch = mpatches.Patch(color=clr_spec, label='specificity')
labels = ['General\n (7 features)', 'Alternative\n (4 features)', \
'Trigram\n (8 features)']
x = np.arange(len(labels))*10
width = 2.5 # the width of the bars
pad_width = 3
scores = [genset_acc,genset_rec,genset_spec,trigramset_acc,trigramset_rec,trigramset_spec,altset_acc,altset_rec,altset_spec]
fig, ax = plt.subplots(figsize=(7,6))
#Spawn bar(s) of group 1
plt.bar(x[0]-pad_width, height=scores[0], width=width, color=clr_acc)
plt.bar(x[0], height=scores[1], width=width, color=clr_rec)
plt.bar(x[0]+pad_width, height=scores[2], width=width, color=clr_spec)
#Spawn bar(s) of group 2
plt.bar(x[1]-pad_width, height=scores[3], width=width, color=clr_acc)
plt.bar(x[1], height=scores[4], width=width, color=clr_rec)
plt.bar(x[1]+pad_width, height=scores[5], width=width, color=clr_spec)
#Spawn bar(s) of group 3
plt.bar(x[2]-pad_width, height=scores[6], width=width, color=clr_acc)
plt.bar(x[2], height=scores[7], width=width, color=clr_rec)
plt.bar(x[2]+pad_width, height=scores[8], width=width, color=clr_spec)
#Hide the left, right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
plt.tick_params(left = False)
#Set plot details
plt.rc('font', size=13)
plt.ylabel('Metric score')
plt.yticks()
#ax.set_yticklabels([])
plt.ylim([0.8, 1])
#ax.get_yaxis().set_visible(False)
plt.xticks(size='14')
plt.title("CIDDS-001 feature sets comparison", fontweight='bold', pad=25)
ax.set_xticks(x)
ax.set_xticklabels(labels)
add_value_labels(ax)
#ax.legend(handles=[acc_patch,rec_patch,spec_patch],bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax.set_axisbelow(True)
plt.grid(axis='y', color='grey')
fig.tight_layout()
plt.savefig('CIDDS_binaryclass_featuresets_bars.png',bbox_inches='tight')
plt.show()
###Output
_____no_output_____ |
03-dimensionality-reduction-and-clustering/03-dimensionality-reduction-clustering.ipynb | ###Markdown
Dimensionality Reduction and Clustering for Exploratory Data Analysis An important step in data analysis is data exploration and representation. We have already seen some concepts in Exploratory Data Analysis and how to use them in Python. In this tutorial we will see how, by combining a technique called [Principal Component Analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) together with [Cluster Analysis](https://en.wikipedia.org/wiki/Cluster_analysis), we can **represent in a two dimensional space data defined in a higher dimensional one** while, at the same time, being able to group this data in similar groups or clusters and **find hidden relationships in our data**. More concretely, PCA reduces data dimensionality by finding **principal components**. These are the directions of maximum variation in a dataset. By reducing a dataset's original features or variables to a reduced set of new ones based on the principal components, we end up with the minimum number of variables that keep the **maximum amount of variation or information about how the data is distributed**. If we end up with just two of these new variables, we will be able to represent each sample in our data in a two dimensional chart (e.g. a scatterplot). As an unsupervised data analysis technique, clustering organises data samples by proximity based on their variables. By doing so we will be able to understand how each data point relates to the others and discover groups of similar ones. Once we have each of these groups or clusters, we will be able to define a centroid for them, an ideal data sample that minimises the sum of the distances to each of the data points in a cluster. By analysing these centroids' variables we will be able to define each cluster in terms of its characteristics. But enough theory for today. Let's put these ideas into practice by using Python so we better understand them in order to apply them in the future. Getting and preparing data The [Gapminder website](http://www.gapminder.org/) presents itself as *a fact-based worldview*. It is a comprehensive resource for data regarding different countries' and territories' indicators. Its [Data section](http://www.gapminder.org/data/) contains a list of datasets that can be accessed as Google Spreadsheet pages (add `&output=csv` to download as CSV). Each indicator dataset is tagged with a *Data provider*, a *Category*, and a *Subcategory*. For this tutorial, we will use a dataset related to prevalence of Infectious Tuberculosis: - [TB estimated prevalence (existing cases) per 100K](https://docs.google.com/spreadsheets/d/1X5Jp7Q8pTs3KLJ5JBWKhncVACGsg5v4xu6badNs4C7I/pub?gid=0) We invite the reader to repeat the process with the new cases and deaths datasets and share the results. This notebook is about exploring the countries' distribution. Therefore we will work with datasets where each sample is a country and each variable is a year. So first, we need to download Google Spreadsheet data as CSV.
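Before loading the real data, here is a tiny toy sketch (added for illustration, not part of the original notebook) of the two ideas above: scikit-learn's PCA reports the share of variance each component keeps, and KMeans returns each cluster's centroid as the mean of its member points.
###Code
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
# Two well-separated blobs of 50 points each, living in a 3-dimensional space
toy = np.vstack([rng.normal(0, 1, (50, 3)), rng.normal(5, 1, (50, 3))])

toy_pca = PCA(n_components=2).fit(toy)
print(toy_pca.explained_variance_ratio_)   # fraction of the variance kept by each component

toy_km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(toy)
print(toy_km.cluster_centers_)             # one centroid (mean point) per cluster
###Output
_____no_output_____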
###Code
local_tb_existing_file = 'tb_existing_100.csv'
###Output
_____no_output_____
###Markdown
Now that we have it locally, we need to read the CSV file as a data frame.
###Code
import pandas as pd
existing_df = pd.read_csv(local_tb_existing_file, index_col = 0, thousands = ',')
existing_df.index.names = ['country']
existing_df.columns.names = ['year']
###Output
_____no_output_____
###Markdown
We have specified `index_col` to be 0 since we want the country names to be the row labels. We also specified the `thousands` separator to be ',' so Pandas automatically parses cells as numbers. We can use `head()` to check the first few lines.
###Code
existing_df.head()
###Output
_____no_output_____
###Markdown
Dimensionality reduction with PCA In this section we want to be able to represent each country in a two dimensional space. In our dataset, each sample is a country defined by 18 different variables, each one corresponding to TB case counts per 100K (existing, new, deaths) for a given year from 1990 to 2007. These variables represent not just the total counts or average in the 1990-2007 range but also all the variation in the time series and relationships within countries in a given year. By using PCA we will be able to reduce these 18 variables to just the two of them that best capture that information. In order to do so, we will first see how to perform PCA and plot the first two PCs in Python. We will close the section by analysing the resulting plot and each of the two PCs. Python's sklearn machine learning library comes with a [PCA implementation](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html). This implementation uses the `scipy.linalg` implementation of the [singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition). It only works for dense arrays (see [numPy dense arrays](http://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html) or [sparse array PCA](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.SparsePCA.html#sklearn.decomposition.SparsePCA) if you are using sparse arrays) and is not scalable to large dimensional data. For large dimensional data we should consider something such as [Spark's dimensionality reduction features](http://spark.apache.org/docs/latest/mllib-dimensionality-reduction.html). In our case we just have 18 variables, and that is far from being a large number of features for any machine learning library and computer nowadays. When using this implementation of PCA we need to specify in advance the number of principal components we want to use. Then we can just call the `fit()` method with our data frame and check the results.
###Code
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(existing_df)
###Output
_____no_output_____
###Markdown
This gives us an object we can use to transform our data by calling `transform`.
###Code
existing_2d = pca.transform(existing_df)
###Output
_____no_output_____
###Markdown
Or we could have just called `fit_transform` to perform both steps in one single call. In both cases we will end up with a lower dimension representation of our data frame, as a numPy array. Let's put it in a new dataframe.
###Code
existing_df_2d = pd.DataFrame(existing_2d)
existing_df_2d.index = existing_df.index
existing_df_2d.columns = ['PC1','PC2']
existing_df_2d.head()
###Output
_____no_output_____
###Markdown
We can also print the explained variance ratio as follows.
###Code
print(pca.explained_variance_ratio_)
###Output
[ 0.91808789 0.060556 ]
###Markdown
We see that the first PC already explains almost 92% of the variance, while the second one accounts for another 6% for a total of almost 98% between the two of them. Now we are ready to plot the lower dimensionality version of our dataset. We just need to call plot on the data frame, by passing the kind of plot we want (see [here](http://pandas.pydata.org/pandas-docs/version/0.15.0/visualization.html) for more on plotting data frames) and what columns correspond to each axis. We also add an annotation loop that tags every point with its country name.
###Code
%matplotlib inline
ax = existing_df_2d.plot(kind='scatter', x='PC2', y='PC1', figsize=(16,8), grid=True)
for i, country in enumerate(existing_df.index):
ax.annotate(country, (existing_df_2d.iloc[i].PC2, existing_df_2d.iloc[i].PC1))
###Output
_____no_output_____
###Markdown
Let's now create a bubble chart, by setting the point size to a value proportional to the mean value for all the years in that particular country. First we need to add a new column containing the re-scaled mean per country across all the years.
###Code
from sklearn.preprocessing import normalize
existing_df_2d['country_mean'] = pd.Series(existing_df.mean(axis=1), index=existing_df_2d.index)
country_mean_max = existing_df_2d['country_mean'].max()
country_mean_min = existing_df_2d['country_mean'].min()
country_mean_scaled = (existing_df_2d.country_mean-country_mean_min) / country_mean_max
existing_df_2d['country_mean_scaled'] = pd.Series(
country_mean_scaled,
index=existing_df_2d.index)
existing_df_2d.head()
###Output
_____no_output_____
###Markdown
Now we are ready to plot using this variable size (we will omit the country names this time since we are not so interested in them).
###Code
existing_df_2d.plot(kind='scatter', x='PC2', y='PC1', s=existing_df_2d['country_mean_scaled']*100, figsize=(16,8), grid=True)
###Output
_____no_output_____
###Markdown
Let's do the same with the sum instead of the mean.
###Code
existing_df_2d['country_sum'] = pd.Series(existing_df.sum(axis=1), index=existing_df_2d.index)
country_sum_max = existing_df_2d['country_sum'].max()
country_sum_min = existing_df_2d['country_sum'].min()
country_sum_scaled = (existing_df_2d.country_sum-country_sum_min) / country_sum_max
existing_df_2d['country_sum_scaled'] = pd.Series(
country_sum_scaled,
index=existing_df_2d.index)
existing_df_2d.plot(kind='scatter', x='PC2', y='PC1', s=existing_df_2d['country_sum_scaled']*100, figsize=(16,8), grid=True)
###Output
_____no_output_____
###Markdown
And finally let's associate the size with the change between 1990 and 2007. Note that in the scaled version, values close to zero correspond to negative values in the original non-scaled version, since we are scaling to a [0,1] range.
###Code
existing_df_2d['country_change'] = pd.Series(existing_df['2007']-existing_df['1990'], index=existing_df_2d.index)
country_change_max = existing_df_2d['country_change'].max()
country_change_min = existing_df_2d['country_change'].min()
country_change_scaled = (existing_df_2d.country_change - country_change_min) / country_change_max
existing_df_2d['country_change_scaled'] = pd.Series(
country_change_scaled,
index=existing_df_2d.index)
existing_df_2d[['country_change','country_change_scaled']].head()
existing_df_2d.plot(kind='scatter', x='PC2', y='PC1', s=existing_df_2d['country_change_scaled']*100, figsize=(16,8), grid=True)
###Output
_____no_output_____
###Markdown
PCA Results

From the plots we have done, we can confirm that most variation happens along the y axis, which we have assigned to PC1. We saw that the first PC already explains almost 92% of the variance, while the second one accounts for another 6%, for a total of almost 98% between the two of them. At the very top of our charts we could see an important concentration of countries, mostly developed. As we descend that axis, the countries become more sparse, and they belong to less developed regions of the world. When sizing points using two absolute magnitudes such as the average and the total number of cases, we can see that the directions also correspond to a variation in these magnitudes. Moreover, when using size to code the difference in the number of cases over time (2007 minus 1990), the size mostly changed along the direction of the second principal component, with more positive values (i.e. an increase in the number of cases) having a bigger size. That is, while the first PC captures most of the variation within our dataset and this variation is based on the total cases in the 1990-2007 range, the second PC is largely affected by the change over time. In the next section we will try to discover other relationships between countries.

Uncovering data structure with k-means clustering

In this section we will use [k-means clustering](https://en.wikipedia.org/wiki/K-means_clustering) to group countries based on how similar their situation has been year by year. That is, we will cluster the data based on the 18 variables that we have. Then we will use the cluster assignment to colour the previous 2D chart, in order to discover hidden relationships within our data and better understand the world situation regarding the tuberculosis disease.

When using k-means, we need to determine the right number of groups for our case. This can be done more or less accurately by iterating through different values for the number of groups and comparing an amount called the within-cluster sum of squared distances for each iteration: the sum of squared distances from each cluster member to its cluster centre. This quantity is of course minimal when the number of clusters equals the number of samples, but we don't want to get there; we normally stop when the improvement in this value starts decreasing at a lower rate. However, we will use a more intuitive approach based on our understanding of the world situation and the nature of the results that we want to achieve. Sometimes this is the way to go in data analysis, especially when doing exploration tasks, and using the knowledge that we have about the nature of our data is always a good thing to do.

Again we will use sklearn, in this case its [k-means clustering](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html) implementation, in order to perform our clustering on the TB data. Since we have already decided on 5 clusters, we will use that number straightaway.
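The elbow check described above is skipped in this tutorial, but a minimal sketch of it, using the inertia (within-cluster sum of squared distances) reported by sklearn's `KMeans`, could look like this:

```python
from sklearn.cluster import KMeans

inertias = []
for k in range(1, 11):
    km = KMeans(n_clusters=k, random_state=0).fit(existing_df)
    inertias.append(km.inertia_)

for k, wss in zip(range(1, 11), inertias):
    print(k, round(wss))
```

One would then pick the value of k after which the decrease in inertia slows down markedly.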
###Code
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=5)
clusters = kmeans.fit(existing_df)
###Output
_____no_output_____
###Markdown
Now we need to store the cluster assignments together with each country in our data frame. The cluster labels are returned in `clusters.labels_`.
###Code
existing_df_2d['cluster'] = pd.Series(clusters.labels_, index=existing_df_2d.index)
###Output
_____no_output_____
###Markdown
And now we are ready to plot, using the cluster column as color.
###Code
import numpy as np
existing_df_2d.plot(
kind='scatter',
x='PC2',y='PC1',
c=existing_df_2d.cluster.astype(np.float),
figsize=(16,8), grid=True)
###Output
_____no_output_____ |
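As mentioned in the introduction, each cluster can also be characterised through its centroid. A short sketch of how that could be done with the fitted model follows; the centroids live in the original 18-year space, so each row is an "ideal country" profile, and the extra column simply counts cluster members.

```python
centroids = pd.DataFrame(kmeans.cluster_centers_, columns=existing_df.columns)
centroids['cluster_size'] = existing_df_2d.groupby('cluster').size()
centroids
```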
coding_challenges.ipynb | ###Markdown
Coding Challenges

Functions, Strings

Question: Write a program which will find all such numbers which are divisible by 7 but are not a multiple of 5, between 2000 and 3200 (both included). The numbers obtained should be printed in a comma-separated sequence on a single line.
###Code
l = []
for i in range(2000, 3201):
if i % 7 == 0 and i % 5 != 0:
l.append(str(i))
print(','.join(l))
###Output
2002,2009,2016,2023,2037,2044,2051,2058,2072,2079,2086,2093,2107,2114,2121,2128,2142,2149,2156,2163,2177,2184,2191,2198,2212,2219,2226,2233,2247,2254,2261,2268,2282,2289,2296,2303,2317,2324,2331,2338,2352,2359,2366,2373,2387,2394,2401,2408,2422,2429,2436,2443,2457,2464,2471,2478,2492,2499,2506,2513,2527,2534,2541,2548,2562,2569,2576,2583,2597,2604,2611,2618,2632,2639,2646,2653,2667,2674,2681,2688,2702,2709,2716,2723,2737,2744,2751,2758,2772,2779,2786,2793,2807,2814,2821,2828,2842,2849,2856,2863,2877,2884,2891,2898,2912,2919,2926,2933,2947,2954,2961,2968,2982,2989,2996,3003,3017,3024,3031,3038,3052,3059,3066,3073,3087,3094,3101,3108,3122,3129,3136,3143,3157,3164,3171,3178,3192,3199
###Markdown
Question: Write a program which can compute the factorial of a given number. The results should be printed in a comma-separated sequence on a single line. Suppose the following input is supplied to the program: 8. Then, the output should be: 40320
###Code
def factorial_found():
    num = int(input('factorial to be found: '))
    result = 1  # 0! and 1! are both 1
    for i in range(2, num + 1):
        result *= i
    return result
factorial_found()
###Output
factorial to be found: 8
###Markdown
Question: With a given integral number n, write a program to generate a dictionary that contains (i, i*i) such that i is an integral number between 1 and n (both included), and then the program should print the dictionary. Suppose the following input is supplied to the program: 8. Then, the output should be: {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64}
###Code
def dictionary():
    num = int(input('Dictionary range: '))
    d = {i: i * i for i in range(1, num + 1)}
    return d
dictionary()
###Output
Dictionary range: 8
###Markdown
Question: Write a program which accepts a sequence of comma-separated numbers from the console and generates a list and a tuple which contain every number. Suppose the following input is supplied to the program: 34,67,55,33,12,98. Then, the output should be:
['34', '67', '55', '33', '12', '98']
('34', '67', '55', '33', '12', '98')
###Code
def list_and_tuple():
    string = input('Please provide the numbers separated by commas: ')
ls = string.split(',')
tp = tuple(ls)
print(ls)
print(tp)
return ls, tp
list_and_tuple()
###Output
Please provide the numbers separated by commas: 34,67,55,33,12,98
['34', '67', '55', '33', '12', '98']
('34', '67', '55', '33', '12', '98')
###Markdown
Object oriented programming

Question: Define a class which has at least two methods:
- getString: to get a string from console input
- printString: to print the string in upper case.

Also please include a simple test function to test the class methods (a sketch answer is given below).

More Math

Question: Write a program that calculates and prints the value according to the given formula: Q = square root of [(2 * C * D)/H]. Following are the fixed values of C and H: C is 50, H is 30. D is the variable whose values should be input to your program in a comma-separated sequence.

Example: Let us assume the following comma-separated input sequence is given to the program: 100,150,180. The output of the program should be: 18,22,24
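The object-oriented question is not answered elsewhere in this notebook, so here is a minimal sketch of a possible answer; the class name is illustrative and the test bypasses console input.

```python
class IOString(object):
    def __init__(self):
        self.s = ""

    def getString(self):
        self.s = input("Enter a string: ")

    def printString(self):
        print(self.s.upper())


def test_io_string():
    obj = IOString()
    obj.s = "hello world"   # skip console input for the test
    obj.printString()       # prints: HELLO WORLD


test_io_string()
```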
###Code
import math
def formula():
    D_string = input('Please provide numbers, separated by a comma: ').split(',')
D = [int(i) for i in D_string]
Q = [round(math.sqrt((2 * 50 * i)/ 30)) for i in D]
return Q
formula()
###Output
Please provide numbers, separated by a comma: 100,150,180
|
Code/Model 1 (regression trees)/AAT-DT_v3.ipynb | ###Markdown
Advanced Algorithmic Trading DT - V3

Updates from Last Version
- Implement PCA
  - PCA performance in V3 not as good as in V2 --> try PLS in V4 for dimensionality reduction
- Do for:
  - Random Forest
  - Bagging
  - Boosting
  - Linear Regression
- Remove Ridge Regression

Import Packages
###Code
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import math
import datetime
import gc
from sklearn.ensemble import (BaggingRegressor, RandomForestRegressor, AdaBoostRegressor)
from sklearn.model_selection import ParameterGrid
from sklearn.tree import DecisionTreeRegressor
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import BayesianRidge
from sklearn.metrics import mean_squared_error
from technical_indicators import * # import all functions
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import train_test_split
#import parfit as pf
from sklearn.metrics import r2_score
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
###Output
_____no_output_____
###Markdown
Set Parameters
###Code
# Set the random seed, number of estimators and the "step factor" used to plot the graph of MSE for each method
random_state = 42 # Seed
n_jobs = -1 # -1 --> all Processors # Parallelisation factor for bagging, random forests (controls the number of processor cores used)
n_estimators = 200 # total number of estimators to use in the MSE graph
step_factor = 10 # controls granularity of calculation by stepping through the number of estimators
axis_step = int(n_estimators / step_factor) # 200/10 = 20 separate calculations will be performed for each of the 3 ensemble methods
###Output
_____no_output_____
###Markdown
Read in Data via GitHub URL
###Code
url = "https://raw.githubusercontent.com/meenmo/Stat479_Project/master/Data/IBM.csv"
df_ORIGINAL = pd.read_csv(url)
###Output
_____no_output_____
###Markdown
*** Clean Data & Create Technical Indicator Variables- Create Deep copy of dataframe- Use Adjusted Close Data- Drop Close - Rename "Adj. Close" as "Close"- Create Lagged Features- Drop NaN- Create Technical Indicator Variables- Drop NaN- Re-set index as Date
###Code
df_features = df_ORIGINAL.copy(deep=True) # Create Deep
df_features.drop(['Close'], axis = 1, inplace = True) # drop close column
df_features.columns = ['Date', 'High', 'Low', 'Open', 'Volume', 'Close'] # Close is actually Adj. Close
df_features['Date'] = pd.to_datetime(df_features['Date'])
#df_features.head() # sanity check
"""
Creates Lagged Returns
- given OHLCV dataframe
- number of lagged days
"""
def create_lag_features(df, lag_days):
df_ret = df.copy()
# iterate through the lag days to generate lag values up to lag_days + 1
for i in range(1,lag_days + 2):
df_lag = df_ret[['Date', 'Close']].copy()
# generate dataframe to shift index by i day.
df_lag['Date'] = df_lag['Date'].shift(-i)
df_lag.columns = ['Date', 'value_lag' + str(i)]
# combine the valuelag
df_ret = pd.merge(df_ret, df_lag, how = 'left', left_on = ['Date'], right_on = ['Date'])
#frees memory
del df_lag
# calculate today's percentage lag
df_ret['Today'] = (df_ret['Close'] - df_ret['value_lag1'])/(df_ret['value_lag1']) * 100.0
# calculate percentage lag
for i in range(1, lag_days + 1):
df_ret['lag' + str(i)] = (df_ret['value_lag'+ str(i)] - df_ret['value_lag'+ str(i+1)])/(df_ret['value_lag'+str(i+1)]) * 100.0
# drop unneeded columns which are value_lags
for i in range(1, lag_days + 2):
df_ret.drop(['value_lag' + str(i)], axis = 1, inplace = True)
return df_ret
### Run Function
df_features = create_lag_features(df_features, 5) # 5 lag features
#df_features.head(7)
# drop earlier data with missing lag features
df_features.dropna(inplace=True)
# reset index
df_features.reset_index(drop = True, inplace = True)
#### GENERATE TECHNICAL INDICATORS FEATURES
df_features = standard_deviation(df_features, 14)
df_features = relative_strength_index(df_features, 14) # periods
df_features = average_directional_movement_index(df_features, 14, 13) # n, n_ADX
df_features = moving_average(df_features, 21) # periods
df_features = exponential_moving_average(df_features, 21) # periods
df_features = momentum(df_features, 14) #
df_features = average_true_range(df_features, 14)
df_features = bollinger_bands(df_features, 21)
df_features = ppsr(df_features)
df_features = stochastic_oscillator_k(df_features)
df_features = stochastic_oscillator_d(df_features, 14)
df_features = trix(df_features, 14)
df_features = macd(df_features, 26, 12)
df_features = mass_index(df_features)
df_features = vortex_indicator(df_features, 14)
df_features = kst_oscillator(df_features, 10, 10, 10, 15, 10, 15, 20, 30)
df_features = true_strength_index(df_features, 25, 13)
#df_features = accumulation_distribution(df_features, 14) # Causes Problems, apparently
df_features = chaikin_oscillator(df_features)
df_features = money_flow_index(df_features, 14)
df_features = on_balance_volume(df_features, 14)
df_features = force_index(df_features, 14)
df_features = ease_of_movement(df_features, 14)
df_features = commodity_channel_index(df_features, 14)
df_features = keltner_channel(df_features, 14)
df_features = ultimate_oscillator(df_features)
df_features = donchian_channel(df_features, 14)
# drop earlier data with missing lag features
df_features.dropna(inplace=True)
df_features = df_features.reset_index(drop = True)
###########################################################################################
# Store Variables now for plots later
daily_index = df_features.index
daily_returns = df_features["Today"]
daily_price = df_features["Close"]
# Re-set "Date" as the index
df_features = df_features.set_index('Date')
### Sanity Check
df_features.head(10)
###Output
_____no_output_____
###Markdown
PCA & Create X & y
- Drop all data used to create technical indicators (this is done in the book)
- Then standardize, necessary for PCA
- Run PCA
- Select appropriate number of components
- Create X & y

NOTE: some technical indicators use present data, but for simplicity, just ignore this
###Code
### Standardize Data
##########################################################################################
# Drop Columns
list_of_columns_to_exclude = ["High", "Low", "Open", "Volume","Close", "Today"]
X_temp_standardized = df_features.copy(deep=True)
X_temp_standardized.drop(list_of_columns_to_exclude, axis = 1, inplace = True) # drop columns
# Standardize
X_temp_standardized
dates = X_temp_standardized.index # get dates to set as index after data is standardized
names = X_temp_standardized.columns # Get column names first
X_temp_standardized = StandardScaler().fit_transform(X_temp_standardized)
# Convert to DataFrame
X_temp_standardized = pd.DataFrame(X_temp_standardized, columns=names, index=dates)
### PCA
##########################################################################################
# Fit PCA
pca_all = PCA().fit(X_temp_standardized)
# Count number of components to retain
count = 0
for var in pca_all.explained_variance_.round(4):
if var >= 1:
count += 1
# # Sanity Check
# np.set_printoptions(suppress=True) # Stop scientifit notation
# print(pca_all.explained_variance_.round(4)[0:10])
# print(count)
# # Scree plot
# plt.figure(figsize=(15, 8))
# plt.plot(np.cumsum(pca_all.explained_variance_ratio_))
# plt.plot((pca_all.explained_variance_ratio_))
# plt.xlabel('Number of Principal Components')
# plt.ylabel('Cumulative Explained Variance');
# plt.title('Scree Plot -- X_one_standardized')
### Transform Data & Select only PCs/Columns up to count
##########################################################################################
def create_column_names(numColumns):
colNames = []
for i in range(numColumns):
tempName = "PC" + str(i+1)
colNames.append(tempName)
return colNames
X = pca_all.transform(X_temp_standardized)
X = pd.DataFrame(X, columns=create_column_names(X.shape[1]), index=dates)
X = X.iloc[:,:count]
### Get y
##########################################################################################
y_temp = pd.DataFrame(df_features["Today"], index=X.index) # can only standardize a dataframe
y = StandardScaler().fit_transform(y_temp) # Standardize, cause we did it for our original variables
y = pd.DataFrame(y, index=X.index, columns=["Today"]) # convert back to dataframe
y = y["Today"] # now re-get y as a Pandas Series
### Sanity Check
print("Shape of X: ", X.shape)
print("Shape of y: ", y.shape)
# Check Types
print(type(X)) # Needs to be <class 'pandas.core.frame.DataFrame'>
print(type(y)) # Needs ro be <class 'pandas.core.series.Series'>
###Output
Shape of X: (4190, 9)
Shape of y: (4190,)
<class 'pandas.core.frame.DataFrame'>
<class 'pandas.core.series.Series'>
###Markdown
Split: Train & Validate / Test
- Train & Validate: < '2018-01-01'
- Test: >= '2018-01-01'
###Code
X_train_all = X.loc[(X.index < '2018-01-01')]
y_train_all = y[X_train_all.index]
# # creates all test data which is all after January 2018
X_test = X.loc[(X.index >= '2018-01-01'),:]
y_test = y[X_test.index]
### Sanity Check
print("Shape of X_train_all: ", X_train_all.shape)
print("Shape of y_train_all: ", y_train_all.shape)
print("Shape of X_test: ", X_test.shape)
print("Shape of y_test: ", y_test.shape)
###Output
Shape of X_train_all: (3979, 9)
Shape of y_train_all: (3979,)
Shape of X_test: (211, 9)
Shape of y_test: (211,)
###Markdown
Time Series Train Test Split ---- Random Forest
###Code
"""
Execute Random Forest for different numbers of Time Series Splits
"""
def Call_Random_Forest(numSplits):
### Prepare Random Forest
##############################################################################
# Initialize Random Forest Instance
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1, random_state=123)
rf_mse = [] # MSE
rf_r2 = [] # R2
### Time Series Split
##############################################################################
    splits = TimeSeriesSplit(n_splits=numSplits) # numSplits folds
splitCount = 0 # dummy count var to track current split num in print statements
for train_index, test_index in splits.split(X_train_all):
splitCount += 1
# Train Split
X_train = X_train_all.iloc[0:len(train_index) - 1,:].copy(deep=True)
y_train = y[X_train.index]
# Validate Split
X_val = X_train_all.iloc[len(train_index) - 1:,:].copy(deep=True)
y_val = y[X_val.index]
# # Print Statements
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# print("Split: ", splitCount)
# print('Observations: ', (X_train.shape[0] + X_test.shape[0]))
# #print('Cutoff date, or first date in validation data: ', X_val.iloc[0,0])
# print('Training Observations: ', (X_train.shape[0]))
# print('Testing Observations: ', (X_test.shape[0]))
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
### Run Random Forest
rf.fit(X_train, y_train)
prediction = rf.predict(X_val)
mse = mean_squared_error(y_val, prediction)
r2 = r2_score(y_val, prediction)
rf_mse.append(mse)
rf_r2.append(r2)
# print("rf_mse: ", rf_mse)
# print("rf_r2: ", rf_r2)
    ### Plot Results
##############################################################################
    # Plot the chart of MSE and R-squared versus the train/test split number
plt.figure(figsize=(12, 7))
plt.title('Random Forest - MSE & R-Squared')
### MSE
plt.plot(list(range(1,splitCount+1)), rf_mse, 'b-', color="blue", label='MSE')
plt.plot(list(range(1,splitCount+1)), rf_r2, 'b-', color="green", label='R-Squared')
plt.plot(list(range(1,splitCount+1)), np.array([0] * splitCount), 'b-', color="red", label='Zero')
plt.legend(loc='upper right')
plt.xlabel('Train/Test Split Number')
plt.ylabel('Mean Squared Error & R-Squared')
plt.show()
###Output
_____no_output_____
###Markdown
Bagging
###Code
"""
Execute Bagging for different numbers of Time Series Splits
"""
def Call_Bagging(numSplits):
### Prepare Bagging
##############################################################################
# Initialize Bagging Instance
bagging = BaggingRegressor(n_estimators=150, n_jobs=-1, random_state=123)
bagging_mse = [] # MSE
bagging_r2 = [] # R2
### Time Series Split
##############################################################################
    splits = TimeSeriesSplit(n_splits=numSplits) # numSplits folds
splitCount = 0 # dummy count var to track current split num in print statements
for train_index, test_index in splits.split(X_train_all):
splitCount += 1
# Train Split
X_train = X_train_all.iloc[0:len(train_index) - 1,:].copy(deep=True)
y_train = y[X_train.index]
# Validate Split
X_val = X_train_all.iloc[len(train_index) - 1:,:].copy(deep=True)
y_val = y[X_val.index]
# # Print Statements
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# print("Split: ", splitCount)
# print('Observations: ', (X_train.shape[0] + X_test.shape[0]))
# #print('Cutoff date, or first date in validation data: ', X_val.iloc[0,0])
# print('Training Observations: ', (X_train.shape[0]))
# print('Testing Observations: ', (X_test.shape[0]))
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    ### Run Bagging
bagging.fit(X_train, y_train)
prediction = bagging.predict(X_val)
mse = mean_squared_error(y_val, prediction)
r2 = r2_score(y_val, prediction)
bagging_mse.append(mse)
bagging_r2.append(r2)
    ### Plot Results
##############################################################################
    # Plot the chart of MSE and R-squared versus the train/test split number
plt.figure(figsize=(12, 7))
plt.title('Bagging - MSE & R-Squared')
### MSE
plt.plot(list(range(1,splitCount+1)), bagging_mse, 'b-', color="blue", label='MSE')
plt.plot(list(range(1,splitCount+1)), bagging_r2, 'b-', color="green", label='R-Squared')
plt.plot(list(range(1,splitCount+1)), np.array([0] * splitCount), 'b-', color="red", label='Zero')
plt.legend(loc='upper right')
plt.xlabel('Train/Test Split Number')
plt.ylabel('Mean Squared Error & R-Squared')
plt.show()
###Output
_____no_output_____
###Markdown
Boosting
###Code
"""
Execute Boosting for different numbers of Time Series Splits
"""
def Call_Boosting(numSplits):
### Prepare Boosting
##############################################################################
# Initialize Boosting Instance
boosting = AdaBoostRegressor(DecisionTreeRegressor(),
n_estimators=150, random_state=123,learning_rate=0.01)
boosting_mse = [] # MSE
boosting_r2 = [] # R2
### Time Series Split
##############################################################################
    splits = TimeSeriesSplit(n_splits=numSplits) # numSplits folds
splitCount = 0 # dummy count var to track current split num in print statements
for train_index, test_index in splits.split(X_train_all):
splitCount += 1
# Train Split
X_train = X_train_all.iloc[0:len(train_index) - 1,:].copy(deep=True)
y_train = y[X_train.index]
# Validate Split
X_val = X_train_all.iloc[len(train_index) - 1:,:].copy(deep=True)
y_val = y[X_val.index]
# # Print Statements
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# print("Split: ", splitCount)
# print('Observations: ', (X_train.shape[0] + X_test.shape[0]))
# #print('Cutoff date, or first date in validation data: ', X_val.iloc[0,0])
# print('Training Observations: ', (X_train.shape[0]))
# print('Testing Observations: ', (X_test.shape[0]))
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    ### Run Boosting
boosting.fit(X_train, y_train)
prediction = boosting.predict(X_val)
mse = mean_squared_error(y_val, prediction)
r2 = r2_score(y_val, prediction)
boosting_mse.append(mse)
boosting_r2.append(r2)
    ### Plot Results
##############################################################################
    # Plot the chart of MSE and R-squared versus the train/test split number
plt.figure(figsize=(12, 7))
plt.title('Boosting - MSE & R-Squared')
### MSE
plt.plot(list(range(1,splitCount+1)), boosting_mse, 'b-', color="blue", label='MSE')
plt.plot(list(range(1,splitCount+1)), boosting_r2, 'b-', color="green", label='R-Squared')
plt.plot(list(range(1,splitCount+1)), np.array([0] * splitCount), 'b-', color="red", label='Zero')
plt.legend(loc='upper right')
plt.xlabel('Train/Test Split Number')
plt.ylabel('Mean Squared Error & R-Squared')
plt.show()
###Output
_____no_output_____
###Markdown
Linear Regression
###Code
"""
Execute Linear Regression for different number of Time Series Splits
"""
def Call_Linear(numSplits):
    ### Prepare Linear Regression
##############################################################################
    # Initialize Linear Regression Instance
linear = LinearRegression(n_jobs=-1, normalize=True, fit_intercept=False) # if we don't fit the intercept we get a better prediction
linear_mse = [] # MSE
linear_r2 = [] # R2
### Time Series Split
##############################################################################
    splits = TimeSeriesSplit(n_splits=numSplits) # numSplits folds
splitCount = 0 # dummy count var to track current split num in print statements
for train_index, test_index in splits.split(X_train_all):
splitCount += 1
# Train Split
X_train = X_train_all.iloc[0:len(train_index) - 1,:].copy(deep=True)
y_train = y[X_train.index]
# Validate Split
X_val = X_train_all.iloc[len(train_index) - 1:,:].copy(deep=True)
y_val = y[X_val.index]
# # Print Statements
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# print("Split: ", splitCount)
# print('Observations: ', (X_train.shape[0] + X_test.shape[0]))
# #print('Cutoff date, or first date in validation data: ', X_val.iloc[0,0])
# print('Training Observations: ', (X_train.shape[0]))
# print('Testing Observations: ', (X_test.shape[0]))
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    ### Run Linear Regression
linear.fit(X_train, y_train)
prediction = linear.predict(X_val)
mse = mean_squared_error(y_val, prediction)
r2 = r2_score(y_val, prediction)
r2 = np.corrcoef(y_val, prediction)[0, 1]
r2 = r2*r2 # square of correlation coefficient --> R-squared
linear_mse.append(mse)
linear_r2.append(r2)
    ### Plot Results
##############################################################################
    # Plot the chart of MSE and R-squared versus the train/test split number
plt.figure(figsize=(12, 7))
plt.title('Linear Regression - MSE & R-Squared')
### MSE
plt.plot(list(range(1,splitCount+1)), linear_mse, 'b-', color="blue", label='MSE')
plt.plot(list(range(1,splitCount+1)), linear_r2, 'b-', color="green", label='R-Squared')
plt.plot(list(range(1,splitCount+1)), np.array([0] * splitCount), 'b-', color="red", label='Zero')
plt.legend(loc='upper right')
plt.xlabel('Train/Test Split Number')
plt.ylabel('Mean Squared Error & R-Squared')
plt.show()
###Output
_____no_output_____
###Markdown
Misc. Graphs ---- Price, Returns & Cumulative Returns
###Code
# figure dimenstions
length = 15
height = 5
### Prices
plt.figure(figsize=(length, height))
plt.title('IBM Adj Close Price Graph')
plt.plot(daily_index, daily_price, 'b-', color="blue", label='Prices')
plt.legend(loc='upper right')
plt.xlabel('Days')
plt.ylabel('Prices')
plt.show()
### Returns
plt.figure(figsize=(length, height))
plt.title('IBM Daily Returns')
plt.plot(daily_index, daily_returns, 'b-', color="blue", label='Returns')
plt.legend(loc='upper right')
plt.xlabel('Days')
plt.ylabel('Returns')
plt.show()
### Cumulative Returns
plt.figure(figsize=(length, height))
plt.title('IBM Cumulative Returns')
cumulative_returns = daily_returns.cumsum()
plt.plot(daily_index, cumulative_returns, 'b-', color="green", label='Cumulative Returns')
plt.legend(loc='upper right')
plt.xlabel('Days')
plt.ylabel('Cumulative Return')
plt.show()
###Output
_____no_output_____
###Markdown
First - A Note on R-Squared

What Does A Negative R-Squared Value Mean?
- What does R-squared tell us?
  - It tells us whether a horizontal line through the vertical mean of the data is a better predictor
- For a Linear Regression
  - R-squared is just the correlation coefficient squared
  - R-squared can't be negative, because at 0 it becomes the horizontal line
- For all other models
  - For practical purposes, the lowest R2 you can get is zero, but only because the assumption is that if your regression line is not better than using the mean, then you will just use the mean value.
  - However, if your regression line is worse than using the mean value, the R2 value that you calculate will be negative.
  - Note that the reason R2 can't be negative in the linear regression case is just due to chance and how linear regression is constructed.

Note
- In our R2 computation for the linear model, we're still getting a negative R2; not sure why.
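A small standalone illustration of the point above (not part of the trading pipeline): since R2 = 1 - SS_res/SS_tot, a model whose predictions are worse than simply predicting the mean of the targets gets a negative score from `r2_score`.

```python
import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([1.0, 2.0, 3.0, 4.0])
mean_pred = np.full_like(y_true, y_true.mean())   # horizontal line at the mean
bad_pred = np.array([4.0, 3.0, 2.0, 1.0])         # systematically worse than the mean

print(r2_score(y_true, mean_pred))   # 0.0
print(r2_score(y_true, bad_pred))    # -3.0
```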
###Code
Call_Random_Forest(20)
Call_Bagging(20)
# Call_Boosting(20) # takes forever to run
###Output
_____no_output_____
###Markdown
Note that PCA does a lot worse! Why?

__Linear Models__ Penalized linear models use shrinkage and variable selection to manage high dimensionality by forcing the coefficients on most regressors near or exactly to zero. This can produce suboptimal forecasts when predictors are highly correlated. A simple example of this problem is a case in which all of the predictors are equal to the forecast target plus an iid noise term. In this situation, choosing a subset of predictors via LASSO penalty is inferior to taking a simple average of the predictors and using this as the sole predictor in a univariate regression.

__PCA & PCR__ A drawback of PCR is that it fails to incorporate the ultimate statistical objective—forecasting returns in the dimension reduction step. PCA condenses data into components based on the covariation among the predictors. This happens prior to the forecasting step and without consideration of how predictors associate with future returns. Hence, this is probably why our forecasts are so bad.

__Partial Least Squares (PLS)__ In contrast, partial least squares performs dimension reduction by directly exploiting covariation of predictors with the forecast target. PLS regression proceeds as follows:
- For each predictor j, estimate its univariate return prediction coefficient via OLS.
- This coefficient, denoted φj, reflects the "partial" sensitivity of returns to each predictor j.
- Next, average all predictors into a single aggregate component with weights proportional to φj, placing the highest weight on the strongest univariate predictors, and the least weight on the weakest.
- In this way, PLS performs its dimension reduction with the ultimate forecasting objective in mind.

To form more than one predictive component, the target and the predictors are orthogonalised with respect to the components already constructed, and the procedure is repeated on the orthogonalised data.
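A possible sketch of the PLS alternative, which the notes above plan for V4 (it is not part of the V3 code); it reuses the standardized indicator matrix `X_temp_standardized` and the target `y` defined earlier, and the choice of two components here is arbitrary:

```python
from sklearn.cross_decomposition import PLSRegression

pls = PLSRegression(n_components=2)
pls.fit(X_temp_standardized, y)
X_pls = pls.transform(X_temp_standardized)   # supervised scores, shape (n_samples, 2)
print(X_pls.shape)
```

The resulting scores could then replace the PCA columns of `X` before the train/validate/test split.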
###Code
Call_Linear(100)
###Output
_____no_output_____ |
cicr.ipynb | ###Markdown
Example: varied dynamics in cell signalling

Model of the intracellular calcium ion response to external stimuli (hormones, fertilisation, etc.)

Components:
- Membrane receptor
- IP3 as second messenger
- Intracellular calcium store sensitive to IP3
- Intracellular calcium store sensitive to cytosolic calcium
- Active transport, leaks, uptake of calcium from outside, etc.

In this model, the parameter $\beta$ represents the intensity of the stimulus, varying in the interval $[0, 1]$: $0$ means no stimulus and $1$ means the maximum stimulus (100%). We can simulate the behaviour of this model for different values of $\beta$.

(Note: in the model declaration, the parameter $\beta$ is represented by `B`.)
###Code
m = read_model("""
title Calcium Spikes
v0 = -> Ca, 1
v1 = -> Ca, k1*B*step(t, 1.0)
k1 = 7.3
B = 0.1
export = Ca -> , 10 ..
leak = CaComp -> Ca, 1 ..
!! Ca
v2 = Ca -> CaComp, 65 * Ca**2 / (1+Ca**2)
v3 = CaComp -> Ca, 500*CaComp**2/(CaComp**2+4) * Ca**4/(Ca**4 + 0.6561)
init = state(Ca = 0.1, CaComp = 0.63655)""")
@interact(B=(0.0, 1.0, 0.01))
def stimulate(B=0.1):
m.parameters.B = B
m.solve(tf=8.0, npoints=2000, title='Cytosolic $Ca^{2+}$, $\\beta$ = %g' % B).plot(fig_size = (9,6), yrange=(0,1.5), legend=False, style=styles)
plt.show()
###Output
_____no_output_____
###Markdown
For those reading a static version of this notebook, varying the stimulus ($\beta$) has the following effect on the behaviour of cytosolic calcium:
###Code
m = read_model("""
title Calcium Spikes
v0 = -> Ca, 1
v1 = -> Ca, k1*B*step(t, 1.0)
k1 = 7.3
B = 0.1
export = Ca -> , 10 ..
leak = CaComp -> Ca, 1 ..
!! Ca
v2 = Ca -> CaComp, 65 * Ca**2 / (1+Ca**2)
v3 = CaComp -> Ca, 500*CaComp**2/(CaComp**2+4) * Ca**4/(Ca**4 + 0.6561)
init = state(Ca = 0.1, CaComp = 0.63655)""")
stimulus = (0.0, 0.1, 0.2, 0.25, 0.28, 0.29, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.75, 0.8, 0.9, 1.0)
titles = ['$\\beta$ = %g' % B for B in stimulus]
s = m.scan({'B':stimulus}, tf=8.0, npoints=1000)
s.plot(yrange=(0, 1.5), titles=titles,
legend=False, fig_size=(18.0, 13.0),
suptitlegend="Dynamics of cytosolic $Ca^{2+}$ as a function of stimulus")
###Output
_____no_output_____ |
improvement-prediction/classification/draw_different_versions_of_dataset_2.ipynb | ###Markdown
This notebook explores different ways of drawing samples that correspond to "dataset 2". Initially, "dataset 2" is a dataset with which we obtained a good prediction model for augmentation classes, but we need to verify whether we just "got lucky" or if we can re-draw this dataset a bunch of times and get to similar results. Here, we draw samples from a larger dataset, namely "dataset 3", where each query Qi is randomly paired with multiple candidate datasets from different datasets.
###Code
import pandas as pd
dataset_3 = pd.read_csv('training-simplified-data-generation-many-candidates-per-query_with_median_and_mean_based_classes.csv')
original_dataset_2 = pd.read_csv('training-simplified-data-generation_with_median_and_mean_based_classes.csv')
## get the numbers of positive and negative gains for both datasets
print('negative in dataset_3', dataset_3.loc[dataset_3['gain_in_r2_score'] <= 0].shape[0], 'positive in dataset_3', dataset_3.loc[dataset_3['gain_in_r2_score'] > 0].shape[0])
print('negative in original_dataset_2', original_dataset_2.loc[original_dataset_2['gain_in_r2_score'] <= 0].shape[0], 'positive in original_dataset_2', original_dataset_2.loc[original_dataset_2['gain_in_r2_score'] > 0].shape[0])
###Output
('negative in dataset_3', 1097156, 'positive in dataset_3', 1019700)
('negative in original_dataset_2', 4177, 'positive in original_dataset_2', 5707)
###Markdown
Both datasets look relatively balanced. Let's draw other "versions" of dataset 2 by taking dataset 3 and, for each sample with gain_marker = 'positive', getting one 'negative'. They must have the same query id.
###Code
import numpy as np
import random
def create_version_of_dataset_2(larger_dataset, n_queries, one_candidate_per_query=True):
"""This function draws candidates from larger_dataset for n_queries of its queries.
If one_candidate_per_query == True, it only draws one candidate, with either
gain_marker == 'positive' or gain_marker == 'negative', per query. Otherwise, it
draws two candidates (one with gain_marker == 'positive' and one with gain_marker == 'negative')
"""
queries = np.random.choice(list(set(larger_dataset['query'])), n_queries)
subdatasets = []
for q in queries:
subtable = larger_dataset.loc[larger_dataset['query'] == q]
if one_candidate_per_query:
sample = subtable.loc[random.sample(list(subtable.index), 1)]
else:
positives = subtable.loc[subtable['gain_marker'] == 'positive']
sample_positive = positives.loc[random.sample(list(positives.index), 1)]
negatives = subtable.loc[subtable['gain_marker'] == 'negative']
sample_negative = negatives.loc[random.sample(list(negatives.index), 1)]
sample = pd.concat([sample_positive, sample_negative])
subdatasets.append(sample)
return pd.concat(subdatasets)
## Draw versions of dataset 2 with two candidates per query (one with gain_marker == 'positive'
## and one with gain_marker == 'negative'), and with one candidate per query (either gain_marker == 'positive'
## or gain_marker == 'negative')
NUMBER_OF_VERSIONS_WITH_ONE_CANDIDATE_PER_QUERY = 10
NUMBER_OF_VERSIONS_WITH_TWO_CANDIDATES_PER_QUERY = 10
NUMBER_OF_QUERIES = len(set(original_dataset_2['query']))
ocpq = 0 #one candidate per query
while ocpq < NUMBER_OF_VERSIONS_WITH_ONE_CANDIDATE_PER_QUERY:
dataset = create_version_of_dataset_2(dataset_3, NUMBER_OF_QUERIES)
ocpq += 1
break
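# NOTE: the `break` above stops the loop after a single draw, so only one version
# of dataset 2 is generated here; removing it would produce all
# NUMBER_OF_VERSIONS_WITH_ONE_CANDIDATE_PER_QUERY versions.
# The two-candidates-per-query variant described in the markdown above (one
# 'positive' and one 'negative' per query) could be drawn with, e.g.:
# create_version_of_dataset_2(dataset_3, NUMBER_OF_QUERIES, one_candidate_per_query=False)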
dataset.head
###Output
_____no_output_____ |
.ipynb_checkpoints/12. Decision Tree ..... [abhishek201202]-checkpoint.ipynb | ###Markdown
1) Code Using Sklearn Decision Tree
###Code
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz ## to convert Decision tree into pdf
from sklearn.model_selection import train_test_split
iris = datasets.load_iris()
x_train , x_test , y_train , y_test = train_test_split(iris.data , iris.target , random_state = 1)
algo = DecisionTreeClassifier()
algo.fit(x_train , y_train)
import os
os.environ['PATH'] = os.environ['PATH']+';'+os.environ['CONDA_PREFIX']+r"\Library\bin\graphviz"
####https://datascience.stackexchange.com/questions/37428/graphviz-not-working-when-imported-inside-pydotplus-graphvizs-executables-not
## GraphViz's executables are not found: resolved with the help of the above link
## first install with the command: conda install pydotplus
## and then, if the error still appears, use the 2 lines written above
import pydotplus
dot_data = export_graphviz(algo , out_file = None , feature_names = iris.feature_names , class_names = iris.target_names)
## out_file == None means that we don't want to store the output in a file;
## if we want to store it, give the file name
## returns the dot data, which we then export into a .pdf
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf("iris.pdf")
###Output
_____no_output_____
###Markdown
(rendered decision tree for the iris data, exported above to iris.pdf)
###Code
y_train_pred = algo.predict(x_train)
y_test_pred = algo.predict(x_test)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train , y_train_pred)
confusion_matrix(y_test , y_test_pred)
###Output
_____no_output_____ |
course1_intro/1. Hello World.ipynb | ###Markdown
Install tensorflow cpu version in conda:
```
conda create -n tf tensorflow
conda activate tf
# The notebook doesn't load correctly, so install notebook
conda install notebook
```
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss = 'mean_squared_error')
xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype = float)
ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)
model.fit(xs, ys, epochs=500)
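# the training pairs follow y = 2x - 1, so the exact value at x = 10 is 19;
# after 500 epochs the single-unit model should predict something close to it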
print(model.predict([10.0]))
###Output
[[18.991423]]
###Markdown
Exercise
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([1,2,3,4,5,6],dtype=float)
ys = np.array([1, 1.5, 2, 2.5, 3, 3.5])
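# these pairs follow y = 0.5x + 0.5, so the prediction at x = 7 should approach 4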
model.fit(xs, ys, epochs=500)
print(model.predict([7.0]))
###Output
Epoch 1/500
1/1 [==============================] - 0s 0s/step - loss: 16.2783
Epoch 2/500
1/1 [==============================] - 0s 996us/step - loss: 7.5883
Epoch 3/500
1/1 [==============================] - 0s 0s/step - loss: 3.5658
Epoch 4/500
1/1 [==============================] - 0s 998us/step - loss: 1.7038
Epoch 5/500
1/1 [==============================] - 0s 0s/step - loss: 0.8415
Epoch 6/500
1/1 [==============================] - 0s 997us/step - loss: 0.4421
Epoch 7/500
1/1 [==============================] - 0s 0s/step - loss: 0.2568
Epoch 8/500
1/1 [==============================] - 0s 997us/step - loss: 0.1707
Epoch 9/500
1/1 [==============================] - 0s 0s/step - loss: 0.1305
Epoch 10/500
1/1 [==============================] - 0s 0s/step - loss: 0.1115
Epoch 11/500
1/1 [==============================] - 0s 997us/step - loss: 0.1023
Epoch 12/500
1/1 [==============================] - 0s 997us/step - loss: 0.0977
Epoch 13/500
1/1 [==============================] - 0s 997us/step - loss: 0.0952
Epoch 14/500
1/1 [==============================] - 0s 0s/step - loss: 0.0937
Epoch 15/500
1/1 [==============================] - 0s 998us/step - loss: 0.0926
Epoch 16/500
1/1 [==============================] - 0s 997us/step - loss: 0.0917
Epoch 17/500
1/1 [==============================] - 0s 0s/step - loss: 0.0910
Epoch 18/500
1/1 [==============================] - 0s 997us/step - loss: 0.0903
Epoch 19/500
1/1 [==============================] - 0s 0s/step - loss: 0.0896
Epoch 20/500
1/1 [==============================] - 0s 997us/step - loss: 0.0890
Epoch 21/500
1/1 [==============================] - 0s 0s/step - loss: 0.0883
Epoch 22/500
1/1 [==============================] - 0s 996us/step - loss: 0.0877
Epoch 23/500
1/1 [==============================] - 0s 998us/step - loss: 0.0870
Epoch 24/500
1/1 [==============================] - 0s 0s/step - loss: 0.0864
Epoch 25/500
1/1 [==============================] - 0s 0s/step - loss: 0.0858
Epoch 26/500
1/1 [==============================] - 0s 998us/step - loss: 0.0851
Epoch 27/500
1/1 [==============================] - 0s 0s/step - loss: 0.0845
Epoch 28/500
1/1 [==============================] - 0s 998us/step - loss: 0.0839
Epoch 29/500
1/1 [==============================] - 0s 0s/step - loss: 0.0833
Epoch 30/500
1/1 [==============================] - 0s 997us/step - loss: 0.0827
Epoch 31/500
1/1 [==============================] - 0s 0s/step - loss: 0.0821
Epoch 32/500
1/1 [==============================] - 0s 0s/step - loss: 0.0815
Epoch 33/500
1/1 [==============================] - 0s 0s/step - loss: 0.0809
Epoch 34/500
1/1 [==============================] - 0s 0s/step - loss: 0.0803
Epoch 35/500
1/1 [==============================] - 0s 997us/step - loss: 0.0797
Epoch 36/500
1/1 [==============================] - 0s 998us/step - loss: 0.0791
Epoch 37/500
1/1 [==============================] - 0s 0s/step - loss: 0.0786
Epoch 38/500
1/1 [==============================] - 0s 0s/step - loss: 0.0780
Epoch 39/500
1/1 [==============================] - 0s 0s/step - loss: 0.0774
Epoch 40/500
1/1 [==============================] - 0s 0s/step - loss: 0.0769
Epoch 41/500
1/1 [==============================] - 0s 0s/step - loss: 0.0763
Epoch 42/500
1/1 [==============================] - 0s 997us/step - loss: 0.0757
Epoch 43/500
1/1 [==============================] - 0s 998us/step - loss: 0.0752
Epoch 44/500
1/1 [==============================] - 0s 997us/step - loss: 0.0746
Epoch 45/500
1/1 [==============================] - 0s 998us/step - loss: 0.0741
Epoch 46/500
1/1 [==============================] - 0s 0s/step - loss: 0.0736
Epoch 47/500
1/1 [==============================] - 0s 998us/step - loss: 0.0730
Epoch 48/500
1/1 [==============================] - 0s 0s/step - loss: 0.0725
Epoch 49/500
1/1 [==============================] - 0s 998us/step - loss: 0.0720
Epoch 50/500
1/1 [==============================] - 0s 0s/step - loss: 0.0714
Epoch 51/500
1/1 [==============================] - 0s 998us/step - loss: 0.0709
Epoch 52/500
1/1 [==============================] - 0s 0s/step - loss: 0.0704
Epoch 53/500
1/1 [==============================] - 0s 997us/step - loss: 0.0699
Epoch 54/500
1/1 [==============================] - 0s 995us/step - loss: 0.0694
Epoch 55/500
1/1 [==============================] - 0s 996us/step - loss: 0.0689
Epoch 56/500
1/1 [==============================] - 0s 998us/step - loss: 0.0684
Epoch 57/500
1/1 [==============================] - 0s 0s/step - loss: 0.0679
Epoch 58/500
1/1 [==============================] - 0s 0s/step - loss: 0.0674
Epoch 59/500
1/1 [==============================] - 0s 997us/step - loss: 0.0669
Epoch 60/500
1/1 [==============================] - 0s 0s/step - loss: 0.0664
Epoch 61/500
1/1 [==============================] - 0s 998us/step - loss: 0.0659
Epoch 62/500
1/1 [==============================] - 0s 0s/step - loss: 0.0654
Epoch 63/500
1/1 [==============================] - 0s 0s/step - loss: 0.0650
Epoch 64/500
1/1 [==============================] - 0s 996us/step - loss: 0.0645
Epoch 65/500
1/1 [==============================] - 0s 0s/step - loss: 0.0640
Epoch 66/500
1/1 [==============================] - 0s 0s/step - loss: 0.0635
Epoch 67/500
1/1 [==============================] - 0s 0s/step - loss: 0.0631
Epoch 68/500
1/1 [==============================] - 0s 998us/step - loss: 0.0626
Epoch 69/500
1/1 [==============================] - 0s 0s/step - loss: 0.0622
Epoch 70/500
1/1 [==============================] - 0s 997us/step - loss: 0.0617
Epoch 71/500
1/1 [==============================] - 0s 0s/step - loss: 0.0613
Epoch 72/500
1/1 [==============================] - 0s 0s/step - loss: 0.0608
Epoch 73/500
1/1 [==============================] - 0s 0s/step - loss: 0.0604
Epoch 74/500
1/1 [==============================] - 0s 997us/step - loss: 0.0599
Epoch 75/500
1/1 [==============================] - 0s 0s/step - loss: 0.0595
Epoch 76/500
1/1 [==============================] - 0s 0s/step - loss: 0.0591
Epoch 77/500
1/1 [==============================] - 0s 998us/step - loss: 0.0586
Epoch 78/500
1/1 [==============================] - 0s 0s/step - loss: 0.0582
Epoch 79/500
1/1 [==============================] - 0s 998us/step - loss: 0.0578
Epoch 80/500
1/1 [==============================] - 0s 0s/step - loss: 0.0574
Epoch 81/500
1/1 [==============================] - 0s 997us/step - loss: 0.0569
Epoch 82/500
1/1 [==============================] - 0s 0s/step - loss: 0.0565
Epoch 83/500
1/1 [==============================] - 0s 997us/step - loss: 0.0561
Epoch 84/500
1/1 [==============================] - 0s 997us/step - loss: 0.0557
Epoch 85/500
1/1 [==============================] - 0s 0s/step - loss: 0.0553
Epoch 86/500
1/1 [==============================] - 0s 0s/step - loss: 0.0549
Epoch 87/500
1/1 [==============================] - 0s 0s/step - loss: 0.0545
Epoch 88/500
1/1 [==============================] - 0s 0s/step - loss: 0.0541
Epoch 89/500
1/1 [==============================] - 0s 0s/step - loss: 0.0537
Epoch 90/500
1/1 [==============================] - 0s 997us/step - loss: 0.0533
Epoch 91/500
1/1 [==============================] - 0s 0s/step - loss: 0.0529
Epoch 92/500
1/1 [==============================] - 0s 0s/step - loss: 0.0525
Epoch 93/500
1/1 [==============================] - 0s 997us/step - loss: 0.0522
Epoch 94/500
1/1 [==============================] - 0s 0s/step - loss: 0.0518
Epoch 95/500
1/1 [==============================] - 0s 0s/step - loss: 0.0514
Epoch 96/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0510
Epoch 97/500
1/1 [==============================] - 0s 0s/step - loss: 0.0507
Epoch 98/500
1/1 [==============================] - 0s 996us/step - loss: 0.0503
Epoch 99/500
1/1 [==============================] - 0s 997us/step - loss: 0.0499
Epoch 100/500
1/1 [==============================] - 0s 0s/step - loss: 0.0496
Epoch 101/500
1/1 [==============================] - 0s 997us/step - loss: 0.0492
Epoch 102/500
1/1 [==============================] - 0s 0s/step - loss: 0.0488
Epoch 103/500
1/1 [==============================] - 0s 997us/step - loss: 0.0485
Epoch 104/500
1/1 [==============================] - 0s 0s/step - loss: 0.0481
Epoch 105/500
1/1 [==============================] - 0s 961us/step - loss: 0.0478
Epoch 106/500
1/1 [==============================] - 0s 0s/step - loss: 0.0474
Epoch 107/500
1/1 [==============================] - 0s 0s/step - loss: 0.0471
Epoch 108/500
1/1 [==============================] - 0s 0s/step - loss: 0.0467
Epoch 109/500
1/1 [==============================] - 0s 999us/step - loss: 0.0464
Epoch 110/500
1/1 [==============================] - 0s 0s/step - loss: 0.0461
Epoch 111/500
1/1 [==============================] - 0s 997us/step - loss: 0.0457
Epoch 112/500
1/1 [==============================] - 0s 0s/step - loss: 0.0454
Epoch 113/500
1/1 [==============================] - 0s 0s/step - loss: 0.0451
Epoch 114/500
1/1 [==============================] - 0s 997us/step - loss: 0.0447
Epoch 115/500
1/1 [==============================] - 0s 0s/step - loss: 0.0444
Epoch 116/500
1/1 [==============================] - 0s 998us/step - loss: 0.0441
Epoch 117/500
1/1 [==============================] - 0s 0s/step - loss: 0.0438
Epoch 118/500
1/1 [==============================] - 0s 0s/step - loss: 0.0434
Epoch 119/500
1/1 [==============================] - 0s 0s/step - loss: 0.0431
Epoch 120/500
1/1 [==============================] - 0s 998us/step - loss: 0.0428
Epoch 121/500
1/1 [==============================] - 0s 0s/step - loss: 0.0425
Epoch 122/500
1/1 [==============================] - 0s 0s/step - loss: 0.0422
Epoch 123/500
1/1 [==============================] - 0s 998us/step - loss: 0.0419
Epoch 124/500
1/1 [==============================] - 0s 0s/step - loss: 0.0416
Epoch 125/500
1/1 [==============================] - 0s 997us/step - loss: 0.0413
Epoch 126/500
1/1 [==============================] - 0s 0s/step - loss: 0.0410
Epoch 127/500
1/1 [==============================] - 0s 0s/step - loss: 0.0407
Epoch 128/500
1/1 [==============================] - 0s 0s/step - loss: 0.0404
Epoch 129/500
1/1 [==============================] - 0s 0s/step - loss: 0.0401
Epoch 130/500
1/1 [==============================] - 0s 0s/step - loss: 0.0398
Epoch 131/500
1/1 [==============================] - 0s 0s/step - loss: 0.0395
Epoch 132/500
1/1 [==============================] - 0s 998us/step - loss: 0.0392
Epoch 133/500
1/1 [==============================] - 0s 0s/step - loss: 0.0389
Epoch 134/500
1/1 [==============================] - 0s 0s/step - loss: 0.0386
Epoch 135/500
1/1 [==============================] - 0s 997us/step - loss: 0.0384
Epoch 136/500
1/1 [==============================] - 0s 0s/step - loss: 0.0381
Epoch 137/500
1/1 [==============================] - 0s 0s/step - loss: 0.0378
Epoch 138/500
1/1 [==============================] - 0s 997us/step - loss: 0.0375
Epoch 139/500
1/1 [==============================] - 0s 0s/step - loss: 0.0373
Epoch 140/500
1/1 [==============================] - 0s 0s/step - loss: 0.0370
Epoch 141/500
1/1 [==============================] - 0s 997us/step - loss: 0.0367
Epoch 142/500
1/1 [==============================] - 0s 0s/step - loss: 0.0365
Epoch 143/500
1/1 [==============================] - 0s 998us/step - loss: 0.0362
Epoch 144/500
1/1 [==============================] - 0s 0s/step - loss: 0.0359
Epoch 145/500
1/1 [==============================] - 0s 997us/step - loss: 0.0357
Epoch 146/500
1/1 [==============================] - 0s 0s/step - loss: 0.0354
Epoch 147/500
1/1 [==============================] - 0s 997us/step - loss: 0.0351
Epoch 148/500
1/1 [==============================] - 0s 0s/step - loss: 0.0349
Epoch 149/500
1/1 [==============================] - 0s 0s/step - loss: 0.0346
Epoch 150/500
1/1 [==============================] - 0s 0s/step - loss: 0.0344
Epoch 151/500
1/1 [==============================] - 0s 0s/step - loss: 0.0341
Epoch 152/500
1/1 [==============================] - 0s 997us/step - loss: 0.0339
Epoch 153/500
1/1 [==============================] - 0s 0s/step - loss: 0.0336
Epoch 154/500
1/1 [==============================] - 0s 996us/step - loss: 0.0334
Epoch 155/500
1/1 [==============================] - 0s 0s/step - loss: 0.0331
Epoch 156/500
1/1 [==============================] - 0s 997us/step - loss: 0.0329
Epoch 157/500
1/1 [==============================] - 0s 0s/step - loss: 0.0327
Epoch 158/500
1/1 [==============================] - 0s 0s/step - loss: 0.0324
Epoch 159/500
1/1 [==============================] - 0s 997us/step - loss: 0.0322
Epoch 160/500
1/1 [==============================] - 0s 0s/step - loss: 0.0320
Epoch 161/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0317
Epoch 162/500
1/1 [==============================] - 0s 993us/step - loss: 0.0315
Epoch 163/500
1/1 [==============================] - 0s 0s/step - loss: 0.0313
Epoch 164/500
1/1 [==============================] - 0s 0s/step - loss: 0.0310
Epoch 165/500
1/1 [==============================] - 0s 0s/step - loss: 0.0308
Epoch 166/500
1/1 [==============================] - 0s 998us/step - loss: 0.0306
Epoch 167/500
1/1 [==============================] - 0s 0s/step - loss: 0.0304
Epoch 168/500
1/1 [==============================] - 0s 997us/step - loss: 0.0301
Epoch 169/500
1/1 [==============================] - 0s 0s/step - loss: 0.0299
Epoch 170/500
1/1 [==============================] - 0s 997us/step - loss: 0.0297
Epoch 171/500
1/1 [==============================] - 0s 0s/step - loss: 0.0295
Epoch 172/500
1/1 [==============================] - 0s 993us/step - loss: 0.0293
Epoch 173/500
1/1 [==============================] - 0s 0s/step - loss: 0.0291
Epoch 174/500
1/1 [==============================] - 0s 998us/step - loss: 0.0288
Epoch 175/500
1/1 [==============================] - 0s 0s/step - loss: 0.0286
Epoch 176/500
1/1 [==============================] - 0s 997us/step - loss: 0.0284
Epoch 177/500
1/1 [==============================] - 0s 0s/step - loss: 0.0282
Epoch 178/500
1/1 [==============================] - 0s 998us/step - loss: 0.0280
Epoch 179/500
1/1 [==============================] - 0s 0s/step - loss: 0.0278
Epoch 180/500
1/1 [==============================] - 0s 997us/step - loss: 0.0276
Epoch 181/500
1/1 [==============================] - 0s 0s/step - loss: 0.0274
Epoch 182/500
1/1 [==============================] - 0s 996us/step - loss: 0.0272
Epoch 183/500
1/1 [==============================] - 0s 997us/step - loss: 0.0270
Epoch 184/500
1/1 [==============================] - 0s 0s/step - loss: 0.0268
Epoch 185/500
1/1 [==============================] - 0s 0s/step - loss: 0.0266
Epoch 186/500
1/1 [==============================] - 0s 997us/step - loss: 0.0264
Epoch 187/500
1/1 [==============================] - 0s 0s/step - loss: 0.0262
Epoch 188/500
1/1 [==============================] - 0s 996us/step - loss: 0.0260
Epoch 189/500
1/1 [==============================] - 0s 0s/step - loss: 0.0259
Epoch 190/500
1/1 [==============================] - 0s 996us/step - loss: 0.0257
Epoch 191/500
1/1 [==============================] - 0s 0s/step - loss: 0.0255
Epoch 192/500
1/1 [==============================] - 0s 0s/step - loss: 0.0253
Epoch 193/500
1/1 [==============================] - 0s 997us/step - loss: 0.0251
Epoch 194/500
1/1 [==============================] - 0s 0s/step - loss: 0.0249
Epoch 195/500
1/1 [==============================] - 0s 0s/step - loss: 0.0247
Epoch 196/500
1/1 [==============================] - 0s 0s/step - loss: 0.0246
Epoch 197/500
1/1 [==============================] - 0s 0s/step - loss: 0.0244
Epoch 198/500
1/1 [==============================] - 0s 0s/step - loss: 0.0242
Epoch 199/500
1/1 [==============================] - 0s 0s/step - loss: 0.0240
Epoch 200/500
1/1 [==============================] - 0s 0s/step - loss: 0.0239
Epoch 201/500
1/1 [==============================] - 0s 997us/step - loss: 0.0237
Epoch 202/500
1/1 [==============================] - 0s 0s/step - loss: 0.0235
Epoch 203/500
1/1 [==============================] - 0s 998us/step - loss: 0.0233
Epoch 204/500
1/1 [==============================] - 0s 0s/step - loss: 0.0232
Epoch 205/500
1/1 [==============================] - 0s 996us/step - loss: 0.0230
Epoch 206/500
|
oldbabylonian/cookbook/posTag.ipynb | ###Markdown
--- To get started: consult [start](start.ipynb) ---

Part of Speech tagging

Team

name | discipline | stage | affiliation
--- | --- | --- | ---
Alba de Ridder | Assyriology | master student | NINO, Leiden
Martijn Kokken | Assyriology | master student | NINO, Leiden
Dirk Roorda | Computer Science | researcher | DANS, Den Haag
Cale Johnson | Assyriology | researcher and lecturer | Univ Birmingham
Caroline Waerzeggers | Assyriology | director | NINO, Leiden
###Code
from tf.app import use
from pos import PosTag
COLOPHON = dict(
acronym="ABB-pos",
corpus="Old Babylonian Letter Corpus (ABB)",
dataset="oldbabylonian",
compiler="Dirk Roorda",
editors="Alba de Ridder, Martijn Kokken",
initiators="Cale Johnson, Caroline Waerzeggers",
institute="NINO, DANS",
)
###Output
_____no_output_____
###Markdown
Status

* 2019-06-06 Personal pronouns added
* 2019-06-05 Dirk has reorganised the messy code after the sprint into a repeatable and documented workflow. The workflow covers special cases, prepositions, and nouns, not yet the extra insights of the sprint.
* 2019-06-03/04 Martijn, Alba and Dirk do a two-day sprint to follow up on heuristics supplied by Cale Johnson. Martijn and Alba provide extra insights.

Introduction

We collect and execute ideas to tag all word occurrences with a part-of-speech, such as `noun`, `prep`, `verb`.
In the end, we intend to provide extra features to the Old Babylonian corpus, as a standard module that will always be loaded alongside the corpus.
This notebook will produce some word-level features:

* `pos`: main category of the word: `noun`, `verb`, `prep`, `pcl` (particle)
* `subpos`: secondary category of the word: `rel` (relation), `neg` (negation)

But in the meanwhile, it is work in progress, and during the work we collect candidate assignments in sets, which we save to disk.
These sets correspond to `noun`, `prep`, `nonprep` words as far as we have tagged them in the current state of the workflow.
The sets are all saved in a file `sets.tfx`, both next to this notebook (so that you can get it through GitHub) and in a shared Dropbox folder `obb`, so that the Akkadian specialists (Alba de Ridder, Martijn Kokken, Cale Johnson) have instant access to them and can test them in their TF-browser.
See [pos](pos.ipynb) for how you can make use of these results.

Method

Overview

We perform the following steps in that order:

Known words

We identify a bunch of words in closed categories that tend to interfere with noun/verb detection.
After identification, we exclude them from all subsequent pattern detection.

Prepositions

We detect a few prepositions, especially those that (nearly) always precede a noun.

Nouns

We use several markers to detect nouns:

* determinatives
* prepositions
* Sumerian logograms
* numerals

We collect the marked occurrences and then look up the unmarked occurrences of the same words.
In this way we extend the detection of nouns considerably.
We have to deal with one big complication, though: **unknowns**.
If we have marked word occurrences with unknown signs in them, we cannot be confident that unmarked occurrences of the same thing are really occurrences of the same underlying word.
So, if we transfer categorizations from marked occurrences to unmarked occurrences, we only do so if the word in question does not have unknowns.
We save a lot of intermediate sets: for each step we save the nouns that result from that step.
These sets may overlap.
We also save subsets of these sets, namely the occurrences that are positively marked, and the occurrences that lack marking and have been inferred.
These marked and unmarked subsets of each step are disjoint.

whole step | marked | unmarked
--- | --- | ---
`noundet` | `nounMdet` | `nounUdet`
`nounprep` | `nounMprep` | `nounUprep`
`nounlogo` | `nounMlogo` | `nounUlogo`
`nounnum` | `nounMnum` | `nounUnum`

**Note on determinatives**

Determinatives and phonetic complements are signs marked in ATF by being inside `{ }`, and in TF by having `det=1`.
From now on we abbreviate: a **det** is a determinative or a phonetic complement.

Start the engines

We load the Python modules we need.
###Code
%load_ext autoreload
%autoreload 2
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
We load the corpus and obtain a handle to it: `A`.
###Code
A = use("oldbabylonian:clone", checkout="clone", hoist=globals(), silent="deep")
###Output
_____no_output_____
###Markdown
Run the workflow We set up the detection machinery.
###Code
PT = PosTag(A)
###Output
_____no_output_____
###Markdown
Step: Inventory

We collect all the words and their occurrences and sift through determinatives and numerals.
We make a dictionary of words and their occurrences.
When we compute the word form, we pick the basic info of a sign, not the full ATF representation with flags and brackets.
We also store the form without the *dets* that are present in the word.
###Code
PT.prepare()
###Output
_____no_output_____
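###Markdown
A rough, hypothetical sketch of what this inventory step might do, assuming the Text-Fabric handles (`F`, `L`) hoisted by `use(...)` above and the `det` feature mentioned below; the sign-level `reading` feature is an assumption here, so check the corpus feature docs before relying on it.
###Code
from collections import defaultdict

# assumption: "reading" holds a sign's basic value; "det" marks determinatives/phonetic complements
def word_forms(w):
    signs = L.d(w, otype="sign")
    full = "-".join(str(F.reading.v(s)) for s in signs)
    bare = "-".join(str(F.reading.v(s)) for s in signs if not F.det.v(s))
    return full, bare

occurrences = defaultdict(list)  # full form -> word nodes
stripped = {}                    # full form -> form with the dets removed
for w in F.otype.s("word"):
    full, bare = word_forms(w)
    occurrences[full].append(w)
    stripped[full] = bare
###Output
_____no_output_____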
###Markdown
Step: Known words

The case specification is a string.
###Code
cases = """
la + u2-ul + u2-la = pcl, neg
sza = pcl, rel
u3 + u2-lu + u2 = pcl, conj
lu = pcl
an-nu-um + an-ni-im + an-nu-u2 = prn, dem
i-na-an-na + a-nu-um-ma = adv, tmp
"""
###Output
_____no_output_____
###Markdown
To be read as follows:
Each line specifies a bunch of words, separated by `+` on the left hand side of the `=`;
the right hand side specifies the categories those words receive, separated by `,`.
The first category is the `pos` (main part-of-speech), the second category is the `subpos` (subcategory within the main part-of-speech).
We use abbreviated forms, because users of this dataset will have to type them quite often.

Categories

category | subcategory | meaning
--- | --- | ---
`pcl` | | particle (unspecified)
`pcl` | `neg` | negative particle
`pcl` | `rel` | relative particle
`pcl` | `conj` | conjunction
`prn` | `dem` | demonstrative pronoun
`adv` | `tmp` | temporal adverb
###Code
PT.doKnownCases(cases)
###Output
distinct words: 13
pos assignments: 7681
subpos assignments: 7293
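###Markdown
The specification above is plain text; independent of the actual `PosTag` implementation, a minimal sketch of how such a string could be parsed into word-to-category assignments might look like this (the function name is hypothetical).
###Code
def parse_cases(spec):
    """Parse 'w1 + w2 = pos, subpos' lines into {word: (pos, subpos or None)}."""
    assignments = {}
    for line in spec.strip().splitlines():
        words, cats = line.split("=")
        cats = [c.strip() for c in cats.split(",")]
        pos = cats[0]
        subpos = cats[1] if len(cats) > 1 else None
        for word in (w.strip() for w in words.split("+")):
            assignments[word] = (pos, subpos)
    return assignments

parse_cases(cases)["la"]  # -> ('pcl', 'neg')
###Output
_____no_output_____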
###Markdown
Step: Pronouns - personal
###Code
prnPrs = """
nom:
1csg:
- a-na-ku
- a-na-ku-ma
- a-na-ku-u2
- a-na-ku-u2-ma
- a-na-ku-ma-mi
2msg:
- at-ta
- at-ta-ma
- at-ta-a
- at-ta-a-ma
2fsg:
- at-ti
- at-ti-ma
- at-ti-i-ma
3msg:
- szu-u2
- szu-u2-ma
3fsg:
- szi-i
- szi-i-ma
1mpl:
- ni-nu
- ni-i-ni
2mpl:
- at-tu-nu
- at-tu-nu-ma
- at-tu-nu-u2
- at-tu-u2-nu
- at-tu-u2-nu-ma
2fpl:
- at-ti-na-ma
3mpl:
- szu-nu
- szu-nu-ma
- szu-nu-mi
- szu-nu-u2
3fpl:
- szi-na
obl:
1csg:
- ia-ti
- ia-ti-i-ma
- ia-a-ti
2msg:
- ka-ta
- ka-ta-a-ma
- ka-a-ti
- ka-ti:
- P510880 reverse:8
- P306656 obverse:8
- ka-ti-i:
- P292855 obverse:4
- P292983 obverse:4
2fsg:
- ka-ti
3csg:
- szu-a-ti
- szu-a-tu
- sza-a-ti
- sza-a-tu
- szi-a-ti
1cpl:
- ni-a-ti
2mpl:
- ku-nu-ti
2fpl:
- /
3mpl:
- szu-nu-ti
3fpl:
- szi-na-ti
dat:
1csg:
- ia-szi
- ia-szi-im
- ia-a-szi
- ia-a-szi-im
2csg:
- ka-szi-im
- ka-szi-im-ma
- ka-a-szum
3msg:
- szu-a-szi-im
1cpl:
- /
2mpl:
- ku-nu-szi-im
2fpl:
- /
3mpl:
- szu-nu-szi-im-ma
3fpl:
- /
"""
PT.doPrnPrs(prnPrs)
###Output
31s 55 forms declared, but 54 ones encountered:
31s missed at-tu-u2-nu => nom, 2mpl
###Markdown
Step: Prepositions

The following prepositions are known to precede nouns.
###Code
preps = """
i-na
a-na
e-li
isz-tu
it-ti
ar-ki
"""
PT.doPreps(preps)
###Output
distinct words: 6
pos assignments: 5943
subpos assignments: 0
non-prep occs: 70562
###Markdown
We have made a set of all non-prepositions, i.e. all word occurrences that are not one of these prepositions.

Step: Nouns

pass: Determiners

We take all words that have a *det*.
We collect the *markedData* for this step: all words that have a *det* inside.
The *unmarkedData* for this step are the occurrences of the stripped forms of the marked words, i.e. the forms with the *det*s removed.
But only if those forms do not have an unknown in them, i.e. an `x`, `n`, or `...`.

pass: Prepositions

Words after the given set of prepositions are usually nouns.
However, sometimes there are multiple prepositions in a row.
We take care that we do not mark those second prepositions as nouns.

pass: Sumerian logograms

Any word that has one or more Sumerian logograms in it will be marked as a noun.
Sumerian logograms are defined as signs within the scope of an enclosing `_ _` pair.
In TF such signs are characterized by having `langalt=1`.
The unmarked data are the occurrences of the same words, but where none of the signs have `langalt=1`.

pass: Numerals

Numerals are individual signs, but they can be part of words.
In those cases, we call the whole word a numeral.
We consider the category of numeral words as a subcategory of the nouns.
Note that there are also unknown numerals: those with reading `n`.
A numeral is always marked; there is no concept of unmarked occurrences of numerals.
###Code
PT.doNouns()
###Output
Before step det : 0 words in 0 occurrences
Due to step det marked : 2088 words in 6173 occurrences
Due to step det unmarked : 290 words in 1920 occurrences
Due to step det all : 2378 words in 8093 occurrences
After step det : 2378 words in 8093 occurrences
----------------------------------------
Before step prep : 2378 words in 8093 occurrences
Due to step prep marked : 2210 words in 5803 occurrences
Due to step prep unmarked : 2100 words in 14055 occurrences
Due to step prep all : 2210 words in 19858 occurrences
After step prep : 3998 words in 23015 occurrences
----------------------------------------
Before step logo : 3998 words in 23015 occurrences
Due to step logo marked : 1616 words in 11647 occurrences
Due to step logo unmarked : 1572 words in 3593 occurrences
Due to step logo all : 1616 words in 15240 occurrences
After step logo : 4750 words in 26352 occurrences
----------------------------------------
Before step num : 4750 words in 26352 occurrences
Due to step num marked : 47 words in 2238 occurrences
Due to step num unmarked : 0 words in 0 occurrences
Due to step num all : 47 words in 2238 occurrences
After step num : 4755 words in 26369 occurrences
----------------------------------------
noun with 4755 words and 26369 occurrences
nounMdet with 2088 words and 6173 occurrences
nounMlogo with 1616 words and 11647 occurrences
nounMnum with 47 words and 2238 occurrences
nounMprep with 2210 words and 5803 occurrences
nounUdet with 290 words and 1920 occurrences
nounUlogo with 1572 words and 3593 occurrences
nounUnum with 0 words and 0 occurrences
nounUprep with 2100 words and 14055 occurrences
noundet with 2378 words and 8093 occurrences
nounlogo with 1616 words and 15240 occurrences
nounnum with 47 words and 2238 occurrences
nounprep with 2210 words and 19858 occurrences
pos assignments: 26560
subpos assignments: 2238
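###Markdown
The marked-to-unmarked transfer described above boils down to one rule: only transfer a category from marked occurrences to unmarked occurrences of the same form when that form contains no unknown signs. A stand-alone sketch over plain dictionaries (the real `PosTag` code keeps more bookkeeping, and these names are hypothetical):
###Code
UNKNOWNS = {"x", "n", "..."}

def has_unknown(form):
    # a form is suspect if any of its signs is an unknown
    return any(sign in UNKNOWNS for sign in form.split("-"))

def extend_to_unmarked(marked_forms, occurrences):
    """Return the unmarked occurrences that may safely inherit the category."""
    inferred = {}
    for form in marked_forms:
        if not has_unknown(form):
            inferred[form] = occurrences.get(form, [])
    return inferred
###Output
_____no_output_____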
###Markdown
Results

We specify the metadata that we want to include in our new features.
###Code
metaData = {
"": COLOPHON,
"pos": {
"valueType": "str",
"description": "primary part-of-speech category on full words",
},
"subpos": {
"valueType": "str",
"description": "secondary category within part-of-speech on full words",
},
"cs": {
"valueType": "str",
"description": "grammatical case: nom, acc, acg, gen, dat",
},
"ps": {
"valueType": "str",
"description": "grammatical person: 1, 2, 3",
},
"gn": {
"valueType": "str",
"description": "grammatical gender: m, f, c",
},
"nu": {
"valueType": "str",
"description": "grammatical number: sg, du, pl",
},
}
###Output
_____no_output_____
###Markdown
The next cell saves the features to disk, and the sets as well.
###Code
PT.export(metaData)
###Output
_____no_output_____ |
EDyA_I/theme_2/code/diccionario/.ipynb_checkpoints/diccionario-checkpoint.ipynb | ###Markdown
Dictionary attack
###Code
# This line is used so that the generated plots are embedded in the page
%pylab inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def crearCombinaciones(abecedario):
    # Brute-force count of every 4-character combination of the given alphabet
    veces = 0
    for d1 in abecedario:
        for d2 in abecedario:
            for d3 in abecedario:
                for d4 in abecedario:
                    veces += 1
    return veces
# Input data: the alphabet used to build the 4-character candidate combinations
abecedario = ['a','b','c','d','e','f','g','h','i','j',
'k','l','m','n','o','p','q','r','s','t',
'u','v','w','x','y','z']
x = []
y = []
cont = 2
while cont < len(abecedario):
x.append(str(cont))
y.append(str(crearCombinaciones(abecedario[:cont])))
cont += 1
fig, ax = plt.subplots(facecolor='w', edgecolor='k')
ax.plot(x, y, marker="o", color="r", linestyle='None')
ax.grid(True)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.grid(True)
plt.title('Puntos')
plt.show()
fig.savefig("graph.png")
###Output
_____no_output_____ |
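###Markdown
The brute-force count above can be cross-checked against the closed form: for an alphabet of size n, the number of 4-character combinations is simply n**4 (a quick sanity check, reusing the function and alphabet defined above).
###Code
# Cross-check: the nested loops should count exactly n**4 combinations
for n in range(2, len(abecedario) + 1):
    assert crearCombinaciones(abecedario[:n]) == n ** 4
print("brute-force count matches n**4 for every alphabet size tested")
###Output
_____no_output_____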
examples/parameter_detail.ipynb | ###Markdown
Parameter explanation of PERLER (Y. Okochi, 02/09/21) This tutorial is an application of PERLER to the D. melanogaster (Dmel) dataset (Karaiskos et al., 2017). import perler
###Code
import perler
###Output
/Users/yasokochi/OneDrive - Kyoto University/backup/07_perler_private_test/.venv/lib/python3.8/site-packages/pandas/compat/__init__.py:97: UserWarning: Could not import the lzma module. Your installed Python is incomplete. Attempting to use lzma compression will result in a RuntimeError.
warnings.warn(msg)
###Markdown
Import the additional modules These modules are not necessary for the procedures of perler. However, they are necessary for loading and saving data.
###Code
#for loading data
import pandas as pd
%matplotlib inline
###Output
_____no_output_____
###Markdown
Loading dataset

This dataset was generated by Karaiskos et al., 2017 and was acquired from the Drosophila Virtual Expression eXplorer (DVEX, URL: https://shiny.mdc-berlin.de/DVEX/).

・dge_normalized.txt: scRNAseq data
・bdtnp.txt: in situ data
・geometry_dvex.txt: cell location data

ref: Karaiskos et al., 2017 (doi: 10.1126/science.aan3235)
###Code
RNAseq = pd.read_csv('data/dge_normalized.txt',sep='\t').T
BDTNP = pd.read_csv('data/bdtnp.txt', sep='\t')
location=pd.read_csv("data/geometry_dvex.txt", delimiter=" ") #optional
###Output
_____no_output_____
###Markdown
Making the PERLER object

The essential parameters of this function are...

・data; scRNAseq data with gene columns and sample rows
・reference; in situ hybridization data with gene columns and sample rows
・n_metagenes (int) ; how many metagenes you want to extract. The default is 60.

Additionally, you can choose

・DR (str) ; how to reduce the dimensionality of the dataset. 'PLSC' (default), 'PCA', and 'NA' (no DR (dimensionality reduction)) can be used.
・print_iter (bool) ; if print_iter is True, the likelihood is printed every 5 steps of the EM algorithm implemented in plr.em_algorithm().
###Code
%%time
plr = perler.PERLER(data = RNAseq, reference=BDTNP, print_iter=True)
###Output
CPU times: user 2.73 s, sys: 1.08 s, total: 3.8 s
Wall time: 313 ms
###Markdown
You can see the parameters of this object.

Dimensionality reduction
###Code
plr.DR
###Output
_____no_output_____
###Markdown
scRNAseq data
###Code
plr.data.head()
###Output
_____no_output_____
###Markdown
Reference data
###Code
plr.ref.head()
###Output
_____no_output_____
###Markdown
Generative linear mapping (the first step of PERLER)

Parameter fitting by the EM algorithm

The essential parameters of this function are...

・optimize_pi (bool) ; if True, the mixing coefficients (pi) are optimized by the EM algorithm. If False, the mixing coefficients are fixed at their initial values. The default is True.
###Code
%%time
plr.em_algorithm(optimize_pi = False)
###Output
0 -inf
5 -299624.18626059196
10 -299590.62390697666
15 -299589.8590121893
CPU times: user 10min 37s, sys: 6min 28s, total: 17min 5s
Wall time: 1min 13s
###Markdown
Calculate the pair-wise distance between scRNAseq data and reference data
###Code
%%time
plr.calc_dist()
###Output
CPU times: user 10 s, sys: 582 ms, total: 10.6 s
Wall time: 9.15 s
###Markdown
Showing the pair-wise distance between scRNAseq data and ISH data
###Code
plr.DM
###Output
_____no_output_____
###Markdown
Hyperparameter estimation

Conducting the LOOCV experiment

The essential parameters of this function are...

・workers (int) ; the number of workers for multiprocessing using joblib. The default is -1 (use the maximum number of workers on your computer).
###Code
%%time
plr.loocv()
###Output
[Parallel(n_jobs=-1)]: Using backend LokyBackend with 16 concurrent workers.
[Parallel(n_jobs=-1)]: Done 9 tasks | elapsed: 3.9min
[Parallel(n_jobs=-1)]: Done 18 tasks | elapsed: 7.3min
[Parallel(n_jobs=-1)]: Done 29 tasks | elapsed: 7.5min
[Parallel(n_jobs=-1)]: Done 40 tasks | elapsed: 11.1min
[Parallel(n_jobs=-1)]: Done 53 tasks | elapsed: 14.5min
[Parallel(n_jobs=-1)]: Done 62 out of 84 | elapsed: 14.8min remaining: 5.2min
[Parallel(n_jobs=-1)]: Done 71 out of 84 | elapsed: 18.1min remaining: 3.3min
[Parallel(n_jobs=-1)]: Done 80 out of 84 | elapsed: 18.3min remaining: 55.0s
###Markdown
Fitting the hyperparameters by grid search

The essential parameters of this function are...

・workers (int) ; the number of workers for multiprocessing in the scipy.optimize.brute function. The default is -1 (use the maximum number of workers on your computer).

Additionally, you can choose

・grids (tuple) ; the parameter ranges for the scipy.optimize.brute function. The default is ((0,1), (0,1)).
###Code
%%time
plr.grid_search()
###Output
/Users/yasokochi/OneDrive - Kyoto University/backup/07_perler_private_test/.venv/lib/python3.8/site-packages/numpy/lib/function_base.py:2642: RuntimeWarning: invalid value encountered in true_divide
c /= stddev[:, None]
/Users/yasokochi/OneDrive - Kyoto University/backup/07_perler_private_test/.venv/lib/python3.8/site-packages/numpy/lib/function_base.py:2643: RuntimeWarning: invalid value encountered in true_divide
c /= stddev[None, :]
###Markdown
showing the estimated hyperparameters
###Code
plr.res
###Output
_____no_output_____
###Markdown
Spatial reconstruction (the second step of PERLER)

The essential parameters of this function are...

・location (pandas.DataFrame object, optional) ; if you have cell location data for the ISH data, you can add it to the PERLER result through this parameter. This pandas.DataFrame object must have columns which specify the x_axis and y_axis (and z_axis for 3-dimensional data) of the coordinates of the cells. The default is None.

For the Dmel dataset...

・mirror (bool, only required for the Dmel dataset (Karaiskos et al., 2017)) ; in the Dmel dataset, the result of PERLER must be mirrored for visualization. Please see the Methods in our manuscript and Karaiskos et al., 2017. The default is False.
・_3d (bool, only required for the Dmel dataset (Karaiskos et al., 2017)) ; in the Dmel dataset, the columns of the cell location dataframe are changed from ['x_coord'...] to ['X'...] in our implementation for clarity of the code. The default is False.
###Code
%%time
plr.spatial_reconstruction(location = location, mirror = True, _3d = True)
###Output
CPU times: user 4.32 s, sys: 729 ms, total: 5.05 s
Wall time: 1.89 s
###Markdown
showing the reconstructed result
###Code
plr.result_with_location.head()
###Output
_____no_output_____
###Markdown
Visualization (for the Dmel dataset (Karaiskos et al., 2017))

For visualization of the Dmel dataset, we implemented a function, Dmel_visualization().
This function enables you to visualize a Dmel virtual embryo for a specific gene from a specific view.

The essential parameters of this function are...

・gene (str) ; the gene name you want to visualize
・view (str) ; the view of the Dmel virtual embryo. This parameter must be one of "lateral", "anterior", "posterior", "top", and "bottom". The default is "lateral".
・color_map (str) ; the color map for the plt.scatter() function. The default is "BuPu".
###Code
plr.Dmel_visualization(gene = 'wg')
plr.Dmel_visualization(gene = 'wg', view = 'anterior')
plr.Dmel_visualization(gene = 'wg', color_map = 'viridis')
###Output
/Users/yasokochi/OneDrive - Kyoto University/backup/07_perler_private_test/.venv/lib/python3.8/site-packages/perler/perler_class.py:698: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
fig.show()
|
workflow/Consult-USFWS-TESS.ipynb | ###Markdown
__Description__

This notebook uses the TESS class of the TESS module in the bispy package. In this notebook information from TESS is retrieved using a subset of species from WYNDD's species list (scientific names) that have ranges overlapping with the WLCI geospatial boundary.

__Source(s)__

'../sources/wyndd_wlci_species_list.json' : A list of scientific names. This list is a subset of species from WYNDD's species list that have ranges overlapping with the WLCI geospatial boundary. This file was created in notebook _Consult-and-Explore-WYNDD-Species-Data.ipynb_

__Output(s)__

_'../cache/tess.json'_ : Information from TESS on WLCI referenced species
###Code
#Import needed packages
import json
import bispy
from IPython.display import display
from joblib import Parallel, delayed
tess = bispy.ecos.Tess()
bis_utils = bispy.bis.Utils()
# Open list of scientific names to process
with open("../sources/wyndd_wlci_species_list.json", "r") as f:
spp_list = json.loads(f.read())
import requests
import xmltodict
from bs4 import BeautifulSoup
from bispy import bis
from urllib.parse import urlparse
bis_utils = bis.Utils()
# The Tess class update is awaiting review and merge into bispy; once merged, this code can be dropped
class Tess:
def __init__(self):
self.description = 'Set of functions for working with the USFWS Threatened and Endangered Species System'
self.tess_api_base = "https://ecos.fws.gov/ecp0/TessQuery?request=query&xquery=/SPECIES_DETAIL"
def search(self, criteria):
tess_result = bis_utils.processing_metadata()
tess_result["processing_metadata"]["status"] = "failure"
tess_result["processing_metadata"]["status_message"] = "Search failed"
if criteria.isdigit():
tess_result["processing_metadata"]["api"] = f'{self.tess_api_base}[TSN={criteria}]'
tess_result["parameters"]= {'tsn': criteria}
else:
tess_result["processing_metadata"]["api"] = f'{self.tess_api_base}[SCINAME="{criteria}"]'
tess_result["parameters"]= {'Scientific Name': criteria}
# Query the TESS XQuery service
tess_response = requests.get(tess_result["processing_metadata"]["api"])
if tess_response.status_code != 200:
tess_result["processing_metadata"]["status"] = "error"
tess_result["processing_metadata"]["status_message"] = f"HTTP Status Code: {tess_response.status_code}"
return tess_result
# Build an unordered dict from the TESS XML response (we don't care about ordering for our purposes here)
tessDict = xmltodict.parse(tess_response.text, dict_constructor=dict)
if "results" not in tessDict.keys() or tessDict["results"] is None:
tess_result["processing_metadata"]["status"] = "failure"
return tess_result
tess_result["processing_metadata"]["status"] = "success"
tess_result["data"] = tessDict["results"]
return tess_result
tess = Tess()
# Use joblib to run multiple requests for TESS records in parallel via ITIS scientific names
tess_result = Parallel(n_jobs=8)(delayed(tess.search)(name)for name in spp_list)
# Filter to give just cases where TESS species names matched with ITIS species names
tess_success=[i for i in tess_result if i['processing_metadata']['status'] == 'success']
#Print message to user
print (f'{len(tess_success)} out of {len(spp_list)} species were successfully connected to TESS')
# Cache the array of retrieved documents and return/display a random sample for verification
display(bis_utils.doc_cache("../cache/tess.json", tess_success))
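# Optional illustrative check (record shape inferred from the Tess.search() code above):
# print the query URL and the top-level keys of one successful record
if tess_success:
    example = tess_success[0]
    print(example["processing_metadata"]["api"])
    print(list(example["data"].keys()) if isinstance(example["data"], dict) else type(example["data"]))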
###Output
_____no_output_____ |
2019-bern/09-Photutils/photutils_overview.ipynb | ###Markdown
Photutils

- Code: https://github.com/astropy/photutils
- Documentation: http://photutils.readthedocs.org/en/stable/
- Issue Tracker: https://github.com/astropy/photutils/issues

Photutils can be used for:

- Background and background noise estimation
- Source Detection and Extraction
  - DAOFIND and IRAF's starfind
  - Image segmentation
  - local peak finder
- Aperture photometry
- PSF photometry
- PSF matching
- Centroids
- Morphological properties
- Elliptical isophote analysis

In this section, we will:

- Learn how to perform aperture photometry
- Learn how to use photutils' image segmentation module

---

Preliminaries
###Code
# Initial imports
import numpy as np
import matplotlib.pyplot as plt
# Change some default plotting parameters
import matplotlib as mpl
mpl.rcParams['image.origin'] = 'lower'
mpl.rcParams['image.interpolation'] = 'nearest'
from matplotlib.colors import LogNorm
# Run the %matplotlib magic command to enable inline plotting
# in the current notebook. Choose one of these:
%matplotlib inline
# %matplotlib notebook
###Output
_____no_output_____
###Markdown
Load the data We'll start by reading data and error arrays from FITS files. These are cutouts from the HST Extreme-Deep Field (XDF) taken with WFC3/IR in the F160W filter.
###Code
from astropy.io import fits
sci_fn = 'data/xdf_hst_wfc3ir_60mas_f160w_sci.fits'
rms_fn = 'data/xdf_hst_wfc3ir_60mas_f160w_rms.fits'
sci_hdulist = fits.open(sci_fn)
rms_hdulist = fits.open(rms_fn)
sci_hdulist[0].header['BUNIT'] = 'electron/s'
###Output
_____no_output_____
###Markdown
Print some info about the data.
###Code
sci_hdulist.info()
###Output
_____no_output_____
###Markdown
Define the data and error arrays.
###Code
data = sci_hdulist[0].data.astype(np.float)
error = rms_hdulist[0].data.astype(np.float)
###Output
_____no_output_____
###Markdown
Extract the data header and create a WCS object.
###Code
from astropy.wcs import WCS
hdr = sci_hdulist[0].header
wcs = WCS(hdr)
###Output
_____no_output_____
###Markdown
Display the data.
###Code
from astropy.visualization import ImageNormalize, LogStretch
norm = ImageNormalize(vmin=1e-4, vmax=5e-2, stretch=LogStretch())
plt.imshow(data, norm=norm)
plt.title('XDF F160W Cutout')
###Output
_____no_output_____
###Markdown
---

Part 1: Aperture Photometry

Photutils provides circular, elliptical, and rectangular aperture shapes (plus annulus versions of each). These are the names of the aperture classes, defined in pixel coordinates:

* `CircularAperture`
* `CircularAnnulus`
* `EllipticalAperture`
* `EllipticalAnnulus`
* `RectangularAperture`
* `RectangularAnnulus`

Along with variants of each, defined in celestial coordinates:

* `SkyCircularAperture`
* `SkyCircularAnnulus`
* `SkyEllipticalAperture`
* `SkyEllipticalAnnulus`
* `SkyRectangularAperture`
* `SkyRectangularAnnulus`

These look something like this:

Methods for handling aperture/pixel intersection

In general, the apertures will only partially overlap some of the pixels in the data.
There are three methods for handling the aperture overlap with the pixel grid of the data array: 'exact', 'center', and 'subpixel'. NOTE: the `subpixels` keyword is ignored for the **'exact'** and **'center'** methods.

Perform circular-aperture photometry on some sources in the XDF

First, we define a circular aperture at a given position and radius (in pixels).
###Code
from photutils import CircularAperture
position = (90.73, 59.43) # (x, y) pixel position
radius = 5. # pixels
aperture = CircularAperture(position, r=radius)
aperture
print(aperture)
###Output
_____no_output_____
###Markdown
We can plot the aperture on the data using the aperture `plot()` method:
###Code
plt.imshow(data, norm=norm)
aperture.plot(color='red', lw=2)
###Output
_____no_output_____
###Markdown
Now let's perform photometry on the data using the `aperture_photometry()` function. **The default aperture method is 'exact'.**

Also note that the input data is assumed to have zero background. If that is not the case, please see the documentation for the `photutils.background` subpackage for tools to help subtract the background.

Learn More: See the [local background subtraction notebook](photutils_local_backgrounds.ipynb) for examples of local background subtraction.

The background was already subtracted for our XDF example data.
###Code
from photutils import aperture_photometry
phot = aperture_photometry(data, aperture)
phot
###Output
_____no_output_____
###Markdown
The output is an Astropy `QTable` (Quantity Table) with the sum of the data values within the aperture (using the defined pixel overlap method).
The table also contains metadata, which is accessed by the `meta` attribute of the table. The metadata is stored as a python (ordered) dictionary:
###Code
phot.meta
phot.meta['version']
###Output
_____no_output_____
###Markdown
Aperture photometry using the **'center'** method gives a slightly different (and less accurate) answer:
###Code
phot = aperture_photometry(data, aperture, method='center')
phot
###Output
_____no_output_____
###Markdown
Now perform aperture photometry using the **'subpixel'** method with `subpixels=5`.
These parameters are equivalent to SExtractor aperture photometry.
###Code
phot = aperture_photometry(data, aperture, method='subpixel', subpixels=5)
phot
###Output
_____no_output_____
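###Markdown
For comparison, here is a small loop (reusing the `data` and `aperture` objects defined above) that runs the same measurement with all three overlap methods; since `subpixels` is ignored for 'exact' and 'center', it can be passed uniformly.
###Code
# Compare the three aperture/pixel overlap methods on the same aperture
for method in ('exact', 'center', 'subpixel'):
    phot_cmp = aperture_photometry(data, aperture, method=method, subpixels=5)
    print(method, phot_cmp['aperture_sum'][0])
###Output
_____no_output_____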
###Markdown
Photometric Errors We can also input an error array to get the photometric errors.
###Code
phot = aperture_photometry(data, aperture, error=error)
phot
###Output
_____no_output_____
###Markdown
The error array in our XDF FITS file represents only the background error. If we want to include the Poisson error of the source we need to calculate the **total** error:

$\sigma_{\mathrm{tot}} = \sqrt{\sigma_{\mathrm{b}}^2 + \frac{I}{g}}$

where $\sigma_{\mathrm{b}}$ is the background-only error, $I$ are the data values, and $g$ is the "effective gain".

The "effective gain" is the value (or an array if it's variable across an image) needed to convert the data image to count units (e.g. electrons or photons), where Poisson statistics apply.

Photutils provides a `calc_total_error()` function to perform this calculation.
###Code
from photutils.utils import calc_total_error
# this time include the Poisson error of the source
# our data array is in units of e-/s
# so the "effective gain" should be the exposure time
eff_gain = hdr['TEXPTIME']
tot_error = calc_total_error(data, error, eff_gain)
phot = aperture_photometry(data, aperture, error=tot_error)
phot
###Output
_____no_output_____
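###Markdown
As a quick sanity check, the total error can also be computed by hand from the formula above; this sketch compares only where the data values are non-negative, since negative pixels would make the Poisson term ill-defined.
###Code
# Manual check of the total-error formula: sigma_tot = sqrt(sigma_b**2 + I / g)
manual_tot_error = np.sqrt(error**2 + data / eff_gain)
nonneg = data >= 0
print(np.allclose(tot_error[nonneg], manual_tot_error[nonneg]))
###Output
_____no_output_____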
###Markdown
The total error increased only slightly because this is a small faint source. Units We can also input the data (and error) units via the `unit` keyword.
###Code
# input the data units
import astropy.units as u
unit = u.electron / u.s
phot = aperture_photometry(data, aperture, error=tot_error, unit=unit)
phot
phot['aperture_sum']
###Output
_____no_output_____
###Markdown
Instead of inputting units via the units keyword, `Quantity` inputs for data and error are also allowed.
###Code
phot = aperture_photometry(data * unit, aperture, error=tot_error * unit)
phot
###Output
_____no_output_____
###Markdown
The `unit` will not override the data or error unit.
###Code
phot = aperture_photometry(data * unit, aperture, error=tot_error * unit, unit=u.photon)
phot
###Output
_____no_output_____
###Markdown
Performing aperture photometry at multiple positions Now let's perform aperture photometry for three sources (all with the same aperture size). We simply define three (x, y) positions.
###Code
positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)]
radius = 5.
apertures = CircularAperture(positions, r=radius)
###Output
_____no_output_____
###Markdown
Let's plot these three apertures on the data.
###Code
plt.imshow(data, norm=norm)
apertures.plot(color='red', lw=2)
###Output
_____no_output_____
###Markdown
Now let's perform aperture photometry.
###Code
phot = aperture_photometry(data, apertures, error=tot_error, unit=unit)
phot
###Output
_____no_output_____
###Markdown
Each source is a row in the table and is given a unique **id** (the first column). Adding columns to the photometry table We can add columns to the photometry table. Let's calculate the signal-to-noise (SNR) ratio of our sources and add it as a new column to the table.
###Code
snr = phot['aperture_sum'] / phot['aperture_sum_err'] # units will cancel
phot['snr'] = snr
phot
###Output
_____no_output_____
###Markdown
Now calculate the F160W AB magnitude and add it to the table.
###Code
f160w_zpt = 25.9463
# NOTE that the log10() function can be applied only to dimensionless quantities
# so we use the value() method to get the number value of the aperture sum
abmag = -2.5 * np.log10(phot['aperture_sum']) + f160w_zpt
phot['abmag'] = abmag
phot
###Output
_____no_output_____
###Markdown
Now, using the WCS defined above, calculate the sky coordinates for these objects and add them to the table.
###Code
from astropy.wcs.utils import pixel_to_skycoord
# convert pixel positions to sky coordinates
x, y = np.transpose(positions)
coord = pixel_to_skycoord(x, y, wcs)
# we can add the astropy SkyCoord object directly to the table
phot['sky coord'] = coord
phot
###Output
_____no_output_____
###Markdown
We can also add separate RA and Dec columns, if preferred.
###Code
phot['ra_icrs'] = coord.icrs.ra
phot['dec_icrs'] = coord.icrs.dec
phot
###Output
_____no_output_____
###Markdown
If we write the table to an ASCII file using the ECSV format we can read it back in preserving all of the units, metadata, and SkyCoord objects.
###Code
phot.write('my_photometry.txt', format='ascii.ecsv')
# view the table on disk
!cat my_photometry.txt
###Output
_____no_output_____
###Markdown
Now read the table in ECSV format.
###Code
from astropy.table import QTable
tbl = QTable.read('my_photometry.txt', format='ascii.ecsv')
tbl
tbl.meta
tbl['aperture_sum'] # Quantity array
tbl['sky coord'] # SkyCoord array
###Output
_____no_output_____
###Markdown
Aperture photometry using Sky apertures First, let's define the sky coordinates by converting our pixel coordinates.
###Code
positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)]
x, y = np.transpose(positions)
coord = pixel_to_skycoord(x, y, wcs)
coord
###Output
_____no_output_____
###Markdown
Now define circular apertures in sky coordinates.
For sky apertures, the aperture radius must be a `Quantity`, in either pixel or angular units.
###Code
from photutils import SkyCircularAperture
radius = 5. * u.pix
sky_apers = SkyCircularAperture(coord, r=radius)
sky_apers.r
radius = 0.5 * u.arcsec
sky_apers = SkyCircularAperture(coord, r=radius)
sky_apers.r
###Output
_____no_output_____
###Markdown
When using a sky aperture in angular units, `aperture_photometry` needs the WCS transformation, which can be provided in two ways.
###Code
# via the wcs keyword
phot = aperture_photometry(data, sky_apers, wcs=wcs)
phot
# or via a FITS hdu (i.e. header and data) as the input "data"
phot = aperture_photometry(sci_hdulist[0], sky_apers)
phot
###Output
_____no_output_____
###Markdown
Learn More: Aperture Photometry in the [Extended notebook](photutils_extended.ipynb):

- Bad pixel masking
- Encircled flux
- Aperture photometry at multiple positions using multiple apertures

---

Part 2: Image Segmentation

Image segmentation is the process where sources are identified and labeled in an image.
The sources are detected by using an S/N threshold level and defining the minimum number of pixels required within a source.
First, let's define a threshold image at 2$\sigma$ (per pixel) above the background.
###Code
bkg = 0. # background level in this image
nsigma = 2.
threshold = bkg + (nsigma * error) # this should be background-only error
###Output
_____no_output_____
###Markdown
Now let's detect "8-connected" sources of minimum size 5 pixels where each pixel is 2$\sigma$ above the background."8-connected" pixels touch along their edges or corners. "4-connected" pixels touch along their edges. For reference, SExtractor uses "8-connected" pixels.The result is a segmentation image (`SegmentationImage` object). The segmentation image is the isophotal footprint of each source above the threshold: an array in which each object is labeled with an integer. As a simple example, a segmentation map containing two distinct sources might look like this:```0 0 0 0 0 0 0 0 0 00 1 1 0 0 0 0 0 0 01 1 1 1 1 0 0 0 2 01 1 1 1 0 0 0 2 2 21 1 1 0 0 0 2 2 2 21 1 1 1 0 0 0 2 2 01 1 0 0 0 0 2 2 0 00 1 0 0 0 0 2 0 0 00 0 0 0 0 0 0 0 0 0```where all of the pixels labeled `1` belong to the first source, all those labeled `2` belong to the second, and all null pixels are designated to be background.
###Code
from photutils import detect_sources
npixels = 5
segm = detect_sources(data, threshold, npixels)
print('Found {0} sources'.format(segm.nlabels))
###Output
_____no_output_____
###Markdown
Display the segmentation image.
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
ax1.imshow(data, norm=norm)
lbl1 = ax1.set_title('Data')
ax2.imshow(segm, cmap=segm.cmap())
lbl2 = ax2.set_title('Segmentation Image')
###Output
_____no_output_____
###Markdown
It is better to filter (smooth) the data prior to source detection.
Let's use a 5x5 Gaussian kernel with a FWHM of 2 pixels.
###Code
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
sigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2 pixels
kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
kernel.normalize()
ssegm = detect_sources(data, threshold, npixels, filter_kernel=kernel)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
ax1.imshow(segm, cmap=segm.cmap())
lbl1 = ax1.set_title('Original Data')
ax2.imshow(ssegm, cmap=ssegm.cmap())
lbl2 = ax2.set_title('Smoothed Data')
###Output
_____no_output_____
###Markdown
Source deblending

Note above that some of our detected sources were blended. We can deblend them using the `deblend_sources()` function, which uses a combination of multi-thresholding and watershed segmentation.

How the sources are deblended can be controlled with the two keywords `nlevels` and `contrast`:

- `nlevels` is the number of multi-thresholding levels to use
- `contrast` is the fraction of the total source flux that a local peak must have to be considered as a separate object
###Code
from photutils import deblend_sources
segm2 = deblend_sources(data, ssegm, npixels, filter_kernel=kernel,
contrast=0.001, nlevels=32)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 8))
ax1.imshow(data, norm=norm)
ax1.set_title('Data')
ax2.imshow(ssegm, cmap=ssegm.cmap())
ax2.set_title('Original Segmentation Image')
ax3.imshow(segm2, cmap=segm2.cmap())
ax3.set_title('Deblended Segmentation Image')
print('Found {0} sources'.format(segm2.max_label))
###Output
_____no_output_____
###Markdown
Measure the photometry and morphological properties of detected sources
###Code
from photutils import source_properties
catalog = source_properties(data, segm2, error=error, wcs=wcs)
###Output
_____no_output_____
###Markdown
`catalog` is a `SourceCatalog` object. It behaves like a list of `SourceProperties` objects, one for each source.
###Code
catalog
catalog[0] # the first source
catalog[0].xcentroid # the xcentroid of the first source
###Output
_____no_output_____
###Markdown
Please go [here](http://photutils.readthedocs.org/en/latest/api/photutils.segmentation.SourceProperties.htmlphotutils.segmentation.SourceProperties) to see the complete list of available source properties. We can create a Table of isophotal photometry and morphological properties using the ``to_table()`` method of `SourceCatalog`:
###Code
tbl = catalog.to_table()
tbl
###Output
_____no_output_____
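###Markdown
Since the catalog behaves like a list, individual properties can also be pulled out with a plain comprehension, e.g. the id and centroid of every detected source (a minimal example reusing the catalog built above).
###Code
# Collect (id, xcentroid, ycentroid) for each source without building the full table
centroids = [(obj.id, obj.xcentroid, obj.ycentroid) for obj in catalog]
centroids[:3]
###Output
_____no_output_____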
###Markdown
Additional properties (not stored in the table) can be accessed directly via the `SourceCatalog` object.
###Code
# get a single object (id=12)
obj = catalog[11]
obj.id
obj
###Output
_____no_output_____
###Markdown
Let's plot the cutouts of the data and error images for this source.
###Code
fig, ax = plt.subplots(figsize=(12, 8), ncols=3)
ax[0].imshow(obj.make_cutout(segm2.data))
ax[0].set_title('Source id={} Segment'.format(obj.id))
ax[1].imshow(obj.data_cutout_ma)
ax[1].set_title('Source id={} Data'.format(obj.id))
ax[2].imshow(obj.error_cutout_ma)
ax[2].set_title('Source id={} Error'.format(obj.id))
###Output
_____no_output_____ |
Python_Jupyter_Training/Week_1/.ipynb_checkpoints/6) Data Structures Pt 3-checkpoint.ipynb | ###Markdown
What you will learn

- Operations on dictionaries

Dictionary operations

As with lists there are a number of "things" we can do with dictionaries. We'll learn:

- update
- get
- del
- keys()

Update

There are two ways to add things to a dictionary
###Code
bookAuthorPair = {"Harry Potter": "JK Rolling"}
print(bookAuthorPair)
bookAuthorPair["The Kingkiller Chronicles"] = "Patrick Rothfuss" # define value of a key
print(bookAuthorPair)
other = {"Game of Thrones": "George R.R. Martin", "The Lord of the Rings": "J. R. R. Tolkien"}
bookAuthorPair.update(other) # using the update function
# effetive for adding multible key/value pairs into dict or merging two dics
print(bookAuthorPair)
###Output
{'Harry Potter': 'JK Rolling'}
{'Harry Potter': 'JK Rolling', 'The Kingkiller Chronicles': 'Patrick Rothfuss'}
{'Harry Potter': 'JK Rolling', 'The Kingkiller Chronicles': 'Patrick Rothfuss', 'Game of Thrones': 'George R.R. Martin', 'The Lord of the Rings': 'J. R. R. Tolkien'}
###Markdown
Note

- There can only be ONE instance of a key. No duplicates
###Code
# say we have this dict
x = {"Best Movie Ever": "Inception"}
print(x)
# we cannot have another key with the same name or we would overwrite the current value
x["Best Movie Ever"] = "Shrek"
print(x)
###Output
{'Best Movie Ever': 'Inception'}
{'Best Movie Ever': 'Shrek'}
###Markdown
Getting a value from a key
###Code
bookAuthorPair = {
'Harry Potter': 'JK Rolling',
'The Kingkiller Chronicles': 'Patrick Rothfuss',
'Game of Thrones': 'George R.R. Martin',
'The Lord of the Rings': 'J. R. R. Tolkien'}
book = 'Harry Potter'
author = bookAuthorPair.get(book)
print(f"{book} by {author}")
###Output
Harry Potter by JK Rolling
###Markdown
Note

- If a key does not exist then None will be returned
###Code
bookAuthorPair = {
'Harry Potter': 'JK Rolling',
'The Kingkiller Chronicles': 'Patrick Rothfuss',
'Game of Thrones': 'George R.R. Martin',
'The Lord of the Rings': 'J. R. R. Tolkien'}
book = "Every Tool's a Hammer"
author = bookAuthorPair.get(book)
print(f"{book} by {author}")
###Output
Every Tool's a Hammer by None
###Markdown
Note

- You can check the "truth-y-ness" of a dict too
###Code
dict1 = {}
dict2 = {1:2}
test1 = bool(dict1)
test2 = bool(dict2)
print(f"dict1 has stuff in it: {test1}")
print(f"dict2 has stuff in it: {test2}")
###Output
dict1 has stuff in it: False
dict2 has stuff in it: True
###Markdown
Del

As with lists we can delete key/value pairs from dicts by specifying the key we wish to remove
###Code
bookAuthorPair = {
'Harry Potter': 'JK Rolling',
'The Kingkiller Chronicles': 'Patrick Rothfuss',
'Game of Thrones': 'George R.R. Martin',
'The Lord of the Rings': 'J. R. R. Tolkien'}
del bookAuthorPair['Harry Potter']
print(bookAuthorPair)
###Output
{'The Kingkiller Chronicles': 'Patrick Rothfuss', 'Game of Thrones': 'George R.R. Martin', 'The Lord of the Rings': 'J. R. R. Tolkien'}
###Markdown
Note

- As with lists, if we try to del something that isn't there an error will be thrown
###Code
bookAuthorPair = {
'Harry Potter': 'JK Rolling',
'The Kingkiller Chronicles': 'Patrick Rothfuss',
'Game of Thrones': 'George R.R. Martin',
'The Lord of the Rings': 'J. R. R. Tolkien'}
del bookAuthorPair["Every Tool's a Hammer"]
print(bookAuthorPair)
###Output
_____no_output_____
###Markdown
Keys()

We can use the keys method to get all of the keys in a dict

What is a method?

A function is like f(x)=2x+3 and a method is similar. Data structures can have internal functions associated with them, like pop, get, or update. These internal functions are known as methods. They are specific to a data structure (more on this later).
###Code
bookAuthorPair = {
'Harry Potter': 'JK Rolling',
'The Kingkiller Chronicles': 'Patrick Rothfuss',
'Game of Thrones': 'George R.R. Martin',
'The Lord of the Rings': 'J. R. R. Tolkien'}
listOfBooks = bookAuthorPair.keys()
print(listOfBooks)
###Output
dict_keys(['Harry Potter', 'The Kingkiller Chronicles', 'Game of Thrones', 'The Lord of the Rings'])
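###Markdown
A quick illustration of the function vs. method distinction described above: len() is a standalone function you pass the dict to, while keys() is a method that belongs to the dict itself.
###Code
bookAuthorPair = {"Harry Potter": "JK Rolling", "Game of Thrones": "George R.R. Martin"}
print(len(bookAuthorPair))    # function call: the dict is passed in
print(bookAuthorPair.keys())  # method call: the dict "owns" the function
###Output
_____no_output_____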
###Markdown
Note

- This will be SUPER useful as we move to loops - our next section

What you need to do

1)
- copy the variable x from below and add a list of classes to Nolan (you can make up course numbers)

2)
- Get Brendon's class grades
- create an empty list
- Determine the letter value of each grade (A>94, 94>A->90, 90>B+>87, etc)
  - *hint*: use indexes 0-2 to pull grades
  - *hint*: use if/elif to check what letter grade each grade percentage is
- based on the letter grade use the gradeToGPA dict (see below) to get the corresponding GPA value
  - *hint*: use get
- with this GPA value add it to the list
- find the average of the list and print the average GPA
  - *hint*: use sum() and len()
  - *hint*: you should get an average GPA of 2.6679
###Code
# Problem #1
x = {
"students": {
"Brendon": {
"Major": "Electrical Engineering",
"Year": "Senior",
"Classes": ["ECE 251", "ECE 451", "ECE 450", "ECE 455", "ECE 401"],
"Class Grades": [88.7, 75.5, 81.5]
},
"Nolan": {
"Major": "Mechanical Engineering",
"Year": "Graduated",
}
}
}
gradeToGPA = {
"A": 4,
"A-": 3.667,
"B+": 3.337,
"B": 3,
"B-": 2.667,
"C+": 2.337,
"C": 2,
"C-": 1.667,
"D+": 1.337,
"D": 1,
"F": 0
}
###Output
2.6679999999999997
|
utils/HCA/HCA.ipynb | ###Markdown
HCA Baseline Codes

Version: 1.0
Author: Weizhe Lin @ Feb 2021
###Code
import os
import time
import yaml
import numpy as np
from tqdm import tqdm
from pprint import pprint

# Note: AStarTime (a time-expanded A* helper) and save_mat are assumed to be provided
# elsewhere in this repository; they are not defined in this notebook.

def hca(img, all_start, all_end, steps=100):
'''
HCA function
Args:
img: map
all_start: start positions of agents
all_end: goal positions of agents
steps: maximum allowed steps
Return:
all_path: calculated paths of agents
'''
all_path = []
robot_loc = np.where(img==3)
for i in range(img.shape[0]):
for j in range(img.shape[1]):
if img[i,j] == 3:
img[i,j] = 0
res_imgs = np.expand_dims(img, axis=0).repeat(steps, axis=0)
for i in range(len(robot_loc[0])):
res_imgs[0, robot_loc[0][i], robot_loc[1][i]] = 3
for i in range(len(all_start)):
robot_path = AStarTime(res_imgs, (all_start[i][0],all_start[i][1]), (all_end[i][0], all_end[i][1]))
#print(i)
if len(robot_path) == 1:
new_path = []
for j in range(steps-1):
res_imgs[j, all_start[i][0],all_start[i][1]] = 3
new_path.append([all_start[i][0],all_start[i][1],j])
all_path.append(new_path)
continue
else:
for loc in robot_path:
res_imgs[loc[2], loc[0], loc[1]] = 3
all_path.append(robot_path)
return all_path
# Load dataset
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map20x20_density_p1/10_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 100
data = []
ecbs_data = [] # save expert(ECBS) data
for root, dirs, files in os.walk(os.path.join(DATA_FOLDER, 'input')):
for f in files:
print(len(data), end='\r')
if len(data) >= DATA_NUM:
break
IDMAP = f.split('IDMap')[1].split('_')[0]
IDCase = f.split('IDCase')[1].split('.yaml')[0]
IDMAP = int(IDMAP)
IDCase = int(IDCase)
if IDMAP >= DATA_RANGE[0] and IDMAP <= DATA_RANGE[1]:
with open(os.path.join(root, f),'r',encoding='utf-8') as fs:
cont = fs.read()
x = yaml.load(cont)
# print(IDMAP, IDCase)
output_name = f.replace('input', 'output').split('.yaml')[0] + '_ECBS.yaml'
output_path = os.path.join(DATA_FOLDER, 'output_ECBS', output_name)
if os.path.exists(output_path):
with open(output_path ,'r',encoding='utf-8') as fs:
cont = fs.read()
y = yaml.load(cont)
# print(output_name)
data.append(x)
ecbs_data.append(y)
print(len(data))
print(len(ecbs_data))
# flowtime_increase_list = []
# time_cost_list = []
# all_success_count = []
# individual_success_count = []
# dim_num = None
# for input_data, output_data in tqdm(zip(data, ecbs_data), total=len(data)):
# success_count = []
# start_time = time.time()
# expert_makespan = output_data['statistics']['makespan']
# expert_cost = output_data['statistics']['cost']
# # print('===')
# # print(input_data)
# # print(output_data)
# env = np.zeros(input_data['map']['dimensions'])
# for obs in input_data['map']['obstacles']:
# env[obs[0], obs[1]] = 1
# if not dim_num:
# dim_num = [input_data['map']['dimensions'][0], len(input_data['agents'])]
# all_start = []
# all_end = []
# img = env.copy()
# for agent_data in input_data['agents']:
# start = agent_data['start']
# end = agent_data['goal']
# all_start.append(start)
# all_end.append(end)
# img[start[0], start[1]] = 3
# all_paths = hca(img, all_start, all_end, steps=expert_makespan*3+1)
# cost = 0
# makespan = 0
# collision_dict = {}
# for agent_path, goal_pos in zip(all_paths, all_end):
# final_x = agent_path[-1][0]
# final_y = agent_path[-1][1]
# if goal_pos[0] != final_x or goal_pos[1] != final_y:
# # Did not reach end
# cost += expert_makespan*3
# success_count.append(0)
# else:
# # reach goal
# cost += len(agent_path)
# success_count.append(1)
# makespan = max(makespan, len(agent_path))
# for agent_path_t in agent_path:
# t = agent_path_t[2]
# x = agent_path_t[0]
# y = agent_path_t[1]
# pos = (x,y)
# if pos in collision_dict.setdefault(t, {}).keys():
# print('found_collision!', t, collision_dict[t], agent_path)
# else:
# collision_dict[t][pos] = True
# end_time = time.time()
# time_elapsed = end_time - start_time
# time_cost_list.append(time_elapsed)
# flowtime_increase = cost/expert_cost-1
# flowtime_increase_list.append(flowtime_increase)
# individual_success_count += success_count
# # print((0 not in success_count), success_count)
# # if (0 in success_count):
# # print(input_data)
# # print(output_data)
# # print(all_paths[success_count.index(0)])
# all_success_count.append((0 not in success_count))
print('{}x{}({})'.format(dim_num[0], dim_num[0], dim_num[1]))
flowtime_increase_array = np.array(flowtime_increase_list)
time_cost_array = np.array(time_cost_list)
all_success_array = np.array(all_success_count)
individual_success_array = np.array(individual_success_count)
print('FT_increase;{};{}'.format(np.mean(flowtime_increase_array), np.std(flowtime_increase_array)))
print('time_cost;{};{}'.format(np.mean(time_cost_array), np.std(time_cost_array)))
print('all_success;{};{}'.format(np.mean(all_success_array), np.std(all_success_array)))
print('individual_success_rate;{};{}'.format(np.mean(individual_success_array), np.std(individual_success_array)))
print('{}x{}({})'.format(dim_num[0], dim_num[0], dim_num[1]),';','{};{};{};{};{};{};{};{}'.format(
np.mean(flowtime_increase_array), np.std(flowtime_increase_array),
np.mean(time_cost_array), np.std(time_cost_array),
np.mean(all_success_array), np.std(all_success_array),
np.mean(individual_success_array), np.std(individual_success_array),
))
LOG_TIME = int(time.time())
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map20x20_density_p1/10_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 10
run_in_pipeline()
LOG_TIME = int(time.time())
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map20x20_density_p1/10_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 4500
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map28x28_density_p1/20_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map35x35_density_p1/30_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map40x40_density_p1/40_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map45x45_density_p1/50_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map50x50_density_p1/60_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/EffectiveDensity/map65x65_density_p1/100_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
LOG_TIME = 1612452524
DATA_FOLDER = '/media/pc/文档/Dataset/SameMap_diffRobot/map50x50_density_p1/10_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/SameMap_diffRobot/map50x50_density_p1/20_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/SameMap_diffRobot/map50x50_density_p1/30_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/SameMap_diffRobot/map50x50_density_p1/40_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/SameMap_diffRobot/map50x50_density_p1/50_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
DATA_FOLDER = '/media/pc/文档/Dataset/SameMap_diffRobot/map50x50_density_p1/100_Agent'
# DATA_FOLDER = '/home/pc/experiment_data/60_Agent'
DATA_RANGE = [427, 800]
DATA_NUM = 1000
run_in_pipeline()
def run_in_pipeline():
data = []
ecbs_data = []
for root, dirs, files in os.walk(os.path.join(DATA_FOLDER, 'input')):
for f in files:
print('loading...', len(data), end='\r')
if len(data) >= DATA_NUM:
break
IDMAP = f.split('IDMap')[1].split('_')[0]
IDCase = f.split('IDCase')[1].split('.yaml')[0]
IDMAP = int(IDMAP)
IDCase = int(IDCase)
if IDMAP >= DATA_RANGE[0] and IDMAP <= DATA_RANGE[1]:
with open(os.path.join(root, f),'r',encoding='utf-8') as fs:
cont = fs.read()
x = yaml.load(cont)
output_name = f.replace('input', 'output').split('.yaml')[0] + '_ECBS.yaml'
output_path = os.path.join(DATA_FOLDER, 'output_ECBS', output_name)
if os.path.exists(output_path):
with open(output_path ,'r',encoding='utf-8') as fs:
cont = fs.read()
y = yaml.load(cont)
data.append(x)
ecbs_data.append(y)
print('finished loading:', len(data))
print(len(ecbs_data))
flowtime_increase_list = []
makespan_list = []
time_cost_list = []
all_success_count = []
individual_success_count = []
num_reachGoal_list = []
dim_num = None
for input_data, output_data in tqdm(zip(data, ecbs_data), total=len(data)):
success_count = []
start_time = time.time()
expert_makespan = output_data['statistics']['makespan']
expert_cost = output_data['statistics']['cost']
# print('===')
# print(input_data)
# print(output_data)
env = np.zeros(input_data['map']['dimensions'])
for obs in input_data['map']['obstacles']:
env[obs[0], obs[1]] = 1
if not dim_num:
dim_num = [input_data['map']['dimensions'][0], len(input_data['agents'])]
all_start = []
all_end = []
img = env.copy()
for agent_data in input_data['agents']:
start = agent_data['start']
end = agent_data['goal']
all_start.append(start)
all_end.append(end)
img[start[0], start[1]] = 3
all_paths = hca(img, all_start, all_end, steps=expert_makespan*3+1)
cost = 0
makespan = 0
collision_dict = {}
for agent_path, goal_pos in zip(all_paths, all_end):
final_x = agent_path[-1][0]
final_y = agent_path[-1][1]
if goal_pos[0] != final_x or goal_pos[1] != final_y:
# Did not reach end
cost += expert_makespan*3
success_count.append(0)
else:
# reach goal
cost += len(agent_path)
success_count.append(1)
makespan_list.append(len(agent_path))
makespan = max(makespan, len(agent_path))
# for agent_path_t in agent_path:
# t = agent_path_t[2]
# x = agent_path_t[0]
# y = agent_path_t[1]
# pos = (x,y)
# if pos in collision_dict.setdefault(t, {}).keys():
# print('found_collision!', t, collision_dict[t], agent_path)
# else:
# collision_dict[t][pos] = True
end_time = time.time()
time_elapsed = end_time - start_time
time_cost_list.append(time_elapsed)
flowtime_increase = cost/expert_cost-1
flowtime_increase_list.append(flowtime_increase)
individual_success_count += success_count
all_success_count.append((0 not in success_count))
num_reachGoal_list.append(np.count_nonzero(np.array(success_count)))
print('{}x{}({})'.format(dim_num[0], dim_num[0], dim_num[1]))
flowtime_increase_array = np.array(flowtime_increase_list)
makespan_array = np.array(makespan_list)
time_cost_array = np.array(time_cost_list)
all_success_array = np.array(all_success_count)
individual_success_array = np.array(individual_success_count)
print('FT_increase;{};{}'.format(np.mean(flowtime_increase_array), np.std(flowtime_increase_array)))
print('time_cost;{};{}'.format(np.mean(time_cost_array), np.std(time_cost_array)))
print('all_success;{};{}'.format(np.mean(all_success_array), np.std(all_success_array)))
print('individual_success_rate;{};{}'.format(np.mean(individual_success_array), np.std(individual_success_array)))
log_time = LOG_TIME
mat_data = {
'rate_ReachGoal':[[np.mean(all_success_array)]],
'num_agents_trained':[[dim_num[1]]],
'num_agents_testing':[[dim_num[1]]],
'map_size_testing':[[dim_num[0], dim_num[0]]],
'map_size_trained': [[dim_num[0], dim_num[0]]],
'map_density_trained': [[0.1]],
'map_density_testing': [[0.1]],
'K':[[0]],
'trained_model_epoch':[[0]],
'log_time':[[log_time]],
'std_deltaMP': [[np.std(makespan_array)]],
'mean_deltaMP':[[np.mean(makespan_array)]],
'list_deltaMP':[[makespan_list]],
'mean_deltaFT':[[np.mean(flowtime_increase_array)]],
'std_deltaFT':[[np.std(flowtime_increase_array)]],
'list_deltaFT': [flowtime_increase_list],
'list_reachGoal':all_success_count,
'list_computationTime':[time_cost_list],
'list_numAgentReachGoal':[num_reachGoal_list],
'action_policy': 'exp_multinorm',
'hidden_state': [[0]],
}
pprint(mat_data)
file_name = 'HCA_{}x{}({})_{}_exp_multinorm.mat'.format(dim_num[0], dim_num[0], dim_num[1], log_time)
save_mat(file_name, mat_data)
print('{}x{}({})'.format(dim_num[0], dim_num[0], dim_num[1]),';','{};{};{};{};{};{};{};{}'.format(
np.mean(flowtime_increase_array), np.std(flowtime_increase_array),
np.mean(time_cost_array), np.std(time_cost_array),
np.mean(all_success_array), np.std(all_success_array),
np.mean(individual_success_array), np.std(individual_success_array),
))
print('========done========')
###Output
_____no_output_____ |
extra-content/pipeline.ipynb | ###Markdown
Restart the kernel after having executed the above cell.
###Code
import sagemaker
session = sagemaker.Session()
###### CLUSTER CONFIGURATION
cluster_id = input("The name of your Redshift cluster:")
cluster_role_name = input("The name of the Role you've associated to your Redshift Cluster (not the ARN, default: myRedshiftRole):") or "myRedshiftRole"
cluster_role_arn = f'arn:aws:iam::{session.account_id()}:role/{cluster_role_name}'
database = input("The database of your Redshift cluster (default: dev)") or 'dev'
db_user = input("The user of your Redshift cluster (default: awsuser)") or 'awsuser'
###### OUTPUT S3 PATH
bucket = input("Your S3 bucket (leave empty for default):") or session.default_bucket()
key_prefix = input("The path where to save the output of the Redshift query in S3 (default: redshift-demo/redshift2processing/data/)") or "redshift-demo/redshift2processing/data/"
output_s3_uri = f's3://{bucket}/{key_prefix}'
###### QUERY STRING
query_string = "select * from users" # this will work on the default Free Tier Redshift cluster. Change if needed.
# Output the info
print(f'\n\nCluster ID: {cluster_id}\nRole ARN: {cluster_role_arn}\nOutput S3 URI: {output_s3_uri}')
from sagemaker.workflow.lambda_step import LambdaStep, Lambda, LambdaOutput, LambdaOutputTypeEnum
from sagemaker.workflow.parameters import ParameterString, ParameterInteger
from sagemaker.workflow.steps import ProcessingStep, ProcessingInput, ProcessingOutput
from sagemaker.sklearn import SKLearnProcessor
from sagemaker import get_execution_role
# Pipelines parameters
sql_query = ParameterString(name='SQL_QUERY', default_value=query_string)
s3_path = ParameterString(name='S3_PATH', default_value=output_s3_uri)
role = ParameterString(name='REDSHIFT_ROLE', default_value=cluster_role_arn)
partition_by_column = ParameterString(name='PARTITION_BY_COLUMN', default_value='state')
processing_instance_type = ParameterString(name='PROCESS_INSTANCE_TYPE', default_value='ml.m5.xlarge')
processing_instance_count = ParameterInteger(name='PROCESS_INSTANCE_COUNT', default_value=3)
##################
### Pipeline steps
##################
# Lambda Step - Unload from Redshift with partitions
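# (lambda/handler.py is not shown in this notebook; it is expected to issue the Redshift
#  UNLOAD with a PARTITION BY clause using the SQL_QUERY / S3_PATH / REDSHIFT_ROLE /
#  PARTITION_BY_COLUMN inputs passed to the step below.)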
l = Lambda(
function_name='RedshiftPartitionUnloader',
script='lambda/handler.py',
handler='handler.lambda_handler',
timeout=60*5,
memory_size=256,
execution_role_arn='arn:aws:iam::859755744029:role/LambdasCanDoEverything'
)
lambda_step = LambdaStep(
name='RedshiftPartitionUnloaderLAMBDA',
lambda_func=l,
inputs={
'SQL_QUERY': sql_query,
'S3_PATH': s3_path,
'REDSHIFT_ROLE': role,
'PARTITION_BY_COLUMN': partition_by_column
},
outputs=[LambdaOutput('status', LambdaOutputTypeEnum.String), LambdaOutput('s3_path', LambdaOutputTypeEnum.String)]
)
# Processing Step - Read from S3 partitioned data and transform
p = SKLearnProcessor(
framework_version='0.23-1',
role=get_execution_role(),
instance_type=processing_instance_type,
instance_count=processing_instance_count
)
processing_step = ProcessingStep(
name='RedshiftPartitionUnloaderPROCESSING',
processor=p,
inputs=[ProcessingInput(
source=s3_path,
destination='/opt/ml/processing/input/data/',
s3_data_distribution_type='ShardedByS3Key'
)],
outputs=[
ProcessingOutput(output_name="train", source="/opt/ml/processing/train"),
ProcessingOutput(output_name="test", source="/opt/ml/processing/test"),
],
code='processing.py', depends_on=[lambda_step]
)
from sagemaker.workflow.pipeline import Pipeline
pipeline = Pipeline(
    name='RedshiftPartitionUnloaderPIPELINE', parameters=[sql_query, s3_path, role, partition_by_column, processing_instance_type, processing_instance_count], steps=[lambda_step, processing_step]
)
pipeline.upsert(role_arn=get_execution_role())
execution = pipeline.start()
execution.wait()
###Output
_____no_output_____ |
additional_resources/additional_datasets/car_loan/LGB_Baseline.ipynb | ###Markdown
Data
###Code
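# NOTE: the import cell of this notebook is not part of the excerpt; the imports below
# list what the following cells appear to assume (an addition, not original source code).
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import lightgbm as lgbm
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score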
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')
sample_submit = pd.read_csv('input/sample_submit.csv')
train.head()
###Output
_____no_output_____
###Markdown
Train
###Code
X = train.drop(['customer_id','mobileno_flag', 'idcard_flag', 'disbursed_date','loan_default'],axis=1)
y = train['loan_default']
X_test = test.drop(['customer_id','mobileno_flag', 'idcard_flag', 'disbursed_date'],axis=1)
def prob_to_label(x,threshold):
labels = x.copy()
labels[labels<threshold]=0
labels[labels>=threshold]=1
return labels
# (440000 + 165000*(1 - np.exp((-0.3) * x / 6)))
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def feval_RMSPE(preds, lgbm_train):
labels = lgbm_train.get_label()
return 'RMSPE', round(rmspe(y_true = labels, y_pred = preds),5), False
def f1_score_custom(y_true, y_pred):
y_pred = y_pred.round()
return 'f1', f1_score(y_true, y_pred,average='macro'), True
def floss(prediction, target, beta=1, log_like=False):
EPS = 1e-10
# print(prediction)
N = prediction.size
TP = (prediction * target).sum()
# TP = np.logical_and(prediction,target).sum()
H = beta * target.sum() + prediction.sum()
fmeasure = (1 + beta) * TP / (H + EPS)
if log_like:
floss_ = -torch.log(fmeasure)
else:
floss_ = (1 - fmeasure)
return floss_
def floss_macro(prediction, target, beta=1, log_like=False):
EPS = 1e-10
prediction_inv = np.logical_not(prediction)
target_inv = np.logical_not(target)
TP_0 = (prediction_inv * target_inv).sum()
H_0 = beta * target_inv.sum() + prediction_inv.sum()
fmeasure_0 = (1 + beta) * TP_0 / (H_0 + EPS)
TP_1 = (prediction * target).sum()
H_1 = beta * target.sum() + prediction.sum()
fmeasure_1 = (1 + beta) * TP_1 / (H_1 + EPS)
fmeasure = (fmeasure_0+fmeasure_1)/2
if log_like:
floss_ = -torch.log(fmeasure)
else:
floss_ = - fmeasure
return floss_
def floss_macro_2(prediction, target):
return -f1_score(target,prediction,average='macro')
def feval_floss(preds, lgbm_train,threshold):
labels = lgbm_train.get_label()
preds = prob_to_label(preds,threshold=threshold)
return 'floss', round(floss(target = labels, prediction = preds),5), False
def feval_floss_macro(preds, lgbm_train):
labels = lgbm_train.get_label()
# print('labels_sum: ',labels.sum())
# print(preds)
preds = prob_to_label(preds,threshold=threshold)
# print('pred_sum: ',preds.sum())
# print(round(floss_macro(target = labels, prediction = preds),5))
return 'floss_macro', round(floss_macro(target = labels, prediction = preds),5), False
def feval_floss_macro_xgb(preds, train_matrix):
labels = train_matrix.get_label()
# print('labels_sum: ',labels.sum())
print(preds)
preds = prob_to_label(preds,threshold=threshold)
# print('pred_sum: ',preds.sum())
# print(round(floss_macro(target = labels, prediction = preds),5))
return 'floss_macro', round(floss_macro(target = labels, prediction = preds),5)
# def evalmcc(preds, dtrain):
# labels = dtrain.get_label()
# return 'MCC', matthews_corrcoef(labels, preds > THRESHOLD)
def feval_floss_macro_2(preds, lgbm_train):
labels = lgbm_train.get_label()
preds = prob_to_label(preds,threshold=threshold)
return 'floss_macro_sk', round(floss_macro_2(target = labels, prediction = preds),5), False
base_params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric':'None', # (lambda y_true, y_pred: f1_score_custom(y_true, y_pred)),
'min_child_weight': 5,
'num_leaves': 2 ** 7,
'early_stopping_rounds':200,
'lambda_l2': 10,
'feature_fraction': 0.9,
'bagging_fraction': 0.9,
'bagging_freq': 4,
'learning_rate': 0.01,
'seed': 2021,
'n_jobs':-1,
'verbose': -1,
}
lgbm_params = {
# Key Parameters
'objective': 'binary',
'metric': 'auc',
'boosting_type': 'gbdt', # 'dart'
'learning_rate': 0.01,
# 'num_iterations':100 # default=100
'early_stopping_rounds':200,
    # 'min_child_weight': 5, # minimum number of samples in a leaf
# 'max_depth': default=-1
# 'num_leaves': 2 ** 7, # default = 31(2**5-1)
'num_threads': -1, # 'n_jobs'
# Learning Control Parameters
# 'lambda_l1':
'lambda_l2': 10,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 4,
'verbose': -1,
'seed': 2021,
}
oof = pd.DataFrame() # out-of-fold result
cv_scores = []
cv_scores_2 = []
cv_scores_xgb = []
cv_scores_xgb_2 = []
cv_scores_cat = []
cv_scores_cat_2 = []
f1_list = []
# cv_mean =[]
# cv_std = []
models = [] # models
scores = 0.0 # validation score
threshold = 0.25
# thresholds = [0.238,0.239,0.240,0.241,0.242,0.243,0.244,0.245,
# 0.246,0.247,0.248, 0.249,0.250,0.251,0.252,0.253]
def cv_model(clf, X, y, clf_name):
folds = 5
seed = 2021
kf = KFold(n_splits=folds, random_state=seed, shuffle=True)
for fold, (trn_idx, val_idx) in enumerate(kf.split(X, y)):
print("Fold :", fold+1)
# create dataset
X_train, y_train = X.loc[trn_idx], y[trn_idx]
X_valid, y_valid = X.loc[val_idx], y[val_idx]
if clf_name == 'lgb':
lgbm_train = lgbm.Dataset(X_train,y_train)
lgbm_valid = lgbm.Dataset(X_valid,y_valid,reference = lgbm_train)
# model
model = lgbm.train(params=base_params,
train_set=lgbm_train,
valid_sets=[lgbm_train, lgbm_valid],
num_boost_round=5000,
feval = feval_floss_macro,
verbose_eval=200,
# categorical_feature = ['Driving_flag']
)
# validation
y_pred = model.predict(X_valid, num_iteration=model.best_iteration)
y_pred = prob_to_label(y_pred,threshold)
# FLOSS = round(floss(target= y_valid, prediction = y_pred),3)
# print(f'Performance of the prediction: , floss: {FLOSS}')
# cv_scores.append(roc_auc_score(y_valid, y_pred))
cv_scores.append(f1_score(y_valid, y_pred,average='macro'))
cv_scores_2.append(f1_score(y_valid, y_pred,average='micro'))
models.append(model)
print("*" * 100)
if clf_name == "xgb":
train_matrix = clf.DMatrix(X_train, y_train)
valid_matrix = clf.DMatrix(X_valid, y_valid)
watchlist = [(train_matrix, 'train'),(valid_matrix, 'eval')]
model = clf.train(xgb_params,
train_matrix,
num_boost_round=50000,
evals=watchlist,
# feval=feval_floss_macro_xgb,
verbose_eval=200,
early_stopping_rounds=200)
y_pred = model.predict(valid_matrix, ntree_limit=model.best_ntree_limit)
y_pred = prob_to_label(y_pred,threshold)
cv_scores.append(f1_score(y_valid, y_pred,average='macro'))
cv_scores_2.append(f1_score(y_valid, y_pred,average='micro'))
# pred_labels = prob_to_label(y_pred,i)
# f1 = f1_score(y_valid,pred_labels,average='macro')
# f1_list.append(f1)
models.append(model)
print("*" * 100)
# test_pred = model.predict(test_x , ntree_limit=model.best_ntree_limit)
if clf_name == "cat":
model = clf(iterations=20000, **cat_params)
model.fit(X_train, y_train,
eval_set=(X_valid, y_valid),
# cat_features=[],
use_best_model=True,
# eval_metric = feval_floss_macro,
verbose=500)
y_pred = model.predict(X_valid)
y_pred = prob_to_label(y_pred,threshold)
cv_scores_cat.append(f1_score(y_valid, y_pred,average='macro'))
cv_scores_cat_2.append(f1_score(y_valid, y_pred,average='micro'))
models.append(model)
print("*" * 100)
# test_pred = model.predict(test_x)
print('cv_score: ',cv_scores)
print('avg_cv_score: ', np.average(np.array(cv_scores)))
def lgb_model(x_train, y_train):
lgb_train = cv_model(lgbm, x_train, y_train, "lgb")
return lgb_train
%%time
lgb_train = lgb_model(X, y)
model = models[0]
importances = pd.DataFrame({'Feature': model.feature_name(),
'Importance': model.feature_importance(importance_type='gain')})
importances.sort_values(by = 'Importance', inplace=True)
importances2 = importances.nlargest(15,'Importance', keep='first').sort_values(by='Importance', ascending=True)
importances2[['Importance', 'Feature']].plot(kind = 'barh', x = 'Feature', figsize = (12,6), color = 'blue', fontsize=11);
plt.ylabel('Feature', fontsize=12)
###Output
_____no_output_____ |
Analysis/study_Shower.ipynb | ###Markdown
First, we'll import the Parsl library and the various components we'll need:
###Code
import parsl
from parsl.config import Config
from parsl.executors.threads import ThreadPoolExecutor
from parsl.app.app import bash_app,python_app
from parsl import File
###Output
_____no_output_____
###Markdown
Before we can use Parsl's functions, we'll need to define and then load its configuration as a Parsl `Config` object:
###Code
config = Config(
executors=[ThreadPoolExecutor()],
lazy_errors=True
)
parsl.load(config)
###Output
_____no_output_____
###Markdown
Now we define the Python functions we'll use for the workflow. By decorating each function as an App, Parsl will be able to parallelize them during execution. We define these as `bash` Apps because we'll use the functions to invoke Perl scripts in the same way we would from the Bash shell's command line:
###Code
## Define Apps ##
@bash_app
def WireDelay(threshIn='', outputs=[], geoDir='', daqId='', fw='', stdout='stdout.txt', stderr='stderr.txt'):
return 'perl ./perl/WireDelay.pl %s %s %s %s %s' %(threshIn,outputs[0],geoDir,daqId,fw)
@bash_app
def Combine(inputs=[], outputs=[], stdout='stdout.txt', stderr='stderr.txt'):
return 'perl ./perl/Combine.pl ' + ' '.join(inputs) + ' ' + str(outputs[0])
@bash_app
def Sort(inputs=[], outputs=[], key1='1', key2='1', stdout='stdout.txt', stderr='stderr.txt'):
return 'perl ./perl/Sort.pl %s %s %s %s' %(inputs[0], outputs[0], key1, key2)
@bash_app
def EventSearch(inputs=[], outputs=[], gate='', detCoinc='2', chanCoinc='2', eventCoinc='2', stdout='stdout.txt', stderr='stderr.txt'):
return 'perl ./perl/EventSearch.pl %s %s %s %s %s %s' %(inputs[0],outputs[0],gate,detCoinc,chanCoinc,eventCoinc)
###Output
_____no_output_____
###Markdown
The last step before the workflow itself is to define the parameters that the Apps will require as inputs. When used in the Cosmic Ray e-Lab, these are selected by the user through the interface. For this analysis, the necessary parameters are:
* **thresholdAll** -- the names and locations of the threshold files that the analysis uses as input data
* **wireDelayData** -- what we'd like the analysis to name the Wire Delay files that will be created during execution
* **geoDir** -- the location of the directory that contains the geography (`.geo`) files of the relevant detectors
* **detectors** -- the DAQ IDs of all detectors used in the analysis
* **firmwares** -- the versions of the firmware used on each detector's DAQ board. This can affect how the data from that detector is interpreted!
* **combineOut** -- what we'd like the analysis to name the Combined Data file that will be created during execution
* **sort_sortKey1**, **sort_sortKey2** -- which columns the Sort() function should sort in ascending order. **sort_sortKey1** is the primary sort column, while **sort_sortKey2** is the secondary sort column.
* **sortOut** -- what we'd like the analysis to name the Sorted Data file that will be created during execution
* **gate** -- the size of the gate in nanoseconds. The analysis will search for events that are coincident within this time interval
* **detectorCoincidence** -- how many different detectors should record hits within the gate interval in order for it to qualify as a candidate event
* **channelCoincidence** -- how many different channels on each detector should record hits within the gate interval in order for it to qualify as a candidate event
* **eventCoincidence** -- how many hits a channel should record within the gate interval in order for it to qualify as a candidate event
* **eventCandidates** -- what we'd like the analysis to name the Event Candidates file that will be created as the end result of its execution

Since these parameters will be used to construct command-line invocations of Perl scripts, we define them all as strings (even the numbers! Python itself won't be doing any math with them).
###Code
## Analysis Parameters ##
# Define what are typically the command-line arguments
# For WireDelay:
thresholdAll = ('files/6119.2016.0104.1.thresh', 'files/6203.2016.0104.1.thresh')
wireDelayData = ('6119.2016.0104.1.wd', '6203.2016.0104.1.wd')
geoDir = './geo'
detectors = ('6119', '6203')
firmwares = ('1.12', '1.12')
# For Combine:
combineOut = 'combineOut'
# For Sort:
sort_sortKey1 = '2'
sort_sortKey2 = '3'
sortOut = 'sortOut'
# For EventSearch:
gate = '1000'
detectorCoincidence = '1'
channelCoincidence = '2'
eventCoincidence = '2'
eventCandidates = 'eventCandidates'
###Output
_____no_output_____
###Markdown
Now we're ready to call on our Apps to do their data crunching. Note carefully the use of `futures` objects and the `inputs[]` and `outputs[]` parameters, which are provided by Parsl. These define the workflow by telling Parsl which things **must** happen before which other things so that the DataFlowKernel doesn't try to execute Apps in the wrong order - trying to run a function before its input data is ready, for example.
###Code
## Workflow ##
# 1) WireDelay() takes input Threshold (.thresh) files and converts
# each to a Wire Delay (.wd) file:
WireDelay_futures = []
for i in range(len(thresholdAll)):
WireDelay_futures.append(WireDelay(threshIn=thresholdAll[i], outputs=[wireDelayData[i]], geoDir=geoDir, daqId=detectors[i], fw=firmwares[i]))
# WireDelay_futures is a list of futures.
# Each future has an outputs list with one output.
WireDelay_outputs = [i.outputs[0] for i in WireDelay_futures]
# 2) Combine() takes the WireDelay files output by WireDelay() and combines
# them into a single file with name given by --combineOut
Combine_future = Combine(inputs=WireDelay_outputs, outputs=[combineOut])
# 3) Sort() sorts the --combineOut file, producing a new file with name given
# by --sortOut
Sort_future = Sort(inputs=Combine_future.outputs, outputs=[sortOut], key1=sort_sortKey1, key2=sort_sortKey2)
# 4) EventSearch() processes the --sortOut file and identifies event
# candidates in a output file with name given by --eventCandidates
# NB: This output file is interpreted by the e-Lab webapp, which expects it
# to be named "eventCandidates"
EventSearch_future = EventSearch(inputs=Sort_future.outputs, outputs=[eventCandidates], gate=gate, detCoinc=detectorCoincidence, chanCoinc=channelCoincidence, eventCoinc=eventCoincidence)
# Wait for the final result before exiting.
x = EventSearch_future.result()
print("Call to EventSearch completed with exit code:", x)
###Output
Call to EventSearch completed with exit code: 0
###Markdown
And we're done! The `eventCandidates` file now exists in the working directory and lists every event from the input threshold data that, according to our criteria, might have been part of a shower of cosmic rays.This will typically be a large file -- too large to read here -- but we can check what it looks like using the Bash shell's `head` utility:
###Code
!head -5 ./eventCandidates
###Output
#[event number] [num events] [num hit detectors] [ID1.chan] [JD1] [Rising edge 1], [ID2.chan] [JD2] [Rising edge 2], ...
#gatewidth=1.15740740740741e-11 (1000 nanoseconds), detector coincidence=1, channel coincidence=2, event coincidence=2
1 3 1 6203.1 2457392 0.2452230125667072 6203.4 2457392 0.2452230125667216 6203.2 2457392 0.2452230125676070
2 3 1 6203.1 2457392 0.2452298337203386 6203.4 2457392 0.2452298337203386 6203.2 2457392 0.2452298337212240
3 3 1 6203.4 2457392 0.2452305862390307 6203.1 2457392 0.2452305862391320 6203.4 2457392 0.2452305862392767
|
7章 レコメンド.ipynb | ###Markdown
- Regression with ordinary least squares (OLS) using NumPy and sklearn.
- When there are more features than samples OLS breaks down, so we try three techniques: lasso, ridge, and elastic nets.
- Learn the basics of recommendation, an important field with a wide range of applications.

Summary
- Review of ordinary least squares
  - Uses sklearn's Boston house-price data.
  - Adding a bias term beats using a single feature alone, and using many features generally gives better accuracy still.
  - Even for regression, use cross-validation to check real generalization ability.
  - When the number of features P > the number of samples N, the training error can be driven to nearly zero, but generalization drops sharply.
  - The 10-K reports data has P > N, and we use it to demonstrate this P > N problem.
- Countermeasures against overfitting
  - Lasso: penalizes with an L1 term (the sum of the absolute values of the regression coefficients), which leads to sparse models.
  - Ridge: penalizes with an L2 term (the sum of the squared regression coefficients).
  - Elastic Net: sets a ratio between L1 and L2 and uses their sum as the penalty; this helps with the P > N problem and lets several correlated features be kept.
- Parameter search needs two-stage cross-validation
  - Stage 1: split into test data and training data.
  - Stage 2: split the stage-1 training data again, train each split with a different parameter, and adopt the parameter that gave the smallest error.
- Solving a movie recommendation problem with penalized regression
  - Uses the academic dataset from the Netflix Challenge.
  - Solved with Lasso, using ratings made by users other than the target user as features.
  - Lasso gave roughly an 8% accuracy improvement over using the overall mean as the predicted score.

Predicting house prices with regression
Consider the problem of predicting the prices of properties in Boston. We perform supervised learning using a dataset bundled with sklearn.
###Code
from sklearn.datasets import load_boston
boston = load_boston()
print(boston.DESCR)
print(boston.feature_names)
###Output
.. _boston_dataset:
Boston house prices dataset
---------------------------
**Data Set Characteristics:**
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
.. topic:: References
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
['CRIM' 'ZN' 'INDUS' 'CHAS' 'NOX' 'RM' 'AGE' 'DIS' 'RAD' 'TAX' 'PTRATIO'
'B' 'LSTAT']
###Markdown
First, let's plot the average number of rooms per dwelling against the house price.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
x = boston.data[:, 5]
plt.scatter(x, boston.target)
x = np.array([ [v] for v in x])
y = boston.target
slope, _, _, _ = np.linalg.lstsq(x, y, rcond=None) # returns [slope, bias (if present)], residuals, rank, singular values; rcond=None silences the FutureWarning
ran = np.arange(0, 10, 0.1)
plt.scatter(ran, slope * ran)
###Output
_____no_output_____
###Markdown
The fit is not very good. With only a single feature we are effectively optimizing an equation of the form number of rooms x slope = house price, which is too crude. Next, let's add a bias term.
###Code
x = boston.data[:, 5]
plt.scatter(x, boston.target)
x = np.array([ [v, 1] for v in x]) # append a constant 1 to each sample to add the bias term: y = a*x + b = a*x + b*1; lstsq then fits both a and b
y = boston.target
(slope, bias), total_error, _, _ = np.linalg.lstsq(x, y, rcond=None)
ran = np.arange(0, 10, 0.05)
plt.scatter(ran, slope * ran + bias)
plt.xlim(3, 10)
plt.ylim(0, 55)
rmse = np.sqrt(total_error[0]/len(x))
print(rmse)
###Output
6.6030713892225625
###Markdown
It still does not handle outliers well, but the result has improved. To evaluate quantitatively how well the model fits the data, we compute the root mean squared error (RMSE). Adding the bias term improves the RMSE from about 7.6 (no bias) to 6.6 (with bias). The RMSE behaves like a standard deviation, so if we assume the errors are normally distributed, most of the data fall within roughly two standard errors of the mean; the range from -2 RMSE to +2 RMSE corresponds to a confidence interval. From this result we can therefore say that the error of our predicted house prices should stay within about $\pm 6.6 \times 2 \times 1000 = \pm 13000$ dollars.

Multidimensional regression
Let's now make predictions using all of the features.
###Code
x = np.array([ np.concatenate((v, [1])) for v in boston.data])
y = boston.target
slope, total_error, _, _ = np.linalg.lstsq(x, y, rcond=None)
rmse = np.sqrt(total_error[0]/len(x))
print(rmse)
###Output
4.679191295697282
###Markdown
Probably because we used much more of the data, the error improved. Since this is a 14-dimensional regression (13 features plus the bias term), we cannot plot the result.

Cross-validation for regression
For regression we expect the gap between training error and test error to be fairly small, but we still run cross-validation. From here on we use sklearn's LinearRegression, which will make it easy to swap in more advanced methods later.
###Code
from sklearn.linear_model import LinearRegression
def rmse(model, data, target):
    p = model.predict(data).ravel() # predict on the whole dataset directly (no per-sample map needed)
    e = p - target # per-instance errors
    total_error = np.sum(e * e)
    rmse = np.sqrt(total_error / len(p))
    return rmse
lr = LinearRegression(fit_intercept=True) # fit_intercept=True adds the bias term
lr.fit(x, y)
print(f"RMSE of training: {rmse(lr, x, y)}")
###Output
RMSE of training: 6.603071389222561
###Markdown
This is essentially the same RMSE we obtained with NumPy. Next we run 10-fold cross-validation with KFold.
###Code
from sklearn.model_selection import KFold
def kfold_regression(model, x, y, n_splits=10):
kf = KFold(n_splits=n_splits)
rmses = []
for train, test in kf.split(x):
model.fit(x[train], y[train])
rmses.append(rmse(model, x[test], y[test]))
return np.asarray(rmses).mean()
print(f"RMSE on 10-fold CV: {kfold_regression(lr, x, y)}")
###Output
RMSE on 10-fold CV: 5.941570588762131
###Markdown
The test error is larger than the training error, but it is a more honest measure of generalization ability. Ordinary least squares is simple and fast, which makes it a good first model to try.

Penalized regression
Ordinary regression overfits, so we try penalized regression, which adds a penalty when the parameters fit the data too aggressively. Penalized regression can be viewed as one form of the bias-variance trade-off: adding a penalty increases the error (bias) but reduces the variance, which keeps the model from fitting the training data too closely, so we can expect better overall generalization.

L1 and L2 penalty terms
First, the ordinary least squares optimization is
$$ \vec{b^*} = \arg \min_{\vec{b}} (y - X \vec{b})^2 $$
L1 penalty (Lasso regression): adds the sum of the absolute values of the coefficients. Besides shrinking the error, it tries to get by with as few non-zero coefficients as possible -> a sparse model (unneeded features are dropped)!
$$ \vec{b^*} = \arg \min_{\vec{b}} (y - X \vec{b})^2 + \lambda \sum_i | b_i | $$
L2 penalty (Ridge regression): adds the sum of the squared coefficients.
$$ \vec{b^*} = \arg \min_{\vec{b}} (y - X \vec{b})^2 + \lambda \sum_i b_i^2 $$
Here $\lambda$ is the parameter that controls the strength of the penalty term. Combining Lasso and Ridge gives the method known as Elastic Net:
$$ \vec{b^*} = \arg \min_{\vec{b}} (y - X \vec{b})^2 + \lambda \sum_i \Bigl\{ \alpha |b_i| + (1-\alpha)b_i^2 \Bigr\} $$
More details (in Japanese): http://tekenuko.hatenablog.com/entry/2017/11/18/214317

Using scikit-learn's Lasso and Elastic Net
With sklearn, the earlier least-squares model can be swapped for Elastic Net regression with almost no changes. To add an L1 penalty use the Lasso class; for an L2 penalty use the Ridge class.
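As a quick sketch (not code from the original chapter; the alpha value here is arbitrary), Lasso and Ridge drop into the same `kfold_regression` helper defined above:
###Code
from sklearn.linear_model import Lasso, Ridge
# Illustrative penalty strength only; alpha should really be tuned (see the
# hyperparameter section below).
for name, model in [('Lasso (L1)', Lasso(alpha=0.5)), ('Ridge (L2)', Ridge(alpha=0.5))]:
    print(f"{name} RMSE on 10-fold CV: {kfold_regression(model, x, y)}")
###Output
_____no_output_____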
###Code
from sklearn.linear_model import ElasticNet
en = ElasticNet(fit_intercept=True, alpha=0.5)
print(f"RMSE on 10-fold CV: {kfold_regression(en, x, y)}")
###Output
RMSE on 10-fold CV: 6.596253559088761
###Markdown
Judging from the outputs above, the 10-fold CV RMSE actually went from about 5.94 with plain OLS to 6.60 with Elastic Net at alpha=0.5, so this particular penalty strength does not help on the Boston data; choosing it properly is the subject of the hyperparameter section below.

When P is larger than N
Now consider the case where the number of features P is larger than the number of samples N. For example, if every distinct word of a text corpus is a feature, the features easily outnumber the samples. With more features than samples, the model can fit the training data perfectly and drive the training error to zero, for the same reason that a system of simultaneous equations with fewer equations than variables can always be solved. But that is exactly what causes generalization to collapse.

Text-based sample data
Professor Noah Smith's group at Carnegie Mellon University studies data mining of 10-K reports, the financial filings submitted to the U.S. Securities and Exchange Commission (SEC). We want to predict stock-price movement from the 10-K reports. There are 16,087 samples, and with each distinct token as a feature there are as many as 150,360 features. The data can be downloaded with the following shell script (on a Mac, install wget with brew install wget):
```bash
#!/usr/bin/env bash
wget http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/E2006.train.bz2
bunzip2 E2006.train.bz2
```
The dataset is in the SVMLight format (used by SVM-related tools) and can be loaded with sklearn.
###Code
from sklearn.datasets import load_svmlight_file
data, target = load_svmlight_file('./data/E2006.train') # takes around 10 seconds
###Output
_____no_output_____
###Markdown
target is a simple one-dimensional vector, while data is a sparse matrix. First, let's look at the values in target.
###Code
print(f'Target shape: {target.shape}')
print(f'Min target value: {target.min()}')
print(f'Max target value: {target.max()}')
print(f'Mean target value: {target.mean()}')
print(f'StdDev target: {target.std()}')
###Output
Target shape: (16087,)
Min target value: -7.89957807346873
Max target value: -0.519409526940154
Mean target value: -3.5140531366944456
StdDev target: 0.6322783539114604
###Markdown
Let's also check the shape of data while we're at it.
###Code
print(f'Data shape: {data.shape}')
# print(f'Data example: {data[0]}')
###Output
Data shape: (16087, 150360)
###Markdown
For now, let's just feed this straight into ordinary least squares.
###Code
lr.fit(data, target)
rmse(lr, data, target)
###Output
_____no_output_____
###Markdown
The error is quite small, far smaller than the standard deviation of target, which is about 0.6. Because the number of features is much larger than the number of samples, the model is almost certainly overfitting. As a check, let's run cross-validation.
###Code
kfold_regression(lr, data, target)
###Output
_____no_output_____
###Markdown
The error comes out to roughly 0.78. But since the standard deviation of the target is only about 0.6, always predicting -3.5 would already give an RMSE of around 0.6, so this is actually a worse result. Regularization is the countermeasure for this kind of overfitting. Let's run cross-validation with elastic net.
###Code
en = ElasticNet(alpha=0.5)
en.fit(data, target)
kfold_regression(en, data, target)
###Output
_____no_output_____
###Markdown
This is better than always predicting the mean, though not by much.

How to choose hyperparameters sensibly
If the penalty parameter is set too large, in the extreme case the optimal solution is for every coefficient to become zero, so the model may underfit. If it is too small we are back to plain least squares and risk overfitting. To find a good value we prepare a set of candidate values and pick the best one with cross-validation. This requires two stages of cross-validation: one that splits the data into training and test sets, and one that selects the best parameter. Note that the data used for parameter tuning (the second stage) must come only from the first stage's training data. Each second-stage split is trained with a different parameter, and the parameter that did best on its held-out fold is the one used for the final test. Keep in mind that data used for training or parameter tuning is considered "contaminated" and can no longer be used to measure generalization ability. This kind of two-stage cross-validation is exactly what LassoCV, RidgeCV, and ElasticNetCV provide.
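To make the two stages explicit, the cell below is a minimal hand-rolled sketch of the same idea (an illustration only, not the book's code, and it is defined without being run because the search on the 10-K data would be slow; the ElasticNetCV call in the next cell does the inner search internally and far more efficiently):
###Code
import numpy as np
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import KFold

def nested_cv_rmse(x, y, alphas=(0.125, 0.25, 0.5, 1.0)):
    outer = KFold(n_splits=5)
    sq_err = 0.0
    for train, test in outer.split(x):
        # Stage 2: pick alpha using only the outer training fold
        best_alpha, best_err = None, np.inf
        for a in alphas:
            inner_err = 0.0
            for itr, ival in KFold(n_splits=3).split(x[train]):
                m = ElasticNet(alpha=a).fit(x[train][itr], y[train][itr])
                inner_err += np.sum((m.predict(x[train][ival]) - y[train][ival]) ** 2)
            if inner_err < best_err:
                best_alpha, best_err = a, inner_err
        # Stage 1: evaluate the chosen alpha on the untouched test fold
        m = ElasticNet(alpha=best_alpha).fit(x[train], y[train])
        sq_err += np.sum((m.predict(x[test]) - y[test]) ** 2)
    return np.sqrt(sq_err / x.shape[0])
###Output
_____no_output_____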
###Code
# for now, suppress the warning telling us to set cv explicitly
import warnings
warnings.filterwarnings('ignore')
from sklearn.linear_model import ElasticNetCV
met = ElasticNetCV(alphas=[.125, .25, .5, 1., 2., 4.]) # restrict the candidate alphas; the fully automatic search takes too long
kf = KFold(n_splits=10)
err = 0
for n, (train, test) in enumerate(kf.split(data)):
met.fit(data[train], target[train])
p = met.predict(data[test]).ravel()
e = p - target[test]
e = np.dot(e, e)
err += e
print(f"Cross Validation: {n}, err: {e}")
print(np.sqrt(err / len(target)))
###Output
Cross Validation: 0, err: 279.76675152278665
Cross Validation: 1, err: 244.654851577256
Cross Validation: 2, err: 249.28855645149685
Cross Validation: 3, err: 258.30418227923565
Cross Validation: 4, err: 296.1305187299544
Cross Validation: 5, err: 249.7932932467228
Cross Validation: 6, err: 247.76458906671712
Cross Validation: 7, err: 271.0096111522141
Cross Validation: 8, err: 280.27278665256557
Cross Validation: 9, err: 299.01610344895033
0.4078548439101767
###Markdown
Rating prediction and recommendation
The problem of predicting which movies users will like is famous as the Netflix Challenge. The winners of the contest won by combining advanced machine learning methods with a great deal of effort on data preprocessing. At the preprocessing stage you have to account for users who rate everything highly, or who tend to always rate negatively. The winners also used features such as how old a movie is and how many users rated it. Getting good results takes not just clever algorithms but a willingness to "get your hands dirty". The dataset actually used in the contest cannot be obtained, but similar data has been published for academic use by GroupLens, a research lab at the University of Minnesota.

We could also take a classification approach, but here we treat this as a regression problem and apply the techniques learned in this chapter. Treating it as regression makes more sense because classification struggles with the following two points:
- Larger prediction errors should count as more serious. For example, the gap between a 4 and a 5 is small, but the gap between a 1 and a 5 is huge.
- Intermediate values are meaningful. User input is an integer, but a prediction such as 4.7 is still informative.

We have two options:
- a model per movie
- a model per user (the one we use here)

For a given user we learn how that user rates movies: given a movie, the predictor outputs a predicted score. The movie's features are the scores that users other than the target user gave to it, so users whose rating behaviour resembles the target user's end up with large regression coefficients.

First we fetch the data. The shell script is:
```bash
#!/usr/bin/env bash
wget http://www.grouplens.org/system/files/ml-100k.zip --no-check-certificate
unzip ml-100k.zip
```
Next we load the data (see ch07/usermodel.py).
###Code
from scipy import sparse # package for 2-D sparse matrices
data = np.array([ [int(tok) for tok in line.split('\t')[:3]] for line in open('./data/ml-100k/u.data')])
ij = data[:, :2]
ij -= 1 # original data is in 1-based system
values = data[:, 2]
reviews = sparse.csc_matrix((values, ij.T)).astype(float) # Compressed Sparse Column matrix
print(reviews.shape)
# print(reviews[0])
###Output
(943, 1682)
###Markdown
The training data is a sparse matrix: only the movies that were actually rated carry a value from 1 to 5, everything else is 0. Doing exactly the same as before would be boring, so this time we use the LassoCV class.
###Code
from sklearn.linear_model import LassoCV
reg = LassoCV(alphas=[.125, .25, .5, 1., 2., 4.]) # restrict the alpha values tried during cross-validation
###Output
_____no_output_____
###Markdown
Now we write a function that learns a model for the user u whose index is i. To model how u rates movies, we only need to care about the movies u has rated, so we extract the indices of the movies that u rated.
###Code
def target_user_data(user_id):
    i = user_id
    u = reviews[i]
    u = u.toarray().ravel() # turn the sparse row back into a dense array and flatten it
    ps, = np.where(u > 0) # indices of the movies the target user actually rated
    us = np.delete(np.arange(reviews.shape[0]), i) # every user except user i
    x = reviews[us][:, ps].T # scores that the other users gave to those movies
    y = u[ps] # the target user's own scores for the movies they rated
    return x, y
# Good movies get high average scores, so normalize per movie (subtract the mean).
def movie_norm(xc): # xc: features for the movies (ratings by users other than i)
    xc = xc.copy().toarray() # convert to a dense array so sklearn can process it quickly (memory permitting)
    x1 = np.array([xi[xi > 0].mean() for xi in xc]) # mean rating of each movie, counting only entries that were rated
    x1 = np.nan_to_num(x1) # a movie with no ratings at all yields NaN, so replace that with 0
    # Subtract each movie's mean rating from that movie's ratings.
    for i in range(xc.shape[0]):
        xc[i] -= (xc[i] > 0) * x1[i] # subtract the mean, but leave unrated entries at 0
    return xc, x1 # normalized array and the per-movie means
def learn_for(user_id, model=reg):
    x, y = target_user_data(user_id)
    kf = KFold(n_splits=10)
    err = 0
    for n, (train, test) in enumerate(kf.split(x)):
        xc, x1 = movie_norm(x[train])
        model.fit(xc, y[train] - x1) # normalize the target ratings too before fitting
        xc, x1 = movie_norm(x[test])
        p = model.predict(xc).ravel()
        e = np.sum( ((p + x1) - y[test]) ** 2 )
        err += e
        # print(f"Cross Validation: {n}, err: {e}")
    # NOTE: `target` here is still the E2006 target vector from the previous section, so the
    # absolute RMSE scale is arbitrary; the ratio against the mean baseline below is unaffected.
    return np.sqrt(err / len(target))
rmses = []
for i in range(reviews.shape[0]):
rmse = learn_for(i)
# print(f"RMSE for user {i}: {rmse}")
rmses.append(rmse)
print(f"Average RMSE: {np.asarray(rmses).mean()}")
###Output
Average RMSE: 0.06882747921849655
###Markdown
The simplest way to predict a user's rating for a movie is to predict the mean. Our per-user models do roughly 8% better than that simple baseline (the ratio computed below is about 0.93). Here we learn a completely separate model for every user, and the result is not exactly spectacular. Next time we introduce a smarter method that beats this regression approach by pooling information from all users and all movies.
###Code
# As a baseline, simply predict the mean (i.e., zero after the per-movie normalization)
class AverageModel():
def fit(self, x, y):
pass
def predict(self, x):
return np.zeros(x.shape[0])
rmses = []
for i in range(reviews.shape[0]):
rmse = learn_for(i, model=AverageModel())
rmses.append(rmse)
print(f"Average RMSE: {np.asarray(rmses).mean()}")
0.06882747 / 0.07407938
###Output
_____no_output_____ |
jupyter_notebook/upstream_data_processing_scenarios.ipynb | ###Markdown
Post Processing for Scenarios Import Packages
###Code
import pandas as pd
import numpy as np
import os
import glob
import sqlite3
###Output
_____no_output_____
###Markdown
Set up for Data Import
###Code
# Get the directory of all csv files
d = '/Users/rwang/RMI/Climate Action Engine - Documents/OCI Phase 2/Upstream/upstream_data_pipeline_sp/Outputs_Scenario_fix/' #path to folder where files are located
os.chdir(d) #change directory to path
#Define column names for results csvs
column_names = [
'Downhole pump',
'Water reinjection ',
'Natural gas reinjection',
'Water flooding',
'Gas lifting',
'Gas flooding',
'Steam flooding',
'Oil sands mine (integrated with upgrader)',
'Oil sands mine (non-integrated with upgrader)',
'Field location (Country)',
'Field_name',
'Field age',
'Field depth',
'Oil production volume',
'Number of producing wells',
'Number of water injecting wells',
'Production tubing diameter',
'Productivity index',
'Reservoir pressure',
'Reservoir temperature',
'Offshore?',
'API gravity',
'Gas composition N2',
'Gas composition CO2',
'Gas composition C1',
'Gas composition C2',
'Gas composition C3',
'Gas composition C4+',
'Gas composition H2S',
'Gas-to-oil ratio (GOR)',
'Water-to-oil ratio (WOR)',
'Water injection ratio',
'Gas lifting injection ratio',
'Gas flooding injection ratio',
'Flood gas ',
'Liquids unloading practice',
'Fraction of CO2 breaking through to producers',
'Source of makeup CO2',
'Percentage of sequestration credit assigned to the oilfield',
'Steam-to-oil ratio (SOR)',
'Fraction of required electricity generated onsite',
'Fraction of remaining natural gas reinjected',
'Fraction of produced water reinjected',
'Fraction of steam generation via cogeneration ',
'Fraction of steam generation via solar thermal',
'Heater/treater',
'Stabilizer column',
'Upgrader type',
'Associated Gas Processing Path',
'Flaring-to-oil ratio',
'Venting-to-oil ratio (purposeful)',
'Volume fraction of diluent',
'Low carbon richness (semi-arid grasslands)',
'Moderate carbon richness (mixed)',
'High carbon richness (forested)',
'Low intensity development and low oxidation',
'Moderate intensity development and moderate oxidation',
'High intensity development and high oxidation',
'Ocean tanker',
'Barge',
'Pipeline',
'Rail',
'Truck',
'Transport distance (one way) - Ocean tanker',
'Transport distance (one way) - Barge',
'Transport distance (one way) - Pipeline',
'Transport distance (one way) - Rail',
'Transport distance (one way) - Truck',
'Ocean tanker size, if applicable',
'Small sources emissions',
'e-Total energy consumption','e-Total GHG emissions',
'e-Total GHG emissions-Combustion/land use','e-Total GHG emissions-VFF',
'd-Total energy consumption','d-Total GHG emissions',
'd-Total GHG emissions-Combustion/land use','d-Total GHG emissions-VFF',
'p-Total energy consumption','p-Total GHG emissions',
'p-Total GHG emissions-Combustion/land use','p-Total GHG emissions-VFF',
's-Total energy consumption','s-Total GHG emissions',
's-Total GHG emissions-Combustion/land use','s-Total GHG emissions-VFF',
'l-Total energy consumption','l-Total GHG emissions',
'l-Total GHG emissions-Combustion/land use','l-Total GHG emissions-VFF',
'm-Total energy consumption','m-Total GHG emissions',
'm-Total GHG emissions-Combustion/land use','m-Total GHG emissions-VFF',
'w-Total energy consumption','w-Total GHG emissions',
'w-Total GHG emissions-Combustion/land use','w-Total GHG emissions-VFF',
't-Total energy consumption','t-Total GHG emissions',
't-Total GHG emissions-Combustion/land use','t-Total GHG emissions-VFF','t-Loss factor',
'g-Total energy consumption','g-Total GHG emissions',
'g-Total GHG emissions-Combustion/land use','g-Total GHG emissions-VFF',
'Other small sources','Offsite emissions credit/debit','Lifecycle energy consumption',
'CSS-Total CO2 sequestered','Lifecycle GHG emissions','Field-by-field check']
###Output
_____no_output_____
###Markdown
Process 'Results' csvs
###Code
#Create list of all results csvs
list_results = sorted(glob.glob('*Results*.csv', recursive=True)) #list all results .csvs
len(list_results) #how many results files available
#Define a function called 'clean_df' that goes through excel fil and grabs the rows/columns where results are stored
#Then transpose the matrix, assign proper column names, and drop any rows that don't have information and return the clean dataframe
def clean_df(df,column_names):
'''clean the df and transpose to map the column names'''
df = df.iloc[: , 7:]
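    # The hard-coded row positions below select the OPGEE result rows that correspond,
    # in order, to the entries of `column_names` defined above; they assume a fixed
    # layout of the exported Results sheet.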
df = df.iloc[[8,9,10,11,12,13,14,15,16,19,20,21,22,23,24,25,26,27,28,29,30,33,35,36,37,38,39,40,41,45,46,47,48,49,
50,54,57,58,61,62,63,64,65,66,67,69,70,71,76,85,86,87,91,92,93,95,96,97,101,102,103,104,105,107,108,109,110,
111,112,114,129,130,131,132,135,136,137,138,141,142,143,144,147,148,149,150,153,154,155,156,159,160,161,162,
165,166,167,168,171,172,173,174,175,178,179,180,181,183,185,187,190,192,194]]
df_t = df.transpose()
df_t.columns = column_names
df_t = df_t.dropna(how = 'all')
return df_t
#Create an empty list in which to store cleaned results
#Loop through each results csv, clean it, add in informatoin about year, field_type, frack, lng, gwp as scraped from file name
#If this doesn't work, print the file name as a 'problematic file' in the try/except loop
list_df =[]
for file in list_results:
try:
df = pd.read_csv(d+file,header = None)
result = clean_df(df,column_names)
result['original_file']=file
result['year']=file.split('_')[3]
result['field_type']=file.split('_')[4].lower()
result['frack?']= True if file.split('_')[5].lower()=='frack' else False
result['lng?'] = True if file.split('_')[6].lower()=='lng' else False
result['gwp'] = file.split('_')[7][3:].lower()
result['Field_name'] = file.split('_')[0]
scenario = file.split('_')[-1].split('-')[0:-1]
result['Scenario'] = '-'.join(scenario)
result['Scenario_value'] = file.split('_')[-1].split('-')[-1][:-4]
list_df.append(result)
except ValueError as e:
print("problematic file: " + file)
print(e)
print(list_results.index(file))
#Combine lists into a results dataframe
results_df = pd.concat(list_df)
#List out all numerical columns to convert to type float
numerical_columns = [
'Field age',
'Field depth',
'Oil production volume',
'Number of producing wells',
'Number of water injecting wells',
'Production tubing diameter',
'Productivity index',
'Reservoir pressure',
'Reservoir temperature',
'Offshore?',
'API gravity',
'Gas composition N2',
'Gas composition CO2',
'Gas composition C1',
'Gas composition C2',
'Gas composition C3',
'Gas composition C4+',
'Gas composition H2S',
'Gas-to-oil ratio (GOR)',
'Water-to-oil ratio (WOR)',
'Water injection ratio',
'Gas lifting injection ratio',
'Gas flooding injection ratio',
'Flood gas ',
'Liquids unloading practice',
'Fraction of CO2 breaking through to producers',
'Source of makeup CO2',
'Percentage of sequestration credit assigned to the oilfield',
'Steam-to-oil ratio (SOR)',
'Fraction of required electricity generated onsite',
'Fraction of remaining natural gas reinjected',
'Fraction of produced water reinjected',
'Fraction of steam generation via cogeneration ',
'Fraction of steam generation via solar thermal',
'Heater/treater',
'Stabilizer column',
'Upgrader type',
'Associated Gas Processing Path',
'Flaring-to-oil ratio',
'Venting-to-oil ratio (purposeful)',
'Volume fraction of diluent',
'Low carbon richness (semi-arid grasslands)',
'Moderate carbon richness (mixed)',
'High carbon richness (forested)',
'Low intensity development and low oxidation',
'Moderate intensity development and moderate oxidation',
'High intensity development and high oxidation',
'Ocean tanker',
'Barge',
'Pipeline',
'Rail',
'Truck',
'Transport distance (one way) - Ocean tanker',
'Transport distance (one way) - Barge',
'Transport distance (one way) - Pipeline',
'Transport distance (one way) - Rail',
'Transport distance (one way) - Truck',
'Ocean tanker size, if applicable',
'Small sources emissions',
'e-Total energy consumption',
'e-Total GHG emissions',
'e-Total GHG emissions-Combustion/land use',
'e-Total GHG emissions-VFF',
'd-Total energy consumption',
'd-Total GHG emissions',
'd-Total GHG emissions-Combustion/land use',
'd-Total GHG emissions-VFF',
'p-Total energy consumption',
'p-Total GHG emissions',
'p-Total GHG emissions-Combustion/land use',
'p-Total GHG emissions-VFF',
's-Total energy consumption',
's-Total GHG emissions',
's-Total GHG emissions-Combustion/land use',
's-Total GHG emissions-VFF',
'l-Total energy consumption',
'l-Total GHG emissions',
'l-Total GHG emissions-Combustion/land use',
'l-Total GHG emissions-VFF',
'm-Total energy consumption',
'm-Total GHG emissions',
'm-Total GHG emissions-Combustion/land use',
'm-Total GHG emissions-VFF',
'w-Total energy consumption',
'w-Total GHG emissions',
'w-Total GHG emissions-Combustion/land use',
'w-Total GHG emissions-VFF',
't-Total energy consumption',
't-Total GHG emissions',
't-Total GHG emissions-Combustion/land use',
't-Total GHG emissions-VFF',
't-Loss factor',
'g-Total energy consumption',
'g-Total GHG emissions',
'g-Total GHG emissions-Combustion/land use',
'g-Total GHG emissions-VFF',
'Other small sources',
'Offsite emissions credit/debit',
'Lifecycle energy consumption',
'CSS-Total CO2 sequestered',
'Lifecycle GHG emissions']
#more clean up of dataframe
results_df = results_df.replace(r'^\s+$', np.nan, regex=True) #replace empty strings with NA
results_df = results_df.replace(r'\\', np.nan, regex=True) #
results_df.reset_index(inplace = True, drop=True) #reset index and drop index column
results_df[numerical_columns]= results_df[numerical_columns].astype(float) #set these numerica columns as type float
results_df['Field_name']=results_df['Field_name'].apply(lambda x: x.strip())#strip whitespace from field names
#double check there are no spaces in field names
results_df['Field_name'] = results_df['Field_name'].replace(" ", "")
###Output
_____no_output_____
###Markdown
Process 'Energy Summary' csvs
###Code
#Grab energy summary csvs
list_energysummary = sorted(glob.glob('*Energy*.csv', recursive=True)) #list all energy summary .csvs
len(list_energysummary) #how many energy summary files - should match number of results csvs
#Create empty lists to populate with energy summary data
ES_MJperd =[]
ES_mmbtuperd = []
ES_Energy_Density_crude_oil = []
ES_Energy_Density_petcoke = []
ES_Energy_Density_C2 = []
ES_Energy_Density_C3 = []
ES_Energy_Density_C4 = []
ES_Crude_output = []
ES_Gas_output = []
ES_NGL_output = []
ES_Gas_output_MJ = []
ES_Petcoke_fuel =[]
Field_name = []
original_file = []
gwp = []
Scenario = []
Scenario_value = []
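# The iloc positions below (e.g. row 127, column 5) point at fixed cells of the exported
# OPGEE 'Energy Summary' sheet, so they will break if the sheet layout changes.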
for file in list_energysummary:
df = pd.read_csv(d+file,header=None)
ES_MJperd.append(float(df.iloc[127,5]))
ES_mmbtuperd.append(float(df.iloc[127,4]))
ES_Energy_Density_crude_oil.append(float(df.iloc[132,12]))
ES_Energy_Density_petcoke.append(float(df.iloc[134,12]))
ES_Energy_Density_C2.append(float(df.iloc[140,12]))
ES_Energy_Density_C3.append(float(df.iloc[141,12]))
ES_Energy_Density_C4.append(float(df.iloc[142,12]))
ES_Crude_output.append(float(df.iloc[88,4]))
ES_Gas_output.append(float(df.iloc[84,4]))
if df.iloc[120,3] == 'Gas':
ES_Gas_output_MJ.append(float(df.iloc[120,5]))
else:
ES_Gas_output_MJ.append(float(df.iloc[123,5]))
ES_NGL_output.append(float(df.iloc[86,4]))
ES_Petcoke_fuel.append(float(df.iloc[76,4]))
Field_name.append(file.split('_')[0])
#original_file.append(file)
gwp.append(file.split('_')[7][3:].lower())
scenario = file.split('_')[-1].split('-')[0:-1]
Scenario.append('-'.join(scenario))
Scenario_value.append(file.split('_')[-1].split('-')[-1][:-4])
#combine lists of values into dataframe
energysummary_df = pd.DataFrame({'Field_name':Field_name,'gwp':gwp, #'original_file':original_file
'Scenario': Scenario, 'Scenario_value':Scenario_value,
'ES_MJperd':ES_MJperd,'ES_mmbtuperd':ES_mmbtuperd,
'ES_Energy_Density_crude(mmbtu/t)':ES_Energy_Density_crude_oil,'ES_Energy_Density_petcoke(mmbtu/t)':ES_Energy_Density_petcoke,
'ES_Energy_Density_C2(mmbtu/t)':ES_Energy_Density_C2,'ES_Energy_Density_C3(mmbtu/t)':ES_Energy_Density_C3,
'ES_Energy_Density_C4(mmbtu/t)':ES_Energy_Density_C4, 'ES_Crude_output(mmbut/d)':ES_Crude_output,
'ES_Gas_output(mmbtu/d)':ES_Gas_output, 'ES_NGL_output(mmbtu/d)':ES_NGL_output,
'ES_Gas_output(MJ/d)':ES_Gas_output_MJ,'ES_Petcoke_fuel(mmbtu/d)':ES_Petcoke_fuel})
#double check there are no spaces in field names
energysummary_df['Field_name'] = energysummary_df['Field_name'].replace(" ", "")
###Output
_____no_output_____
###Markdown
Process 'VFF' csvs
###Code
#we want to grab both co2 and ch4 emissions from vff csvs
#Grab vff csvs
list_vff = sorted(glob.glob('*VFF*.csv', recursive=True)) #list all VFF .csvs
len(list_vff)
#Create empty lists in which to fill in vff data
venting_ch4 =[]
venting_ch4_miq = []
venting_ch4_uponly = []
fugitive_ch4 =[]
flaring_ch4 = []
fugitive_ch4_miq = []
fugitive_ch4_uponly = []
venting_production_ch4 = []
venting_gatherboostprocesss_ch4 = []
venting_transmissionstorage_ch4 = []
venting_2ndproduction_ch4 = []
venting_enduse_ch4 = []
fugitive_production_ch4 = []
fugitive_gatherboostprocesss_ch4 = []
fugitive_transmissionstorage_ch4 =[]
fugitive_2ndproduction_ch4 = []
fugitive_enduse_ch4 = []
venting_co2 = []
fugitive_co2 = []
Field_name = []
original_file = []
gwp = []
Scenario = []
Scenario_value = []
#fill in empty lists with data from vff files
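# The row ranges below map to sections of the OPGEE VFF sheet as read by pandas (0-indexed):
# ~87-106 production, 107-111 gathering/boosting/processing, 112-116 transmission/storage,
# 123-132 second production, row 122 end use, and row 133 flaring.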
for file in list_vff:
df = pd.read_csv(d+file,header=None)
venting_ch4.append(sum(df.iloc[87:134,9].apply(lambda x:float(x))))
fugitive_ch4.append(sum(df.iloc[87:133,10].apply(lambda x:float(x))))
flaring_ch4.append(df.iloc[133,10]) #always going to be K134
venting_co2.append(sum(df.iloc[87:134,7].apply(lambda x:float(x))))
fugitive_co2.append(sum(df.iloc[87:134,8].apply(lambda x:float(x))))
venting_production_ch4.append(sum(df.iloc[87:107,9].apply(lambda x:float(x))))
venting_gatherboostprocesss_ch4.append(sum(df.iloc[107:112,9].apply(lambda x:float(x))))
venting_transmissionstorage_ch4.append(sum(df.iloc[112:117,9].apply(lambda x:float(x))))
venting_2ndproduction_ch4.append(sum(df.iloc[123:133,9].apply(lambda x:float(x))))
venting_enduse_ch4.append(float(df.iloc[122,9]))
fugitive_production_ch4.append(sum(df.iloc[87:107,10].apply(lambda x:float(x))))
fugitive_gatherboostprocesss_ch4.append(sum(df.iloc[107:112,10].apply(lambda x:float(x))))
fugitive_transmissionstorage_ch4.append(sum(df.iloc[112:117,10].apply(lambda x:float(x))))
fugitive_2ndproduction_ch4.append(sum(df.iloc[123:133,10].apply(lambda x:float(x))))
fugitive_enduse_ch4.append((float(df.iloc[122,10])))
venting_ch4_miq= [sum(x) for x in zip(venting_production_ch4, venting_2ndproduction_ch4)]
fugitive_ch4_miq= [sum(x) for x in zip(fugitive_production_ch4, fugitive_2ndproduction_ch4)]
venting_ch4_uponly = [sum(x) for x in zip(venting_production_ch4,venting_gatherboostprocesss_ch4,venting_2ndproduction_ch4)]
fugitive_ch4_uponly = [sum(x) for x in zip(fugitive_production_ch4,fugitive_gatherboostprocesss_ch4,fugitive_2ndproduction_ch4)]
Field_name.append(file.split('_')[0])
#original_file.append(file)
gwp.append(file.split('_')[7][3:].lower())
scenario = file.split('_')[-1].split('-')[0:-1]
Scenario.append('-'.join(scenario))
Scenario_value.append(file.split('_')[-1].split('-')[-1][:-4])
#combine lists of data into dataframe
vff_df = pd.DataFrame({'Field_name':Field_name, 'gwp':gwp, #'original_file':original_file,
'Scenario': Scenario, 'Scenario_value':Scenario_value,
'venting_ch4(t/d)':venting_ch4,'fugitive_ch4(t/d)':fugitive_ch4,
'flaring_ch4(t/d)':flaring_ch4,'venting_co2(t/d)':venting_co2,'fugitive_co2(t/d)':fugitive_co2,
'venting_ch4_miq(t/d)':venting_ch4_miq,'fugitive_ch4_miq(t/d)':fugitive_ch4_miq,
'venting_ch4_uponly(t/d)':venting_ch4_uponly,'fugitive_ch4_uponly(t/d)':fugitive_ch4_uponly,
'ch4_production(t/d)': [sum(x) for x in zip(venting_production_ch4,fugitive_production_ch4)],
'ch4_gatherboostprocess(t/d)': [sum(x) for x in zip(venting_gatherboostprocesss_ch4,fugitive_gatherboostprocesss_ch4)],
'ch4_transmissionstorage(t/d)': [sum(x) for x in zip(venting_transmissionstorage_ch4,fugitive_transmissionstorage_ch4)],
'ch4_2ndproduction(t/d)':[sum(x) for x in zip(venting_2ndproduction_ch4,fugitive_2ndproduction_ch4)],
'ch4_enduse(t/d)':[sum(x) for x in zip(venting_enduse_ch4,fugitive_enduse_ch4)]})
#add in new columns for tCH4/year and tCH4/year-miQ
vff_df['tCH4/year'] = (vff_df['flaring_ch4(t/d)'].astype(float)+vff_df['venting_ch4(t/d)']+vff_df['fugitive_ch4(t/d)'])*365
vff_df['tCH4/year-miQ']=(vff_df['flaring_ch4(t/d)'].astype(float)+vff_df['venting_ch4_miq(t/d)']+vff_df['fugitive_ch4_miq(t/d)'])*365
#double check field names don't have spaces
vff_df['Field_name'] = vff_df['Field_name'].replace(" ", "")
###Output
_____no_output_____
###Markdown
Add in data from 'Flow' csvs
###Code
#Grab flow sheet csvs
list_flow = sorted(glob.glob('*Flow*.csv', recursive=True)) #list all Flow .csvs
len(list_flow)
#Create empty lists in which to populate data from csvs
FS_LPG_export_LPG = [] #Flow Sheet!W9
FS_LPG_export_C2 = [] #W17
FS_LPG_export_C3 = [] #W18
FS_LPG_export_C4 = [] #W19
FS_Ethane_to_Petchem = [] #CP17
FS_Petcoke_to_stock =[]
FS_Gas_at_Wellhead =[] #AF24
Field_name = []
original_file = []
gwp = []
Scenario = []
Scenario_value = []
#fill lists with data from flow csvs
Field_name = []
original_file = []
for file in list_flow:
df = pd.read_csv(d+file,header=None)
FS_LPG_export_LPG.append(float(df.iloc[8,22]))
FS_LPG_export_C2.append(float(df.iloc[16,22]))
FS_LPG_export_C3.append(float(df.iloc[17,22]))
FS_LPG_export_C4.append(float(df.iloc[18,22]))
FS_Ethane_to_Petchem.append(float(df.iloc[16,93]))
FS_Petcoke_to_stock.append(float(df.iloc[6,214]))
FS_Gas_at_Wellhead.append(float(df.iloc[23,31]))
Field_name.append(file.split('_')[0])
original_file.append(file)
gwp.append(file.split('_')[7][3:].lower())
scenario = file.split('_')[-1].split('-')[0:-1]
Scenario.append('-'.join(scenario))
Scenario_value.append(file.split('_')[-1].split('-')[-1][:-4])
#Create dataframe for flow data
flowsheet_df = pd.DataFrame({'Field_name':Field_name, 'gwp':gwp, #'original_file':original_file,
'Scenario': Scenario, 'Scenario_value':Scenario_value,
'FS_LPG_export_LPG(t/d)':FS_LPG_export_LPG,'FS_LPG_export_C2(t/d)':FS_LPG_export_C2,
'FS_LPG_export_C3(t/d)': FS_LPG_export_C3, 'FS_LPG_export_C4(t/d)':FS_LPG_export_C4,
'FS_Ethane_to_Petchem(t/d)':FS_Ethane_to_Petchem,
'FS_Petcoke_to_stock(t/d)':FS_Petcoke_to_stock,'FS_Gas_at_Wellhead(t/d)':FS_Gas_at_Wellhead})
#double check no spaces in field names
flowsheet_df['Field_name'] = flowsheet_df['Field_name'].replace(" ", "")
###Output
_____no_output_____
###Markdown
Merge Results, Energy Summary, VFF, and Flow into one dataframe
###Code
print(len(results_df))
print(len(energysummary_df))
print(len(vff_df))
print(len(flowsheet_df))
energysummary_df.columns
#merge results and energysummary
merge = results_df.merge(energysummary_df, on=['Field_name','gwp','Scenario','Scenario_value'], how = 'outer')
vff_df
#merge in vff
merge = merge.merge(vff_df, on=['Field_name','gwp','Scenario','Scenario_value'], how='outer')
#merge in flowsheet
merge = merge.merge(flowsheet_df, on=['Field_name','gwp','Scenario','Scenario_value'], how='outer')
#Add new column for tco2e/yr after all data are merged
merge['tCO2e/yr']=merge['Lifecycle GHG emissions']*merge['ES_MJperd']/10**6*365
###Output
_____no_output_____
###Markdown
Check against upstream results
###Code
#read in upstream_results and make sure we aren't missing any columns here
sp_dir = '/Users/rwang/RMI/Climate Action Engine - Documents/OCI Phase 2'
connection = sqlite3.connect(sp_dir+"/OCI_Database.db")
up_mid_down = pd.read_sql('select * from upstream_results',connection)
list_up = up_mid_down.columns.to_list()
list_merge = merge.columns.to_list()
###Output
_____no_output_____
###Markdown
Export results to csv or database
###Code
pd.set_option('display.max_columns', None)
merge['Scenario'] = merge['Scenario'].replace({'CCS': 'Carbon Capture and Storage','Electrify':'Renewable Electricity',
'Flare efficiency':'Flare Efficiency','LNG':'Liquefied Natural Gas',
'Methane fugitives':'Venting + Fugitive Leakage','Solar steam':'Solar Steam',
'Water': 'Energy to Pump Water'})
pd.options.display.max_rows = None
def default(x):
if ((x['Scenario']=='Renewable Electricity' and x['Scenario_value']=='off') or
(x['Scenario']=='Liquefied Natural Gas' and x['Scenario_value']=='on') or
(x['Scenario']=='Flare Efficiency' and x['Scenario_value']=='def') or
(x['Scenario']=='Venting + Fugitive Leakage' and x['Scenario_value']=='def') or
(x['Scenario']== 'Solar Steam' and x['Scenario_value']=='def') or
(x['Scenario']== 'Energy to Pump Water' and x['Scenario_value']=='def') or
(x['Scenario']=='Carbon Capture and Storage' and x['Scenario_value']=='off')):
return 'Y'
else:
return 'N'
merge['Default?'] = merge.apply(lambda x: default(x),axis =1)
merge.to_csv('/Users/rwang/RMI/Climate Action Engine - Documents/OCI Phase 2/Upstream/upstream_data_pipeline_sp/Postprocessed_outputs_2/upstream_postprocessed_scenarios_fix.csv', index=False)
merge.to_csv('/Users/lschmeisser/Desktop/upstream_postprocessed_scenarios.csv', index=False)
###Output
_____no_output_____
###Markdown
Create a spreadsheet that is easier to view (field name and important variables to the left)
###Code
easyview = merge[['Field_name','Field location (Country)','year',
'field_type',
'frack?',
'lng?',
'gwp',
'Oil production volume',
'Field age',
'Field depth',
'Downhole pump',
'Water reinjection ',
'Natural gas reinjection',
'Water flooding',
'Gas lifting',
'Gas flooding',
'Steam flooding',
'Oil sands mine (integrated with upgrader)',
'Oil sands mine (non-integrated with upgrader)',
'Number of producing wells',
'Number of water injecting wells',
'Production tubing diameter',
'Productivity index',
'Reservoir pressure',
'Reservoir temperature',
'Offshore?',
'API gravity',
'Gas composition N2',
'Gas composition CO2',
'Gas composition C1',
'Gas composition C2',
'Gas composition C3',
'Gas composition C4+',
'Gas composition H2S',
'Gas-to-oil ratio (GOR)',
'Water-to-oil ratio (WOR)',
'Water injection ratio',
'Gas lifting injection ratio',
'Gas flooding injection ratio',
'Flood gas ',
'Liquids unloading practice',
'Fraction of CO2 breaking through to producers',
'Source of makeup CO2',
'Percentage of sequestration credit assigned to the oilfield',
'Steam-to-oil ratio (SOR)',
'Fraction of required electricity generated onsite',
'Fraction of remaining natural gas reinjected',
'Fraction of produced water reinjected',
'Fraction of steam generation via cogeneration ',
'Fraction of steam generation via solar thermal',
'Heater/treater',
'Stabilizer column',
'Upgrader type',
'Associated Gas Processing Path',
'Flaring-to-oil ratio',
'Venting-to-oil ratio (purposeful)',
'Volume fraction of diluent',
'Low carbon richness (semi-arid grasslands)',
'Moderate carbon richness (mixed)',
'High carbon richness (forested)',
'Low intensity development and low oxidation',
'Moderate intensity development and moderate oxidation',
'High intensity development and high oxidation',
'Ocean tanker',
'Barge',
'Pipeline',
'Rail',
'Truck',
'Transport distance (one way) - Ocean tanker',
'Transport distance (one way) - Barge',
'Transport distance (one way) - Pipeline',
'Transport distance (one way) - Rail',
'Transport distance (one way) - Truck',
'Ocean tanker size, if applicable',
'Small sources emissions',
'e-Total energy consumption',
'e-Total GHG emissions',
'e-Total GHG emissions-Combustion/land use',
'e-Total GHG emissions-VFF',
'd-Total energy consumption',
'd-Total GHG emissions',
'd-Total GHG emissions-Combustion/land use',
'd-Total GHG emissions-VFF',
'p-Total energy consumption',
'p-Total GHG emissions',
'p-Total GHG emissions-Combustion/land use',
'p-Total GHG emissions-VFF',
's-Total energy consumption',
's-Total GHG emissions',
's-Total GHG emissions-Combustion/land use',
's-Total GHG emissions-VFF',
'l-Total energy consumption',
'l-Total GHG emissions',
'l-Total GHG emissions-Combustion/land use',
'l-Total GHG emissions-VFF',
'm-Total energy consumption',
'm-Total GHG emissions',
'm-Total GHG emissions-Combustion/land use',
'm-Total GHG emissions-VFF',
'w-Total energy consumption',
'w-Total GHG emissions',
'w-Total GHG emissions-Combustion/land use',
'w-Total GHG emissions-VFF',
't-Total energy consumption',
't-Total GHG emissions',
't-Total GHG emissions-Combustion/land use',
't-Total GHG emissions-VFF',
't-Loss factor',
'g-Total energy consumption',
'g-Total GHG emissions',
'g-Total GHG emissions-Combustion/land use',
'g-Total GHG emissions-VFF',
'Other small sources',
'Offsite emissions credit/debit',
'Lifecycle energy consumption',
'CSS-Total CO2 sequestered',
'Lifecycle GHG emissions',
'Field-by-field check',
'ES_MJperd',
'ES_mmbtuperd',
'ES_Energy_Density_crude(mmbtu/t)',
'ES_Energy_Density_petcoke(mmbtu/t)',
'ES_Energy_Density_C2(mmbtu/t)',
'ES_Energy_Density_C3(mmbtu/t)',
'ES_Energy_Density_C4(mmbtu/t)',
'ES_Crude_output(mmbut/d)',
'ES_Gas_output(mmbtu/d)',
'ES_NGL_output(mmbtu/d)',
'ES_Gas_output(MJ/d)',
'ES_Petcoke_fuel(mmbtu/d)',
'venting_ch4(t/d)',
'fugitive_ch4(t/d)',
'flaring_ch4(t/d)',
'venting_co2(t/d)',
'fugitive_co2(t/d)',
'venting_ch4_miq(t/d)',
'fugitive_ch4_miq(t/d)',
'venting_ch4_uponly(t/d)',
'fugitive_ch4_uponly(t/d)',
'ch4_production(t/d)',
'ch4_gatherboostprocess(t/d)',
'ch4_transmissionstorage(t/d)',
'ch4_2ndproduction(t/d)',
'ch4_enduse(t/d)',
'tCH4/year',
'tCH4/year-miQ',
'FS_LPG_export_LPG(t/d)',
'FS_LPG_export_C2(t/d)',
'FS_LPG_export_C3(t/d)',
'FS_LPG_export_C4(t/d)',
'FS_Ethane_to_Petchem(t/d)',
'FS_Petcoke_to_stock(t/d)',
'FS_Gas_at_Wellhead(t/d)',
'tCO2e/yr']]
#Write to excel file
easyview.to_excel('/Users/lschmeisser/RMI/Climate Action Engine - Documents/OCI Phase 2/Upstream/upstream_data_pipeline_sp/Postprocessed_outputs_2/easyview_scenarios.xlsx', index=False)
upstream_scenarios = pd.read_csv('/Users/rwang/RMI/Climate Action Engine - Documents/OCI Phase 2/Upstream/upstream_data_pipeline_sp/Postprocessed_outputs_2/upstream_postprocessed_scenarios.csv')
###Output
_____no_output_____ |
Modelling/xT/.ipynb_checkpoints/xT Bayesian-checkpoint.ipynb | ###Markdown
**Objective of this Notebook*** Have got the basics of xT up and running* Have got bilinear interpolation working to take advantage of increased positional granularity of the Opta data* Now want to add Bayesian updating for the binomial probabilities: * Transition matrix probabilities; * Shoot or move probabilities; * xG probabilities. **We will want to use annual priors, which are updated monthly with new data to model the new data.****Core reason: football changes. VAR & other rules will impact how the game is played. And implicitly, new statistics like xG will cause the game to change. So the transition matrices must evolve with the game.** How Am I Going To Do This Bayesian UpdatingI have 3 different types of success probabilities:* From (MxN) xG matrix: probability of scoring if a shot is taken from that zone.* From (MxN) shoot or move probability matrices (that complement each other): probability of shooting or moving from that zone.* From (M\*N x M\*N) transition matrix: the probability that when trying to move the ball from one grid to another, you successfully can.So there will be (MxN) + 2x(MxN) + (MxN)\*(MxN) probabilities that will all be going through this updating framework!That's a lot!! Beta-Binomial Conjugate Theory:> Each of these success counts can be modelled as a Binomial distribution.Recall:> $X \sim \text{Binom}(n, \theta)$, $P(X=k) = p(k) = \binom{n}{k}\theta^{k}(1 - \theta)^{n-k}$> $\theta$ is unknown: it's the success probability.Use prior to make initial guess for $\theta$:> $\theta \sim \text{Beta}(\alpha_{0}, \beta_{0})$And our posterior distribution for $\theta | \mathbf{x}$:> $\theta | \mathbf{x} \sim \text{Beta}(\alpha_{0} + k, \beta_{0} + n-k)$Which has mean:> $E[\theta] = \frac{\alpha_0 + k}{\alpha_0 + \beta_0 + n}$So, we're going to take $\alpha_{0}$ and $\beta_{0}$ prior hyper-parameters from the previous year's season, and update them using the next month's data.The previous month's data will go into next month's prior.
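A minimal numeric sketch of this posterior-mean update (illustrative numbers only, not values taken from the match data):

```python
# Beta-Binomial posterior mean for one zone's success probability (illustrative numbers only).
def posterior_mean(alpha0, beta0, k, n):
    # theta | data ~ Beta(alpha0 + k, beta0 + n - k), with mean (alpha0 + k) / (alpha0 + beta0 + n)
    return (alpha0 + k) / (alpha0 + beta0 + n)

# e.g. prior: 30 goals from 300 shots in a zone (alpha0=30, beta0=270); new month: 4 goals from 25 shots
print(posterior_mean(alpha0=30, beta0=270, k=4, n=25))  # ~0.105
```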
###Code
%%time
df_opta_1718 = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Opta/EPL Data/Events/df_subevents_EPL_1718.csv')
df_opta_1819 = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Opta/EPL Data/Events/df_subevents_EPL_1819.csv')
df_opta_1920 = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Opta/EPL Data/Events/df_subevents_EPL_1920.csv')
df_opta_2021 = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Opta/EPL Data/Events/df_subevents_EPL_2021.csv')
df_opta_1718_cl = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Opta/CL Data/Events/df_subevents_CL_1718.csv')
df_opta_1819_cl = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Opta/CL Data/Events/df_subevents_CL_1819.csv')
df_opta_1718['competition'] = 'English Premier League'
df_opta_1819['competition'] = 'English Premier League'
df_opta_1920['competition'] = 'English Premier League'
df_opta_2021['competition'] = 'English Premier League'
df_opta_1718_cl['competition'] = 'Champions League'
df_opta_1819_cl['competition'] = 'Champions League'
df_opta_1718['season'] = '2017/18'
df_opta_1819['season'] = '2018/19'
df_opta_1920['season'] = '2019/20'
df_opta_2021['season'] = '2020/21'
df_opta_1718_cl['season'] = '2017/18'
df_opta_1819_cl['season'] = '2018/19'
df_opta_1718['seasonIndex'] = 1
df_opta_1819['seasonIndex'] = 2
df_opta_1920['seasonIndex'] = 3
df_opta_2021['seasonIndex'] = 4
df_opta_1718_cl['seasonIndex'] = 1
df_opta_1819_cl['seasonIndex'] = 2
df_opta = pd.concat([df_opta_1718, df_opta_1819, df_opta_1920, df_opta_2021, df_opta_1718_cl, df_opta_1819_cl])
###Output
CPU times: user 11.2 s, sys: 1.82 s, total: 13 s
Wall time: 13.4 s
###Markdown
**Applying Transformation Equations**
###Code
%%time
df_opta = xLoad.apply_datetimes(df_opta)
df_opta = xLoad.create_game_month_index(df_opta)
df_opta = xLoad.opta_infer_dribble_end_coords(df_opta)
df_opta = xLoad.coords_in_metres(df_opta, 'x1', 'x2', 'y1', 'y2')
###Output
_____no_output_____
###Markdown
**Defining Opta action event buckets**Splitting actions / events into buckets for:* Successful pass events;* Unsuccessful pass events;* Successful dribble events;* Unsuccessful dribble events;* Successful shot events;* Unsuccessful shot events.
###Code
# pass events (inc. crosses)
opta_successful_pass_events = ['2nd Assist','Assist','Chance Created','Cross','Pass']
opta_failed_pass_events = ['Failed Pass','Offside Pass']
# dribble events
opta_successful_dribble_events = ['Dribble']
opta_failed_dribble_events = ['Failed Dribble']
# shot events
opta_successful_shot_events = ['Goal']
opta_failed_shot_events = ['Hit Woodwork','Miss','Missed Penalty','Penalty Saved','Shot Blocked','Shot Saved']
###Output
_____no_output_____
###Markdown
**Outputting to File**
###Code
df_opta.to_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/Opta Fully Loaded Events/Opta_Raw_Events.csv', index=None)
###Output
_____no_output_____
###Markdown
**Reading from File****(Saving 5 mins of loading time above)**
###Code
df_opta = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/Opta Fully Loaded Events/Opta_Raw_Events.csv')
print (f'{len(df_opta)} rows loaded.')
###Output
3126182 rows loaded.
###Markdown
**Loading Synthetic Shot Data**
###Code
df_synthetic = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Synthetic/Synthetic_Shots.csv')
###Output
_____no_output_____
###Markdown
**Loading Wyscout European Championship Prior**1. Loading in pre-packaged data;2. Setting up Wyscout pass/dribble/shot classes.
###Code
df_wyscout = pd.read_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/Wyscout xT/Wyscout_Euros_2016_xT.csv')
# pass events (inc. crosses)
wyscout_successful_pass_events = ['Successful Pass']
wyscout_failed_pass_events = ['Failed Pass']
# dribble events
wyscout_successful_dribble_events = ['Successful Dribble', 'Successful Take-On']
wyscout_failed_dribble_events = ['Failed Dribble','Failed Take-On']
# shot events
wyscout_successful_shot_events = ['Goal']
wyscout_failed_shot_events = ['Failed Shot']
wyscout_events_relevant = wyscout_successful_dribble_events + wyscout_successful_pass_events + wyscout_successful_shot_events + wyscout_failed_dribble_events + wyscout_failed_pass_events + wyscout_failed_shot_events
wyscout_events_relevant
###Output
_____no_output_____
###Markdown
**Opta event taxonomy**
###Code
df_opta.groupby(['eventType','eventSubType'])\
.agg({'matchId':'count'})\
.reset_index()\
.rename(columns={'matchId':'countActions'})\
.sort_values(['eventType','countActions'], ascending=[True,False])
###Output
_____no_output_____
###Markdown
--- **Bayesian xT** **Beta-Binomial Conjugate Analysis**For every unknown parameter $\theta$:* We want a prior for it, which requires an $\alpha_0$ and $\beta_0$, before we look at any of the season data* We want to calculate the season priors: * These will be league specific; * Before the first season, for all leagues, we'll use the World Cup data from Wyscout to act as the very first prior (applying to the EPL and the CL, and possibly even the Wyscout data) * Will start every new season with the previous season's priors. * So will want to calculate all of those season priors for: 1. xG; 2. pShoot (and then do 1- for pMove); 3. Transition matrix. * **This should be fairly straightforward as you'll just throw subsets of df_opta into the main functions to do that.** * Will therefore need to be looping through a single index to build monthly dataframes. * Will then keep track of all of those monthly dataframes (that will have xT applied) and will stitch them all together * Will want to be able to select the competition right from the off, and again, produce a competition specific iteration sequence, and concatenate multiple leagues together at the end.
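A hedged sketch of what the element-wise posterior mean looks like for an (M x N) count surface — this only illustrates the idea behind the `xTBayes` helpers used below, not their actual implementation:

```python
import numpy as np

def posterior_surface(prior_success, prior_fail, data_success, data_fail):
    # element-wise Beta-Binomial posterior mean per zone: (alpha_0 + k) / (alpha_0 + beta_0 + n)
    alpha = np.asarray(prior_success + data_success, dtype=float)
    beta = np.asarray(prior_fail + data_fail, dtype=float)
    denom = alpha + beta
    # guard against zones with no observations at all
    return np.divide(alpha, denom, out=np.zeros_like(denom), where=denom > 0)
```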
###Code
# number of horizontal zones (across width of the pitch)
M = 12
# number of vertical zones (across length of the pitch)
N = 18
###Output
_____no_output_____
###Markdown
Beta-Binomial Conjugate Analysis> Looping Through Game Months
###Code
%%time
seasonIndex = 0
lst_df = []
lst_xT = []
lst_xG = []
# here are the game month indices that you'll loop through
lst_gameMonthIndex = df_opta.gameMonthIndex.drop_duplicates().sort_values().values
for gameMonthIndex in lst_gameMonthIndex:
# getting game month dataframe
df_gameMonth = df_opta.loc[df_opta['gameMonthIndex'] == gameMonthIndex].copy()
# getting the season index from the game month dataframe
seasonIndexNew = df_gameMonth.head(1).seasonIndex.values[0]
print (f'Modelling season {seasonIndexNew}: game month {gameMonthIndex}...')
# get the data counts for the new game month
month_data_successful_move_count_matrix = xTBayes.successful_move_count_matrix(df_gameMonth, opta_successful_pass_events, opta_successful_dribble_events, 'eventSubType', N, M, 105, 68)
month_data_failed_move_count_matrix = xTBayes.failed_move_count_matrix(df_gameMonth, opta_failed_pass_events, opta_failed_dribble_events, 'eventSubType', N, M, 105, 68)
month_data_successful_shot_matrix = xTBayes.successful_shot_count_matrix(df_gameMonth, opta_successful_shot_events, 'eventSubType', N, M, 105, 68)
month_data_failed_shot_matrix = xTBayes.failed_shot_count_matrix(df_gameMonth, opta_failed_shot_events, 'eventSubType', N, M, 105, 68)
# if we're looking at a new season
if seasonIndexNew != seasonIndex:
# if we're dealing with the first season, and we're using our special prior of the Wyscout European Championships 2016 before the first season of Opta data (2017/2018)
if seasonIndexNew == 1:
# our special prior which is the Wyscout 2016 Euros
df_prior = df_wyscout.copy()
# 1. calculate the Euros counts for successful/failed moves/shots
season_prior_successful_move_count_matrix = xTBayes.successful_move_count_matrix(df_prior, wyscout_successful_pass_events, wyscout_successful_dribble_events, 'eventSubType', N, M, 105, 68)
season_prior_failed_move_count_matrix = xTBayes.failed_move_count_matrix(df_prior, wyscout_failed_pass_events, wyscout_failed_dribble_events, 'eventSubType', N, M, 105, 68)
season_prior_successful_shot_matrix = xTBayes.successful_shot_count_matrix(df_prior, wyscout_successful_shot_events, 'eventSubType', N, M, 105, 68)
season_prior_failed_shot_matrix = xTBayes.failed_shot_count_matrix(df_prior, wyscout_failed_shot_events, 'eventSubType', N, M, 105, 68)
# 2. initialise new cumulative game month count matrices - combining Opta and Wyscout data
cumulative_month_data_successful_move_count_matrix = month_data_successful_move_count_matrix
cumulative_month_data_failed_move_count_matrix = month_data_failed_move_count_matrix
cumulative_month_data_successful_shot_matrix = month_data_successful_shot_matrix
cumulative_month_data_failed_shot_matrix = month_data_failed_shot_matrix
# 3. initialise new transition matrices for successful and failed move actions: the prior here is from Wyscout, so uses the Wyscout event types
season_prior_successful_transition_counts, season_prior_transition_matrix_denom = xTBayes.bayes_move_transition_matrices(df_prior, wyscout_successful_pass_events, wyscout_failed_pass_events, wyscout_successful_dribble_events, wyscout_failed_dribble_events, 'eventSubType', N, M, 105, 68)
# 4. initialise new cumulative transition matrix counts: the data here is always Opta, so uses the Opta event types
cumulative_month_successful_transition_counts, cumulative_month_transition_matrix_denom = xTBayes.bayes_move_transition_matrices(df_gameMonth, opta_successful_pass_events, opta_failed_pass_events, opta_successful_dribble_events, opta_failed_dribble_events, 'eventSubType', N, M, 105, 68)
# 5. updating first cumulative month counts of the season with the prior season's counts
cumulative_month_successful_transition_counts += season_prior_successful_transition_counts
cumulative_month_transition_matrix_denom += season_prior_transition_matrix_denom
# updating the current season index to catch up with the new season index
seasonIndex = seasonIndexNew
# if seasonIndexNew is >=2, therefore the prior can be the previous season's Opta data
else:
# our prior will be the previous seasons data from the Opta data
df_prior = df_opta.loc[df_opta['seasonIndex'] == seasonIndex].copy()
# 1. calculate the previous season's counts as the prior
season_prior_successful_move_count_matrix = xTBayes.successful_move_count_matrix(df_prior, opta_successful_pass_events, opta_successful_dribble_events, 'eventSubType', N, M, 105, 68)
season_prior_failed_move_count_matrix = xTBayes.failed_move_count_matrix(df_prior, opta_failed_pass_events, opta_failed_dribble_events, 'eventSubType', N, M, 105, 68)
season_prior_successful_shot_matrix = xTBayes.successful_shot_count_matrix(df_prior, opta_successful_shot_events, 'eventSubType', N, M, 105, 68)
season_prior_failed_shot_matrix = xTBayes.failed_shot_count_matrix(df_prior, opta_failed_shot_events, 'eventSubType', N, M, 105, 68)
# 2. initialise new cumulative game month count matrices
cumulative_month_data_successful_move_count_matrix = month_data_successful_move_count_matrix
cumulative_month_data_failed_move_count_matrix = month_data_failed_move_count_matrix
cumulative_month_data_successful_shot_matrix = month_data_successful_shot_matrix
cumulative_month_data_failed_shot_matrix = month_data_failed_shot_matrix
# 3. initialise new transition matrices for successful and failed move actions
season_prior_successful_transition_counts, season_prior_transition_matrix_denom = xTBayes.bayes_move_transition_matrices(df_prior, opta_successful_pass_events, opta_failed_pass_events, opta_successful_dribble_events, opta_failed_dribble_events, 'eventSubType', N, M, 105, 68)
# 4. initialise new cumulative transition matrix counts
cumulative_month_successful_transition_counts, cumulative_month_transition_matrix_denom = xTBayes.bayes_move_transition_matrices(df_gameMonth, opta_successful_pass_events, opta_failed_pass_events, opta_successful_dribble_events, opta_failed_dribble_events, 'eventSubType', N, M, 105, 68)
# 5. updating first cumulative month counts of the season with the prior season's counts
cumulative_month_successful_transition_counts += season_prior_successful_transition_counts
cumulative_month_transition_matrix_denom += season_prior_transition_matrix_denom
# updating the current season index to catch up with the new season index
seasonIndex = seasonIndexNew
# if we're not looking at a new season, then we just want to add the new game month to the last game month
else:
# 1. updating cumulative count matrices
cumulative_month_data_successful_move_count_matrix += month_data_successful_move_count_matrix
cumulative_month_data_failed_move_count_matrix += month_data_failed_move_count_matrix
cumulative_month_data_successful_shot_matrix += month_data_successful_shot_matrix
cumulative_month_data_failed_shot_matrix += month_data_failed_shot_matrix
# 2. updating cumulative transition matrices
new_month_successful_transition_counts, new_month_transition_matrix_denom = xTBayes.bayes_move_transition_matrices(df_gameMonth, opta_successful_pass_events, opta_failed_pass_events, opta_successful_dribble_events, opta_failed_dribble_events, 'eventSubType', N, M, 105, 68)
cumulative_month_successful_transition_counts += new_month_successful_transition_counts
cumulative_month_transition_matrix_denom += new_month_transition_matrix_denom
# now we have all of the monthly / seasonal counts, so it's time to start calculating posterior means
## USING SYNTHETIC SHOTS as an additional prior
posterior_xG = xTBayes.bayes_p_score_if_shoot(season_prior_successful_shot_matrix, cumulative_month_data_successful_shot_matrix, season_prior_failed_shot_matrix, cumulative_month_data_failed_shot_matrix, N, M, 105, 68, 1, df_synthetic)
posterior_pS, posterior_pM = xTBayes.bayes_p_shoot_or_move(season_prior_successful_shot_matrix, cumulative_month_data_successful_shot_matrix, season_prior_failed_shot_matrix, cumulative_month_data_failed_shot_matrix\
,season_prior_successful_move_count_matrix, cumulative_month_data_successful_move_count_matrix, season_prior_failed_move_count_matrix, cumulative_month_data_failed_move_count_matrix)
# calculating the conjugate transition matrix
    ## We divide the total counts of moves between grids by the NxM column vector of the starting position count denominator (hence the .reshape method)
posterior_T = xT.safe_divide(cumulative_month_successful_transition_counts, cumulative_month_transition_matrix_denom.reshape(N*M,1))
posterior_xT = xTBayes.bayes_xT_surface(posterior_xG, posterior_pS, posterior_pM, posterior_T, N, M)
df_gameMonth['xT'] = xT.apply_xT(df_gameMonth, posterior_xT, opta_successful_pass_events, opta_failed_pass_events, opta_successful_dribble_events, opta_failed_dribble_events, N, M, 105, 68, 100, xT_mode = 3)
lst_xT.append(posterior_xT)
lst_xG.append(posterior_xG)
lst_df.append(df_gameMonth)
print ('Done.')
###Output
Modelling season 1: game month 24212...
Modelling season 1: game month 24213...
Modelling season 1: game month 24214...
Modelling season 1: game month 24215...
Modelling season 1: game month 24216...
Modelling season 1: game month 24217...
Modelling season 1: game month 24218...
Modelling season 1: game month 24219...
Modelling season 1: game month 24220...
Modelling season 1: game month 24221...
Modelling season 2: game month 24224...
Modelling season 2: game month 24225...
Modelling season 2: game month 24226...
Modelling season 2: game month 24227...
Modelling season 2: game month 24228...
Modelling season 2: game month 24229...
Modelling season 2: game month 24230...
Modelling season 2: game month 24231...
Modelling season 2: game month 24232...
Modelling season 2: game month 24233...
Modelling season 2: game month 24234...
Modelling season 3: game month 24236...
Modelling season 3: game month 24237...
Modelling season 3: game month 24238...
Modelling season 3: game month 24239...
Modelling season 3: game month 24240...
Modelling season 3: game month 24241...
Modelling season 3: game month 24242...
Modelling season 3: game month 24243...
Modelling season 3: game month 24246...
Modelling season 3: game month 24247...
Modelling season 4: game month 24249...
Modelling season 4: game month 24250...
Modelling season 4: game month 24251...
Modelling season 4: game month 24252...
Modelling season 4: game month 24253...
Modelling season 4: game month 24254...
Modelling season 4: game month 24255...
Modelling season 4: game month 24256...
Modelling season 4: game month 24257...
Done.
CPU times: user 56.1 s, sys: 4.1 s, total: 1min
Wall time: 59 s
###Markdown
xG plot **WITHOUT synthetic shots**
###Code
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
posterior_xG_no_fake_shots = xTBayes.bayes_p_score_if_shoot(season_prior_successful_shot_matrix, cumulative_month_data_successful_shot_matrix, season_prior_failed_shot_matrix, cumulative_month_data_failed_shot_matrix)
plt.imshow(posterior_xG_no_fake_shots, interpolation='nearest', cmap=cm.Greys_r)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
###Output
_____no_output_____
###Markdown
xG plot **WITH synthetic shots**
###Code
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
#posterior_xG = xTBayes.bayes_p_score_if_shoot(season_prior_successful_shot_matrix, cumulative_month_data_successful_shot_matrix, season_prior_failed_shot_matrix, cumulative_month_data_failed_shot_matrix, N, M, 105, 68, 1, df_synthetic)
plt.imshow(posterior_xG, interpolation='nearest', cmap=cm.Greys_r)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
plt.imshow(posterior_pS, interpolation='nearest', cmap=cm.Greys_r)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
plt.imshow(posterior_pM, interpolation='nearest', cmap=cm.Greys_r)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111)
plt.imshow(posterior_T, interpolation='nearest', cmap=cm.Greys_r)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
plt.imshow(posterior_xT, interpolation='nearest', cmap=cm.coolwarm)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
plt.imshow(xT.bilinear_interp_xT(posterior_xT), interpolation='nearest', cmap=cm.coolwarm)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
fig = plt.figure(constrained_layout=True, figsize=(12,12))
gs = fig.add_gridspec(9, 2)
ax1 = fig.add_subplot(gs[0:3, 0:1])
plt.imshow(posterior_pS, interpolation='nearest', cmap=cm.Greys_r)
ax1.axes.get_xaxis().set_visible(False)
ax1.axes.get_yaxis().set_visible(False)
ax1.set_title('Probability of Shooting, $s_z$')
ax2 = fig.add_subplot(gs[0:3, 1:2])
plt.imshow(posterior_xG, interpolation='nearest', cmap=cm.Greys_r)
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax2.set_title('$xG(z)$ Surface (Incl. Synthetic Shots)')
ax3 = fig.add_subplot(gs[3:6, 0:1])
plt.imshow(posterior_pM, interpolation='nearest', cmap=cm.Greys_r)
ax3.axes.get_xaxis().set_visible(False)
ax3.axes.get_yaxis().set_visible(False)
ax3.set_title('Probability of Moving, $m_z$')
ax4 = fig.add_subplot(gs[3:6, 1:2])
plt.imshow(posterior_T, interpolation='nearest', cmap=cm.Greys_r)
ax4.axes.get_xaxis().set_visible(False)
ax4.axes.get_yaxis().set_visible(False)
ax4.set_title('Move Transition Matrix, $T_{z \\rightarrow z\'}$')
ax5 = fig.add_subplot(gs[6:9, 0:1])
plt.imshow(posterior_xT, interpolation='nearest', cmap=cm.coolwarm)
ax5.axes.get_xaxis().set_visible(False)
ax5.axes.get_yaxis().set_visible(False)
ax5.set_title('Most Recent Bayesian $xT(z)$ Surface (End of 2020/21 Season)')
ax6 = fig.add_subplot(gs[6:9, 1:2])
plt.imshow(xT.bilinear_interp_xT(posterior_xT), interpolation='nearest', cmap=cm.coolwarm)
ax6.axes.get_xaxis().set_visible(False)
ax6.axes.get_yaxis().set_visible(False)
ax6.set_title(r'$xT(z)$ Surface (Bilinearly Interpolated)')
plt.savefig('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Plots/Bayesian Opta xT/Components_xT.png', dpi=300)
###Output
_____no_output_____
###Markdown
--- **Looking at Monthly Updates** 1) **xG Maps**This is actually fascinating: you can see the penalty spot glow up at the start of the 2020/21 season due to the addition of VAR + new handball rule (that was quickly diluted).
###Code
fig, axs = plt.subplots(8,5, figsize=(25, 25), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = .01, wspace=.2)
axs = axs.ravel()
for i in range(len(lst_xG)):
xG_surface = lst_xG[i]
axs[i].imshow(xG_surface, interpolation='nearest', cmap=cm.Greys_r)
axs[i].axes.get_xaxis().set_visible(False)
axs[i].axes.get_yaxis().set_visible(False)
###Output
_____no_output_____
###Markdown
**2) Interpolated xT Maps**
###Code
fig, axs = plt.subplots(8,5, figsize=(25, 25), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = .01, wspace=.2)
axs = axs.ravel()
for i in range(len(lst_xT)):
xT_surface = lst_xT[i]
xT_interp_surface = xT.bilinear_interp_xT(xT_surface)
axs[i].imshow(xT_interp_surface, interpolation='nearest', cmap=cm.coolwarm)
axs[i].axes.get_xaxis().set_visible(False)
axs[i].axes.get_yaxis().set_visible(False)
###Output
_____no_output_____
###Markdown
--- **Constructing Final Opta + xT Dataset** **1) Concatenating Monthly Dataframes**
###Code
# concatenating all of the monthly
df_opta_xT = pd.concat(lst_df, ignore_index=True)
print (f'{len(df_opta_xT)} rows loaded.')
df_opta_xT
###Output
3126182 rows loaded.
###Markdown
**2) Outputting to File**
###Code
df_opta_xT.to_csv('/Users/christian/Desktop/University/Birkbeck MSc Applied Statistics/Project/Data/Analysis Ready/Opta Bayesian xT/Bayesian_Opta_xT.csv', index=None)
###Output
_____no_output_____
###Markdown
**3) Quick Player Summary Analysis**
###Code
df_xT = df_opta_xT.groupby(['competition','season','playerId','playerName','matchId'])\
.agg({'xT':np.sum,'minsPlayed':np.mean,'x1':'count'})\
.reset_index().rename(columns={'x1':'numActions'})\
.groupby(['competition','season','playerId','playerName'])\
.agg({'xT':np.sum,'minsPlayed':np.sum,'numActions':np.sum,'matchId':'nunique'})\
.reset_index()\
.rename(columns={'matchId':'numMatches'})\
.sort_values('xT', ascending=False)
df_xT['xT_per_90'] = (df_xT.xT / df_xT.minsPlayed) * 90
# min mins filter
df_xT = df_xT.loc[(df_xT['minsPlayed'] > 1000)]
df_xT['season_xT_rank'] = df_xT.sort_values('xT', ascending=False).groupby(['competition','season']).cumcount() + 1
df_xT['season_xT_per_90_rank'] = df_xT.sort_values('xT_per_90', ascending=False).groupby(['competition','season']).cumcount() + 1
df_xT.loc[df_xT['season_xT_per_90_rank'] <= 20].sort_values(['competition','season','season_xT_per_90_rank'], ascending=[True,True, True])
###Output
_____no_output_____
###Markdown
--- **4) Misc** By Eye Sanity Checks1. Checking all move events have final coords (Opta provide final pass coords and final shot coords, but not final dribble coords, so these were engineered)
###Code
df_moves = xT.get_df_all_moves(df_opta, opta_successful_pass_events, opta_failed_pass_events, opta_successful_dribble_events, opta_failed_dribble_events, 'eventSubType')
# checking nulls
len(df_moves.loc[pd.isna(df_moves['x2_m']) == True].eventSubType.value_counts()), len(df_moves.loc[pd.isna(df_moves['y2_m']) == True].eventSubType.value_counts())
###Output
_____no_output_____
###Markdown
Creating movie of the Bayesian updating pitches
###Code
"""
import matplotlib.animation as animation
img = [] # some array of images
frames = [] # for storing the generated images
fig = plt.figure()
for i in np.arange(len(lst_xT)):
xT_surface = lst_xT[i]
xT_interp_surface = xT.bilinear_interp_xT(xT_surface)
frames.append([plt.imshow(xT_interp_surface, cmap=cm.coolwarm,animated=True)])
ani = animation.ArtistAnimation(fig, frames, interval=50, blit=False,
repeat_delay=1000)
ani.save('movie.mp4')
plt.show()
"""
###Output
_____no_output_____ |
ueb4/ex17.ipynb | ###Markdown
Links to documentation/tutorials for IPython/Python/numpy/matplotlib/git as well as the source code can be found in the [GitHub Repo](https://github.com/BerndSchwarzenbacher/numdiff).
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
adaptive = np.loadtxt('data/ex17.out')
time = adaptive[:,0]
h = adaptive[:,1]
V1 = adaptive[:,2]
V2 = adaptive[:,3]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
plt.plot(time, h)
plt.ylabel(r'$h(t)$')
plt.yscale('log')
plt.xlabel(r'$t$')
plt.grid()
eps2 = np.loadtxt('data/ex17_tol_e2.out')
eps3 = np.loadtxt('data/ex17_tol_e3.out')
eps4 = np.loadtxt('data/ex17_tol_e4.out')
eps5 = np.loadtxt('data/ex17_tol_e5.out')
eps6 = np.loadtxt('data/ex17_tol_e6.out')
tol = np.arange(2, 6);
V1_eps2 = eps2[-1:,2]
V1_eps3 = eps3[-1:,2]
V1_eps4 = eps4[-1:,2]
V1_eps5 = eps5[-1:,2]
V1_eps6 = eps6[-1:,2]
V1_err = np.array([V1_eps2 - V1_eps6, V1_eps3-V1_eps6, V1_eps4-V1_eps6, V1_eps5-V1_eps6])
V1_err = np.abs(V1_err)
plt.plot(tol, V1_err)
plt.ylabel('err')
plt.yscale('log')
plt.xlabel('tol 10^(.)')
plt.grid()
###Output
_____no_output_____ |
codigo/metodos_numericos/integracion/Integracion_numerica.ipynb | ###Markdown
Numerical integration
###Code
from __future__ import division
import numpy as np
from scipy.integrate import quad, fixed_quad, quadrature
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
The most general routine for numerical integration is ``scipy.integrate.quad()``
###Code
res, err = quad(np.sin, 0, np.pi/2)
###Output
1.0
###Markdown
``res`` contains the result of the integral
###Code
print(res)
###Output
1.0
###Markdown
``err`` contains the estimate of the error in the numerical integration
###Code
print(err)
###Output
1.11022302463e-14
###Markdown
In addition, we have the routines ``fixed_quad`` and ``quadrature``. Using ``fixed_quad`` we evaluate an integral with the Gauss quadrature method for a desired number of Gauss points
###Code
fixed_quad(lambda x: x**7 - 4, 0, 1, n=4)
###Output
_____no_output_____
###Markdown
And we see that this is the exact value of the integral$$ \int_{0}^{1} (x^7 - 4) \,\mathrm{d}x = \left[\frac{x^8}{8} - 4 x\right]_{0}^{1} = -3.875$$
###Code
1/8 - 4
###Output
_____no_output_____
###Markdown
The ``quadrature`` routine uses an adaptive Gauss method, so it guarantees that the accuracy of the integral meets a prescribed tolerance. If we repeat the previous integral, we obtain
###Code
quadrature(lambda x: x**7 - 4, 0, 1)
###Output
_____no_output_____ |
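For reference, the requested accuracy can be passed explicitly through the ``tol`` and ``rtol`` arguments of ``scipy.integrate.quadrature`` (these are part of its documented signature); a minimal sketch:

```python
from scipy.integrate import quadrature

# request a tighter tolerance than the default ~1.49e-08
res, err = quadrature(lambda x: x**7 - 4, 0, 1, tol=1e-10, rtol=1e-10)
print(res, err)
```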
cassava-distillation.ipynb | ###Markdown
About this notebook TBD... Data Loading
###Code
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
os.listdir("../input/cassava-leaf-disease-classification")
# train = pd.read_csv("../input/cassava-leaf-disease-classification/train.csv")
# train = pd.read_csv("../input/cassava-leaf-disease-merged/oversample-0124.csv") # label 0124 x3
train_id = pd.read_csv("../input/cassava-leaf-disease-merged/merged.csv")["image_id"]
dist = pd.read_csv("distillation_label/ensemble-tta-v2.csv", header=None)
train_label = pd.DataFrame(dist.idxmax(axis=1), columns=["label"])
train = pd.concat([train_id, train_label, dist], axis=1)
test = pd.read_csv("../input/cassava-leaf-disease-classification/sample_submission.csv")
label_map = pd.read_json("../input/cassava-leaf-disease-classification/label_num_to_disease_map.json", orient="index")
display(train.head())
display(test.head())
display(label_map)
sns.distplot(train["label"], kde=False)
###Output
/home/ubuntu/work/pytorch/.venv/lib/python3.7/site-packages/seaborn/distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
Directory settings
###Code
# ====================================================
# Directory settings
# ====================================================
import os
OUTPUT_DIR = "./"
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
# TRAIN_PATH = "../input/cassava-leaf-disease-classification/train_images"
TRAIN_PATH = "../input/cassava-leaf-disease-merged/train"
TEST_PATH = "../input/cassava-leaf-disease-classification/test_images"
###Output
_____no_output_____
###Markdown
CFG
###Code
# ====================================================
# CFG
# ====================================================
class CFG:
debug = False
apex = False
print_freq = 100
num_workers = 4
model_name = "vit_base_patch16_384" # resnext50_32x4d, seresnext50_32x4d, tf_efficientnet_b3_ns, vit_base_patch16_384, deit_base_patch16_384
batch_size = 8
gradient_accumulation_steps = 4
size = 384 if "it_base_" in model_name else 512
n_fold = 5
trn_fold = [0, 1, 2, 3, 4]
criterion = "BiTemperedLoss" # ['CrossEntropyLoss', 'BiTemperedLoss']
btl_t1 = 0.3 # Bi-Tempered Logistic Loss
btl_t2 = 1.0
label_smoothing = 0.2
kd_temperature = 10 # DistillationLoss
kd_alpha = 0.9 # DistillationLoss
scheduler = "CosineAnnealingWarmRestarts" # ['ReduceLROnPlateau', 'CosineAnnealingLR', 'CosineAnnealingWarmRestarts', 'CosineAnnealingWarmupRestarts']
scheduler_batch_update = True
epochs = 10
# factor = 0.2 # ReduceLROnPlateau
# patience = 4 # ReduceLROnPlateau
# eps = 1e-6 # ReduceLROnPlateau
# T_max = 10 # CosineAnnealingLR
T_0 = (
len(train) // n_fold * (n_fold - 1) // batch_size // gradient_accumulation_steps * epochs + 5
) # CosineAnnealingWarmRestarts
# first_cycle_steps = (
# len(train) // n_fold * (n_fold - 1) // batch_size // gradient_accumulation_steps * epochs + 5
# ) # CosineAnnealingWarmupRestarts for batch update
# warmup_steps = first_cycle_steps // 10 # CosineAnnealingWarmupRestarts
# gamma = 0.8 # CosineAnnealingWarmupRestarts
lr = 1e-4
min_lr = 2e-6
weight_decay = 1e-6
max_grad_norm = 1000
seed = 5678
target_size = 5
target_col = "label"
train = True
inference = False
if CFG.debug:
CFG.epochs = 1
train = train.sample(n=1000, random_state=CFG.seed).reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Library
###Code
# ====================================================
# Library
# ====================================================
import sys
sys.path.append("../input/pytorch-image-models/pytorch-image-models-master")
sys.path.append("../input/pytorchcosineannealingwithwarmup")
sys.path.append("../input/bitemperedlogloss/")
sys.path.append("../input/image-fmix/FMix-master")
import math
import os
import random
import shutil
import time
import warnings
from collections import Counter, defaultdict
from contextlib import contextmanager
from functools import partial
from pathlib import Path
import bi_tempered_loss_pytorch as btl
import cv2
import numpy as np
import pandas as pd
import scipy as sp
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from albumentations import (
CenterCrop,
CoarseDropout,
Compose,
Cutout,
HorizontalFlip,
HueSaturationValue,
IAAAdditiveGaussianNoise,
ImageOnlyTransform,
Normalize,
OneOf,
RandomBrightness,
RandomBrightnessContrast,
RandomContrast,
RandomCrop,
RandomResizedCrop,
Resize,
Rotate,
ShiftScaleRotate,
Transpose,
VerticalFlip,
)
from albumentations.pytorch import ToTensorV2
from cosine_annearing_with_warmup import CosineAnnealingWarmupRestarts
from fmix import sample_mask
from PIL import Image
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from torch.nn.parameter import Parameter
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts, ReduceLROnPlateau
from torch.utils.data import DataLoader, Dataset
from tqdm.auto import tqdm
warnings.filterwarnings("ignore")
if CFG.apex:
from apex import amp
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
Utils
###Code
# ====================================================
# Utils
# ====================================================
def get_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
@contextmanager
def timer(name):
t0 = time.time()
LOGGER.info(f"[{name}] start")
yield
LOGGER.info(f"[{name}] done in {time.time() - t0:.0f} s.")
def init_logger(log_file=OUTPUT_DIR + "train.log"):
from logging import INFO, FileHandler, Formatter, StreamHandler, getLogger
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
LOGGER = init_logger()
def seed_torch(seed=42):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_torch(seed=CFG.seed)
###Output
_____no_output_____
###Markdown
CV split
###Code
folds = train.copy()
Fold = StratifiedKFold(n_splits=CFG.n_fold, shuffle=True, random_state=CFG.seed)
for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[CFG.target_col])):
folds.loc[val_index, "fold"] = int(n)
folds["fold"] = folds["fold"].astype(int)
print(folds.groupby(["fold", CFG.target_col]).size())
###Output
fold label
0 0 289
1 663
2 578
3 3165
4 573
1 0 289
1 663
2 578
3 3165
4 573
2 0 289
1 663
2 577
3 3165
4 573
3 0 288
1 662
2 578
3 3166
4 573
4 0 288
1 663
2 578
3 3165
4 573
dtype: int64
###Markdown
Dataset
###Code
# ====================================================
# Dataset
# ====================================================
class TrainDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.file_names = df["image_id"].values
self.hard_target = df["label"].values
self.soft_target = df[[0, 1, 2, 3, 4]].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f"{TRAIN_PATH}/{file_name}"
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
augmented = self.transform(image=image)
image = augmented["image"]
hard_target = torch.tensor(self.hard_target[idx]).long()
soft_target = torch.tensor(self.soft_target[idx]).float()
return image, hard_target, soft_target
class TestDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.file_names = df["image_id"].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f"{TEST_PATH}/{file_name}"
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
augmented = self.transform(image=image)
image = augmented["image"]
return image
train_dataset = TrainDataset(train, transform=None)
for i in range(1):
image, hard_target, soft_target = train_dataset[i]
plt.imshow(image)
plt.title(f"hard_target: {hard_target}, soft_target: {soft_target}")
plt.show()
###Output
_____no_output_____
###Markdown
Transforms
###Code
# ====================================================
# Transforms
# ====================================================
def get_transforms(*, data):
if data == "train":
return Compose(
[
# Resize(CFG.size, CFG.size),
RandomResizedCrop(CFG.size, CFG.size),
Transpose(p=0.5),
HorizontalFlip(p=0.5),
VerticalFlip(p=0.5),
ShiftScaleRotate(p=0.5),
HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=0.5),
CoarseDropout(p=0.5),
Cutout(p=0.5),
Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
]
)
elif data == "valid":
return Compose(
[
Resize(CFG.size, CFG.size),
CenterCrop(CFG.size, CFG.size),
Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
]
)
train_dataset = TrainDataset(train, transform=get_transforms(data="train"))
for i in range(1):
image, hard_target, soft_target = train_dataset[i]
plt.imshow(image[0])
plt.title(f"hard_target: {hard_target}, soft_target: {soft_target}")
plt.show()
###Output
_____no_output_____
###Markdown
CutMix / FMix
###Code
def rand_bbox(size, lam):
W = size[2]
H = size[3]
cut_rat = np.sqrt(1.0 - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
def cutmix(data, target, soft_target, alpha):
indices = torch.randperm(data.size(0))
shuffled_data = data[indices]
shuffled_target = target[indices]
shuffled_soft_target = soft_target[indices]
lam = np.clip(np.random.beta(alpha, alpha), 0.3, 0.4)
bbx1, bby1, bbx2, bby2 = rand_bbox(data.size(), lam)
new_data = data.clone()
new_data[:, :, bby1:bby2, bbx1:bbx2] = data[indices, :, bby1:bby2, bbx1:bbx2]
# adjust lambda to exactly match pixel ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (data.size()[-1] * data.size()[-2]))
targets = (target, soft_target, shuffled_target, shuffled_soft_target, lam)
return new_data, targets
def fmix(data, targets, soft_target, alpha, decay_power, shape, max_soft=0.0, reformulate=False):
lam, mask = sample_mask(alpha, decay_power, shape, max_soft, reformulate)
if CFG.apex:
# mask = torch.tensor(mask, device=device).float()
mask = mask.astype(np.float32)
indices = torch.randperm(data.size(0))
shuffled_data = data[indices]
shuffled_targets = targets[indices]
shuffled_soft_target = soft_target[indices]
x1 = torch.from_numpy(mask).to(device) * data
x2 = torch.from_numpy(1 - mask).to(device) * shuffled_data
targets = (targets, soft_target, shuffled_targets, shuffled_soft_target, lam)
return (x1 + x2), targets
###Output
_____no_output_____
###Markdown
MixUp
###Code
# https://github.com/yuhao318/mwh/blob/e9e2da8fc6/utils.py
def mixup(x, y, soft_target, alpha=1.0, use_cuda=True):
"""Compute the mixup data. Return mixed inputs, pairs of targets, and lambda"""
if alpha > 0.0:
lam = np.random.beta(alpha, alpha)
lam = max(lam, 1 - lam)
# lam = min(lam, 1-lam)
else:
lam = 1.0
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
## SYM
# mixed_x = lam * x + (1 - lam) * x[index,:]
# mixed_y = (1 - lam) * x + lam * x[index,:]
# mixed_image = torch.cat([mixed_x,mixed_y], 0)
# y_a, y_b = y, y[index]
# mixed_label = torch.cat([y_a,y_b], 0)
## Reduce batch size
# new_batch_size = batch_size // 2
# x_i = x[ : new_batch_size]
# x_j = x[new_batch_size : ]
# y_a = y[ : new_batch_size]
# y_b = y[new_batch_size : ]
# mixed_x = lam * x_i + (1 - lam) * x_j
## NO SYM
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
shuffled_soft_target = soft_target[index]
## Only Alpha
# mixed_x = 0.5 * x + (1 - 0.5) * x[index,:]
# mixed_image = mixed_x
# y_a, y_b = y, y[index]
# ind_label = torch.randint_like(y, 0,2)
# mixed_label = ind_label * y_a + (1-ind_label) * y_b
## Reduce batch size and SYM
# new_batch_size = batch_size // 2
# x_i = x[ : new_batch_size]
# x_j = x[new_batch_size : ]
# y_a = y[ : new_batch_size]
# y_b = y[new_batch_size : ]
# mixed_x = lam * x_i + (1 - lam) * x_j
# mixed_y = (1 - lam) * x_i + lam * x_j
# mixed_x = torch.cat([mixed_x,mixed_y], 0)
# y_b = torch.cat([y_b,y_a], 0)
# y_a = y
# return mixed_image, mixed_label, lam
return mixed_x, (y_a, soft_target, y_b, shuffled_soft_target, lam)
###Output
_____no_output_____
###Markdown
MODEL
###Code
# ====================================================
# MODEL
# ====================================================
class CassvaImgClassifier(nn.Module):
def __init__(self, model_name="resnext50_32x4d", pretrained=False):
super().__init__()
self.model_name = model_name
if model_name.startswith("deit_"):
self.model = torch.hub.load("facebookresearch/deit:main", model_name, pretrained=True)
if model_name == "deit_base_patch16_384":
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features, CFG.target_size)
else:
self.model = timm.create_model(model_name, pretrained=pretrained)
if "resnext50_32x4d" in model_name:
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, CFG.target_size)
elif model_name.startswith("tf_efficientnet"):
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, CFG.target_size)
elif model_name.startswith("vit_"):
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features, CFG.target_size)
def forward(self, x):
x = self.model(x)
return x
def freeze_batch_normalization(model):
if CFG.model_name.startswith("tf_efficientnet_"):
for name1, child1 in model.named_children():
for name2, child2 in child1.named_children():
# print(f"===== {name2} =====")
if name2.startswith("bn"):
for param in child2.parameters():
param.requires_grad = False
# print(param.requires_grad)
for child3 in child2.children():
if isinstance(child3, nn.modules.container.Sequential):
for child4 in child3.children():
for child5 in child4.children():
if isinstance(child5, nn.BatchNorm2d):
# print(child5)
for param in child5.parameters():
param.requires_grad = False
# print(param.requires_grad)
if "it_base_" in CFG.model_name:
try:
for m in model.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.LayerNorm):
m.eval()
        except ValueError:
print("error with batchnorm2d or layernorm")
return
model = CassvaImgClassifier(model_name=CFG.model_name, pretrained=False)
freeze_batch_normalization(model)
# print(model)
train_dataset = TrainDataset(train, transform=get_transforms(data="train"))
train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
for image, hard_target, soft_target in train_loader:
output = model(image)
print(output)
break
###Output
tensor([[-0.4990, -0.3848, -0.2926, -0.9923, 0.4753],
[ 0.2738, 0.7270, 0.8360, 0.9189, 0.3108],
[-0.6176, -0.5566, -0.3044, -1.1195, 0.2256],
[-0.4510, -0.1903, -0.0997, 0.1891, -0.4352]],
grad_fn=<AddmmBackward>)
###Markdown
Loss functions
###Code
class BiTemperedLogisticLoss(nn.Module):
def __init__(self, t1, t2, smoothing=0.0):
super(BiTemperedLogisticLoss, self).__init__()
self.t1 = t1
self.t2 = t2
self.smoothing = smoothing
def forward(self, logit_label, truth_label):
loss_label = btl.bi_tempered_logistic_loss(
logit_label, truth_label, t1=self.t1, t2=self.t2, label_smoothing=self.smoothing, reduction="none"
)
loss_label = loss_label.mean()
return loss_label
class DistillationLoss(nn.Module):
def __init__(self, criterion):
super().__init__()
self.criterion = criterion
self.alpha = CFG.kd_alpha
self.T = CFG.kd_temperature
# https://github.com/peterliht/knowledge-distillation-pytorch/blob/b1e4b6acb8d4e138ad12ba1aac22d5da42a85c10/model/net.py#L100
def forward(self, outputs, hard_target, soft_target):
"""
Compute the knowledge-distillation (KD) loss given outputs, labels.
"Hyperparameters": temperature and alpha
NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher
and student expects the input tensor to be log probabilities! See Issue #2
"""
KD_loss = nn.KLDivLoss()(F.log_softmax(outputs / self.T, dim=1), F.softmax(soft_target / self.T, dim=1)) * (
self.alpha * self.T * self.T
) + self.criterion(outputs, hard_target) * (1.0 - self.alpha)
return KD_loss
###Output
_____no_output_____
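A quick, hedged usage sketch of ``DistillationLoss`` with dummy tensors (shapes only — these are not real batches from the loaders):

```python
# Sanity-check the combined KD loss on random tensors (illustrative only).
import torch
import torch.nn as nn

student_logits = torch.randn(4, 5)        # model outputs for a batch of 4 (5 classes = CFG.target_size)
hard_labels = torch.randint(0, 5, (4,))   # ground-truth class ids
teacher_logits = torch.randn(4, 5)        # soft targets, e.g. the ensemble scores in the distillation CSV

kd_criterion = DistillationLoss(nn.CrossEntropyLoss())
print(kd_criterion(student_logits, hard_labels, teacher_logits))  # scalar tensor
```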
###Markdown
Helper functions
###Code
# ====================================================
# Helper functions
# ====================================================
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return "%dm %ds" % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return "%s (remain %s)" % (asMinutes(s), asMinutes(rs))
def train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device, scheduler_batch_update=True):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
start = end = time.time()
global_step = 0
for step, (images, hard_target, soft_target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.to(device)
hard_target = hard_target.to(device)
soft_target = soft_target.to(device)
batch_size = hard_target.shape[0]
# CutMix, FMix
if epoch <= 1 or epoch >= CFG.epochs - 1:
mix_decision = 0.75 # Disable CutMix, FMix for final epoch
else:
mix_decision = np.random.rand()
if epoch >= CFG.epochs - 4:
mix_decision *= 2 # Reduce probability
if mix_decision < 0.25:
images, labels = cutmix(images, hard_target, soft_target, 1.0)
elif mix_decision >= 0.25 and mix_decision < 0.5:
images, labels = fmix(
images, hard_target, soft_target, alpha=1.0, decay_power=5.0, shape=(CFG.size, CFG.size)
)
elif mix_decision >= 0.5 and mix_decision < 0.75:
images, labels = mixup(images, hard_target, soft_target, alpha=0.5)
y_preds = model(images.float())
if mix_decision < 0.75:
loss = criterion(y_preds, labels[0], labels[1]) * labels[4] + criterion(y_preds, labels[2], labels[3]) * (
1.0 - labels[4]
)
else:
loss = criterion(y_preds, hard_target, soft_target)
# record loss
losses.update(loss.item(), batch_size)
if CFG.gradient_accumulation_steps > 1:
loss = loss / CFG.gradient_accumulation_steps
if CFG.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CFG.max_grad_norm)
if (step + 1) % CFG.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
if CFG.scheduler_batch_update:
scheduler.step()
global_step += 1
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % CFG.print_freq == 0 or step == (len(train_loader) - 1):
print(
"Epoch: [{0}][{1}/{2}] "
# "Data {data_time.val:.3f} ({data_time.avg:.3f}) "
# "Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) "
"Elapsed {remain:s} "
"Loss: {loss.val:.4f}({loss.avg:.4f}) "
"Grad: {grad_norm:.4f} "
"LR: {lr:.6f} ".format(
epoch + 1,
step,
len(train_loader),
# batch_time=batch_time,
# data_time=data_time,
loss=losses,
remain=timeSince(start, float(step + 1) / len(train_loader)),
grad_norm=grad_norm,
lr=scheduler.get_lr()[0],
)
)
return losses.avg
def valid_fn(valid_loader, model, criterion, device):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to evaluation mode
model.eval()
preds = []
start = end = time.time()
for step, (images, hard_target, soft_target) in enumerate(valid_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.to(device)
hard_target = hard_target.to(device)
soft_target = soft_target.to(device)
batch_size = hard_target.shape[0]
# compute loss
with torch.no_grad():
y_preds = model(images)
loss = criterion(y_preds, hard_target, soft_target)
losses.update(loss.item(), batch_size)
# record accuracy
preds.append(y_preds.softmax(1).to("cpu").numpy())
if CFG.gradient_accumulation_steps > 1:
loss = loss / CFG.gradient_accumulation_steps
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % CFG.print_freq == 0 or step == (len(valid_loader) - 1):
print(
"EVAL: [{0}/{1}] "
# "Data {data_time.val:.3f} ({data_time.avg:.3f}) "
# "Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) "
"Elapsed {remain:s} "
"Loss: {loss.val:.4f}({loss.avg:.4f}) ".format(
step,
len(valid_loader),
# batch_time=batch_time,
# data_time=data_time,
loss=losses,
remain=timeSince(start, float(step + 1) / len(valid_loader)),
)
)
predictions = np.concatenate(preds)
return losses.avg, predictions
def inference(model, states, test_loader, device):
model.to(device)
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
probs = []
for i, (images) in tk0:
images = images.to(device)
avg_preds = []
for state in states:
model.load_state_dict(state["model"])
model.eval()
with torch.no_grad():
y_preds = model(images)
avg_preds.append(y_preds.softmax(1).to("cpu").numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
probs = np.concatenate(probs)
return probs
###Output
_____no_output_____
###Markdown
Train loop
###Code
# ====================================================
# Train loop
# ====================================================
def train_loop(folds, fold):
LOGGER.info(f"========== fold: {fold} training ==========")
# ====================================================
# loader
# ====================================================
trn_idx = folds[folds["fold"] != fold].index
val_idx = folds[folds["fold"] == fold].index
train_folds = folds.loc[trn_idx].reset_index(drop=True)
valid_folds = folds.loc[val_idx].reset_index(drop=True)
train_dataset = TrainDataset(train_folds, transform=get_transforms(data="train"))
train_dataset_no_aug = TrainDataset(train_folds, transform=get_transforms(data="valid"))
valid_dataset = TrainDataset(valid_folds, transform=get_transforms(data="valid"))
train_loader = DataLoader(
train_dataset,
batch_size=CFG.batch_size,
shuffle=True,
num_workers=CFG.num_workers,
pin_memory=True,
drop_last=True,
)
train_loader_no_aug = DataLoader(
train_dataset_no_aug,
batch_size=CFG.batch_size,
shuffle=True,
num_workers=CFG.num_workers,
pin_memory=True,
drop_last=True,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=CFG.batch_size,
shuffle=False,
num_workers=CFG.num_workers,
pin_memory=True,
drop_last=False,
)
# ====================================================
# scheduler
# ====================================================
def get_scheduler(optimizer):
if CFG.scheduler == "ReduceLROnPlateau":
scheduler = ReduceLROnPlateau(
optimizer, mode="min", factor=CFG.factor, patience=CFG.patience, verbose=True, eps=CFG.eps
)
elif CFG.scheduler == "CosineAnnealingLR":
scheduler = CosineAnnealingLR(optimizer, T_max=CFG.T_max, eta_min=CFG.min_lr, last_epoch=-1)
elif CFG.scheduler == "CosineAnnealingWarmRestarts":
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=CFG.T_0, T_mult=1, eta_min=CFG.min_lr, last_epoch=-1)
elif CFG.scheduler == "CosineAnnealingWarmupRestarts":
scheduler = CosineAnnealingWarmupRestarts(
optimizer,
first_cycle_steps=CFG.first_cycle_steps,
cycle_mult=1.0,
max_lr=CFG.lr,
min_lr=CFG.min_lr,
warmup_steps=CFG.warmup_steps,
gamma=CFG.gamma,
)
return scheduler
# ====================================================
# model & optimizer
# ====================================================
model = CassvaImgClassifier(CFG.model_name, pretrained=True)
freeze_batch_normalization(model)
model.to(device)
# Use multi GPU
if device == torch.device("cuda") and not CFG.apex:
model = torch.nn.DataParallel(model) # make parallel
# torch.backends.cudnn.benchmark=True
optimizer = Adam(model.parameters(), lr=CFG.lr, weight_decay=CFG.weight_decay, amsgrad=False)
scheduler = get_scheduler(optimizer)
# ====================================================
# apex
# ====================================================
if CFG.apex:
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0)
# ====================================================
# Criterion
# ====================================================
def get_criterion():
if CFG.criterion == "CrossEntropyLoss":
hard_criterion = nn.CrossEntropyLoss()
elif CFG.criterion == "BiTemperedLoss":
hard_criterion = BiTemperedLogisticLoss(t1=CFG.btl_t1, t2=CFG.btl_t2, smoothing=CFG.label_smoothing)
criterion = DistillationLoss(hard_criterion)
return criterion
criterion = get_criterion()
# ====================================================
# loop
# ====================================================
best_score = 0.0
best_loss = np.inf
for epoch in range(CFG.epochs):
start_time = time.time()
# train
if epoch <= 1 or epoch >= CFG.epochs - 1:
avg_loss = train_fn(
train_loader_no_aug, model, criterion, optimizer, epoch, scheduler, device, CFG.scheduler_batch_update
)
else:
avg_loss = train_fn(
train_loader, model, criterion, optimizer, epoch, scheduler, device, CFG.scheduler_batch_update
)
# eval
avg_val_loss, preds = valid_fn(valid_loader, model, criterion, device)
valid_labels = valid_folds[CFG.target_col].values
if not CFG.scheduler_batch_update:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(avg_val_loss)
elif isinstance(scheduler, CosineAnnealingLR):
scheduler.step()
elif isinstance(scheduler, CosineAnnealingWarmRestarts):
scheduler.step()
# scoring
score = get_score(valid_labels, preds.argmax(1))
elapsed = time.time() - start_time
LOGGER.info(
f"Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s"
)
LOGGER.info(f"Epoch {epoch+1} - Accuracy: {score}")
if score > best_score:
best_score = score
LOGGER.info(f"Epoch {epoch+1} - Save Best Score: {best_score:.4f} Model")
torch.save(
{"model": model.state_dict(), "preds": preds}, OUTPUT_DIR + f"{CFG.model_name}_fold{fold}_best.pth"
)
if epoch == CFG.epochs - 1:
LOGGER.info(f"Epoch {epoch+1} - Save final model")
torch.save(
{"model": model.state_dict(), "preds": preds}, OUTPUT_DIR + f"{CFG.model_name}_fold{fold}_final.pth"
)
check_point = torch.load(OUTPUT_DIR + f"{CFG.model_name}_fold{fold}_best.pth")
valid_folds[[str(c) for c in range(5)]] = check_point["preds"]
valid_folds["preds"] = check_point["preds"].argmax(1)
return valid_folds
# ====================================================
# main
# ====================================================
def main():
"""
Prepare: 1.train 2.test 3.submission 4.folds
"""
def get_result(result_df):
preds = result_df["preds"].values
labels = result_df[CFG.target_col].values
score = get_score(labels, preds)
LOGGER.info(f"Score: {score:<.5f}")
if CFG.train:
# train
oof_df = pd.DataFrame()
for fold in range(CFG.n_fold):
if fold in CFG.trn_fold:
_oof_df = train_loop(folds, fold)
oof_df = pd.concat([oof_df, _oof_df])
LOGGER.info(f"========== fold: {fold} result ==========")
get_result(_oof_df)
# CV result
LOGGER.info(f"========== CV ==========")
get_result(oof_df)
# save result
oof_df.to_csv(OUTPUT_DIR + "oof_df.csv", index=False)
if CFG.inference:
# inference
model = CassvaImgClassifier(CFG.model_name, pretrained=False)
states = [torch.load(OUTPUT_DIR + f"{CFG.model_name}_fold{fold}_best.pth") for fold in CFG.trn_fold]
test_dataset = TestDataset(test, transform=get_transforms(data="valid"))
test_loader = DataLoader(
test_dataset, batch_size=CFG.batch_size, shuffle=False, num_workers=CFG.num_workers, pin_memory=True
)
predictions = inference(model, states, test_loader, device)
# submission
test["label"] = predictions.argmax(1)
test[["image_id", "label"]].to_csv(OUTPUT_DIR + "submission.csv", index=False)
if __name__ == "__main__":
main()
###Output
========== fold: 0 training ==========
|
606_Widgets.ipynb | ###Markdown
Create a drop-down menu of all the tickers. Create a generic function that finds the unique values, sorts them, and adds an ALL option so the user can remove the filter.
###Code
ALL = 'ALL'
def unique_sorted_values_plus_ALL(array):
unique = array.unique().tolist()
unique.sort()
unique.insert(0, ALL)
return unique
###Output
_____no_output_____
###Markdown
Initialize the dropdown. An observe handler is created to filter the data frame by the selected value; the handler's input argument `change` gives us access to the NEW value. If the new value is ALL, we remove the filter.
###Code
dropdown_symbol = widgets.Dropdown(options = unique_sorted_values_plus_ALL(all_stocks_df.Symbol))
output_Symbol = widgets.Output()
def dropdown_Symbol_eventhandler(change):
    output_Symbol.clear_output()
    # render inside the Output widget so the filtered table actually appears
    with output_Symbol:
        if change.new == ALL:
            display(all_stocks_df)
        else:
            display(all_stocks_df[all_stocks_df.Symbol == change.new])
###Output
_____no_output_____
###Markdown
Bind the handler to the dropdown
###Code
# observe the widget's 'value' trait ('Symbol' is not a trait of Dropdown, so that handler would never fire)
dropdown_symbol.observe(dropdown_Symbol_eventhandler, names='value')
display(dropdown_symbol)
display(output_Symbol)
###Output
_____no_output_____ |
experiment/Baisc/experiment-etch.ipynb | ###Markdown
Data Split
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42)
from sklearn.preprocessing import StandardScaler, MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
from sklearn.metrics import matthews_corrcoef, confusion_matrix,precision_recall_curve,auc,f1_score,roc_auc_score,roc_curve,recall_score,classification_report,accuracy_score
def model(algorithm,dtrain_x,dtrain_y,dtest_x,dtest_y):
print ("MODEL - OUTPUT")
print ("*****************************************************************************************")
algorithm.fit(dtrain_x,dtrain_y)
predictions = algorithm.predict(dtest_x)
print (algorithm)
print ("\naccuracy_score :",accuracy_score(dtest_y,predictions))
print ("\nrecall score:\n",(recall_score(dtest_y,predictions)))
print ("\nf1 score:\n",(f1_score(dtest_y,predictions)))
# print ("\nclassification report :\n",(classification_report(dtest_y,predictions)))
print ("\nmatthews_corrcoef:\n", (matthews_corrcoef(dtest_y, predictions)))
#cross validation
# Graph
plt.figure(figsize=(13,10))
plt.subplot(221)
sns.heatmap(confusion_matrix(dtest_y,predictions),annot=True,fmt = "d",linecolor="k",linewidths=3)
plt.title("CONFUSION MATRIX",fontsize=20)
predicting_probabilites = algorithm.predict_proba(dtest_x)[:,1]
fpr,tpr,thresholds = roc_curve(dtest_y,predicting_probabilites)
plt.subplot(222)
plt.plot(fpr,tpr,label = ("Area_under the curve :",auc(fpr,tpr)),color = "r")
plt.plot([1,0],[1,0],linestyle = "dashed",color ="k")
plt.legend(loc = "best")
plt.title("ROC - CURVE & AREA UNDER CURVE",fontsize=20)
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
model(clf ,X_train,y_train,X_test,y_test)
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
model(clf ,X_train,y_train,X_test,y_test)
# import SVC classifier
from sklearn.svm import SVC
# import metrics to compute accuracy
from sklearn.metrics import accuracy_score
# instantiate classifier with default hyperparameters
svc=SVC(probability=True)
model(svc ,X_train,y_train,X_test,y_test)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
model(clf ,X_train,y_train,X_test,y_test)
###Output
MODEL - OUTPUT
*****************************************************************************************
RandomForestClassifier()
accuracy_score : 0.9907107048068416
recall score:
0.9316037735849056
f1 score:
0.9616555082166768
matthews_corrcoef:
0.9570216471877498
|
Day2_assignment.ipynb | ###Markdown
###Code
lst=["Shivani",1,2,3.5,[7,8,9]]
lst
###Output
_____no_output_____
###Markdown
###Code
print(lst[0])
print(lst[1:3])
print(lst[2:])
print(lst[4][1])
print(lst.append(21.5))
lst
print (lst[-5])
dit={"name":"shivani","age":22,"no":12345}
print (dit)
print (dit.get('name'))
print (dit.items())
print (dit.keys())
print (dit.pop('no'))
print (dit)
dit["school"]="svm"
print (dit)
st={"abc","pqr",1,2,3,3,4,5,6,5}
print (st)
st1={"abc",1}
print (st1)
st1.issubset(st)
print (st.copy())
tup=("shivani","@","Gmail.com")
print (tup)
print (tup.count("@"))
print (tup.index("shivani"))
name="shivani shrikant shete"
name1="sangola"
print(name)
print (name+name1)
type(name)
print(name[0])
print (name[0:1])
###Output
s
|
dataflow/model/notebooks/Master_incremental_single_name_model_evaluator.ipynb | ###Markdown
Imports
###Code
%load_ext autoreload
%autoreload 2
import logging
import core.config as cconfig
import dataflow_model.incremental_single_name_model_evaluator as ime
import dataflow_model.model_evaluator as modeval
import dataflow_model.model_plotter as modplot
import dataflow_model.stats_computer as csc
import core.plotting as cplot
import helpers.hdbg as dbg
import helpers.hprint as hprint
dbg.init_logger(verbosity=logging.INFO)
# dbg.init_logger(verbosity=logging.DEBUG)
_LOG = logging.getLogger(__name__)
# _LOG.info("%s", env.get_system_signature()[0])
hprint.config_notebook()
###Output
[0m[36mINFO[0m: > cmd='/venv/lib/python3.8/site-packages/ipykernel_launcher.py -f /root/.local/share/jupyter/runtime/kernel-db368b29-f783-4c65-bce3-11285bd2d5b0.json'
[33mWARNING[0m: Running in Jupyter
[33mWARNING[0m: Disabling annoying warnings
###Markdown
Notebook config
###Code
# Read from env var.
eval_config = cconfig.Config.from_env_var("AM_CONFIG_CODE")
# Override config.
if eval_config is None:
src_dir = ""
file_name = "result_bundle.v2_0.pkl"
prediction_col = ""
target_col = ""
aws_profile = None
eval_config = cconfig.get_config_from_nested_dict(
{
"compute_stats_kwargs": {
"src_dir": src_dir,
"file_name": file_name,
"prediction_col": prediction_col,
"target_col": target_col,
"start": None,
"end": None,
"selected_idxs": None,
"aws_profile": aws_profile,
},
"aggregate_single_name_models": {
"src_dir": src_dir,
"file_name": file_name,
"position_intent_1_col": "",
"ret_0_col": "",
"spread_0_col": "",
"prediction_col": prediction_col,
"target_col": target_col,
"start": None,
"end": None,
"selected_idxs": None,
"aws_profile": aws_profile,
},
"bh_adj_threshold": 0.1,
}
)
print(str(eval_config))
###Output
_____no_output_____
###Markdown
Compute stats
###Code
stats = ime.compute_stats_for_single_name_artifacts(**eval_config["compute_stats_kwargs"].to_dict())
# TODO(gp): Move this chunk of code into a function.
col_mask = (
stats.loc["signal_quality"].loc["sr.adj_pval"]
< eval_config["bh_adj_threshold"]
)
selected = stats.loc[:, col_mask].columns.to_list()
not_selected = stats.loc[:, ~col_mask].columns.to_list()
print("num model selected=%s" % hprint.perc(len(selected), stats.shape[1]))
print("model selected=%s" % selected)
print("model not selected=%s" % not_selected)
# Use `selected = None` to show all of the models.
# TODO(Paul): call `multipletests_plot()`
###Output
_____no_output_____
###Markdown
Build portfolio
###Code
portfolio, daily_dfs = ime.aggregate_single_name_models(**eval_config["aggregate_single_name_models"].to_dict())
portfolio.dropna().head()
stats_computer = csc.StatsComputer()
###Output
_____no_output_____ |
hw14/myRNN.ipynb | ###Markdown
Neural & Behavioral Modeling - Week 14 (Exercises)by 駱皓正 ([email protected])
###Code
# Upgrade PyTorch to 0.4.0 if necessary:
! conda install -y pytorch-cpu torchvision-cpu -c pytorch
%config IPCompleter.greedy=True
%matplotlib inline
from matplotlib.pyplot import *
from IPython.display import *
import numpy as np
# Check GPU status:
import torch as t  # the rest of this cell relies on the t.* alias
from torch import *
t.manual_seed(1) # for reproducibility
print('PyTorch version:',t.__version__)
use_cuda=t.cuda.is_available()
if(use_cuda):
for i in range(t.cuda.device_count()):
print('Device ',i,':',t.cuda.get_device_name(i))
print('Current: Device ',t.cuda.current_device())
t.backends.cudnn.benchmark = True
device = t.device("cuda")
else:
device = t.device("cpu")
print('No GPU')
###Output
PyTorch version: 0.4.0
No GPU
###Markdown
1 Activation/Signal Function in RNN (10 points)tanh(x) is a more popular choice of activation function than sigmoid(x) & relu(x) in contemporary RNNs.Below please use simple RNN to compare 3 activation functions in terms of their learning efficiency and capacity. If there is any significant difference, please discuss why. nn.RNN(..., nonlinearity='relu') allows you to switch tanh to relu. However, you need some deep hacking of the source code to replace nn.Tanh with nn.Sigmoid. Alternatively, you can code a simple RNN from scratch to have a better control of everything! 1.0 Data: sin(t) → cos(t)
###Code
steps = np.linspace(0, np.pi*2, 100, dtype=np.float32)
x_np = np.sin(steps)
y_np = np.cos(steps)
plot(steps, y_np, 'r-', label='y (cos)')
plot(steps, x_np, 'b-', label='x (sin)')
#plt.legend(loc='best');
###Output
_____no_output_____
###Markdown
1.1 RNN model for 1D-to-1D time-series regression (7 points)
###Code
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.modules.rnn as mornn
torch.manual_seed(1) # reproducible
# Hyper Parameters
TIME_STEP = 10 # rnn time step / image height
INPUT_SIZE = 1 # rnn input size / image width
LR = 0.001 # learning rate
DOWNLOAD_MNIST = False # set to True if haven't download the data
###Output
_____no_output_____
###Markdown
Tanh Version
###Code
# tanh version (the default nonlinearity of nn.RNN)
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
        self.rnn = nn.RNN(  # a plain (vanilla) RNN is enough for this task
            input_size=1,
            hidden_size=32, # rnn hidden unit
            num_layers=1,   # number of stacked RNN layers
            batch_first=True, # input & output put batch size in the first dimension, e.g. (batch, time_step, input_size)
        )
        self.out = nn.Linear(32, 1)
    def forward(self, x, h_state): # the hidden state is carried between calls, so we keep passing it along
        # x (batch, time_step, input_size)
        # h_state (n_layers, batch, hidden_size)
        # r_out (batch, time_step, output_size)
        r_out, h_state = self.rnn(x, h_state) # h_state is also an input to the RNN
        outs = [] # store the prediction at every time step
        for time_step in range(r_out.size(1)): # compute the output at each time step
outs.append(self.out(r_out[:, time_step, :]))
return torch.stack(outs, dim=1), h_state
rnn = RNN()
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.MSELoss()
h_state = None # for initial hidden state
for step in range(100):
start, end = step * np.pi, (step+1)*np.pi # time range
# use sine to predict cosine
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
x_np = np.sin(steps)
y_np = np.cos(steps)
x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis]) # shape (batch, time_step, input_size)
y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
prediction, h_state = rnn(x, h_state) # rnn output
h_state = h_state.data # get rid of the autograd part and retain the data part only
loss = loss_func(prediction, y) # cross entropy loss
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
# plotting
clf() # clear previous figure
plot(steps, y_np.flatten(), 'r-', label='target')
plot(steps, prediction.data.numpy().flatten(), 'b-', label='prediction')
title(loss.item()); legend(loc=9)
display(gcf()); clear_output(wait=True) # to allow dynamic plots
###Output
_____no_output_____
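###Markdown
The exercise also asks about sigmoid, which `nn.RNN` does not expose directly. Below is a hedged, from-scratch sketch of a simple (Elman) RNN cell with a pluggable activation; the class name `SimpleRNN` and its layout are illustrative assumptions, not part of the original assignment code, but it should drop into essentially the same training loop used above.
###Code
# Minimal hand-rolled RNN cell with a configurable activation (here sigmoid).
# Illustrative sketch only; swap in torch.tanh or torch.relu to compare activations.
import torch
from torch import nn

class SimpleRNN(nn.Module):
    def __init__(self, input_size=1, hidden_size=32, activation=torch.sigmoid):
        super(SimpleRNN, self).__init__()
        self.hidden_size = hidden_size
        self.activation = activation
        self.in2hidden = nn.Linear(input_size, hidden_size)
        self.hidden2hidden = nn.Linear(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, 1)
    def forward(self, x, h=None):
        # x has shape (batch, time_step, input_size)
        batch, time_steps, _ = x.shape
        if h is None:
            h = torch.zeros(batch, self.hidden_size)
        outs = []
        for ti in range(time_steps):
            # Elman update: h_t = f(W_ih x_t + W_hh h_{t-1})
            h = self.activation(self.in2hidden(x[:, ti, :]) + self.hidden2hidden(h))
            outs.append(self.out(h))
        return torch.stack(outs, dim=1), h

# quick shape check on random data
rnn_sigmoid = SimpleRNN(activation=torch.sigmoid)
xb = torch.randn(1, TIME_STEP, 1)
yb, hb = rnn_sigmoid(xb)
print(yb.shape, hb.shape)  # expect torch.Size([1, 10, 1]) and torch.Size([1, 32])
###Output
_____no_output_____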
###Markdown
ReLU version
###Code
# relu version (nonlinearity='relu')
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
        self.rnn = nn.RNN(  # a plain (vanilla) RNN is enough for this task
            input_size=1,
            hidden_size=32, # rnn hidden unit
            num_layers=1,   # number of stacked RNN layers
            batch_first=True, # input & output put batch size in the first dimension, e.g. (batch, time_step, input_size)
            nonlinearity='relu'
        )
        self.out = nn.Linear(32, 1)
    def forward(self, x, h_state): # the hidden state is carried between calls, so we keep passing it along
        # x (batch, time_step, input_size)
        # h_state (n_layers, batch, hidden_size)
        # r_out (batch, time_step, output_size)
        r_out, h_state = self.rnn(x, h_state) # h_state is also an input to the RNN
        outs = [] # store the prediction at every time step
        for time_step in range(r_out.size(1)): # compute the output at each time step
outs.append(self.out(r_out[:, time_step, :]))
return torch.stack(outs, dim=1), h_state
rnn = RNN()
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.MSELoss()
h_state = None # for initial hidden state
for step in range(100):
start, end = step * np.pi, (step+1)*np.pi # time range
# use sine to predict cosine
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
x_np = np.sin(steps)
y_np = np.cos(steps)
x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis]) # shape (batch, time_step, input_size)
y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
prediction, h_state = rnn(x, h_state) # rnn output
h_state = h_state.data # get rid of the autograd part and retain the data part only
loss = loss_func(prediction, y) # cross entropy loss
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
# plotting
clf() # clear previous figure
plot(steps, y_np.flatten(), 'r-', label='target')
plot(steps, prediction.data.numpy().flatten(), 'b-', label='prediction')
title(loss.item()); legend(loc=9)
display(gcf()); clear_output(wait=True) # to allow dynamic plots
###Output
_____no_output_____ |
code/modeling/logistic_regression_version3.ipynb | ###Markdown
df['imd_band']=df['imd_band'].map({'0-10%':0,'10-20':1,'20-30%':2,'30-40%':3,'40-50%':4,'50-60%':5,'60-70%':6,'70-80%':7,'80-90%':8,'90-100%':9})
df['module_domain'] = df['module_domain'].map({'SocialScience': 0,'STEM': 1})
df['term'] = df['term'].map({'J': 0,'B': 1})
df['year'] = df['year'].map({'2013': 0,'2014': 1})
df['is_reenrolled'] = df['is_reenrolled'].replace(range(1,12), 1)
df['gender'] = df['gender'].map({'M': 0,'F': 1})
df['age_band'] = df['age_band'].map({'0-35': 0,'35-55': 1,'55<=':2})
df['region'] = df['region'].map({'Scotland': 0,'East Anglian Region': 1,'London Region':2,'South Region': 3,'North Western Region': 4,'West Midlands Region':5,'South West Region': 6,'East Midlands Region': 7,'South East Region':8,'Wales': 9,'Yorkshire Region': 10,'North Region':11,'Ireland':12})
df['pass_fail_ind'] = df['pass_fail_ind'].map({'FAIL':0,'PASS':1})
df['disability'] = df['disability'].map({'N':0,'Y':1})
df['highest_education'] = df['highest_education'].map({'No Formal quals':0,'Lower Than A Level':1,'A Level or Equivalent':2,'HE Qualification':3,'Post Graduate Qualification':4})
###Code
df['term'].count()
df['year'].isnull().sum()
df.groupby(['year','term'])['module_domain'].count()
df.groupby(['year','term']).count()
###Output
_____no_output_____
###Markdown
Check observations for Year 2013 Term J and Term B STEM class
###Code
df.groupby(['year','term','module_domain']).count()
###Output
_____no_output_____
###Markdown
Create a temporary data frame
###Code
tempDf = df[['year','term','code_module','module_domain','region','gender','disability','std_half_score','half_sum_clicks','b4_sum_clicks','age_band','module_presentation_length','num_of_prev_attempts','final_result','highest_education','imd_band','studied_credits','date_registration']]
# tempDf.head(20)
tempDf = tempDf.loc[(tempDf.year == 0)&(tempDf.module_domain==1)]
# Show first 20 observations of the dataset
tempDf.head(20)
tempDf.count()
tempDf.groupby(['term']).count()
tempDf=tempDf.drop(columns=['year','term','module_domain'])
# Create a Heatmap
## import packages
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(10,10))
sns.heatmap(tempDf.corr(),vmin=-1,cmap='YlGnBu',annot=True)
plt.show()
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
tempDf.head(20)
# drop missing values
tempDf=tempDf.dropna()
tempDf.head(20)
tempDf.count()
# Define our predictors
X=tempDf[['disability','gender','std_half_score','half_sum_clicks','imd_band','age_band','num_of_prev_attempts','highest_education','studied_credits']]
y=tempDf['final_result']
# Implementing the model
import statsmodels.api as sm
logit_model=sm.Logit(y,X)
result=logit_model.fit()
print(result.summary2())
# Split the dataset to train and test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=0)
# In this case, I set the test size to 0.25, and therefore the model testing will be based on 25% of the dataset,
# while the model training will be based on 75% of the dataset
from sklearn.linear_model import LogisticRegression
# Initialize our algorithm
# lr = LogisticRegression(random_state=1, solver='liblinear')
logistic_regression= LogisticRegression()
logistic_regression.fit(X_train,y_train)
y_pred=logistic_regression.predict(X_test)
# Get a Confusion Matrix
from sklearn import metrics
confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
sns.heatmap(confusion_matrix, annot=True)
# Print the Accuracy
print('Accuracy: ',metrics.accuracy_score(y_test, y_pred))
# Print the Precision
print("Precision:",metrics.precision_score(y_test, y_pred))
# Print the Recall
print("Recall:",metrics.recall_score(y_test, y_pred))
## Residuals Plot
from yellowbrick.regressor import ResidualsPlot
from sklearn.linear_model import Ridge
model = ResidualsPlot(Ridge())
model.fit(X_train, y_train)
model.score(X_test, y_test)
model.show()
# ROCAUC
from yellowbrick.classifier import ROCAUC
# Instantiate the visualizer with the classification model
model = LogisticRegression(multi_class="auto", solver="liblinear")
visualizer = ROCAUC(model, classes=["tempDf.final_result==1", "temDf.final_result=0"])
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
###Output
_____no_output_____
###Markdown
Diving Deeper into the Results
###Code
print(X_test) # test dataset (without the actual outcome)
print(y_pred)
# Checking the prediction for a New Set of Data
# Let's use our model to predict Year 2014 STEM class results.
# My goal is to use the existing logistic regression model to predict whether the new student will pass or fail.
# Create a new temporary data frame
tempDf2 = df[['year','term','module_domain','final_result','code_module','gender','disability','b4_sum_clicks','half_sum_clicks','std_half_score','module_presentation_length','imd_band','age_band','num_of_prev_attempts','highest_education','studied_credits','date_registration']]
tempDf2 = tempDf2.loc[(tempDf2.year == 1)&(tempDf2.module_domain==1)]
# Show first 20 observations of the dataset
tempDf2.head(20)
tempDf2.count()
# Again, we drop those missing values
tempDf2=tempDf2.dropna()
tempDf2.head(20)
tempDf2.count()
df2 = pd.DataFrame(tempDf2,columns= ['disability','gender','std_half_score','half_sum_clicks','imd_band','age_band','num_of_prev_attempts','highest_education','studied_credits'])
# Show the first 20 observations
df2.head(20)
# Define our predictors, this is the model we use in the previous dataset (tempDf)
X=tempDf[['disability','gender','std_half_score','half_sum_clicks','imd_band','age_band','num_of_prev_attempts','highest_education','studied_credits']]
y=tempDf['final_result']
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=0)
logistic_regression= LogisticRegression()
logistic_regression.fit(X_train,y_train)
# See how our model works
y_pred_new=logistic_regression.predict(df2)
y_test_new=tempDf2['final_result']
from sklearn.metrics import classification_report
print(classification_report(y_test_new,y_pred_new))
confusion_matrix = pd.crosstab(y_test_new, y_pred_new, rownames=['Actual'], colnames=['Predicted'])
sns.heatmap(confusion_matrix, annot=True)
# Print the Accuracy
print('Accuracy: ',metrics.accuracy_score(y_test_new, y_pred_new))
###Output
Accuracy: 0.8166389004581425
###Markdown
Create a temporary data frame for Year 2013 Term J and Term B Social Science Class
###Code
SSDf = df[['year','term','code_module','module_domain','region','gender','disability','std_half_score','half_sum_clicks','b4_sum_clicks','age_band','module_presentation_length','num_of_prev_attempts','final_result','highest_education','imd_band','studied_credits','date_registration']]
SSDf = SSDf.loc[(SSDf.year == 0)&(SSDf.module_domain==0)]
SSDf.head(20)
SSDf.count()
SSDf.groupby(['term']).count()
SSDf=SSDf.drop(columns=['year','term','module_domain'])
SSDf.head(5)
# drop missing values
SSDf=SSDf.dropna()
SSDf.count()
# Define our predictors
X=SSDf[['region','code_module','disability','module_presentation_length','date_registration','gender','std_half_score','half_sum_clicks','imd_band','age_band','num_of_prev_attempts','highest_education','studied_credits']]
y=SSDf['final_result']
# Implementing the model
import statsmodels.api as sm
logit_model=sm.Logit(y,X)
result=logit_model.fit()
print(result.summary2())
# Split the dataset to train and test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=0)
# In this case, I set the test size to 0.25, and therefore the model testing will be based on 25% of the dataset,
# while the model training will be based on 75% of the dataset
from sklearn.linear_model import LogisticRegression
# Initialize our algorithm
# lr = LogisticRegression(random_state=1, solver='liblinear')
logistic_regression= LogisticRegression()
logistic_regression.fit(X_train,y_train)
y_pred=logistic_regression.predict(X_test)
# Get a Confusion Matrix
from sklearn import metrics
confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
sns.heatmap(confusion_matrix, annot=True)
# Print the Accuracy
print('Accuracy: ',metrics.accuracy_score(y_test, y_pred))
# ROCAUC
from yellowbrick.classifier import ROCAUC
# Instantiate the visualizer with the classification model
model = LogisticRegression(multi_class="auto", solver="liblinear")
visualizer = ROCAUC(model, classes=["SSDf.final_result==1", "SSDf.final_result=0"])
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
# Checking the prediction for a New Set of Data
# Let's use our model to predict Year 2014 Social Science class results.
# My goal is to use the existing logistic regression model to predict whether the new student will pass or fail.
# Create a new temporary data frame
SSDf2 = df[['year','term','module_domain','final_result','code_module','gender','disability','b4_sum_clicks','half_sum_clicks','std_half_score','module_presentation_length','imd_band','age_band','num_of_prev_attempts','highest_education','studied_credits','date_registration','region']]
SSDf2 = SSDf2.loc[(SSDf2.year == 1)&(SSDf2.module_domain==0)]
# Show first 20 observations of the dataset
SSDf2.head(20)
# Again, we drop those missing values
SSDf2=SSDf2.dropna()
SSDf2.head(20)
SSDf2.count()
SST2 = pd.DataFrame(SSDf2,columns= ['region','code_module','disability','module_presentation_length','date_registration','gender','std_half_score','half_sum_clicks','imd_band','age_band','num_of_prev_attempts','highest_education','studied_credits'])
# Define our predictors, this is the model we use in the previous dataset (tempDf)
X=SSDf[['region','code_module','disability','module_presentation_length','date_registration','gender','std_half_score','half_sum_clicks','imd_band','age_band','num_of_prev_attempts','highest_education','studied_credits']]
y=SSDf['final_result']
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=0)
logistic_regression= LogisticRegression()
logistic_regression.fit(X_train,y_train)
# See how our model works
y_pred_new=logistic_regression.predict(SST2)
y_test_new=SSDf2['final_result']
from sklearn.metrics import classification_report
print(classification_report(y_test_new,y_pred_new))
confusion_matrix = pd.crosstab(y_test_new, y_pred_new, rownames=['Actual'], colnames=['Predicted'])
sns.heatmap(confusion_matrix, annot=True)
# Print the Accuracy
print('Accuracy: ',metrics.accuracy_score(y_test_new, y_pred_new))
###Output
Accuracy: 0.7871547566856641
|
doc/notebooks/ObservationHistory.ipynb | ###Markdown
Observation History_Rahul Biswas, Phil Marshall_In this notebook we show how to obtain the observation history of the Twinkles field for the 2017 "baseline cadence" `minion_1016` OpSim output database. In order to run it, you would need to change the definition of `dbfileName` to the location of your local copy of an OpSim database. More OpSim databases can be downloaded from [here](https://www.lsst.org/scientists/simulations/opsim/opsim-v335-benchmark-surveys).
###Code
from sqlalchemy import create_engine
import pandas as pd
import os
dbfileName = os.path.join('/Users/rbiswas/data/LSST/OpSimData/', 'minion_1016_sqlite.db')
engine = create_engine('sqlite:///'+dbfileName)
query = 'SELECT obsHistID, expMJD, Filter, FWHMeff, fiveSigmaDepth FROM Summary WHERE FIELDID is 1427 and PROPID is 54'
df = pd.read_sql_query(query, engine)
###Output
_____no_output_____
###Markdown
Let's look at what this returns: a table with 5 columns. Each visit has an `obsHistID`, and took place on a particular date. This timestamp is stored as `expMJD`, for "exposure mean Julian date." (For our purposes, we can assume that each visit will produce a single image, which will appears to us as an "exposure" taken by the camera. In practice, visit images would be constructed out of two separate 15-second camera exposures, but since we probably won't have access to those two images separately, we can use the terms "visit image" and "exposure" interchangeably). The filter can take values `ugrizy`, and the expected PSF full width at half maximum is stored as `FWHMeff`, in arcsec. This standard measure of image quality is what OpSim predicts would be the average PSF width over the whole focal plane. In practice we expect the PSF width, and shape, to vary across the field in each exposure, and hence the DM stack to provide PSF model parameters at every sky position in every visit. The very simple approximation of taking a single `FWHMeff` value and assuming it to be the actual width of a circularly symmetric PSF that does not vary across the field can be thought of as a placeholder: eventually we would use as much PSF information as we can get. The `fiveSigmaDepth` value is a prediction for the average photometric noise level across the field, and depends on the sky brightness as well as the readout noise. A point source with magnitude `fiveSigmaDepth` would be detected at 5-sigma, by having flux equal to 5 times the rms photometric noise level. This means that the flux uncertainty is given by `fiveSigmaDepth` = $-2.5 \log_{10}{5\sigma_f}$ If the `fiveSigmaDepth` is given in AB magnitudes, the flux uncertainty as defined here is in units of "AB maggies" or "mgy". In general, the flux of a source $f$ with AB magnitude $m$ is $f = 10^{-0.4 m}$ mgy.
###Code
df.head()
###Output
_____no_output_____
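###Markdown
As a small illustration of the conversions just described (not part of the original analysis), the cell below turns a `fiveSigmaDepth` value into a flux uncertainty in AB maggies and converts an AB magnitude into a flux; the helper names are hypothetical.
###Code
# Illustrative sketch of the magnitude/flux conversions described above.
import numpy as np

def flux_uncertainty(five_sigma_depth):
    # fiveSigmaDepth = -2.5 log10(5 sigma_f)  =>  sigma_f = 10**(-0.4*m5) / 5  (in AB maggies)
    return 10.0**(-0.4 * np.asarray(five_sigma_depth)) / 5.0

def ab_mag_to_flux(m):
    # f = 10**(-0.4 m), in AB maggies
    return 10.0**(-0.4 * np.asarray(m))

print(flux_uncertainty(24.0))  # ~5e-11 mgy for a 24th-magnitude 5-sigma depth
print(ab_mag_to_flux(24.0))    # ~2.5e-10 mgy
###Output
_____no_output_____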
###Markdown
It'll be useful to have this table in `csv` format, so that we can easily read it back in later.
###Code
df.to_csv('twinkles_observation_history.csv', index=False)
###Output
_____no_output_____ |
teaching/decisiones_de_negocio_basadas_abr_jun_2020/soluciones/lec08-scipy-matplotlib-soluciones.ipynb | ###Markdown
Ejercicio ¿Cómo graficaría una triángulo de que apunta a la derecha ( **>** )?
###Code
x0, x1 = 1, 2
y0, y1, y2 = 1, 2, 3
eje_x = [x0, x0, x0, x1, x0, x1]
eje_y = [y0, y2, y2, y1, y0, y1]
plt.plot(eje_x, eje_y)
plt.show()
#La función scatter se utiliza para graficar puntos
x1, x2 = [1, 1, 2, 3, 4, 5, 4.5], [-0.1, 0.3, 0.2, 0.4, 0.7, 0.9, 0.5]
plt.scatter(x1, x2)
plt.show()
###Output
_____no_output_____
###Markdown
EjercicioUtilizando únicamente la función plot, reproduzca el gráfico de scatter
###Code
plt.plot(x1,x2, 'o')
plt.show()
###Output
_____no_output_____ |
doc/pub/week3/ipynb/.ipynb_checkpoints/week3-checkpoint.ipynb | ###Markdown
PHY321: Review of Vectors, Math and first Numerical Examples for simple Motion Problems **[Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/)**, Department of Physics and Astronomy and Facility for Rare Ion Beams (FRIB), Michigan State University, USA and Department of Physics, University of Oslo, NorwayDate: **Jan 20, 2021**Copyright 1999-2021, [Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/). Released under CC Attribution-NonCommercial 4.0 license Aims and Overarching Motivation WednesdayOn Wednesday the 20th we will mainly go through the overview material from week 2, January 11-15. See the learning material fro week 2 at for example . FridayWe start studying the problem of a falling object and use this to introduce numerical aspects. Falling baseball in one dimensionWe anticipate the mathematical model to come and assume that we have amodel for the motion of a falling baseball without air resistance.Our system (the baseball) is at an initial height $y_0$ (which we willspecify in the program below) at the initial time $t_0=0$. In our program example here we will plot the position in steps of $\Delta t$ up to a final time $t_f$. The mathematical formula for the position $y(t)$ as function of time $t$ is $$y(t) = y_0-\frac{1}{2}gt^2,$$ where $g=9.80665=0.980655\times 10^1$m/s${}^2$ is a constant representing the standard acceleration due to gravity.We have here adopted the conventional standard value. This does not take into account other effects, such as buoyancy or drag.Furthermore, we stop when the ball hits the ground, which takes place at $$y(t) = 0= y_0-\frac{1}{2}gt^2,$$ which gives us a final time $t_f=\sqrt{2y_0/g}$. As of now we simply assume that we know the formula for the falling object. Afterwards, we will derive it. Our Python EncounterWe start with preparing folders for storing our calculations, figures and if needed, specific data files we use as input or output files.
###Code
%matplotlib inline
# Common imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
#in case we have an input file we wish to read in
#infile = open(data_path("MassEval2016.dat"),'r')
###Output
_____no_output_____
###Markdown
You could also define a function for making our plots. Youcan obviously avoid this and simply set up various **matplotlib**commands every time you need them. You may however find it convenientto collect all such commands in one function and simply call thisfunction.
###Code
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
def MakePlot(x,y, styles, labels, axlabels):
plt.figure(figsize=(10,6))
for i in range(len(x)):
plt.plot(x[i], y[i], styles[i], label = labels[i])
plt.xlabel(axlabels[0])
plt.ylabel(axlabels[1])
plt.legend(loc=0)
###Output
_____no_output_____
###Markdown
Thereafter we start setting up the code for the falling object.
###Code
%matplotlib inline
import matplotlib.patches as mpatches
g = 9.80655 #m/s^2
y_0 = 10.0 # initial position in meters
DeltaT = 0.1 # time step
# final time when y = 0, t = sqrt(2*10/g)
tfinal = np.sqrt(2.0*y_0/g)
#set up arrays
t = np.arange(0,tfinal,DeltaT)
y =y_0 -g*.5*t**2
# Then make a nice printout in table form using Pandas
import pandas as pd
from IPython.display import display
data = {'t[s]': t,
'y[m]': y
}
RawData = pd.DataFrame(data)
display(RawData)
plt.style.use('ggplot')
plt.figure(figsize=(8,8))
plt.scatter(t, y, color = 'b')
blue_patch = mpatches.Patch(color = 'b', label = 'Height y as function of time t')
plt.legend(handles=[blue_patch])
plt.xlabel("t[s]")
plt.ylabel("y[m]")
save_fig("FallingBaseball")
plt.show()
###Output
_____no_output_____
###Markdown
Here we used **pandas** (see below) to systemize the output of the position as function of time. Average quantitiesWe define now the average velocity as $$\overline{v}(t) = \frac{y(t+\Delta t)-y(t)}{\Delta t}.$$ In the code we have set the time step $\Delta t$ to a given value. We could define it in terms of the number of points $n$ as $$\Delta t = \frac{t_{\mathrm{final}-}t_{\mathrm{initial}}}{n+1}.$$ Since we have discretized the variables, we introduce the counter $i$ and let $y(t)\rightarrow y(t_i)=y_i$ and $t\rightarrow t_i$with $i=0,1,\dots, n$. This gives us the following shorthand notations that we will use for the rest of this course. We define $$y_i = y(t_i),\hspace{0.2cm} i=0,1,2,\dots,n.$$ This applies to other variables which depend on say time. Examples are the velocities, accelerations, momenta etc.Furthermore we use the shorthand $$y_{i\pm 1} = y(t_i\pm \Delta t),\hspace{0.12cm} i=0,1,2,\dots,n.$$ Compact equationsWe can then rewrite in a more compact form the average velocity as $$\overline{v}_i = \frac{y_{i+1}-y_{i}}{\Delta t}.$$ The velocity is defined as the change in position per unit time.In the limit $\Delta t \rightarrow 0$ this defines the instantaneous velocity, which is nothing but the slope of the position at a time $t$.We have thus $$v(t) = \frac{dy}{dt}=\lim_{\Delta t \rightarrow 0}\frac{y(t+\Delta t)-y(t)}{\Delta t}.$$ Similarly, we can define the average acceleration as the change in velocity per unit time as $$\overline{a}_i = \frac{v_{i+1}-v_{i}}{\Delta t},$$ resulting in the instantaneous acceleration $$a(t) = \frac{dv}{dt}=\lim_{\Delta t\rightarrow 0}\frac{v(t+\Delta t)-v(t)}{\Delta t}.$$ **A note on notations**: When writing for example the velocity as $v(t)$ we are then referring to the continuous and instantaneous value. A subscript like$v_i$ refers always to the discretized values. A differential equationWe can rewrite the instantaneous acceleration as $$a(t) = \frac{dv}{dt}=\frac{d}{dt}\frac{dy}{dt}=\frac{d^2y}{dt^2}.$$ This forms the starting point for our definition of forces later. It is a famous second-order differential equation. If the acceleration is constant we can now recover the formula for the falling ball we started with.The acceleration can depend on the position and the velocity. To be more formal we should then write the above differential equation as $$\frac{d^2y}{dt^2}=a(t,y(t),\frac{dy}{dt}).$$ With given initial conditions for $y(t_0)$ and $v(t_0)$ we can thenintegrate the above equation and find the velocities and positions ata given time $t$.If we multiply with mass, we have one of the famous expressions for Newton's second law, $$F(y,v,t)=m\frac{d^2y}{dt^2}=ma(t,y(t),\frac{dy}{dt}),$$ where $F$ is the force acting on an object with mass $m$. We see that it also has the right dimension, mass times length divided by time squared.We will come back to this soon. Integrating our equationsFormally we can then, starting with the acceleration (suppose we have measured it, how could we do that?)compute say the height of a building. 
To see this we perform the following integrations from an initial time $t_0$ to a given time $t$ $$\int_{t_0}^t dt a(t) = \int_{t_0}^t dt \frac{dv}{dt} = v(t)-v(t_0),$$ or as $$v(t)=v(t_0)+\int_{t_0}^t dt a(t).$$ When we know the velocity as function of time, we can find the position as function of time starting from the defintion of velocity as the derivative with respect to time, that is we have $$\int_{t_0}^t dt v(t) = \int_{t_0}^t dt \frac{dy}{dt} = y(t)-y(t_0),$$ or as $$y(t)=y(t_0)+\int_{t_0}^t dt v(t).$$ These equations define what is called the integration method forfinding the position and the velocity as functions of time. There isno loss of generality if we extend these equations to more than onespatial dimension. Constant acceleration case, the velocityLet us compute the velocity using the constant value for the acceleration given by $-g$. We have $$v(t)=v(t_0)+\int_{t_0}^t dt a(t)=v(t_0)+\int_{t_0}^t dt (-g).$$ Using our initial time as $t_0=0$s and setting the initial velocity $v(t_0)=v_0=0$m/s we get when integrating $$v(t)=-gt.$$ The more general case is $$v(t)=v_0-g(t-t_0).$$ We can then integrate the velocity and obtain the final formula for the position as function of time through $$y(t)=y(t_0)+\int_{t_0}^t dt v(t)=y_0+\int_{t_0}^t dt v(t)=y_0+\int_{t_0}^t dt (-gt),$$ With $y_0=10$m and $t_0=0$s, we obtain the equation we started with $$y(t)=10-\frac{1}{2}gt^2.$$ Computing the averagesAfter this mathematical background we are now ready to compute the mean velocity using our data.
###Code
# Now we can compute the mean velocity using our data
# We define first an array Vaverage
n = np.size(t)
Vaverage = np.zeros(n)
for i in range(1,n-1):
Vaverage[i] = (y[i+1]-y[i])/DeltaT
# Now we can compute the mean accelearatio using our data
# We define first an array Aaverage
n = np.size(t)
Aaverage = np.zeros(n)
Aaverage[0] = -g
for i in range(1,n-1):
Aaverage[i] = (Vaverage[i+1]-Vaverage[i])/DeltaT
data = {'t[s]': t,
'y[m]': y,
'v[m/s]': Vaverage,
'a[m/s^2]': Aaverage
}
NewData = pd.DataFrame(data)
display(NewData[0:n-2])
###Output
_____no_output_____
###Markdown
Note that we don't print the last values! Including Air Resistance in our modelIn our discussions till now of the falling baseball, we have ignoredair resistance and simply assumed that our system is only influencedby the gravitational force. We will postpone the derivation of airresistance till later, after our discussion of Newton's laws andforces.For our discussions here it suffices to state that the accelerations is now modified to $$\boldsymbol{a}(t) = -g +D\boldsymbol{v}(t)\vert v(t)\vert,$$ where $\vert v(t)\vert$ is the absolute value of the velocity and $D$ is a constant which pertains to the specific object we are studying.Since we are dealing with motion in one dimension, we can simplify the above to $$a(t) = -g +Dv^2(t).$$ We can rewrite this as a differential equation $$a(t) = \frac{dv}{dt}=\frac{d^2y}{dt^2}= -g +Dv^2(t).$$ Using the integral equations discussed above we can integrate twiceand obtain first the velocity as function of time and thereafter theposition as function of time.For this particular case, we can actually obtain an analyticalsolution for the velocity and for the position. Here we will firstcompute the solutions analytically, thereafter we will derive Euler'smethod for solving these differential equations numerically. Analytical solutionsFor simplicity let us just write $v(t)$ as $v$. We have $$\frac{dv}{dt}= -g +Dv^2(t).$$ We can solve this using the technique of separation of variables. Weisolate on the left all terms that involve $v$ and on the right allterms that involve time. We get then $$\frac{dv}{g -Dv^2(t) }= -dt,$$ We scale now the equation to the left by introducing a constant$v_T=\sqrt{g/D}$. This constant has dimension length/time. Can youshow this?Next we integrate the left-hand side (lhs) from $v_0=0$ m/s to $v$ andthe right-hand side (rhs) from $t_0=0$ to $t$ and obtain $$\int_{0}^v\frac{dv}{g -Dv^2(t) }= \frac{v_T}{g}\mathrm{arctanh}(\frac{v}{v_T}) =-\int_0^tdt = -t.$$ We can reorganize these equations as $$v_T\mathrm{arctanh}(\frac{v}{v_T}) =-gt,$$ which gives us $v$ as function of time $$v(t)=v_T\tanh{-(\frac{gt}{v_T})}.$$ Finding the final heightWith the velocity we can then find the height $y(t)$ by integrating yet another time, that is $$y(t)=y(t_0)+\int_{t_0}^t dt v(t)=\int_{0}^t dt[v_T\tanh{-(\frac{gt}{v_T})}].$$ This integral is a little bit trickier but we can look it up in a table over known integrals and we get $$y(t)=y(t_0)-\frac{v_T^2}{g}\log{[\cosh{(\frac{gt}{v_T})}]}.$$ Alternatively we could have used the symbolic Python package **Sympy** (example will be inserted later). In most cases however, we need to revert to numerical solutions. 
Our first attempt at solving differential equationsHere we will try the simplest possible approach to solving the second-order differential equation $$a(t) =\frac{d^2y}{dt^2}= -g +Dv^2(t).$$ We rewrite it as two coupled first-order equations (this is a standard approach) $$\frac{dy}{dt} = v(t),$$ with initial condition $y(t_0)=y_0$ and $$a(t) =\frac{dv}{dt}= -g +Dv^2(t),$$ with initial condition $v(t_0)=v_0$.Many of the algorithms for solving differential equations start with simple Taylor equations.If we now Taylor expand $y$ and $v$ around a value $t+\Delta t$ we have $$y(t+\Delta t) = y(t)+\Delta t \frac{dy}{dt}+\frac{\Delta t^2}{2!} \frac{d^2y}{dt^2}+O(\Delta t^3),$$ and $$v(t+\Delta t) = v(t)+\Delta t \frac{dv}{dt}+\frac{\Delta t^2}{2!} \frac{d^2v}{dt^2}+O(\Delta t^3).$$ Using the fact that $dy/dt = v$ and $dv/dt=a$ and keeping only terms up to $\Delta t$ we have $$y(t+\Delta t) = y(t)+\Delta t v(t)+O(\Delta t^2),$$ and $$v(t+\Delta t) = v(t)+\Delta t a(t)+O(\Delta t^2).$$ Discretizing our equationsUsing our discretized versions of the equations with for example$y_{i}=y(t_i)$ and $y_{i\pm 1}=y(t_i+\Delta t)$, we can rewrite theabove equations as (and truncating at $\Delta t$) $$y_{i+1} = y_i+\Delta t v_i,$$ and $$v_{i+1} = v_i+\Delta t a_i.$$ These are the famous Euler equations (forward Euler).To solve these equations numerically we start at a time $t_0$ and simply integrate up these equations to a final time $t_f$,The step size $\Delta t$ is an input parameter in our code.You can define it directly in the code below as
###Code
DeltaT = 0.1
###Output
_____no_output_____
###Markdown
With a given final time **tfinal** we can then find the number of integration points via the **ceil** function included in the **math** package of Python as
###Code
#define final time, assuming that initial time is zero
from math import ceil
tfinal = 0.5
n = ceil(tfinal/DeltaT)
print(n)
###Output
5
###Markdown
The **ceil** function returns the smallest integer not less than the input in say
###Code
x = 21.15
print(ceil(x))
###Output
22
###Markdown
which in the case here is 22.
###Code
x = 21.75
print(ceil(x))
###Output
22
###Markdown
which also yields 22. The **floor** function in the **math** package is used to return the closest integer value which is less than or equal to the specified expression or value. Compare the previous result to the usage of **floor**
###Code
from math import floor
x = 21.75
print(floor(x))
###Output
21
###Markdown
Alternatively, we can define the number of integration (mesh) points ourselves. In this case we could have
###Code
n = 10
tinitial = 0.0
tfinal = 0.5
DeltaT = (tfinal-tinitial)/(n)
print(DeltaT)
###Output
0.05
###Markdown
Since we will set up one-dimensional arrays that contain the values of various variables like time, position, velocity, acceleration etc., we need to know the value of $n$, the number of data points (or integration or mesh points). With $n$ we can initialize a given array by setting all elements to zero, as done here
###Code
# define array a
a = np.zeros(n)
print(a)
###Output
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
###Markdown
Code for implementing Euler's methodIn the code here we implement this simple Euler scheme, choosing the value $D=0.00245$ m$^{-1}$ (so that the drag term $Dv^2$ has the dimension of an acceleration).
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
g = 9.80655 #m/s^2
D = 0.00245 # drag constant in 1/m, so that D*v*v has units of m/s^2
DeltaT = 0.1
#set up arrays
tfinal = 0.5
n = ceil(tfinal/DeltaT)
# define scaling constant vT
vT = sqrt(g/D)
# set up arrays for t, a, v, and y and we can compare our results with analytical ones
t = np.zeros(n)
a = np.zeros(n)
v = np.zeros(n)
y = np.zeros(n)
yanalytic = np.zeros(n)
# Initial conditions
v[0] = 0.0 #m/s
y[0] = 10.0 #m
yanalytic[0] = y[0]
# Start integrating using Euler's method
for i in range(n-1):
# expression for acceleration
a[i] = -g + D*v[i]*v[i]
# update velocity and position
y[i+1] = y[i] + DeltaT*v[i]
v[i+1] = v[i] + DeltaT*a[i]
# update time to next time step and compute analytical answer
t[i+1] = t[i] + DeltaT
yanalytic[i+1] = y[0]-(vT*vT/g)*log(cosh(g*t[i+1]/vT))
if ( y[i+1] < 0.0):
break
a[n-1] = -g + D*v[n-1]*v[n-1]
data = {'t[s]': t,
'y[m]': y-yanalytic,
'v[m/s]': v,
'a[m/s^2]': a
}
NewData = pd.DataFrame(data)
display(NewData)
#finally we plot the data
fig, axs = plt.subplots(3, 1)
axs[0].plot(t, y, t, yanalytic)
axs[0].set_xlim(0, tfinal)
axs[0].set_ylabel('y and exact')
axs[1].plot(t, v)
axs[1].set_ylabel('v[m/s]')
axs[2].plot(t, a)
axs[2].set_xlabel('time[s]')
axs[2].set_ylabel('a[m/s^2]')
fig.tight_layout()
save_fig("EulerIntegration")
plt.show()
###Output
_____no_output_____
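###Markdown
As a cross-check (not part of the original notes), the cell below is a sketch using **Sympy**, assuming it is installed, verifying that the analytical expressions used for `yanalytic` above indeed satisfy $dv/dt=-g+Dv^2$ and $dy/dt=v$.
###Code
# Symbolic sanity check of the analytical solution; assumes sympy is available.
# _s-suffixed names are used so we do not clobber the numerical arrays defined above.
import sympy as sp

t_s, g_s, D_s, y0_s = sp.symbols('t g D y_0', positive=True)
vT_s = sp.sqrt(g_s/D_s)                                   # terminal-velocity scale
v_s = vT_s*sp.tanh(-g_s*t_s/vT_s)                         # proposed v(t)
y_s = y0_s - (vT_s**2/g_s)*sp.log(sp.cosh(g_s*t_s/vT_s))  # proposed y(t)

# both residuals should print 0
print(sp.simplify(sp.diff(v_s, t_s) - (-g_s + D_s*v_s**2)))  # checks dv/dt = -g + D v^2
print(sp.simplify(sp.diff(y_s, t_s) - v_s))                  # checks dy/dt = v
###Output
_____no_output_____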
###Markdown
Try different values for $\Delta t$ and study the difference between the exact solution and the numerical solution. Simple extension, the Euler-Cromer methodThe Euler-Cromer method is a simple variant of the standard Euler method. We use the newly updated velocity $v_{i+1}$ as an input to the new position, that is, instead of $$y_{i+1} = y_i+\Delta t v_i,$$ and $$v_{i+1} = v_i+\Delta t a_i,$$ we now use the newly calculated $v_{i+1}$ as input to $y_{i+1}$, that is we compute first $$v_{i+1} = v_i+\Delta t a_i,$$ and then $$y_{i+1} = y_i+\Delta t v_{i+1}.$$ Implementing the Euler-Cromer method yields a simple change to the previous code. We only need to change the following lines in the loop over timesteps
###Code
for i in range(n-1):
    # more code in between here
v[i+1] = v[i] + DeltaT*a[i]
y[i+1] = y[i] + DeltaT*v[i+1]
# more code
###Output
_____no_output_____ |
lesson09/inClass_lesson09.ipynb | ###Markdown
Choosing locations of files
###Code
# specify the directory where the make3dplanets.py is
make3dplanets_libdir = './genericPlanetFiles/'
# pick a name for our 3D files
SystemName = 'myPlanetSystem1'
# make a directory where things are going to be stored
#!mkdir outPlanets
# now I commented because directory exists
output_planet_dir = './outPlanets/' # where my planets will be stored
###Output
_____no_output_____
###Markdown
Make aesthetic choices
###Code
# look again at the kepler dataset to remind ourselves what things look like
from convert_kepler_data import read_kepler_data
kepler_data = read_kepler_data('kepler101data.txt') # initial conditions, observational
# mass of the star here in this system
kepler_data['sMass'][0] # mass of star in solar masses
# mass of my planets
kepler_data['pMass'] # in Jupiter masses
# so these are about the size of Saturn & Neptune, respectively
# order is first planet second planet sun
texture_files = ['saturn_1024.jpg', 'neptunemap_1000.jpg', 'sun_texture1.jpg']
# approximate the sizes of things by Saturn, Neptune and the sun
PlanetRadius = np.array([5.823e9, 2.46e9, 69.55e9])*2 # cm
# Saturn Neptune Sun
PlanetRadius
###Output
_____no_output_____
###Markdown
Start putting all this information together
###Code
# 1. tell python where to look for files
from sys import path
path.append(make3dplanets_libdir)
from make3dplanets import make3dplanets
# rename a few things for sanity's sake
generic_dir = make3dplanets_libdir # generic planet files
textures_dir = make3dplanets_libdir + '/textureMaps/' # textures are
###Output
_____no_output_____
###Markdown
Finally - run function to make 3d geometry
###Code
# what time step to make a file out of
Nplot = 100
# format my planet locations
# at this timestep (Nplot timestep) our planet will be located at PlanetLocation
# start with an empty PlanetLocation vector:
PlanetLocation = np.zeros([len(PlanetRadius), 3]) # initialize format
# fill PlanetLocation with positions from r_h, our simulation data at timestep Nplot
for p in range(0,len(PlanetRadius)): # loop over each planet
PlanetLocation[p,2] = r_h[p, 0, Nplot]*AUinCM # swapping z & x, and converting to cm
PlanetLocation[p,0] = r_h[p, 1, Nplot]*AUinCM # swapping x & y, converting
PlanetLocation[p,1] = r_h[p, 2, Nplot]*AUinCM # swapping y & z, converting
# use all this information to make a 3d geometry file
fname = make3dplanets(SystemName, PlanetLocation, PlanetRadius,
output_planet_dir, generic_dir, textures_dir=textures_dir,
texture_file=texture_files, DistanceUnits=AUinCM)
fname
###Output
_____no_output_____
###Markdown
Exercise: Do this for your dataset (not the Kepler 101 dataset). Upload a model + textures to Sketchfab. Consider: 1. What textures are you going to use? (What do you think your planet looks like?) 2. Do you want to change the size (PlanetRadius)? If so, make sure you note by how much. Other things: 1. Try other timesteps. 2. Look over the galaxy data. 3. Try another model - either a separate file upload OR in the same box, offset. 4. Start building a data-viz blog. Using rebound to solve for solar system - Bonus Round
###Code
# gotta do:
#!pip install rebound
from rebound import data
import rebound
sim = rebound.Simulation() # create a simulation object
data.add_outer_solar_system(sim) # add the outer solar system data from Horizon telescope
# gives position of outer planets
# counts particles in sim
Nparts = 0
for p in sim.particles:
Nparts += 1
print('number parts ', Nparts)
# timestep counter
Nsteps = 100 # total number of steps in sim
delta_t = 1.0e2 # time in *days* between steps
# create r_h to plop down our positions in
# r_h[# particles, #of coord, #timesteps]
r_h = np.zeros([Nparts, 3, Nsteps])
#r_h.shape
# Integrate until t=1e4 (unit of time in this example is days)
for i in range(Nsteps):
tmax = delta_t*i
sim.integrate(tmax) # integrate to current timestep
# Store positions in r_h array
for j,p in enumerate(sim.particles):
# time x y z
#print(j,sim.t, p.x, p.y, p.z)
r_h[j, 0, i] = p.x
r_h[j, 1, i] = p.y
r_h[j, 2, i] = p.z
# plot quick scatter plot with ipyvolume
import ipyvolume
x = r_h[:,0,:].ravel()
y = r_h[:,1,:].ravel()
z = r_h[:,2,:].ravel()
ipyvolume.quickscatter(x, y, z,
size=1, marker="sphere")
###Output
_____no_output_____ |
Task_2-3.ipynb | ###Markdown
Course 2Miriam Stricker 03.10.2021  *Figure 1: Project proposal* IntroductionCredit One is a third-party credit rating authority that provides retail customer credit approval services to Blackwell. This project is examining current customer demographics to better understand what traits might relate to whether or not a customer is likely to default on their current credit obligations. Understanding this is vital to the success of Credit One because their business model depends on customers paying their debts.The goal is to identify which customer attributes relate significantly to customer default rates and to build a predictive model that Credit One can use to better classify potential customers as being ‘at-risk’, compared to previously implemented models. Specific questions- How do you ensure that customers can/will pay their loans?- Can we approve customers with high certainty?Here are some lessons the company learned from addressing a similar problem last year:- We cannot control customer spending habits- We cannot always go from what we find in our analysis to the underlying "why"- We must focus on the problems we can solve:- Which attributes in the data can we deem to be statistically significant to the problem at hand?- What concrete information can we derive from the data we have?- What proven methods can we use to uncover more information and why? OverviewTo tackle these questions I will start with preparing the data, then do some exploratory data analysis (EDA) and subsequently develop models to build a predictor for default risk per customer. A conclusion at the end will summarize the most important points. Data30000 entries of Taiwanese customers in 2005.There are 25 variables:- ID: ID of each client- LIMIT_BAL: Amount of given credit in NT dollars (includes individual and family/supplementary credit- SEX: Gender (1=male, 2=female)- EDUCATION: (1=graduate school, 2=university, 3=high school, 4=others, 5=unknown, 6=unknown)- MARRIAGE: Marital status (1=married, 2=single, 3=others)- AGE: Age in years- PAY_0: Repayment status in September, 2005 (-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 8=payment delay for eight months, 9=payment delay for nine months and above)- PAY_2: Repayment status in August, 2005 (scale same as above)- PAY_3: Repayment status in July, 2005 (scale same as above)- PAY_4: Repayment status in June, 2005 (scale same as above)- PAY_5: Repayment status in May, 2005 (scale same as above)- PAY_6: Repayment status in April, 2005 (scale same as above)- BILL_AMT1: Amount of bill statement in September, 2005 (NT dollar)- BILL_AMT2: Amount of bill statement in August, 2005 (NT dollar)- BILL_AMT3: Amount of bill statement in July, 2005 (NT dollar)- BILL_AMT4: Amount of bill statement in June, 2005 (NT dollar)- BILL_AMT5: Amount of bill statement in May, 2005 (NT dollar)- BILL_AMT6: Amount of bill statement in April, 2005 (NT dollar)- PAY_AMT1: Amount of previous payment in September, 2005 (NT dollar)- PAY_AMT2: Amount of previous payment in August, 2005 (NT dollar)- PAY_AMT3: Amount of previous payment in July, 2005 (NT dollar)- PAY_AMT4: Amount of previous payment in June, 2005 (NT dollar)- PAY_AMT5: Amount of previous payment in May, 2005 (NT dollar)- PAY_AMT6: Amount of previous payment in April, 2005 (NT dollar)- default.payment.next.month: Default payment (1=yes, 0=no) Loading Data
###Code
from sqlalchemy import create_engine
import pymysql
import pandas as pd
import pandas_profiling
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from math import sqrt
from lightgbm import LGBMRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import BayesianRidge
from sklearn.ensemble import GradientBoostingRegressor
#create a connection:
db_connection_str = 'mysql+pymysql://deepanalytics:[email protected]/deepanalytics'
# connect to the MySQL instance:
db_connection = create_engine(db_connection_str)
df = pd.read_sql('SELECT * FROM credit', con=db_connection)
df.to_csv("payment_data_raw.csv", index=False)
#changing header to first row data:
df.columns = df.iloc[0]
df = df[1:]
df.to_csv("payment_data.csv", index=False)
###Output
_____no_output_____
###Markdown
Data preprocessing
###Code
data = pd.read_csv('payment_data.csv')
#data = pd.read_csv('payment_data_cleaned.csv')
#print (data.head())
#data.describe()
#data.columns
#print (data.info())
###Output
_____no_output_____
###Markdown
Data Cleaning: Duplicates
###Code
data['SEX'].unique()
data.drop(data.loc[data['SEX']=='X2'].index, inplace=True)
data.drop(data.loc[data['SEX']=='SEX'].index, inplace=True)
print ("Number of data points: " + str(len(data)))
print ("Duplicates count: " + str(len(data)-len(data.drop_duplicates())))
#Drop duplicates:
data = data.drop_duplicates()
print ("After removing duplicates:")
print ("Number of data points: " + str(len(data)))
print ("Duplicates count: " + str(len(data)-len(data.drop_duplicates())))
###Output
Number of data points: 30201
Duplicates count: 201
After removing duplicates:
Number of data points: 30000
Duplicates count: 0
###Markdown
Missing Data:
###Code
data.isna().any().any()
###Output
_____no_output_____
###Markdown
--> no missing data
###Code
#Making object types to "best" types for columns
data = data.apply(pd.to_numeric, errors='ignore')
data = data.convert_dtypes()
print(data.info())
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 30000 entries, 0 to 30202
Data columns (total 25 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ID 30000 non-null Int64
1 LIMIT_BAL 30000 non-null Int64
2 SEX 30000 non-null string
3 EDUCATION 30000 non-null string
4 MARRIAGE 30000 non-null Int64
5 AGE 30000 non-null Int64
6 PAY_0 30000 non-null Int64
7 PAY_2 30000 non-null Int64
8 PAY_3 30000 non-null Int64
9 PAY_4 30000 non-null Int64
10 PAY_5 30000 non-null Int64
11 PAY_6 30000 non-null Int64
12 BILL_AMT1 30000 non-null Int64
13 BILL_AMT2 30000 non-null Int64
14 BILL_AMT3 30000 non-null Int64
15 BILL_AMT4 30000 non-null Int64
16 BILL_AMT5 30000 non-null Int64
17 BILL_AMT6 30000 non-null Int64
18 PAY_AMT1 30000 non-null Int64
19 PAY_AMT2 30000 non-null Int64
20 PAY_AMT3 30000 non-null Int64
21 PAY_AMT4 30000 non-null Int64
22 PAY_AMT5 30000 non-null Int64
23 PAY_AMT6 30000 non-null Int64
24 default payment next month 30000 non-null string
dtypes: Int64(22), string(3)
memory usage: 6.6 MB
None
###Markdown
Data Cleaning: Checking each variable for consistency ID:- Must not have any double values -> OK- Goes from 1 to 30000 -> OK
###Code
data[data['ID'].duplicated() == True]
###Output
_____no_output_____
###Markdown
LIMIT_BALAmount of given credit in NT dollars (includes individual and family/supplementary credit).- Check for "magic values" smaller than 0 -> OK- MIN: 10k- MAX = 1Mio- Outliers? -> Only 206 entries over 500k...- Outliers? -> Only 6 entries over 750k...--> This data does not reflect values greater than 500k well. Therefore it makes sense to limit the model to values of at most 500k; a different model needs to be employed for bigger values. Dropping all rows with "LIMIT_BAL">500k.--> There is a spike at 500k, which is so far unexplained, but we will leave it there for now
###Code
#data["LIMIT_BAL"].hist(bins=50)
#data["LIMIT_BAL"].max()
#data.loc[data["LIMIT_BAL"]>500000]
#data[data["LIMIT_BAL"]>500000].count()
#data[data["LIMIT_BAL"]>750000].count()
data.drop(data.loc[data["LIMIT_BAL"]>500000].index, inplace=True)
#data["LIMIT_BAL"].hist(bins=50)
###Output
_____no_output_____
###Markdown
SEX:Gender (1=male, 0=female)- Two genders- Made into binary data- Relatively balanced (17995 female to 11799 male; these counts come from before the later cleaning steps)- Note: using gender as a model feature might be ethically problematic
###Code
data["SEX"].describe()
#print (data["SEX"].head())
data['SEX_binary'] = data.SEX.apply(lambda x: 1 if x == "male" else 0)
#print (data["SEX_binary"].head())
data = data.drop(["SEX"], axis=1)
data = data.rename(columns={"SEX_binary": "SEX"})
###Output
_____no_output_____
###Markdown
EDUCATION:(4=graduate school, 3=university, 2=high school, 1=others)--> "other" hard to gauge what it is. Could be on either end of the scale...--> Making numerical. It is ok to stay ordinal
###Code
data["EDUCATION"].describe()
#data["EDUCATION"].hist()
di = {"graduate school": 4, "university": 3, "high school": 2, "other": 1}
data = data.replace({"EDUCATION": di})
#data["EDUCATION"].hist()
###Output
_____no_output_____
###Markdown
MARRIAGEMarital status (0=other, 1=married, 2=single, 3=divorced)- This is not an ordinal variable, we are therefore one-hot-coding it
###Code
#data["MARRIAGE"].hist()
data["MARRIAGE"].unique()
data.loc[data["MARRIAGE"]==0]
##data["MARRIAGE"] = data.MARRIAGE.apply(lambda x: 3 if x == 0)
#data = data.replace({"MARRIAGE": {0: 3}})
one_hot = pd.get_dummies(data["MARRIAGE"])
data = data.drop("MARRIAGE",axis = 1)
# Join the encoded df
data = data.join(one_hot)
data = data.rename(columns={1: "RELATIONSHIP_MARRIED", 2: "RELATIONSHIP_SINGLE", 3: "RELATIONSHIP_DIVORCED", 0: "RELATIONSHIP_OTHER"})
###Output
_____no_output_____
###Markdown
AGE- check that > 18 only: Min age is 21 (max 79)Leave as it is at the moment
###Code
#data.AGE.hist()
data.AGE.min()
###Output
_____no_output_____
###Markdown
default payment next month(0: not default, 1: default)- Only options are "default" and "not default"- 6613 "default"- 23181 "not default" (at the moment)Making binary.
###Code
data["default payment next month"].unique()
#data[data["default payment next month"] == "not default"].shape[0]
data["default_binary"] = data["default payment next month"].apply(lambda x: 1 if x == "default" else (0 if x == "not default" else -1))
data = data.drop(["default payment next month"], axis=1)
data = data.rename(columns={"default_binary": "default payment next month"})
###Output
_____no_output_____
###Markdown
PAY_0, PAY_2, PAY_3, PAY_4, PAY_5, PAY_6History of past payment. We tracked the past monthly payment records (from April to September, 2005) as follows: X6 = the repayment status in September, 2005; X7 = the repayment status in August, 2005; . . .;X11 = the repayment status in April, 2005. The measurement scale for the repayment status is: -2: No consumption; -1: Paid in full; 0: The use of revolving credit; 1 = payment delay for one month; 2 = payment delay for two months; . . .; 8 = payment delay for eight months; 9 = payment delay for nine months and above. --> This is ordinal, I will leave it like this
###Code
data.PAY_0.unique()
#data.PAY_0.hist(bins=10)
###Output
_____no_output_____
###Markdown
BILL_AMT1, BILL_AMT2, BILL_AMT3, BILL_AMT4, BILL_AMT5, BILL_AMT6Drop everything where the bill amount is higher than the credit limit. Also drop everything where there is a negative bill amount (what a negative bill amount means is still an open question / TODO).
###Code
#data.BILL_AMT1.hist()
print (data.loc[data["BILL_AMT1"]>data["LIMIT_BAL"]].shape[0])
print (data.BILL_AMT1.min())
data.drop(data.loc[data["BILL_AMT1"]>data["LIMIT_BAL"]].index, inplace=True)
data.drop(data.loc[data["BILL_AMT2"]>data["LIMIT_BAL"]].index, inplace=True)
data.drop(data.loc[data["BILL_AMT3"]>data["LIMIT_BAL"]].index, inplace=True)
data.drop(data.loc[data["BILL_AMT4"]>data["LIMIT_BAL"]].index, inplace=True)
data.drop(data.loc[data["BILL_AMT5"]>data["LIMIT_BAL"]].index, inplace=True)
data.drop(data.loc[data["BILL_AMT6"]>data["LIMIT_BAL"]].index, inplace=True)
data.drop(data.loc[data["BILL_AMT1"]<0].index, inplace=True)
data.drop(data.loc[data["BILL_AMT2"]<0].index, inplace=True)
data.drop(data.loc[data["BILL_AMT3"]<0].index, inplace=True)
data.drop(data.loc[data["BILL_AMT4"]<0].index, inplace=True)
data.drop(data.loc[data["BILL_AMT5"]<0].index, inplace=True)
data.drop(data.loc[data["BILL_AMT6"]<0].index, inplace=True)
#data.BILL_AMT1.hist()
###Output
2111
-165580
###Markdown
PAY_AMT1, PAY_AMT2, PAY_AMT3, PAY_AMT4, PAY_AMT5, PAY_AMT6A few data points have a payment amount above 100k. Check whether PAY_AMT > BILL_AMT: some rows have that, so those rows are deleted. The very large payments could additionally be dropped as highly unrealistic, but they are kept for now (see the commented-out lines below).
###Code
#data.PAY_AMT1.hist()
#data[data["PAY_AMT1"]>100000].count()
#data.drop(data.loc[data["PAY_AMT1"]>100000].index, inplace=True)
#data.drop(data.loc[data["PAY_AMT2"]>100000].index, inplace=True)
#data.drop(data.loc[data["PAY_AMT3"]>100000].index, inplace=True)
#data.drop(data.loc[data["PAY_AMT4"]>100000].index, inplace=True)
#data.drop(data.loc[data["PAY_AMT5"]>100000].index, inplace=True)
#data.drop(data.loc[data["PAY_AMT6"]>100000].index, inplace=True)
data.drop(data.loc[data["PAY_AMT1"]>data["BILL_AMT1"]].index, inplace=True)
data.drop(data.loc[data["PAY_AMT2"]>data["BILL_AMT2"]].index, inplace=True)
data.drop(data.loc[data["PAY_AMT3"]>data["BILL_AMT3"]].index, inplace=True)
data.drop(data.loc[data["PAY_AMT4"]>data["BILL_AMT4"]].index, inplace=True)
data.drop(data.loc[data["PAY_AMT5"]>data["BILL_AMT5"]].index, inplace=True)
data.drop(data.loc[data["PAY_AMT6"]>data["BILL_AMT6"]].index, inplace=True)
print (data.loc[data["PAY_AMT1"]>data["LIMIT_BAL"]].shape[0])
#data["PAY_AMT6"].hist(bins=50)
# Loading cleaned data
data = pd.read_csv("payment_data_cleaned.csv")
#data.head()
# Loading cleaned data
data = pd.read_csv("payment_data_cleaned.csv")
###Output
_____no_output_____
###Markdown
Constructing a continuous Y-variable: We try to predict the "probability of default" for month -1, since we have more data for it than for the current month.As the closest proxy for the "probability of default in month -1" we calculate the "percent of the bill left unpaid in month -1":PERCENT_UNPAID_1 = (BILL - PAID) / BILL. A value of 0 means the bill was fully paid --> 0% probability of default for month -1; a value of 1 means nothing was paid --> 100% probability of default for month -1
###Code
data["Y_PERCENT_UNPAID_1"]=(data["BILL_AMT1"]-data["PAY_AMT1"])/data["BILL_AMT1"]
data["Y_PERCENT_UNPAID_1"].fillna(0, inplace=True)
###Output
_____no_output_____
###Markdown
Making the same percentage feature for months -2 to -6:
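The five cells below create these columns one month at a time; an equivalent loop form (just a sketch, producing the same columns) would be:

```python
# Sketch: build PERCENT_UNPAID_2 ... PERCENT_UNPAID_6 in one loop.
for i in range(2, 7):
    col = f"PERCENT_UNPAID_{i}"
    data[col] = (data[f"BILL_AMT{i}"] - data[f"PAY_AMT{i}"]) / data[f"BILL_AMT{i}"]
    data[col].fillna(0, inplace=True)
```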
###Code
data["PERCENT_UNPAID_2"]=(data["BILL_AMT2"]-data["PAY_AMT2"])/data["BILL_AMT2"]
data["PERCENT_UNPAID_2"].fillna(0, inplace=True)
data["PERCENT_UNPAID_3"]=(data["BILL_AMT3"]-data["PAY_AMT3"])/data["BILL_AMT3"]
data["PERCENT_UNPAID_3"].fillna(0, inplace=True)
data["PERCENT_UNPAID_4"]=(data["BILL_AMT4"]-data["PAY_AMT4"])/data["BILL_AMT4"]
data["PERCENT_UNPAID_4"].fillna(0, inplace=True)
data["PERCENT_UNPAID_5"]=(data["BILL_AMT5"]-data["PAY_AMT5"])/data["BILL_AMT5"]
data["PERCENT_UNPAID_5"].fillna(0, inplace=True)
data["PERCENT_UNPAID_6"]=(data["BILL_AMT6"]-data["PAY_AMT6"])/data["BILL_AMT6"]
data["PERCENT_UNPAID_6"].fillna(0, inplace=True)
###Output
_____no_output_____
###Markdown
With that approach I have to drop PAY_0, BILL_AMT1 and PAY_AMT1 from the feature list, as these are unknown at prediction time. We are also dropping the old binary Y variable.
###Code
data = data.drop(["PAY_0"], axis=1)
data = data.drop(["BILL_AMT1"], axis=1)
data = data.drop(["PAY_AMT1"], axis=1)
data = data.drop(["default payment next month"], axis=1)
corrMatrix = data.corr(method='pearson')
sns.heatmap(corrMatrix)
plt.title('Correlations of all features', size=14)
plt.xlabel('Features')
plt.ylabel('Features')
plt.show()
###Output
_____no_output_____
###Markdown
*Fig 2: Correlation between all features*
###Code
correlations = data[data.columns[1:]].corr()["Y_PERCENT_UNPAID_1"][:-1].sort_values(axis=0, ascending=False)
print (correlations.index[0])
plt.bar(x=correlations.index, height=correlations, color='#087E8B')
plt.title('Correlations of features with "Y_PERCENT_UNPAID_1"', size=14)
plt.xticks(rotation='vertical')
plt.xlabel('Features')
plt.ylabel('Correlation')
plt.show()
###Output
Y_PERCENT_UNPAID_1
###Markdown
*Fig 3: Correlation between predictive variable "Y_Percent_unpaid_1" and other features* Models Linear Regression for feature importance analysis:
###Code
x_data = data[['LIMIT_BAL', 'EDUCATION','AGE', 'SEX', 'RELATIONSHIP_MARRIED', 'RELATIONSHIP_SINGLE', 'RELATIONSHIP_DIVORCED', 'RELATIONSHIP_OTHER', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6', "PERCENT_UNPAID_2", "PERCENT_UNPAID_3", "PERCENT_UNPAID_4", "PERCENT_UNPAID_5", "PERCENT_UNPAID_6"]]
y_data = data['Y_PERCENT_UNPAID_1']
scaler = StandardScaler()
x_data_scaled = pd.DataFrame(scaler.fit_transform(x_data), columns=x_data.columns)
linear_regressor = LinearRegression() # create object for the class
linear_regressor.fit(x_data_scaled, y_data) # perform linear regression
y_pred = linear_regressor.predict(x_data_scaled) # make predictions
#linear_regressor.score(x_data, y_data, sample_weight=None) # Returns R^2. Best: 1
# The coefficients
#print('Coefficients: \n', linear_regressor.coef_)
# The mean squared error
print('Mean squared error: %.2f'
% mean_squared_error(y_data, y_pred))
# The coefficient of determination: 1 is perfect prediction
print('Coefficient of determination (R^2): %.2f'
% r2_score(y_data, y_pred))
importances = pd.DataFrame(data={
'Attribute': x_data_scaled.columns,
'Importance': linear_regressor.coef_
})
importances = importances.sort_values(by='Importance', ascending=False)
plt.bar(x=importances['Attribute'], height=importances['Importance'], color='#087E8B')
plt.title('Feature Importance obtained from coefficients', size=14)
plt.xticks(rotation='vertical')
plt.xlabel('Features')
plt.ylabel('Feature Importance')
plt.xticks(rotation='vertical')
plt.show()
###Output
_____no_output_____
###Markdown
*Fig 4: Feature importance obtained from linear regression model coefficients* Testing different regressor models:
###Code
algosClass = []
algosClass.append(('Random Forest Regressor',RandomForestRegressor()))
algosClass.append(('Linear Regression',LinearRegression()))
algosClass.append(('Support Vector Regression',SVR()))
algosClass.append(('LGBM Regressor',LGBMRegressor()))
algosClass.append(('xgboost.sklearn',XGBRegressor()))
algosClass.append(('Stochastic Gradient Descent Regression',SGDRegressor()))
algosClass.append(('Bayesian Ridge Regression',BayesianRidge()))
algosClass.append(('sklearn.ensemble',GradientBoostingRegressor()))
#regression
results = []
names = []
for name, model in algosClass:
result = cross_val_score(model, x_data_scaled, y_data, cv=3, scoring='r2')
names.append(name)
results.append(result)
for i in range(len(names)):
print(names[i],results[i].mean())
###Output
Random Forest Regressor 0.7937987514100429
Linear Regression 0.6343565057895927
Support Vector Regression 0.7503226151814851
LGBM Regressor 0.810725569869208
xgboost.sklearn 0.7872596280738401
Stochastic Gradient Descent Regression 0.6209423573610852
Bayesian Ridge Regression 0.6343820666470279
sklearn.ensemble 0.8048213503122813
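A small optional extension (not part of the original run): printing the spread of each model's cross-validation scores alongside the mean makes the closely ranked ensemble models easier to compare.

```python
# Sketch: report mean and standard deviation of the R^2 scores for each model.
for i in range(len(names)):
    print(names[i], results[i].mean(), "+/-", results[i].std())
```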
###Markdown
Structured Approach: Best model
###Code
#Based on what you've already learned about the cross-validation scores, choose the best model that you will use in the next step to train the model and make predictions.
#3. Use the model variables you established in step 2, pass the training data to it in the following format (you'll need to use train_test_split prior):
X_train, X_test, y_train, y_test = train_test_split(x_data_scaled, y_data, test_size=0.33, random_state=42)
#algo = RandomForestRegressor()
#algo = LinearRegression()
algo = GradientBoostingRegressor()
model = algo.fit(X_train,y_train)
#Make Predictions
predictions = model.predict(X_test)
predRsquared = r2_score(y_test,predictions)
rmse = sqrt(mean_squared_error(y_test, predictions))
print('R Squared: %.3f' % predRsquared)
print('RMSE: %.3f' % rmse)
plt.scatter(y_test, predictions, alpha = 0.5, color='#087E8B')
plt.title('Prediction vs Ground Truth', size=14)
plt.xlabel('Ground Truth')
plt.ylabel('Predictions')
plt.show();
###Output
_____no_output_____ |
examples/sprawl_batch.ipynb | ###Markdown
Sprawling indices: Batch mode* Illustration of a simple way to calculate sprawling indices for several cities in batch-mode
###Code
# snippet for relative imports
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import matplotlib.pyplot as plt
import osmnx as ox
from src.core import process_spatial_indices, get_route_graph
from src.osm.osm_tags import activity_classification
ox.config(log_file=True, log_console=True, log_name='sprawl_batch', log_filename='sprawl_batch', use_cache=True)
figsize = (8,6)
###Output
_____no_output_____
###Markdown
Sprawling indices* Indices can be calculated in a batch mode
###Code
# Arguments
grid_step = 200
process_osm_args = {"retrieve_graph":True, "default_height":3, "meters_per_level":3, "associate_landuses_m2":True, "mixed_building_first_floor_activity":True, "minimum_m2_building_area":9, "date":None}
dispersion_args = {'radius_search': 750, 'use_median': False, 'K_nearest': 50}
landusemix_args = {'walkable_distance': 600, 'compute_activity_types_kde': True, 'weighted_kde': True,
'pois_weight': 9, 'log_weighted': True}
accessibility_args = {'fixed_distance': True, 'fixed_activities': False, 'max_edge_length': 200,
'max_node_distance': 250, 'fixed_distance_max_travel_distance': 2000,
'fixed_distance_max_num_activities': 250, 'fixed_activities_min_number': 20}
indices_computation = {"dispersion":True, "landusemix":False, "accessibility":False}
###Output
_____no_output_____
###Markdown
Define regions to process* Cities names* Regions of interest * Example: Stuttgart's region is determined using the second result of the query: https://www.openstreetmap.org/search?query=stuttgartmap=11/48.7788/9.1794
###Code
# Cities
cities_ref = [ "Florence", "Zagreb", "Stuttgart"]
regions_args = [ {"place":"Florence"}, {"place":"Zagreb"}, {"place":"Stuttgart", "which_result":2} ]
###Output
_____no_output_____
###Markdown
Process spatial indices* Results are computed in a geographical data frame for each city
###Code
df_indices = {}
for city_ref, region_arg in zip(cities_ref, regions_args):
# Process spatial indices for input city
df_indices[city_ref] = process_spatial_indices(city_ref, region_arg, grid_step, process_osm_args, dispersion_args, landusemix_args, accessibility_args, indices_computation)
###Output
_____no_output_____
###Markdown
Visualization
###Code
for city_ref in cities_ref:
print("\t\t\t" + city_ref)
df_indices_city = df_indices[city_ref]
G = get_route_graph(city_ref)
if (indices_computation.get("landusemix")):
print("Land use mix")
f, ax = ox.plot_graph(G, fig_height=figsize[1], fig_width=figsize[0], close=False, show=False, edge_color='black', edge_alpha=0.3, node_alpha=0.1)
df_indices_city.plot('landusemix', figsize=figsize, cmap='jet', legend=True, ax=ax, markersize=50*df_indices_city.landuse_intensity)
f.tight_layout(); ax.set_title(city_ref + ": Land use mix"); plt.show()
if (indices_computation.get("accessibility")):
print("Accessibility")
f, ax = ox.plot_graph(G, fig_height=figsize[1], fig_width=figsize[0], close=False, show=False, edge_color='black', edge_alpha=0.3, node_alpha=0.1)
df_indices_city.plot('accessibility', figsize=figsize, cmap='jet', legend=True, ax=ax)
f.tight_layout(); ax.set_title(city_ref + ": Accessibility"); plt.show()
if (indices_computation.get("dispersion")):
print("Dispersion")
f, ax = ox.plot_graph(G, fig_height=figsize[1], fig_width=figsize[0], close=False, show=False, edge_color='black', edge_alpha=0.3, node_alpha=0.1)
df_indices_city.plot('dispersion', figsize=figsize, cmap='jet', legend=True, vmax=15, ax=ax)
f.tight_layout(); ax.set_title(city_ref + ": Dispersion"); plt.show()
###Output
Florence
Dispersion
|
Covid19Colombia.ipynb | ###Markdown
Covid19PYColombia Module - Examplescovid19builderdf is a module to construct a dataframe with covid19 data for Colombia.The class Covid19Builder provides an object of class Covid19PyColombia, which has two methods:- buildTotalDF() for retrieving historical data- buildByDate() for retrieving data for a specific date. By default it returns data for today.Both methods take the limit parameter to set the maximum number of expected rows
###Code
from covid19builderdf import Covid19Builder
builder = Covid19Builder()
covid19 = builder.buildTotalDF(limit=10000)
###Output
_____no_output_____
###Markdown
PlotAll() shows a line plotting the total confirmed cases over the days.
###Code
covid19.plotAll()
###Output
_____no_output_____
###Markdown
Plotting using the covid19pyColombia module.
###Code
covid19.plotAll_Bar()
###Output
_____no_output_____
###Markdown
Plotting using the covid19pyworld module.It is necessary to download the data file first. Repo: https://github.com/CSSEGISandData/COVID-19.git In contrast to the covid19pycolombia module, covid19pyworld allows plotting data for any country.
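A minimal way to fetch that repository from inside a notebook (a sketch; it assumes git is available and clones into the current working directory, and the output below suggests the module then reads the CSV from a local data/ folder):

```python
# Sketch: clone the Johns Hopkins CSSE repository that contains the time-series CSV files.
!git clone https://github.com/CSSEGISandData/COVID-19.git
```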
###Code
import covid19pyworld as cw
cw.plotReportedCasesBar('confirmed','Colombia')
###Output
Processing file data/time_series_covid19_confirmed_global.csv
last date => 5/5/20
###Markdown
Below are more examples using the covid19pycolombia module.
###Code
print('Total: {} confirmed cases'.format(covid19.getAllData_Agg().tail(1).values[0]))
###Output
Total: 8959 confirmed cases
###Markdown
The method plotByDeptTopN(n) plots the top N departments with the most cases
###Code
covid19.plotByDeptTopN(5)
###Output
_____no_output_____
###Markdown
The method plotByDept([]) takes a list of departments as a parameter and makes a chart with a line for each department
###Code
covid19.plotByDept([covid19.bogota,'Valle del Cauca',])
###Output
_____no_output_____
###Markdown
The method plotByCityTopN(n) plots the top N cities with the most cases
###Code
covid19.plotByCityTopN(5)
###Output
_____no_output_____
###Markdown
The method plotByCity([]) takes a list of cities as a parameter and makes a chart with a line for each city
###Code
covid19.plotByCity(['Villavicencio','Cartagena de Indias','Cali','Medellín'])
###Output
_____no_output_____
###Markdown
Below are additional methods to plot data about age, gender and health status
###Code
covid19.plotAge()
covid19.plotGenre()
covid19.plotStatus()
covid19.plotAttention()
###Output
_____no_output_____
###Markdown
Using the method builder.buildByDate() for fetching data of today
###Code
covid19_today = builder.buildByDate()
covid19_today.plotAge()
covid19_today.plotStatus()
covid19_today.plotAttention()
covid19_today.plotGenre()
len(covid19_today.df)
dir(covid19_today)
covid19_today.getDataByAttention()
ps = covid19.df.groupby(['atenci_n']).count()['edad']
s = sum(ps)
ps.map(lambda x: (x,x*100/s))
covid19_today.getDataDepts()
covid19_today.getDataByDept('Boyacá')
covid19_today.df[covid19_today.df.departamento == 'Cundinamarca']
###Output
_____no_output_____ |
doc/Types Review.ipynb | ###Markdown
Review of Python Data TypesPython can store data in many different ways, from simple numbers and strings, to more complex structures like tuples, lists, dictionaries and sets. Values of these types can be stored in variables. Finding the type of an object The `type` function tells you the type of an object:
###Code
type('Hi')
type(3)
type(3.14159)
type((1,2))
type([1,2])
type({'CA': 'California'})
type({2,3,6})
###Output
_____no_output_____
###Markdown
Sequences Strings, tuples and lists are *sequences*. You can do things like this with sequences:
###Code
greeting = 'Hello, world!'
animals = ('cow', 'chicken', 'cat')
len(greeting)
len(animals)
animals[0]
animals[2]
###Output
_____no_output_____
###Markdown
You’ll get an error if the number in the brackets is too high:
###Code
greeting[20]
from random import choice
choice(greeting)
choice(animals)
###Output
_____no_output_____
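Slicing is another thing you can do with any sequence. A quick sketch, reusing the greeting and animals values defined above:

```python
greeting[0:5]   # 'Hello'
animals[1:]     # ('chicken', 'cat')
```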
###Markdown
Adding to a sequence You can’t add to a tuple because tuples are *immutable* (unchangeable).
###Code
animals.append('Whale')
###Output
_____no_output_____
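Even though append fails, you can still build a bigger tuple by concatenating tuples, which creates a brand-new tuple. A quick sketch using the animals tuple from above:

```python
more_animals = animals + ('whale',)   # a new tuple; the original is unchanged
more_animals                          # ('cow', 'chicken', 'cat', 'whale')
```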
###Markdown
You *can* add to a list.
###Code
winners = ['Sue']
winners.append('Dave')
winners
###Output
_____no_output_____
###Markdown
DictionariesDictionaries store *key/value pairs*. The key is the string, number, or value of another type that you use to retrieve an associated value. For example, if you know the state code, `“CA”` (key), you can retrieve the state name (value).
###Code
states_by_code = {
'CA': 'California',
'IN': 'Indiana'
}
states_by_code
states_by_code['CA']
states_by_code['Zoo']
'CA' in states_by_code
'Zoo' in states_by_code
###Output
_____no_output_____
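If you want a lookup that never raises an error, dictionaries also have a get method. A quick sketch with the same states_by_code dictionary:

```python
states_by_code.get('CA')             # 'California'
states_by_code.get('Zoo', 'N/A')     # 'N/A' instead of a KeyError
```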
###Markdown
SetsLike tuples and lists, sets store items, but don’t allow duplicates
###Code
nums = {1, 1, 2}
nums
nums.add(3)
nums
nums.remove(1)
nums
###Output
_____no_output_____
###Markdown
Values, Variables, and TypesIn the example below, a is the variable, 5 is the value, and int is the type. A variable is a name for an object, which has a value and a type.
###Code
a = 5
print(a)
type(a)
###Output
_____no_output_____ |
Django by Multi/NLP/countvectorizer.ipynb | ###Markdown
News related-word recommendation service Fetching the data
###Code
import pandas as pd
df = pd.read_csv('./drive/MyDrive/Multi_test_deeplearning/smtph_total.csv')
df.head(5)
df.columns
posts = df['Title']+['Description']
type(posts),posts
###Output
_____no_output_____
###Markdown
Splitting the text into words (noun extraction)
###Code
!python3 -m pip install konlpy
from konlpy.tag import Okt
okt = Okt()
stop_words = ['강,', '글,', '애', '미', '번', '은', '이', '것', '등', '더', '를', '좀', '즉', '인', '옹', '때', '만', '원', '이때', '개']
posts_noun = []
for post in posts:
#print(okt.nouns(post))
for noun in okt.nouns(post):
posts_noun.append(noun)
len(posts_noun)
posts_noun
from collections import Counter
###Output
_____no_output_____
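Note that the stop_words list defined above is never actually applied in this notebook. A minimal sketch of how it could be used to filter the extracted nouns before counting them (optional, not part of the original flow):

```python
# Sketch: drop any extracted noun that appears in the stop_words list.
posts_noun = [noun for noun in posts_noun if noun not in stop_words]
```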
###Markdown
type(noun_cnt) shows that noun_cnt is a Counter, which behaves like a dictionary
###Code
noun_cnt = Counter(posts_noun)
type(noun_cnt)
top_30_nouns = noun_cnt.most_common(30)
type(top_30_nouns),top_30_nouns
top_nouns_dict = dict(top_30_nouns)
type(top_nouns_dict)
###Output
_____no_output_____
###Markdown
Word cloud
###Code
from wordcloud import WordCloud
path = ''
nouns_wordcloud = WordCloud()
nouns_wordcloud.generate_from_frequencies(top_nouns_dict)
###Output
_____no_output_____
###Markdown
Display
###Code
import matplotlib.pyplot as plt
plt.imshow(nouns_wordcloud)
###Output
_____no_output_____
###Markdown
Stopwords and fonts were not used above. CountVectorizer (based on term frequencies)
###Code
from sklearn.feature_extraction.text import CountVectorizer
corpus = [
'you know I want your love',
'I like you',
'what should I do ',
]
countvectorizer = CountVectorizer()
countvectorizer.fit_transform(corpus).toarray()
###Output
_____no_output_____
###Markdown
Check the column order with vocabulary_
###Code
print(countvectorizer.vocabulary_)
###Output
{'you': 7, 'know': 1, 'want': 5, 'your': 8, 'love': 3, 'like': 2, 'what': 6, 'should': 4, 'do': 0}
###Markdown
Word2Vec Apply word2vec to the list of extracted words
###Code
from gensim.models import Word2Vec
word2vec= Word2Vec([posts_noun],min_count=1)
word2vec
###Output
_____no_output_____
###Markdown
Given a query word, it returns the most similar words (based on cosine similarity)
###Code
word2vec.wv.most_similar('삼성')
word2vec
###Output
_____no_output_____ |
examples/Working with Network Assets.ipynb | ###Markdown
Serial Numbers, How I love thee... No one really likes serial numbers, but keeping track of them is one of the "brushing your teeth" activities that everyone needs to take care of. It's like eating your brussels sprouts. Or listening to your mom. You're just better off if you do it quickly, as it just gets more painful over time.Not only is it just good hygiene, but you may be subject to regulations, like [eRate](https://en.wikipedia.org/wiki/E-Rate) in the United States, where you have to be able to report on the location of any device by serial number at any point in time. > Trust me, having to play hide-and-go-seek with an SSH session is not something you want to do when government auditors are looking for answers.I'm sure you've already guessed what I'm about to say, but I'll say it anyway...> *There's an API for that!!!*The [HPE IMC](http://www.hpe.com/networking/imc) base platform has a great network assets function that automatically gathers all the details of your various devices, assuming of course they support [RFC 4133](https://tools.ietf.org/html/rfc4133), otherwise known as the Entity MIB. On the bright side, most vendors have chosen to support this standards-based MIB, so chances are you're in good shape. And if they don't support it, they really should. You should ask them. Ok?So without further ado, let's get started. Importing the required librariesI'm sure you're getting used to this part, but it's important to know where to look for these different functions. In this case, we're going to look at a new library that is specifically designed to deal with network assets, including serial numbers.
###Code
from pyhpeimc.auth import *
from pyhpeimc.plat.netassets import *
import csv
auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
ciscorouter = get_dev_asset_details('10.101.0.1', auth.creds, auth.url)
###Output
_____no_output_____
###Markdown
How many assets in a Cisco Router?As some of you may have heard, HPE IMC is a multi-vendor tool and offers support for many of the common devices you'll see in your daily travels. In this example, we're going to use a Cisco 2811 router to showcase the basic function.Routers, like chassis switches, have multiple components. As anyone who's ever been the ~~victim~~ owner of a Smartnet contract will know, you have individual components which have serial numbers as well, and all of them have to be reported for them to be covered. So let's see if we managed to grab all of those by first checking out how many individual items we got back in the asset list for this Cisco router.
###Code
len(ciscorouter)
###Output
_____no_output_____
###Markdown
What's in the box???Now we know that we've got an idea of how many assets are in here, let's take a look to see exactly what's in one of the asset records to see if there's anything useful in here.
###Code
ciscorouter[0]
###Output
_____no_output_____
###Markdown
What can we do with this?With some basic Python string manipulation we could easily print out some of the attributes that we want into what could turn into a nicely formatted report. Again, realise that the example below is just a subset of what's available in the JSON above. If you want more, just add it to the list.
###Code
for i in ciscorouter:
print ("Device Name: " + i['deviceName'] + " Device Model: " + i['model'] +
"\nAsset Name is: " + i['name'] + " Asset Serial Number is: " +
i['serialNum']+ "\n")
###Output
Device Name: router.lab.local Device Model: CISCO2811
Asset Name is: 2811 chassis Asset Serial Number is: FHK1119F1DX
Device Name: router.lab.local Device Model: VIC2-2FXO
Asset Name is: 2nd generation two port FXO voice interface daughtercard on Slot 0 SubSlot 2 Asset Serial Number is: FOC11063NZ4
Device Name: router.lab.local Device Model:
Asset Name is: 40GB IDE Disc Daughter Card on Slot 1 SubSlot 0 Asset Serial Number is: FOC11163P04
Device Name: router.lab.local Device Model:
Asset Name is: AIM Container Slot 0 Asset Serial Number is:
Device Name: router.lab.local Device Model:
Asset Name is: AIM Container Slot 1 Asset Serial Number is:
Device Name: router.lab.local Device Model:
Asset Name is: C2811 Chassis Slot 0 Asset Serial Number is:
Device Name: router.lab.local Device Model:
Asset Name is: C2811 Chassis Slot 1 Asset Serial Number is:
###Markdown
Why not just write that to disk?Although we could go directly to the formatted report without a lot of extra work, we would be losing a lot of data which we may have a use for later. Instead, why don't we export all the available data from the JSON above into a CSV file, which can later be opened in your favourite spreadsheet viewer and manipulated to your heart's content.Pretty cool, no?
###Code
keys = ciscorouter[0].keys()
with open('ciscorouter.csv', 'w') as file:
dict_writer = csv.DictWriter(file, keys)
dict_writer.writeheader()
dict_writer.writerows(ciscorouter)
###Output
_____no_output_____
###Markdown
Reading it backNow we'll read it back from disk to make sure it worked properly. When working with data like this, I find it useful to think about who's going to be consuming the data. For example, when looking at this, remember this is a CSV file which can be easily opened in Python, or in something like Microsoft Excel for further manipulation. It's not really intended to be read by human beings in this particular format. You'll need another program to consume and munge the data first to turn it into something human-consumable.
###Code
with open('ciscorouter.csv') as file:
print (file.read())
###Output
firmwareVersion,vendorType,phyIndex,relPos,boardNum,phyClass,softVersion,serverDate,isFRU,alias,bom,physicalFlag,deviceName,deviceIp,containedIn,cleiCode,mfgName,desc,name,hardVersion,remark,asset,model,assetNumber,serialNum,buildInfo,devId
"System Bootstrap, Version 12.4(13r)T11, RELEASE SOFTWARE (fc1)",1.3.6.1.4.1.9.12.3.1.3.436,1,-1,FHK1119F1DX,3,"15.1(4)M, RELEASE SOFTWARE (fc1)",2016-01-26T15:20:40-05:00,2,,,0,router.lab.local,10.101.0.1,0,,Cisco,2811 chassis,2811 chassis,V04 ,,http://10.101.0.203:8080/imcrs/netasset/asset/detail?devId=15&phyIndex=1,CISCO2811,,FHK1119F1DX,,15
,1.3.6.1.4.1.9.12.3.1.9.3.114,14,0,FOC11063NZ4,9,,2016-01-26T15:20:40-05:00,1,,,2,router.lab.local,10.101.0.1,13,,Cisco,2nd generation two port FXO voice interface daughtercard,2nd generation two port FXO voice interface daughtercard on Slot 0 SubSlot 2,V01 ,,http://10.101.0.203:8080/imcrs/netasset/asset/detail?devId=15&phyIndex=14,VIC2-2FXO,,FOC11063NZ4,,15
,1.3.6.1.4.1.9.12.3.1.9.15.25,30,0,FOC11163P04,9,,2016-01-26T15:20:40-05:00,1,,,2,router.lab.local,10.101.0.1,29,,Cisco,40GB IDE Disc Daughter Card,40GB IDE Disc Daughter Card on Slot 1 SubSlot 0,,,http://10.101.0.203:8080/imcrs/netasset/asset/detail?devId=15&phyIndex=30, ,,FOC11163P04,,15
,1.3.6.1.4.1.9.12.3.1.5.2,25,6,,5,,2016-01-26T15:20:40-05:00,2,,,0,router.lab.local,10.101.0.1,3,,Cisco,AIM Container Slot 0,AIM Container Slot 0,,,http://10.101.0.203:8080/imcrs/netasset/asset/detail?devId=15&phyIndex=25,,,,,15
,1.3.6.1.4.1.9.12.3.1.5.2,26,7,,5,,2016-01-26T15:20:40-05:00,2,,,0,router.lab.local,10.101.0.1,3,,Cisco,AIM Container Slot 1,AIM Container Slot 1,,,http://10.101.0.203:8080/imcrs/netasset/asset/detail?devId=15&phyIndex=26,,,,,15
,1.3.6.1.4.1.9.12.3.1.5.1,2,0,,5,,2016-01-26T15:20:40-05:00,2,,,0,router.lab.local,10.101.0.1,1,,Cisco,C2811 Chassis Slot,C2811 Chassis Slot 0,,,http://10.101.0.203:8080/imcrs/netasset/asset/detail?devId=15&phyIndex=2,,,,,15
,1.3.6.1.4.1.9.12.3.1.5.1,27,1,,5,,2016-01-26T15:20:40-05:00,2,,,0,router.lab.local,10.101.0.1,1,,Cisco,C2811 Chassis Slot,C2811 Chassis Slot 1,,,http://10.101.0.203:8080/imcrs/netasset/asset/detail?devId=15&phyIndex=27,,,,,15
###Markdown
What about all my serial numbers at once?That's a great question! I'm glad you asked. One of the most beautiful things about learning to automate things like asset gathering through an API is that it's often not much more work to do something 1000 times than it is to do it a single time. This time instead of using the *get_dev_asset_details* function that we used above which gets us all the assets associated with a single device, let's grab ALL the devices at once.
###Code
all_assets = get_dev_asset_details_all(auth.creds, auth.url)
len (all_assets)
###Output
_____no_output_____
###Markdown
That's a lot of assets!Exactly why we automate things. Now let's write the all_assets list to disk as well. **Note:** for reasons unknown to me at this time, although the majority of the assets have 27 different fields, a few of them actually have 28 different attributes. Something I'll have to dig into later.
###Code
keys = all_assets[0].keys()
with open('all_assets.csv', 'w') as file:
dict_writer = csv.DictWriter(file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_assets)
###Output
_____no_output_____
###Markdown
Well That's not good....So it looks like there are a few network assets that have a different number of attributes than the first one in the list. We'll write some quick code to figure out how big of a problem this is.
###Code
print ("The length of the first items keys is " + str(len(keys)))
for i in all_assets:
if len(i) != len(all_assets[0].keys()):
print ("The length of index " + str(all_assets.index(i)) + " is " + str(len(i.keys())))
###Output
The length of the first items keys is 27
The length of index 39 is 28
The length of index 41 is 28
The length of index 42 is 28
The length of index 474 is 28
The length of index 497 is 28
The length of index 569 is 28
The length of index 570 is 28
The length of index 585 is 28
The length of index 604 is 28
The length of index 605 is 28
The length of index 879 is 28
The length of index 880 is 28
The length of index 881 is 28
The length of index 882 is 28
The length of index 883 is 28
The length of index 884 is 28
The length of index 885 is 28
The length of index 886 is 28
###Markdown
Well that's not so badIt looks like the items which don't have exactly 27 attributes have exactly 28 attributes. So we'll just pick one of the longer ones to use as the headers for our CSV file and then run the script again.For this one, I'm going to ask you to trust me that the file is on disk and save us all the trouble of having to print out 1013 separate assets into this blog post.
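If you'd rather not hand-pick a long record, another option (just a sketch, still using only the csv module already imported above) is to build the header from the union of every record's keys, so that no field is ever missing; I'll stick with the simpler pick-a-long-record approach below.

```python
# Sketch: header from the union of all keys; restval fills the gap for records missing a field.
all_keys = sorted(set().union(*(asset.keys() for asset in all_assets)))
with open('all_assets.csv', 'w') as file:
    dict_writer = csv.DictWriter(file, fieldnames=all_keys, restval='')
    dict_writer.writeheader()
    dict_writer.writerows(all_assets)
```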
###Code
keys = all_assets[879].keys()
with open ('all_assets.csv', 'w') as file:
dict_writer = csv.DictWriter(file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_assets)
###Output
_____no_output_____ |
run-2016-wrangle.ipynb | ###Markdown
Run-2016-Wrangle Here I will be cleaning the dataset 'Career_Stats_Rushing.csv'. This dataset can be found [here](https://www.kaggle.com/kendallgillies/nflstatistics).
###Code
# import all packages and set plots to be embedded inline. Also, set all columns and rows to be displayed.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
%matplotlib inline
###Output
_____no_output_____
###Markdown
Various Functions Gather
###Code
# Read rushing stats into csv file and create copy for wrangling.
df_original = pd.read_csv('Career_Stats_Rushing.csv')
df = df_original.copy()
###Output
_____no_output_____
###Markdown
Assess 1 and 2(1) Multiple data points contain the symbol '--' to indicate missing data. (2) The columns 'Rushing Attempts', 'Rushing Yards', 'Yards Per Carry', 'Rushing Yards Per Game', 'Rushing TDs', 'Longest Rushing Run', 'Rushing First Downs', 'Percentage of Rushing First Downs', 'Rushing More Than 20 Yards', 'Rushing More Than 40 Yards', and 'Fumbles' are of the data type 'object'. Assessments 1 and 2
###Code
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 17507 entries, 0 to 17506
Data columns (total 18 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Player Id 17507 non-null object
1 Name 17507 non-null object
2 Position 2319 non-null object
3 Year 17507 non-null int64
4 Team 17507 non-null object
5 Games Played 17507 non-null int64
6 Rushing Attempts 17507 non-null object
7 Rushing Attempts Per Game 17507 non-null float64
8 Rushing Yards 17507 non-null object
9 Yards Per Carry 17507 non-null object
10 Rushing Yards Per Game 17507 non-null object
11 Rushing TDs 17507 non-null object
12 Longest Rushing Run 17507 non-null object
13 Rushing First Downs 17507 non-null object
14 Percentage of Rushing First Downs 17507 non-null object
15 Rushing More Than 20 Yards 17507 non-null object
16 Rushing More Than 40 Yards 17507 non-null object
17 Fumbles 17507 non-null object
dtypes: float64(1), int64(2), object(15)
memory usage: 2.4+ MB
###Markdown
Clean - Assessments 1 and 2I will be cleaning the first two issues I found because they make further explortation more difficult. Define - Assessment 1 I will use the pandas replace function to replace all of the -- symbols with NaN. Code - Assessment 1
###Code
# Replace all instances of '--' with np.NaN throughout dataframe
df.replace('--', np.NaN, inplace = True)
df.columns.tolist()
###Output
_____no_output_____
###Markdown
Test - Assessment 1
###Code
# How many times '--' exists anywhere in dataframe.
df.isin(['--']).any().sum()
###Output
_____no_output_____
###Markdown
Define - Assessment 2 I will convert all of the columns that are of the wrong data type to a numeric (float) data type using pd.to_numeric. Code - Assessment 2
###Code
#Make list of all columns to change, then apply list to the to_numeric function
wrong_type = ['Rushing Attempts', 'Rushing Yards', 'Yards Per Carry', 'Rushing Yards Per Game',
'Rushing TDs', 'Longest Rushing Run', 'Rushing First Downs',
'Percentage of Rushing First Downs', 'Rushing More Than 20 Yards', 'Rushing More Than 40 Yards', 'Fumbles']
df[wrong_type] = df[wrong_type].apply(pd.to_numeric, errors='coerce')
###Output
_____no_output_____
###Markdown
Test - Assessment 2
###Code
df[wrong_type].info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 17507 entries, 0 to 17506
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rushing Attempts 11457 non-null float64
1 Rushing Yards 11070 non-null float64
2 Yards Per Carry 11376 non-null float64
3 Rushing Yards Per Game 11445 non-null float64
4 Rushing TDs 11457 non-null float64
5 Longest Rushing Run 6725 non-null float64
6 Rushing First Downs 4875 non-null float64
7 Percentage of Rushing First Downs 4851 non-null float64
8 Rushing More Than 20 Yards 4875 non-null float64
9 Rushing More Than 40 Yards 4875 non-null float64
10 Fumbles 4875 non-null float64
dtypes: float64(11)
memory usage: 1.5 MB
###Markdown
Assess 3 onward(3) Column names have spaces in them.(4) One row had a value greater than 100 in the column 'Percentage of Rushing First Downs'. Assessment 3
###Code
df.head()
###Output
_____no_output_____
###Markdown
Assessment 4
###Code
df[df['Percentage of Rushing First Downs'] > 100]
###Output
_____no_output_____
###Markdown
Clean Assessments 3 and 4 Define Assessment 3 I will use the pandas replace function to replace all spaces with underscores. Code Assessment 3
###Code
#Create list of current column names, then use for loop to rename all columns without needing to create long dictionary.
c_list = df.columns.tolist()
for column in c_list:
df.rename(columns = {column : column.replace(' ', '_')}, inplace = True)
###Output
_____no_output_____
###Markdown
Test Assessment 3
###Code
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 17507 entries, 0 to 17506
Data columns (total 18 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Player_Id 17507 non-null object
1 Name 17507 non-null object
2 Position 2319 non-null object
3 Year 17507 non-null int64
4 Team 17507 non-null object
5 Games_Played 17507 non-null int64
6 Rushing_Attempts 11457 non-null float64
7 Rushing_Attempts_Per_Game 17507 non-null float64
8 Rushing_Yards 11070 non-null float64
9 Yards_Per_Carry 11376 non-null float64
10 Rushing_Yards_Per_Game 11445 non-null float64
11 Rushing_TDs 11457 non-null float64
12 Longest_Rushing_Run 6725 non-null float64
13 Rushing_First_Downs 4875 non-null float64
14 Percentage_of_Rushing_First_Downs 4851 non-null float64
15 Rushing_More_Than_20_Yards 4875 non-null float64
16 Rushing_More_Than_40_Yards 4875 non-null float64
17 Fumbles 4875 non-null float64
dtypes: float64(12), int64(2), object(4)
memory usage: 2.4+ MB
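As an aside (not one of the documented cleaning steps), the same rename can be done in a single line with pandas' vectorised string methods on the column index:

```python
# Sketch: equivalent one-line version of the rename loop above.
df.columns = df.columns.str.replace(' ', '_')
```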
###Markdown
Define Assessment 4 The stats page for this player can be found [here](https://www.espn.com/nfl/player/stats/_/id/14150/kealoha-pilares) under the rushing section. This page also appears to contain the error. Because I cannot know the validity of any of this player's data, I will delete him altogether. Code Assessment 4
###Code
# Use pandas drop function to remove row from df.
false_row = df.query('Percentage_of_Rushing_First_Downs > 100').index
df.drop(false_row, inplace = True)
###Output
_____no_output_____
###Markdown
Test Assessment 4
###Code
df.query('Percentage_of_Rushing_First_Downs > 100')
# Create master csv file
df.to_csv('Career_Stats_Rushing_master.csv', index = False)
###Output
_____no_output_____ |
LDP_HISTOGRAM_FEATURE_EXTRACTION.ipynb | ###Markdown
###Code
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('/content/camerman.png',0)
plt.imshow(img)
plt.xticks([]),plt.yticks([])
plt.show()
img.shape
s_k = np.zeros_like(img)
n_k = np.zeros_like(img)
nw_k = np.zeros_like(img)
ne_k = np.zeros_like(img)
sw_k = np.zeros_like(img)
se_k = np.zeros_like(img)
w_k = np.zeros_like(img)
e_k = np.zeros_like(img)
# Kirsch edge masks for the eight compass directions, used to build the Local Directional Pattern.
na= np.array([[-3,-3,5],[-3,0,5],[-3,-3,5]])
wa= np.array([[5,5,5],[-3,0,-3],[-3,-3,-3]])
sa= np.array([[5,-3,-3],[5,0,-3],[5,-3,-3]])
nea= np.array([[-3,-3,-3],[-3,0,5],[-3,5,5]])
nwa= np.array([[-3,5,5],[-3,0,5],[-3,-3,-3]])
sea= np.array([[-3,-3,-3],[5,0,-3],[5,5,-3]])
swa= np.array([[5,5,-3],[5,0,-3],[-3,-3,-3]])
ka= np.array([[-3,-3,-3],[-3,0,-3],[5,5,5]])
import scipy
from scipy import ndimage
e_k=ndimage.convolve(img,ka,mode='nearest',cval=0.0)
n_k=ndimage.convolve(img,na,mode='nearest',cval=0.0)
s_k=ndimage.convolve(img,sa,mode='nearest',cval=0.0)
w_k=ndimage.convolve(img,wa,mode='nearest',cval=0.0)
ne_k=ndimage.convolve(img,nea,mode='nearest',cval=0.0)
nw_k=ndimage.convolve(img,nwa,mode='nearest',cval=0.0)
se_k=ndimage.convolve(img,sea,mode='nearest',cval=0.0)
sw_k=ndimage.convolve(img,swa,mode='nearest',cval=0.0)
# Local Directional Pattern: for each pixel, take the eight Kirsch responses, keep the
# three strongest (by absolute value), set those three bit positions to 1 and store the
# resulting 8-bit code as a decimal number.
ldp_mat = np.zeros_like(img)
for i in range(img.shape[0]):
    for j in range(img.shape[1]):
        lst = [se_k[i][j], s_k[i][j], sw_k[i][j], w_k[i][j], nw_k[i][j], n_k[i][j], ne_k[i][j], e_k[i][j]]
        l = [abs(h) for h in lst]
        # sort response indices from strongest to weakest
        marr = np.argsort(l)
        marr1 = marr[::-1]
        binary = np.zeros(8, dtype="uint8")
        binary[marr1[0]] = 1
        binary[marr1[1]] = 1
        binary[marr1[2]] = 1
        d_no = binary[0]*2**7 + binary[1]*2**6 + binary[2]*2**5 + binary[3]*2**4 + binary[4]*2**3 + binary[5]*2**2 + binary[6]*2**1 + binary[7]*2**0
        ldp_mat[i][j] = d_no
plt.imshow(ldp_mat,cmap='gray')
plt.xticks([]),plt.yticks([])
plt.show()
arr=np.zeros(56)
for c in range(1,57):
cnt=0
for i in range(img.shape[0]):
for j in range(img.shape[1]):
if ldp_mat[i][j]==c:
cnt+=1
arr[c-1]=cnt
bins=np.linspace(1,56,num=56,dtype=int)
width = bins[1] - bins[0]
plt.title("CAMERAMAN")
plt.bar(bins, arr, align='center', width=width)
plt.show()
###Output
_____no_output_____ |
pandas/.ipynb_checkpoints/exportando-dataframes-dicionarios-csv-checkpoint.ipynb | ###Markdown
Exporting a DataFrame to a csv After modifying a DataFrame, or even creating one from scratch, we often want to export that dataframe to a csv.In pandas, this is quite simple:dataframe.to_csv(r'nome_do_arquivo.csv', sep=',')You need to pass the file path. Reading a DataFrame, modifying it and exporting it
###Code
import pandas as pd
import os
# importing the files
caminho_padrao = r'C:\Users\joaop\Google Drive\Python Impressionador\Pyhon e Power BI'
vendas_df = pd.read_csv(os.path.join(caminho_padrao, r'Contoso - Vendas - 2017.csv'), sep=';')
produtos_df = pd.read_csv(os.path.join(caminho_padrao, r'Contoso - Cadastro Produtos.csv'), sep=';')
lojas_df = pd.read_csv(os.path.join(caminho_padrao, r'Contoso - Lojas.csv'), sep=';')
clientes_df = pd.read_csv(os.path.join(caminho_padrao, r'Contoso - Clientes.csv'), sep=';')
# keeping only the columns we want
clientes_df = clientes_df[['ID Cliente', 'E-mail']]
produtos_df = produtos_df[['ID Produto', 'Nome do Produto']]
lojas_df = lojas_df[['ID Loja', 'Nome da Loja']]
# merging and renaming the dataframes
vendas_df = vendas_df.merge(produtos_df, on='ID Produto')
vendas_df = vendas_df.merge(lojas_df, on='ID Loja')
vendas_df = vendas_df.merge(clientes_df, on='ID Cliente').rename(columns={'E-mail': 'E-mail do Cliente'})
display(vendas_df)
# now let's create the csv
# you need to pass the path
vendas_df.to_csv(r'C:\Users\Maki\Downloads\nova-pasta\Novo Vendas 2017.csv', sep=';')
###Output
_____no_output_____
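A quick sanity check (a sketch, reusing the same path and separator as above) is to read the exported file straight back:

```python
# Sketch: read the exported file back to confirm it was written correctly.
check_df = pd.read_csv(r'C:\Users\Maki\Downloads\nova-pasta\Novo Vendas 2017.csv', sep=';')
display(check_df.head())
```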
###Markdown
Creating a dictionary, turning the dictionary into a DataFrame and exporting it to csv
###Code
vendas_produtos = {'iphone': [558147, 951642], 'galaxy': [712350, 244295], 'ipad': [573823, 26964], 'tv': [405252, 787604], 'máquina de café': [718654, 867660], 'kindle': [531580, 78830], 'geladeira': [973139, 710331], 'adega': [892292, 646016], 'notebook dell': [422760, 694913], 'notebook hp': [154753, 539704], 'notebook asus': [887061, 324831], 'microsoft surface': [438508, 667179], 'webcam': [237467, 295633], 'caixa de som': [489705, 725316], 'microfone': [328311, 644622], 'câmera canon': [591120, 994303]}
# turns the dictionary into a table
vendas_produtos_df = pd.DataFrame.from_dict(vendas_produtos, orient='index')
# renames the columns
vendas_produtos_df = vendas_produtos_df.rename(columns={0: 'Vendas 2019', 1: 'Vendas 2020'})
# writes the file to csv, using latin1 encoding so Brazilian special characters are preserved
vendas_produtos_df.to_csv(r'Novo Vendas Produtos.csv', sep=',', encoding='latin1')
###Output
_____no_output_____ |
DevelopmentSandbox/HomingCalibration-Copy1.ipynb | ###Markdown
Figure out homing. Code:
###Code
cnc = GRBL.GRBL(port="/dev/cnc_3018")
cnc.reset()
cnc.laser_mode = 1
print(cnc.laser_mode)
cnc.status
cnc.homing_dir_invert = 3
cnc.home()
cnc.status
cnc.cmd("G91G0Y-10")
cnc.reset()
cnc.cmd("$$")
cnc.home()
cnc.cmd("G91G0Y10")
###Output
_____no_output_____ |
Taller3semana3.ipynb | ###Markdown
Workshop week 3 Exercise 1:Load the data from the file ```pokemon_data.csv``` into a dataframe, create a column called interes and assign it the value False, then change the value of this column only for the pokemon that are legendary (Legendary = True) and whose Type 1 is fire.
###Code
import pandas as pd
# Write your code here
import os
print(os.getcwd())
# upload the file
from google.colab import files
uploaded = files.upload()
# read the file
pk = pd.read_csv('pokemon_data.csv')
# create a column named "INTERES" with the value False
df = pd.DataFrame(pk)
df['INTERES'] = False
print(df)
# select the legendary fire-type pokemon
df.loc[(df['Type 1'] == 'Fire') & (df['Legendary']==True)]
# change the "INTERES" value from False to True for the legendary fire-type pokemon
df.loc[(df['Type 1'] == 'Fire') & (df['Legendary']==True), 'INTERES'] = True
# select the pokemon again to check the changes
df.loc[(df['Type 1'] == 'Fire') & (df['Legendary']==True)]
###Output
_____no_output_____
###Markdown
Exercise 2With the pokemon data from above, write code to get the mean of the Attack column grouped by Type 1.
###Code
# Write your code here
# Code to get the mean of the Attack column grouped by Type 1.
df_type = pk['Type 1'].unique()
df_type
# Mean of the Attack column for every Type 1 value.
# The Spanish label "Promedio de" (mean of) is kept so the printed output below still matches.
for t in df_type:
    pro = df.loc[df['Type 1'] == t, 'Attack'].mean()
    print("Promedio de " + t + ": ", pro)
###Output
Promedio de Grass: 73.21428571428571
Promedio de Fire: 84.76923076923077
Promedio de Water: 74.15178571428571
Promedio de Bug: 70.97101449275362
Promedio de Normal: 73.46938775510205
Promedio de Poison: 74.67857142857143
Promedio de Electric: 69.0909090909091
Promedio de Ground: 95.75
Promedio de Fairy: 61.529411764705884
Promedio de Fighting: 96.77777777777777
Promedio de Psychic: 71.45614035087719
Promedio de Rock: 92.86363636363636
Promedio de Ghost: 73.78125
Promedio de Ice: 72.75
Promedio de Dragon: 112.125
Promedio de Dark: 88.38709677419355
Promedio de Steel: 92.70370370370371
Promedio de Flying: 78.75
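The same result can also be obtained in a single call with groupby (a sketch, shown as an alternative to the loop above):

```python
# Sketch: mean Attack per Type 1 in one line.
df.groupby('Type 1')['Attack'].mean()
```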
###Markdown
Exercise 3You are given the stock price movement data in the file ```DUK.csv```, and you are asked to return the mean of Open for 2015, that is, between the dates 2015-01-01 and 2015-12-31
###Code
# Write your code here
# upload the file
from google.colab import files
uploaded = files.upload()
# read the file
duk = pd.read_csv('DUK.csv')
# filter by date range and compute the mean of the Open column
duk.loc[(duk['Date']>= '2015-01-01') &(duk['Date'] <= '2015-12-31')]
print("promedio de Open del 2015: ",duk.loc[(duk['Date']>= '2015-01-01') &(duk['Date'] <= '2015-12-31'), 'Open'].mean())
###Output
promedio de Open del 2015: 74.8137697698413
|
20170618 Experiment.ipynb | ###Markdown
Utility functions
###Code
def layer_extraction(dcgan, file_names):
return dcgan.get_feature(FLAGS, file_names)
def maxpooling(disc):
kernel_stride_size = 4
maxpooling = [
tf.nn.max_pool(disc[i],ksize=[1,2**(4-i),2**(4-i),1],
strides=[1,2**(4-i),2**(4-i),1],padding='SAME')
for i in range(4)
]
# tf.global_variables_initializer().run()
maxpool_result = sess.run(maxpooling)
# for idx in range(4):
# print(idx, maxpool_result[idx].shape)
return maxpool_result
def flatten(disc):
flatten = [
tf.reshape(disc[i],[64, -1])
for i in range(4)
]
# tf.global_variables_initializer().run()
flatten_result = sess.run(flatten)
return flatten_result
def concat(disc):
concat = tf.concat(disc,1)
# tf.global_variables_initializer().run()
concat_result = sess.run(concat)
return concat_result
def feature_ext_GAN(file_names):
ret = layer_extraction(dcgan, file_names)
ret = maxpooling(ret)
ret = flatten(ret)
ret = concat(ret)
return ret
###Output
_____no_output_____
###Markdown
Integration
###Code
for term in range(0,1):
print('%d ~ %d' % (100*term,100*(term+1)))
disc_list = []
batch_list = []
file_names = []
for idx in range(100*term,100*(term+1)):
patch_path ="/media/dongwonshin/Ubuntu Data/Datasets/Places365/Large_images/val_large/patches"
data = glob("%s/Places365_val_%08d/*.jpg" % (patch_path, idx))
data.sort()
file_names.append(data)
file_names=np.concatenate(file_names)
print('total:',len(file_names))
# print(file_names)
for idx in range(0, len(file_names)-64,64):
batch_files = file_names[idx: idx+64]
disc = feature_ext_GAN(batch_files)
disc_list.append(disc)
batch_list.append(batch_files)
sys.stdout.write('.')
final_disc_list = np.concatenate(disc_list)
final_batch_list = np.concatenate(batch_list)
# for idx, name in enumerate(final_batch_list):
# output_filename = '/media/dongwonshin/Ubuntu Data/Datasets/Places365/Large_images/val_large/descs/' + (name.split('/')[-2])+'.desc'
# with open(output_filename,'at') as fp:
# for v in final_disc_list[idx]:
# fp.write('%f ' % v)
# fp.write('\n')
import numpy as np
from sklearn.decomposition import PCA
X = np.array(final_disc_list)
pca = PCA(n_components = 128)
pca.fit(X)
Y = pca.transform(X)
print(len(Y))
desc_path = "/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/Image Data/City Centre/descs"
desc_name = glob("%s/*.desc" % (desc_path))
desc_name.sort()
# Sanity check: descriptor files should be numbered consecutively;
# d[77:81] is the 4-digit index embedded in the absolute file path
for i, d in enumerate(desc_name):
    if (i+1 != int(d[77:81])):
        print(i+1)
        break
###Output
1020
###Markdown
Descriptor Save
###Code
for idx, name in enumerate(final_batch_list):
output_filename = '/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/Image Data/City Centre/descs/' + (name.split('/')[-2])+'.desc'
with open(output_filename,'at') as fp:
for v in final_disc_list[idx]:
fp.write('%f ' % v)
fp.write('\n')
###Output
_____no_output_____
###Markdown
Result Analysis
###Code
# import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
SURF_result_text = '/home/dongwonshin/Desktop/20170622_SURF_result_long_different_dataset.txt'
DCGAN_result_text = '/home/dongwonshin/Desktop/20170622_DCGAN_result_long_different_dataset.txt'
with open(SURF_result_text) as fp:
SURF_current_idx = []
SURF_most_related_idx = []
lines = fp.readlines()
for line in lines:
ele = line.strip().split(',')
SURF_current_idx.append(ele[0].split('=')[1])
SURF_most_related_idx.append(ele[2].split('=')[1])
with open(DCGAN_result_text) as fp:
DCGAN_current_idx = []
DCGAN_most_related_idx = []
lines = fp.readlines()
for line in lines:
ele = line.strip().split(',')
DCGAN_current_idx.append(ele[0].split('=')[1])
DCGAN_most_related_idx.append(ele[2].split('=')[1])
cnt = 0
LC_cs_cnt = 0  # loop closures confirmed for the SURF matches
LC_cd_cnt = 0  # loop closures confirmed for the DCGAN matches
# c: current frame, s: frame proposed by SURF, d: frame proposed by the DCGAN descriptor.
# A proposal counts as a loop closure when its GPS position lies within the 5-unit threshold.
for c, s, d in zip(SURF_current_idx, SURF_most_related_idx, DCGAN_most_related_idx):
gps_c = np.array(GPS_info_list[int(c)])
gps_s = np.array(GPS_info_list[int(s)])
gps_d = np.array(GPS_info_list[int(d)])
gps_cs = np.linalg.norm(gps_c-gps_s)
gps_cd = np.linalg.norm(gps_c-gps_d)
if (gps_cs < 5):
LC_cs = 'true'
LC_cs_cnt += 1
else:
LC_cs = 'false'
if (gps_cd < 5):
LC_cd = 'true'
LC_cd_cnt += 1
else:
LC_cd = 'false'
# print('%4d' % int(c), gps_c)
# print('%4d' % int(s), gps_s, gps_cs, LC_cs)
# print('%4d' % int(d), gps_d, gps_cd, LC_cd)
# print()
# cur_path = '/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/Image Data/City Centre/images/%04d.jpg' % int(c)
# surf_path = '/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/Image Data/City Centre/images/%04d.jpg' % int(s)
# dcgan_path = '/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/Image Data/City Centre/images/%04d.jpg' % int(d)
# print(cur_path)
# print(surf_path)
# print(dcgan_path)
# cur_img = mpimg.imread(cur_path)
# surf_img = mpimg.imread(surf_path)
# dcgan_img = mpimg.imread(dcgan_path)
# one_img = np.hstack([cur_img, surf_img, dcgan_img])
# plt.imshow(one_img)
# plt.show()
if (cnt > 170):
break
else:
cnt += 1
print('LC_cs_cnt = %d, LC_cd_cnt = %d' % (LC_cs_cnt, LC_cd_cnt))
###Output
LC_cs_cnt = 70, LC_cd_cnt = 69
###Markdown
Loop Closure GroundTruth Text Handling
###Code
LC_corr_list = []
with open('/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/GroundTruth Text/CityCentreGroundTruth.txt') as fp:
row = 1
for line in fp:
row_ele = line.strip().split(',')
if ('1' in row_ele):
col = 1
for r in row_ele:
if (r == '1'):
# print('(row, col) (%d, %d)' % (row, col))
LC_corr_list.append([row,col])
col+=1
row += 1
else:
print('eof')
GPS_info_list = [[0,0]] # dummy for a start index 1
with open('/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/GroundTruth Text/CityCentreGPSData.txt') as fp:
for line in fp:
GPS_info_list.append(
[float(line.strip().split(' ')[1]) , float(line.strip().split(' ')[2])]
)
else:
print('eof')
def isOdd(val):
return not (val%2==0)
def isEven(val):
return (val%2==0)
for i, corr in enumerate(LC_corr_list):
if (isOdd(corr[0]) and isEven(corr[1])):
continue
if (isEven(corr[0]) and isOdd(corr[1])):
continue
img_i_path = ('/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/Image Data/City Centre/images/%04d.jpg' % corr[0])
img_j_path = ('/media/dongwonshin/Ubuntu Data/Datasets/FAB-MAP/Image Data/City Centre/images/%04d.jpg' % corr[1])
print(corr[0], GPS_info_list[corr[0]])
print(corr[1], GPS_info_list[corr[1]])
img_i = mpimg.imread(img_i_path)
img_j = mpimg.imread(img_j_path)
merge_img = np.hstack([img_i, img_j])
plt.imshow(merge_img)
plt.show()
if i > 10:
break
###Output
1353 [201.13763, -174.712228]
305 [196.393236, -168.938331]
|
baycoms/Untitled.ipynb | ###Markdown
Deploy
###Code
import pickle

# Persist the trained classifier to disk
filename = 'titanic.model'
with open(filename, 'wb') as f:
    pickle.dump(clf, f)

# Reload it and compare predictions on the first ten samples with the true labels
with open(filename, 'rb') as f:
    loaded_model = pickle.load(f)
loaded_model.predict(X[:10,:])
Y[:10]
###Output
_____no_output_____ |
graph-partitioning-patoh.ipynb | ###Markdown
Prediction Model
###Code
m = gp.prediction_model()
rows = list(range(1, len(m)+1))
df = pd.DataFrame(m, index=rows, columns=cols)
print(df)
pos = nx.spring_layout(gp.G)
plt.figure(figsize=(10, 10))
plt.axis('off')
colours = {0: 'red', 1: 'blue', 2: 'yellow', 3: 'green'}
colour = []
#for n in gp.G.nodes_iter():
# colour += [colours[gp.assignments[n]]]
nx.draw_networkx_nodes(gp.G, pos, node_size=20, node_color=gp.assignments, cmap=plt.cm.jet, with_labels=False)
nx.draw_networkx_edges(gp.G, pos, alpha=0.2)
plt.show(gp.G)
###Output
WASTE CUT RATIO EDGES CUT TOTAL COMM VOLUME Qds CONDUCTANCE \
1 0.595 0.081007 238 242 0.365053 0.152957
MAXPERM NMI FSCORE FSCORE RELABEL IMPROVEMENT
1 0.396858 1.0 1.0 0.0
###Markdown
Assign Cut Off
###Code
m = gp.assign_cut_off()
rows = list(range(1, len(m)+1))
df = pd.DataFrame(m, index=rows, columns=cols)
print(df)
###Output
WASTE CUT RATIO EDGES CUT TOTAL COMM VOLUME Qds CONDUCTANCE MAXPERM \
1 0 0.0 0 0 0.0 0.0 0.0
NMI FSCORE FSCORE RELABEL IMPROVEMENT
1 1.0 0 0
###Markdown
Run Simulation
###Code
m = gp.batch_arrival()
rows = list(range(1, len(m)+1))
df = pd.DataFrame(m, index=rows, columns=cols).astype(float)
print(df)
if len(df) > 1:
'''
df.plot(y=['EDGES CUT', 'TOTAL COMM VOLUME'], xticks=rows, figsize=(5,4))
fig, axs = plt.subplots(1,6)
df.plot(y=['CUT RATIO'], title='Cut ratio', xticks=rows, figsize=(12,2), legend=False, ax=axs[0])
df.plot(y=['MODULARITY'], title='Modularity', xticks=rows, figsize=(12,2), legend=False, ax=axs[1])
df.plot(y=['LONELINESS'], title='Loneliness', xticks=rows, figsize=(12,2), legend=False, ax=axs[2])
df.plot(y=['NETWORK PERMANENCE'], title='Network permanence', xticks=rows, figsize=(12,2), legend=False, ax=axs[3])
df.plot(y=['NORM. MUTUAL INFO'], title='Norm. Mutual Info', xticks=rows, figsize=(12,2), legend=False, ax=axs[4])
df.plot(y=['FSCORE'], title='Fscore', xticks=rows, figsize=(12,2), legend=False, ax=axs[5])
'''
t = 1
xticks = rows[::t]
df.plot(y=['EDGES CUT', 'TOTAL COMM VOLUME'], xticks=xticks, figsize=(5,4))
fig, axs = plt.subplots(1,5)
df.plot(y=['CUT RATIO'], title='Cut ratio', xticks=xticks, figsize=(12,2), legend=False, ax=axs[0])
df.plot(y=['Qds'], title='Qds', xticks=xticks, figsize=(12,2), legend=False, ax=axs[1])
#df.plot(y=['LONELINESS'], title='Loneliness', xticks=xticks, figsize=(12,2), legend=False, ax=axs[2])
df.plot(y=['MAXPERM'], title='Network permanence', xticks=xticks, figsize=(12,2), legend=False, ax=axs[2])
df.plot(y=['NMI'], title='Norm. Mutual Info', xticks=rows, figsize=(12,2), legend=False, ax=axs[3])
df.plot(y=['FSCORE'], title='Fscore', xticks=rows, figsize=(12,2), legend=False, ax=axs[4])
else:
print("\n\nNot enough data points to plot charts. There is only one row.")
###Output
WASTE CUT RATIO EDGES CUT TOTAL COMM VOLUME Qds CONDUCTANCE \
1 0.000000 0.000000 0.0 0.0 0.241104 0.013824
2 0.000000 0.071429 4.0 7.0 0.549992 0.009859
3 0.000000 0.111111 13.0 22.0 0.560736 0.030331
4 0.000000 0.162562 33.0 49.0 0.584783 0.039638
5 0.100000 0.194357 62.0 80.0 0.484412 0.060358
6 0.000000 0.242222 109.0 128.0 0.478400 0.065234
7 0.003584 0.264559 159.0 171.0 0.487315 0.081504
MAXPERM NMI FSCORE FSCORE RELABEL IMPROVEMENT
1 -0.595833 0.184319 0.111389 0.384744
2 -0.166156 0.118765 0.119664 0.333689
3 -0.036492 0.122011 0.116082 0.341185
4 0.123173 0.121747 0.111230 0.321873
5 0.218216 0.121830 0.105307 0.300715
6 0.300843 0.111486 0.112522 0.285824
7 0.328282 0.098845 0.121648 0.254450
###Markdown
Network Visual
###Code
part = dict(zip(gp.G.nodes(), gp.assignments))
values = [part.get(node) for node in gp.G.nodes()]
pos_spring = nx.spring_layout(gp.G, k=0.5, iterations=20)
pos = {}
with open(gp.POPULATION_LOCATION_FILE, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for node, row in enumerate(reader):
pos[node] = np.array(row).astype(np.float)
# create colours
cmap = utils.get_cmap(gp.num_partitions)
colours = np.zeros((gp.G.number_of_nodes(), 4), dtype=np.float)
for i,n in enumerate(gp.G.nodes_iter()):
if gp.assignments[n] == -1:
col = [(1.0, 1.0, 1.0, 1.0)]
else:
col = [cmap(gp.assignments[n])]
colours[i] = np.array(col, dtype=np.float)
plt.figure(figsize=(10, 10))
plt.axis('off')
nx.draw_networkx_nodes(gp.G, pos, node_size=20, node_color=colours, cmap=plt.cm.jet, with_labels=False)
nx.draw_networkx_edges(gp.G, pos, alpha=0.5)
plt.show(gp.G)
#nx.draw_networkx_nodes(gp.G, pos_spring, node_size=20, node_color=colour, cmap=plt.cm.jet, with_labels=False)
#nx.draw_networkx_edges(gp.G, pos_spring, alpha=0.5)
#plt.show(gp.G)
###Output
_____no_output_____
###Markdown
Metrics
###Code
if run_metrics:
gp.get_metrics()
###Output
Complete graph with 1000 nodes
Config
-------
file: 172039
num_partitions: 4
num_iterations: 1
prediction_model_cut_off: 0.0
restream_batches: 40
use_virtual_nodes: False
virtual_edge_weight: 1.0
edges_cut: 159
Metrics
-------
waste: 0.0010000000000000009
cut_ratio: 0.26455906821963393
total_communication_volume: 171
network_permanence: -0.620997
Q: 0.7743396313692862
NQ: -0.7955521736135005
Qds: 0.13046087353602737
intraEdges: 261.2826086956522
interEdges: 36.56521739130435
intraDensity: 0.16163416522720014
modularity degree: 231.9607765041976
conductance: 0.5063493260506428
expansion: 9.910209813070226
contraction: 19.99546096542664
fitness: 0.46989179116337393
QovL: 0.9296411421159722
Partition 0 with 70 nodes
-----------------------------
Metrics
file: 172039
partition: 0
population: 70
Q: 0.4428206071326235
NQ: -1.2136482126435053
Qds: 0.1059902452657825
intraEdges: 145.1818181818182
interEdges: 19.454545454545453
intraDensity: 0.03336839644693782
modularity degree: -62.30208333333334
conductance: 0.8207918811968881
expansion: 15.775568181818182
contraction: 10.111742424242424
fitness: 0.1766788161496044
QovL: 0.8288618081485268
file: 172039
partition: 0
population: 70
modularity: 0.6290093111485816
loneliness_score: 0.7587330289705778
network_permanence: 0.253506
Partition 1 with 69 nodes
-----------------------------
Metrics
file: 172039
partition: 1
population: 69
Q: 0.3791164274189407
NQ: -6.5030424048594995
Qds: -0.2729961375381495
intraEdges: 13.631578947368421
interEdges: 17.42105263157895
intraDensity: 0.013351478168411807
modularity degree: -598.0193236714977
conductance: 0.9473684210526315
expansion: 17.42105263157895
contraction: 1.6837020086448002
fitness: 0.05263157894736842
QovL: 0.5869739563128462
file: 172039
partition: 1
population: 69
modularity: 0.8180593534137717
loneliness_score: 0.7221783084456377
network_permanence: 0.336957
Partition 2 with 70 nodes
-----------------------------
Metrics
file: 172039
partition: 2
population: 70
Q: 0.6918088798473049
NQ: -1.8029968494357185
Qds: 0.17903640827258768
intraEdges: 56.8235294117647
interEdges: 16.58823529411765
intraDensity: 0.1294707356626861
modularity degree: -88.53176691729323
conductance: 0.7058823529411765
expansion: 16.58823529411765
contraction: 11.38048429898275
fitness: 0.29411764705882354
QovL: 0.862380072077046
file: 172039
partition: 2
population: 70
modularity: 0.864964596649881
loneliness_score: 0.7407959929318784
network_permanence: 0.396905
Partition 3 with 70 nodes
-----------------------------
Metrics
file: 172039
partition: 3
population: 70
Q: 0.5917764328942684
NQ: -4.200045468140767
Qds: 9.170483008115659E-4
intraEdges: 30.0
interEdges: 18.51851851851852
intraDensity: 0.04986618319951654
modularity degree: -348.12820512820514
conductance: 0.85359896933971
expansion: 18.128205128205128
contraction: 5.234567901234567
fitness: 0.144759427368123
QovL: 0.7581581552421838
file: 172039
partition: 3
population: 70
modularity: 0.8068952474190104
loneliness_score: 0.7424382389367348
network_permanence: 0.325884
|
CA05_A_Jerry_Gehrung.ipynb | ###Markdown
Logistic Regression Data Source and Contents
###Code
# import packacges
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import confusion_matrix
# read dataset
df = pd.read_csv('https://github.com/ArinB/CA05-B-Logistic-Regression/raw/master/cvd_data.csv')
# inspect dataset
df.head()
###Output
_____no_output_____
###Markdown
Binary Classifier Model
###Code
# select datasets to split
X = df.drop(['cvd_4types'], axis = 1)
y = df.cvd_4types
# split datasets into training and testing data
X_train, X_test, y_train, y_test = train_test_split(X, y)
# fit the Logistic Regression model
lr = LogisticRegression(max_iter = 1000)
lr.fit(X_train, y_train)
# make predictions using testing data
y_pred = lr.predict(X_test)
###Output
_____no_output_____
###Markdown
Feature Importance
###Code
# find coefficients
lr.coef_
# store coefficients in dictionary
coef_dict = list(zip(X, lr.coef_[0, :]))
coef_dict
# display coefficients in table
coef_table = pd.DataFrame(coef_dict, columns = ['Feature', 'Coefficient'])
coef_table
# sort coefficients by feature importance
coef_table.sort_values(by = 'Coefficient', key = abs, ascending = False)
###Output
_____no_output_____
###Markdown
Evaluating Performance
###Code
# Logistic Regression accuracy on the training set
lr.score(X_train, y_train)
# Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
print('True Negatives:', cm[0,0])
print('True Positives:', cm[1,1])
print('False Negatives:', cm[1,0])
print('False Positives:', cm[0,1])
# ROC Curve
metrics.plot_roc_curve(lr, X_test, y_test)
###Output
_____no_output_____ |
notebooks/align_fits_wcs.ipynb | ###Markdown
Aligning HST ACS/HRC Images Using `tweakwcs` *** About this Notebook**Author:** Mihai Cara, STScI**Initial version on:** 11/20/2018**Updated on:** 02/07/2019 *** IntroductionOften the World Coordinate System (WCS) of images may contain small errors. These alignment errors in the WCS of the images need to be removed before images can be further processed, e.g., before they can be combined into a mosaiced image. The images are said to be aligned (in a relative sense) _on the sky_ when image coordinates _of the same object_ (present in several images) can be converted to approximately the same sky coordinates (using the appropriate image's WCS).In this notebook we illustrate how to create source catalogs using the `photutils` package and then how to match sources from image catalogs and find aligned `WCS` using the `tweakwcs` package. *** Imports
###Code
import shutil
import glob
import os
import sys
import logging
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.nddata import NDData
from astroquery.mast import Observations
from photutils import detect_threshold, DAOStarFinder
from stwcs.wcsutil import HSTWCS
from drizzlepac import updatehdr
from tweakwcs import fit_wcs, align_wcs, FITSWCS, TPMatch, WCSImageCatalog
###Output
_____no_output_____
###Markdown
*** Setup LoggingAdd basic support for logging to `stdout`:
###Code
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
###Output
_____no_output_____
###Markdown
*** Download DataFor this example, we have chosen an HST ACS/WFC observation of NGC104 in the F606W filter. The data come from the SM3/ACS proposal 9019 _"HRC flat field stability"_ (PI: Ralph Bohlin).Data are downloaded using the `astroquery` API to access the [MAST](http://archive.stsci.edu) archive. The `astroquery.mast` [documentation](http://astroquery.readthedocs.io/en/latest/mast/mast.html) has more examples of how to find and download data from MAST.
###Code
# If mastDownload directory already exists, delete it
# and all subdirectories it contains:
if os.path.isdir('mastDownload'):
shutil.rmtree('mastDownload')
# Retrieve the observation information.
obs_table = Observations.query_criteria(obs_id='j8bt06*', filters='F606W', obstype='ALL')
products = Observations.get_product_list(obs_table)
# Download only the 'j8bt06nyq' and 'j8bt06nzq' images:
Observations.download_products(products, mrp_only=False, obs_id=['j8bt06nyq', 'j8bt06nzq'],
productSubGroupDescription=['FLC', 'FLT'],
extension='fits')
def copy_mast_to_cwd():
"""
Move the files from the mastDownload directory to the current working
directory and make a backup of the files. Return a list of image file
names in the CWD.
"""
downloaded_fits_files = glob.glob('mastDownload/HST/j*/j*flt.fits')
fits_files = []
for fil in downloaded_fits_files:
base_name = os.path.basename(fil)
fits_files.append(base_name)
if os.path.isfile(base_name):
os.remove(base_name)
shutil.copy2(fil, '.')
return fits_files
fits_files = copy_mast_to_cwd()
###Output
_____no_output_____
###Markdown
*** EXAMPLE 1: Simple Workflow to Align Two or More ImagesIn this example we illustrate the use of the convenience function `align_wcs()` to align two images downloaded in the previous step. 1.1 Create Source Catalogs and FITSWCS WCS Corrector ObjectsThis stage fundamentally consists of three steps:- create source catalogs for each image whose WCS needs to be aligned;- create a tangent-plane WCS corrector object specific to the image's WCS and supply the catalog to this object through its `meta` attribute, essentially creating a WCS-catalog combination;- call `align_wcs` to align WCS.For the first step, we use the `photutils` package to find stars in the images. One can use any other tool for star finding. Then, for the second step, we create a FITSWCS corrector object, which is appropriate for the FITS WCS used in HST images.
###Code
wcs_catalogs = []
for group_id, file in enumerate(fits_files):
group_id += 1
with fits.open(file) as hdulist:
# extract data and WCS from each SCI extension:
im_data = hdulist[('SCI', 1)].data
w = HSTWCS(hdulist, ('SCI', 1))
# find sources / create catalogs
threshold = detect_threshold(im_data, snr=100.0)[0, 0]
daofind = DAOStarFinder(fwhm=2.5, threshold=threshold, exclude_border=True)
cat = daofind(im_data)
cat.rename_column('xcentroid', 'x')
cat.rename_column('ycentroid', 'y')
print("Length of catalog #{:d}: {:d}".format(group_id, len(cat)))
# create tangent-plane WCS corrector:
wcscat = FITSWCS(wcs=w,
meta={
'group_id': group_id,
'name': 'im{:d} sources'.format(group_id), # or use any other naming scheme
'catalog': cat,
'filename': file, # optional info
'ext': ('SCI', 1), # optional info
}
)
wcs_catalogs.append(wcscat)
###Output
_____no_output_____
###Markdown
1.2 Align Images (Find Corrected WCS):
###Code
align_wcs(wcs_catalogs, expand_refcat=True, enforce_user_order=False);
###Output
_____no_output_____
###Markdown
1.3 Update FITS File Headers with Aligned WCS
###Code
for file, w in zip(fits_files, [im.wcs for im in wcs_catalogs]):
with fits.open(file, mode='update') as hdulist:
updatehdr.update_wcs(hdulist, 1, w, wcsname='TWEAK', reusename=True, verbose=False)
###Output
_____no_output_____
###Markdown
The above code serves as a simple illustration of updating files. The logic could be expanded to utilize the information stored in the `meta` dictionary such as file name and extension. *** EXAMPLE 2: Customizable Workflow to Align Two or More Images or to Align to an External Reference CatalogIn this example we show how to use lower-level functions to align two images. This approach allows significantly higher customization compared to the use of the convenience function `align_wcs()` from Example 1. In addition, this approach allows inspection and logging of intermediate results such as number of matched sources, their indices in the corresponding catalogs, linear fit results, fit residuals, etc. 2.1 Get a Fresh Copy of Data
###Code
fits_files = copy_mast_to_cwd()
###Output
_____no_output_____
###Markdown
2.2 Create Catalogs and Create a Telescope/Instrument-specific "corrector" WCS objectBelow we take the sources from the first image to create a "reference" catalog. Therefore this example can also be used for aligning images to _external_ "reference" catalogs. Since we are working with HST images that use FITS WCS, we will use the `FITSWCS` tangent-plane corrector specific to FITS WCS.
###Code
catalogs = []
for group_id, file in enumerate(fits_files):
group_id += 1
with fits.open(file) as hdulist:
im_data = hdulist[('SCI', 1)].data
dq_data = hdulist[('DQ', 1)].data
w = HSTWCS(hdulist, ('SCI', 1))
# create FITS WCS corrector object
wcs_corrector = FITSWCS(w)
# find stars:
threshold = detect_threshold(im_data, snr=100.0)[0, 0]
daofind = DAOStarFinder(fwhm=2.5, threshold=threshold, exclude_border=True)
cat = daofind(im_data)
cat.rename_column('xcentroid', 'x')
cat.rename_column('ycentroid', 'y')
cat.meta['name'] = 'im{:d} sources'.format(group_id)
cat.meta['file_name'] = file
print("Length of catalog #{:d}: {:d}".format(group_id, len(cat)))
catalogs.append((cat, wcs_corrector))
###Output
_____no_output_____
###Markdown
Create a "reference" catalog based on the first image's stars. A reference catalog must have star coordinates in world coordinates. When using external reference catalog, this step essentially can be skipped.
###Code
refcat, refwcs = catalogs.pop(0)
refcat.meta['name'] = 'REFCAT ({})'.format(refcat.meta['name'])
# convert image coordinates to sky coords:
ra, dec = refwcs.det_to_world(refcat['x'], refcat['y'])
refcat['RA'] = ra
refcat['DEC'] = dec
###Output
_____no_output_____
###Markdown
2.3 Match Catalogs and Align Image WCS
###Code
match = TPMatch(searchrad=5, separation=0.1, tolerance=5, use2dhist=False)
for imcat, imwcs in catalogs:
# Match sources in the catalogs:
ridx, iidx = match(refcat, imcat, imwcs)
# Align image WCS:
aligned_imwcs = fit_wcs(refcat[ridx], imcat[iidx], imwcs).wcs
imcat.meta['aligned_wcs'] = aligned_imwcs
###Output
_____no_output_____
###Markdown
2.4 Update FITS File Headers with Aligned WCS
###Code
for cat, _ in catalogs:
with fits.open(cat.meta['file_name'], mode='update') as hdulist:
updatehdr.update_wcs(hdulist, 1, cat.meta['aligned_wcs'], wcsname='TWEAK', reusename=True, verbose=False)
###Output
_____no_output_____
###Markdown
*** Delete Downloaded Data
###Code
# Delete the mastDownload directory and all subdirectories it contains:
if os.path.isdir('mastDownload'):
shutil.rmtree('mastDownload')
###Output
_____no_output_____ |
01. Python/04. Advanced/08.2 Effective Python Testing With Pytest.ipynb | ###Markdown
Effective Python Testing With Pytest Table of Contents * [How to Install `pytest`](how_to_install_`pytest`)* [What Makes `pytest` So Useful?](what_makes_`pytest`_so_useful?) * [Less Boilerplate](less_boilerplate) * [State and Dependency Management](state_and_dependency_management) * [Test Filtering](test_filtering) * [Test Parametrization](test_parametrization) * [Plugin-Based Architecture](plugin-based_architecture)* [Fixtures: Managing State and Dependencies](fixtures:_managing_state_and_dependencies) * [When to Create Fixtures](when_to_create_fixtures) * [When to Avoid Fixtures](when_to_avoid_fixtures) * [Fixtures at Scale](fixtures_at_scale)* [Marks: Categorizing Tests](marks:_categorizing_tests)* [Parametrization: Combining Tests](parametrization:_combining_tests)* [Durations Reports: Fighting Slow Tests](durations_reports:_fighting_slow_tests)* [Useful `pytest` Plugins](useful_`pytest`_plugins) * [`pytest-randomly`](`pytest-randomly`) * [`pytest-cov`](`pytest-cov`) * [`pytest-django`](`pytest-django`) * [`pytest-bdd`](`pytest-bdd`)* [Conclusion](conclusion)--- [Testing your code](https://realpython.com/python-testing/) brings a wide variety of benefits. It increases your confidence that the code behaves as you expect and ensures that changes to your code won’t cause regressions. Writing and maintaining tests is hard work, so you should leverage all the tools at your disposal to make it as painless as possible. [`pytest`](https://docs.pytest.org/) is one of the best tools you can use to boost your testing productivity. **In this tutorial, you’ll learn:** - What **benefits** `pytest` offers- How to ensure your tests are **stateless**- How to make repetitious tests more **comprehensible**- How to run **subsets** of tests by name or custom groups- How to create and maintain **reusable** testing utilities How to Install `pytest` To follow along with some of the examples in this tutorial, you’ll need to install `pytest`. As with most [Python packages](https://realpython.com/python-modules-packages/), you can install `pytest` in a [virtual environment](https://realpython.com/python-virtual-environments-a-primer/) from [PyPI](https://realpython.com/pypi-publish-python-package/) using [`pip`](https://realpython.com/what-is-pip/): ```sh$ python -m pip install pytest``` The `pytest` command will now be available in your installation environment. What Makes `pytest` So Useful? If you’ve written unit tests for your Python code before, then you may have used Python’s built-in **`unittest`** module. `unittest` provides a solid base on which to build your test suite, but it has a few shortcomings. A number of third-party testing frameworks attempt to address some of the issues with `unittest`, and [`pytest` has proven to be one of the most popular](https://realpython.com/courses/test-driven-development-pytest/). `pytest` is a feature-rich, plugin-based ecosystem for testing your Python code. If you haven’t had the pleasure of using `pytest` yet, then you’re in for a treat! Its philosophy and features will make your testing experience more productive and enjoyable. With `pytest`, common tasks require less code and advanced tasks can be achieved through a variety of time-saving commands and plugins. It will even run your existing tests out of the box, including those written with `unittest`. As with most frameworks, some development patterns that make sense when you first start using `pytest` can start causing pains as your test suite grows. 
This tutorial will help you understand some of the tools `pytest` provides to keep your testing efficient and effective even as it scales. Less Boilerplate Most functional tests follow the Arrange-Act-Assert model: 1. **Arrange**, or set up, the conditions for the test2. **Act** by calling some function or method3. **Assert** that some end condition is true Testing frameworks typically hook into your test’s [assertions](https://realpython.com/lessons/assertions-and-tryexcept/) so that they can provide information when an assertion fails. `unittest`, for example, provides a number of helpful assertion utilities out of the box. However, even a small set of tests requires a fair amount of [boilerplate code](https://en.wikipedia.org/wiki/Boilerplate_code). Imagine you’d like to write a test suite just to make sure `unittest` is working properly in your project. You might want to write one test that always passes and one that always fails:
###Code
import re
from unittest import TestCase
class TryTesting(TestCase):
def test_always_passes(self):
self.assertTrue(True)
def test_always_fails(self):
self.assertTrue(False)
###Output
_____no_output_____
###Markdown
You can then run those tests from the command line using the `discover` option of `unittest`: ```sh$ python -m unittest discoverF.============================================================FAIL: test_always_fails (test_with_unittest.TryTesting)------------------------------------------------------------Traceback (most recent call last): File "/.../test_with_unittest.py", line 9, in test_always_fails self.assertTrue(False)AssertionError: False is not True------------------------------------------------------------Ran 2 tests in 0.001sFAILED (failures=1)``` As expected, one test passed and one failed. You’ve proven that `unittest` is working, but look at what you had to do: 1. Import the `TestCase` class from `unittest`2. Create `TryTesting`, a [subclass](https://realpython.com/python3-object-oriented-programming/) of `TestCase`3. Write a method in `TryTesting` for each test4. Use one of the `self.assert*` methods from `unittest.TestCase` to make assertions That’s a significant amount of code to write, and because it’s the minimum you need for *any* test, you’d end up writing the same code over and over. `pytest` simplifies this workflow by allowing you to use Python’s `assert` keyword directly:
###Code
# test_with_pytest.py
def test_always_passes():
assert True
def test_always_fails():
assert False
###Output
_____no_output_____
###Markdown
That’s it. You don’t have to deal with any imports or classes. Because you can use the `assert` keyword, you don’t need to learn or remember all the different `self.assert*` methods in `unittest`, either. If you can write an expression that you expect to evaluate to `True`, then `pytest` will test it for you. You can run it using the `pytest` command: ```sh$ pytest================== test session starts =============================platform darwin -- Python 3.7.3, pytest-5.3.0, py-1.8.0, pluggy-0.13.0rootdir: /.../effective-python-testing-with-pytestcollected 2 itemstest_with_pytest.py .F [100%]======================== FAILURES ==================================___________________ test_always_fails ______________________________ def test_always_fails():> assert FalseE assert Falsetest_with_pytest.py:5: AssertionError============== 1 failed, 1 passed in 0.07s =========================``` `pytest` presents the test results differently than `unittest`. The report shows: 1. The system state, including which versions of Python, `pytest`, and any plugins you have installed2. The `rootdir`, or the directory to search under for configuration and tests3. The number of tests the runner discovered The output then indicates the status of each test using a syntax similar to `unittest`: - **A dot (`.`)** means that the test passed. - **`F`** means that the test has failed. - **`E`** means that the test raised an unexpected exception. For pytest, any uncaught exception thrown in a test function is a failure, including but not limited to assertion errors. So error is reserved for a failure in a fixture.Output may also include `s`, `x`, and `X`, which Will be explained later in pytest marks. In short:- **`s`** means skipped.- **`x`** means xfailed.- **`X`** means xpassed.For example:```[email protected] error_fixture(): assert Falsedef test_fail(error_fixture): pass``` For tests that fail, the report gives a detailed breakdown of the failure. In the example above, the test failed because `assert False` always fails. Finally, the report gives an overall status report of the test suite. Here are a few more quick assertion examples:
###Code
def test_uppercase():
assert "loud noises".upper() == "LOUD NOISES"
def test_reversed():
assert list(reversed([1, 2, 3, 4])) == [4, 3, 2, 1]
def test_some_primes():
assert 37 in {
num
for num in range(1, 50)
if num != 1 and not any([num % div == 0 for div in range(2, num)])
}
###Output
_____no_output_____
###Markdown
The learning curve for `pytest` is shallower than it is for `unittest` because you don’t need to learn new constructs for most tests. Also, the use of `assert`, which you may have used before in your implementation code, makes your tests more understandable. State and Dependency Management Your tests will often depend on pieces of data or [test doubles](https://en.wikipedia.org/wiki/Test_double) for some of the objects in your code. In `unittest`, you might extract these dependencies into `setUp()` and `tearDown()` methods so each test in the class can make use of them. But in doing so, you may inadvertently make the test’s dependence on a particular piece of data or object entirely **implicit**. Over time, implicit dependencies can lead to a complex tangle of code that you have to unwind to make sense of your tests. Tests should help you make your code more understandable. If the tests themselves are difficult to understand, then you may be in trouble! `pytest` takes a different approach. It leads you toward **explicit** dependency declarations that are still reusable thanks to the availability of [fixtures](https://docs.pytest.org/en/latest/fixture.html). `pytest` fixtures are functions that create data or test doubles or initialize some system state for the test suite. Any test that wants to use a fixture must explicitly accept it as an argument, so dependencies are always stated up front. Fixtures can also make use of other fixtures, again by declaring them explicitly as dependencies. That means that, over time, your fixtures can become bulky and modular. Although the ability to insert fixtures into other fixtures provides enormous flexibility, it can also make managing dependencies more challenging as your test suite grows. Later in this tutorial, you’ll learn [more about fixtures](fixtures:_managing_state_and_dependencies) and try a few techniques for handling these challenges. Test Filtering As your test suite grows, you may find that you want to run just a few tests on a feature and save the full suite for later. `pytest` provides a few ways of doing this: - **Name-based filtering**: You can limit `pytest` to running only those tests whose fully qualified names match a particular expression. You can do this with the `-k` parameter.- **Directory scoping**: By default, `pytest` will run only those tests that are in or under the current directory.- **Test categorization**: `pytest` can include or exclude tests from particular categories that you define. You can do this with the `-m` parameter. Test categorization in particular is a subtly powerful tool. `pytest` enables you to create **marks**, or custom labels, for any test you like. A test may have multiple labels, and you can use them for granular control over which tests to run. Later in this tutorial, you’ll see an example of [how `pytest` marks work](marks-categorizing-tests) and learn how to make use of them in a large test suite. Test Parametrization When you’re testing functions that process data or perform generic transformations, you’ll find yourself writing many similar tests. They may differ only in the [input or output](https://realpython.com/python-input-output/) of the code being tested. This requires duplicating test code, and doing so can sometimes obscure the behavior you’re trying to test. `unittest` offers a way of collecting several tests into one, but they don’t show up as individual tests in result reports. If one test fails and the rest pass, then the entire group will still return a single failing result. 
`pytest` offers its own solution in which each test can pass or fail independently. You’ll see [how to parametrize tests](parametrization-combining-tests) with `pytest` later in this tutorial. Plugin-Based Architecture One of the most beautiful features of `pytest` is its openness to customization and new features. Almost every piece of the program can be cracked open and changed. As a result, `pytest` users have developed a rich ecosystem of helpful plugins. Although some `pytest` plugins focus on specific frameworks like [Django](https://www.djangoproject.com/), others are applicable to most test suites. You’ll see [details on some specific plugins](useful-pytest-plugins) later in this tutorial. Fixtures: Managing State and Dependencies `pytest` fixtures are a way of providing data, test doubles, or state setup to your tests. Fixtures are functions that can return a wide range of values. Each test that depends on a fixture must explicitly accept that fixture as an argument. When to Create Fixtures Imagine you’re writing a function, `format_data_for_display()`, to process the data returned by an API endpoint. The data represents a list of people, each with a given name, family name, and job title. The function should output a list of strings that include each person’s full name (their `given_name` followed by their `family_name`), a colon, and their `title`. To test this, you might write the following code:
###Code
def format_data_for_display(people):
... # Implement this!
def test_format_data_for_display():
people = [
{
"given_name": "Alfonsa",
"family_name": "Ruiz",
"title": "Senior Software Engineer",
},
{
"given_name": "Sayid",
"family_name": "Khan",
"title": "Project Manager",
},
]
assert format_data_for_display(people) == [
"Alfonsa Ruiz: Senior Software Engineer",
"Sayid Khan: Project Manager",
]
###Output
_____no_output_____
###Markdown
Now suppose you need to write another function to transform the data into comma-separated values for use in [Excel](https://realpython.com/openpyxl-excel-spreadsheets-python/). The test would look awfully similar:
###Code
def format_data_for_excel(people):
... # Implement this!
def test_format_data_for_excel():
people = [
{
"given_name": "Alfonsa",
"family_name": "Ruiz",
"title": "Senior Software Engineer",
},
{
"given_name": "Sayid",
"family_name": "Khan",
"title": "Project Manager",
},
]
assert format_data_for_excel(people) == """given,family,title
Alfonsa,Ruiz,Senior Software Engineer
Sayid,Khan,Project Manager
"""
###Output
_____no_output_____
###Markdown
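Both tests above leave the implementation as `... # Implement this!`. A minimal sketch that would satisfy the exact outputs shown (my own illustration, not code from the tutorial) could look like this:

```python
def format_data_for_display(people):
    # "Given Family: Title" for each person, in input order
    return [
        f"{person['given_name']} {person['family_name']}: {person['title']}"
        for person in people
    ]

def format_data_for_excel(people):
    # Header row plus one comma-separated row per person,
    # with a trailing newline to match the expected string
    lines = ["given,family,title"]
    for person in people:
        lines.append(
            f"{person['given_name']},{person['family_name']},{person['title']}"
        )
    return "\n".join(lines) + "\n"
```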
If you find yourself writing several tests that all make use of the same underlying test data, then a fixture may be in your future. You can pull the repeated data into a single function decorated with `@pytest.fixture` to indicate that the function is a `pytest` fixture:
###Code
import pytest
@pytest.fixture
def example_people_data():
return [
{
"given_name": "Alfonsa",
"family_name": "Ruiz",
"title": "Senior Software Engineer",
},
{
"given_name": "Sayid",
"family_name": "Khan",
"title": "Project Manager",
},
]
###Output
_____no_output_____
###Markdown
You can use the fixture by adding it as an argument to your tests. Its value will be the return value of the fixture function:
###Code
def test_format_data_for_display(example_people_data):
assert format_data_for_display(example_people_data) == [
"Alfonsa Ruiz: Senior Software Engineer",
"Sayid Khan: Project Manager",
]
def test_format_data_for_excel(example_people_data):
assert format_data_for_excel(example_people_data) == """given,family,title
Alfonsa,Ruiz,Senior Software Engineer
Sayid,Khan,Project Manager
"""
###Output
_____no_output_____
###Markdown
Each test is now notably shorter but still has a clear path back to the data it depends on. Be sure to name your fixture something specific. That way, you can quickly determine if you want to use it when writing new tests in the future! When to Avoid Fixtures Fixtures are great for extracting data or objects that you use across multiple tests. They aren’t always as good for tests that require slight variations in the data. Littering your test suite with fixtures is no better than littering it with plain data or objects. It might even be worse because of the added layer of indirection. As with most abstractions, it takes some practice and thought to find the right level of fixture use. Fixtures at Scale As you extract more fixtures from your tests, you might see that some fixtures could benefit from further extraction. Fixtures are **modular**, so they can depend on other fixtures. You may find that fixtures in two separate test modules share a common dependency. What can you do in this case? You can move fixtures from test [modules](https://realpython.com/python-modules-packages/) into more general fixture-related modules. That way, you can import them back into any test modules that need them. This is a good approach when you find yourself using a fixture repeatedly throughout your project. `pytest` looks for `conftest.py` modules throughout the directory structure. Each `conftest.py` provides configuration for the file tree `pytest` finds it in. You can use any fixtures that are defined in a particular `conftest.py` throughout the file’s parent directory and in any subdirectories. This is a great place to put your most widely used fixtures. Another interesting use case for fixtures is in guarding access to resources. Imagine that you’ve written a test suite for code that deals with [API calls](https://realpython.com/api-integration-in-python/). You want to ensure that the test suite doesn’t make any real network calls, even if a test accidentally executes the real network call code. `pytest` provides a [`monkeypatch`](https://docs.pytest.org/en/latest/monkeypatch.html) fixture to replace values and behaviors, which you can use to great effect:
###Code
# conftest.py
import pytest
import requests
@pytest.fixture(autouse=True)
def disable_network_calls(monkeypatch):
def stunted_get():
raise RuntimeError("Network access not allowed during testing!")
monkeypatch.setattr(requests, "get", lambda *args, **kwargs: stunted_get())
###Output
_____no_output_____
###Markdown
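For instance, a hypothetical test module (names made up for illustration) that exercises code calling `requests.get()` would now fail fast inside the guard instead of reaching the network:

```python
# test_network_guard.py -- hypothetical example, not part of the tutorial
import pytest
import requests

def get_todo_title():
    # Code under test that would normally perform a real HTTP request
    response = requests.get("https://example.com/api/todo/1")
    return response.json()["title"]

def test_network_calls_are_blocked():
    # The autouse fixture has replaced requests.get, so only the guard error is seen
    with pytest.raises(RuntimeError, match="Network access not allowed"):
        get_todo_title()
```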
By placing `disable_network_calls()` in `conftest.py` and adding the `autouse=True` option, you ensure that network calls will be disabled in every test across the suite. Any test that executes code calling `requests.get()` will raise a `RuntimeError` indicating that an unexpected network call would have occurred. Marks: Categorizing Tests In any large test suite, some of the tests will inevitably be slow. They might test timeout behavior, for example, or they might exercise a broad area of the code. Whatever the reason, it would be nice to avoid running *all* the slow tests when you’re trying to iterate quickly on a new feature. `pytest` enables you to define categories for your tests and provides options for including or excluding categories when you run your suite. You can mark a test with any number of categories. Marking tests is useful for categorizing tests by subsystem or dependencies. If some of your tests require access to a database, for example, then you could create a `@pytest.mark.database_access` mark for them. When the time comes to run your tests, you can still run them all by default with the `pytest` command. If you’d like to run only those tests that require database access, then you can use `pytest -m database_access`. To run all tests *except* those that require database access, you can use `pytest -m "not database_access"`. You can even use an `autouse` fixture to limit database access to those tests marked with `database_access`. Some plugins expand on the functionality of marks by guarding access to resources. The [`pytest-django`](https://pytest-django.readthedocs.io/en/latest/) plugin provides a `django_db` mark. Any tests without this mark that try to access the database will fail. The first test that tries to access the database will trigger the creation of Django’s test database. The requirement that you add the `django_db` mark nudges you toward stating your dependencies explicitly. That’s the `pytest` philosophy, after all! It also means that you can run tests that don’t rely on the database much more quickly, because `pytest -m "not django_db"` will prevent the test from triggering database creation. The time savings really add up, especially if you’re diligent about running your tests frequently. `pytest` provides a few marks out of the box: - **`skip`** skips a test unconditionally.- **`skipif`** skips a test if the expression passed to it evaluates to `True`.- **`xfail`** indicates that a test is expected to fail, so if the test *does* fail, the overall suite can still result in a passing status.- **`parametrize`** (note the spelling) creates multiple variants of a test with different values as arguments. You’ll learn more about this mark shortly. **`xfail`** tests as implemented in pytest are distinct from tests that are expected to produce an error. The xfail test should pass and is expected to pass in the future, but is expected to fail given the current state of the software. In contrast, tests that are expected to produce an error fail if there is no such error. Such error tests are e.g. useful to assert that input validation succeeds. You can see a list of all the marks `pytest` knows about by running `pytest --markers`.
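As a quick illustration of how these built-in marks are applied (the decorators are real `pytest` APIs; the test bodies below are made-up sketches):

```python
import sys

import pytest

@pytest.mark.skip(reason="feature not implemented yet")
def test_unimplemented_feature():
    pass

@pytest.mark.skipif(sys.platform == "win32", reason="requires a POSIX shell")
def test_posix_only_helper():
    pass

@pytest.mark.xfail(reason="known bug, tracked in the issue tracker")
def test_known_bug():
    assert False
```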
###Code
# Inspect the current platform string (e.g. 'linux', 'win32', 'darwin'),
# which is handy when writing skipif conditions
import sys
sys.platform
###Output
_____no_output_____
###Markdown
Parametrization: Combining Tests You saw earlier in this tutorial how `pytest` fixtures can be used to reduce code duplication by extracting common dependencies. Fixtures aren’t quite as useful when you have several tests with slightly different inputs and expected outputs. In these cases, you can [**parametrize**](http://doc.pytest.org/en/latest/example/parametrize.html) a single test definition, and `pytest` will create variants of the test for you with the parameters you specify. Imagine you’ve written a function to tell if a string is a [palindrome](https://en.wikipedia.org/wiki/Palindrome). An initial set of tests could look like this:
###Code
def test_is_palindrome_empty_string():
assert is_palindrome("")
def test_is_palindrome_single_character():
assert is_palindrome("a")
def test_is_palindrome_mixed_casing():
assert is_palindrome("Bob")
def test_is_palindrome_with_spaces():
assert is_palindrome("Never odd or even")
def test_is_palindrome_with_punctuation():
assert is_palindrome("Do geese see God?")
def test_is_palindrome_not_palindrome():
assert not is_palindrome("abc")
def test_is_palindrome_not_quite():
assert not is_palindrome("abab")
###Output
_____no_output_____
###Markdown
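These tests call an `is_palindrome()` function that is never defined here; a minimal sketch that would make them pass (ignoring case, spaces, and punctuation) might be:

```python
def is_palindrome(text):
    # Keep only letters and digits, lowercased, so spaces,
    # punctuation, and casing do not affect the comparison
    normalized = [ch.lower() for ch in text if ch.isalnum()]
    return normalized == normalized[::-1]
```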
All of these tests except the last two have the same shape:
###Code
def test_is_palindrome_<in some situation>():
assert is_palindrome("<some string>")
###Output
_____no_output_____
###Markdown
You can use `@pytest.mark.parametrize()` to fill in this shape with different values, reducing your test code significantly:
###Code
@pytest.mark.parametrize("palindrome", [
"",
"a",
"Bob",
"Never odd or even",
"Do geese see God?",
])
def test_is_palindrome(palindrome):
assert is_palindrome(palindrome)
@pytest.mark.parametrize("non_palindrome", [
"abc",
"abab",
])
def test_is_palindrome_not_palindrome(non_palindrome):
assert not is_palindrome(non_palindrome)
###Output
_____no_output_____
###Markdown
The first argument to `parametrize()` is a comma-delimited string of parameter names. The second argument is a [list](https://realpython.com/courses/lists-tuples-python/) of either [tuples](https://realpython.com/python-lists-tuples/) or single values that represent the parameter value(s). You could take your parametrization a step further to combine all your tests into one:
###Code
@pytest.mark.parametrize("maybe_palindrome, expected_result", [
("", True),
("a", True),
("Bob", True),
("Never odd or even", True),
("Do geese see God?", True),
("abc", False),
("abab", False),
])
def test_is_palindrome(maybe_palindrome, expected_result):
assert is_palindrome(maybe_palindrome) == expected_result
###Output
_____no_output_____
###Markdown
Even though this shortened your code, it’s important to note that in this case, it didn’t do much to clarify your test code. Use parametrization to separate the test data from the test behavior so that it’s clear what the test is testing! Working with custom markers Marking test functions and selecting them for a runYou can “mark” a test function with custom metadata like this:
###Code
import pytest
@pytest.mark.webtest
def test_send_http():
pass # perform some webtest test for your app
def test_something_quick():
pass
def test_another():
pass
###Output
_____no_output_____
###Markdown
You can then restrict a test run to only run tests marked with webtest:```bashpytest -v -m webtest``` Or the inverse, running all tests except the webtest ones:```bashpytest -v -m "not webtest"``` You can use the `-k` command line option to specify an expression which implements a substring match on the test names instead of the exact match on markers that `-m` provides. This makes it easy to select tests based on their names:The expression matching is now case-insensitive.```bashpytest -v -k http running with the above defined example module```And you can also run all tests except the ones that match the keyword:```bashpytest -k "not send_http" -v``` Or to select “http” and “quick” tests:```bashpytest -k "http or quick" -v``` You can use and, or, not and parentheses. Registering markersRegistering markers for your test suite is simple:```yaml content of pytest.ini[pytest]markers = webtest: mark a test as a webtest. slow: mark test as slow.```Multiple custom markers can be registered, by defining each one in its own line, as shown in above example. Durations Reports: Fighting Slow Tests Each time you switch contexts from implementation code to test code, you incur some [overhead](https://en.wikipedia.org/wiki/Overhead_(computing)). If your tests are slow to begin with, then overhead can cause friction and frustration. You read earlier about using marks to filter out slow tests when you run your suite. If you want to improve the speed of your tests, then it’s useful to know which tests might offer the biggest improvements. `pytest` can automatically record test durations for you and report the top offenders. Use the `--durations` option to the `pytest` command to include a duration report in your test results. `--durations` expects an integer value `n` and will report the slowest `n` number of tests. The output will follow your test results: ```sh$ pytest --durations=33.03s call test_code.py::test_request_read_timeout1.07s call test_code.py::test_request_connection_timeout0.57s call test_code.py::test_database_read======================== 7 passed in 10.06s ==============================``` Each test that shows up in the durations report is a good candidate to speed up because it takes an above-average amount of the total testing time. Be aware that some tests may have an invisible setup overhead. You read earlier about how the first test marked with `django_db` will trigger the creation of the Django test database. The `durations` report reflects the time it takes to set up the database in the test that triggered the database creation, which can be misleading. Useful `pytest` Plugins You learned about a few valuable `pytest` plugins earlier in this tutorial. You can explore those and a few others in more depth below. `pytest-randomly` [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly) does something seemingly simple but with valuable effect: It forces your tests to run in a random order. `pytest` always collects all the tests it can find before running them, so `pytest-randomly` shuffles that list of tests just before execution. This is a great way to uncover tests that depend on running in a specific order, which means they have a **stateful dependency** on some other test. If you built your test suite from scratch in `pytest`, then this isn’t very likely. It’s more likely to happen in test suites that you migrate to `pytest`. Pytest will automatically find the plugin and use it when you run pytest. 
The output will start with an extra line that tells you the random seed that is being used:```bash$ pytest...platform darwin -- Python 3.7.2, pytest-4.3.1, py-1.8.0, pluggy-0.9.0Using --randomly-seed=1553614239...``` The plugin will print a seed value in the configuration description. You can use that value to run the tests in the same order as you try to fix the issue.```bashpytest --randomly-seed=1234``` Or more conveniently, use the special value last:```bashpytest --randomly-seed=last``` The plugin appears to Pytest with the name ‘randomly’. To disable it altogether, you can use the `-p` argument, for example:```bashpytest -p no:randomly``` Generally, `-p name` loads given plugin module name. To avoid loading of plugins, you should use the `no:` prefix, e.g. `no:doctest`, `no:randomly`. `pytest-cov` If you measure how well your tests cover your implementation code, you likely use the [coverage](https://coverage.readthedocs.io/) package. [`pytest-cov`](https://pytest-cov.readthedocs.io/en/latest/) integrates coverage, so you can run `pytest --cov` to see the test coverage report.```bashpytest --cov=myproj tests/``` `pytest-django` [`pytest-django`](https://pytest-django.readthedocs.io/en/latest/) provides a handful of useful fixtures and marks for dealing with Django tests. You saw the `django_db` mark earlier in this tutorial, and the `rf` fixture provides direct access to an instance of Django’s [`RequestFactory`](https://docs.djangoproject.com/en/3.0/topics/testing/advanced/django.test.RequestFactory). The `settings` fixture provides a quick way to set or override Django settings. This is a great boost to your Django testing productivity! If you’re interested in learning more about using `pytest` with Django, then check out [How to Provide Test Fixtures for Django Models in Pytest](https://realpython.com/django-pytest-fixtures/). You can see which other plugins are available for `pytest` with this extensive [list of third-party plugins](http://plugincompat.herokuapp.com/). Example of All Pytest Possible Outputs
###Code
# content of test_example.py
import pytest
@pytest.fixture
def error_fixture():
assert 0
def test_ok():
print("ok")
def test_fail():
assert 0
def test_error(error_fixture):
pass
def test_skip():
pytest.skip("skipping this test")
def test_xfail():
pytest.xfail("xfailing this test")
@pytest.mark.xfail(reason="always xfail")
def test_xpass():
pass
###Output
_____no_output_____ |
Python Data Tutorials/MachineLearning/Full_Nlp_Pipeline.ipynb | ###Markdown
Imports Always First
###Code
import os
import nltk
import numpy as np  # needed by main() for np.unique
import pandas as pd
import re
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV
os.listdir('..\data')
###Output
_____no_output_____
###Markdown
Data Extract
###Code
def extract_data():
df = pd.read_csv('..\data\Corporate-messaging-DFE.csv', encoding='latin-1')
df = df[(df["category:confidence"] == 1) & (df['category'] != 'Exclude')]
X = df.text.values
y = df.category.values
return X, y
###Output
_____no_output_____
###Markdown
Custom Tokenizer
###Code
def tokenize(text):
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
###Output
_____no_output_____
###Markdown
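A quick sanity check of the tokenizer on a made-up sentence (illustrative only):

```python
# URLs are swapped for "urlplaceholder" before tokenizing and lemmatizing
tokenize("Thanks for the update! Details at https://example.com/report")
# Expected to return something like:
# ['thanks', 'for', 'the', 'update', '!', 'details', 'at', 'urlplaceholder']
```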
Pipeline + Feature UnionFor a feature union to work, the machine learning estimator needs to sit at the same pipeline level as the FeatureUnion (as a sibling step, not inside it). Any custom transformations need to be added to the transformer_list.
###Code
class SentLen(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
return pd.DataFrame(pd.Series(X).apply(len))
def main():
X, y = extract_data()
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline = Pipeline(
steps=[
('features', FeatureUnion(
transformer_list=[
('text_pipeline', Pipeline(
steps=[
('vect', CountVectorizer()),
('tfidf', TfidfTransformer())
])),
('sentlen', SentLen())
])),
('clf', RandomForestClassifier(n_estimators=100))
])
parameters = {
'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),
'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),
'features__text_pipeline__vect__max_features': (None, 5000, 10000),
'features__text_pipeline__tfidf__use_idf': (True, False),
'clf__n_estimators': [10, 100, 200],
'clf__min_samples_split': [2, 3, 4],
'features__transformer_weights': (
{'text_pipeline': 1, 'sentlen': 0.5},
{'text_pipeline': 0.5, 'sentlen': 1},
{'text_pipeline': 0.8, 'sentlen': 1})
}
cv = GridSearchCV(pipeline, parameters)
cv.fit(X_train, y_train)
y_pred = cv.predict(X_test)
labels = np.unique(y_pred)
index_labels = {i: x for i, x in enumerate(labels)}
display(pd.DataFrame(confusion_matrix(y_test, y_pred, labels=labels), columns=labels).rename(index=index_labels))
print(f"Prediction Score: {(y_pred == y_test).mean():2.2%}")
print("\nBest Parameters: ", cv.best_params_)
###Output
_____no_output_____
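###Markdown
As a minimal, self-contained sketch of the nesting described above (illustrative only: the two toy documents and the small forest are not part of the real workflow), note that the classifier step sits at the same pipeline level as the `FeatureUnion`, while each feature branch lives inside the union's `transformer_list`.
###Code
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
toy_pipeline = Pipeline(steps=[
    ('features', FeatureUnion(transformer_list=[
        ('counts', CountVectorizer()),                              # branch 1: raw token counts
        ('tfidf', Pipeline(steps=[('vect', CountVectorizer()),
                                  ('tfidf', TfidfTransformer())]))  # branch 2: tf-idf weights
    ])),
    ('clf', RandomForestClassifier(n_estimators=10))  # same level as 'features', never inside the union
])
toy_pipeline.fit(["great product launch", "terrible customer service"], [1, 0])
toy_pipeline.predict(["great service"])
###Output
_____no_output_____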
###Markdown
Evaluation
###Code
main()
###Output
D:\ProgramData\Anaconda3\lib\site-packages\sklearn\model_selection\_split.py:2053: FutureWarning: You should specify a value for 'cv' instead of relying on the default value. The default value will change from 3 to 5 in version 0.22.
warnings.warn(CV_WARNING, FutureWarning)
|
7-Day-Working-with-External-Libraries/working-with-external-libraries.ipynb | ###Markdown
In this tutorial, you will learn about **imports** in Python, get some tips for working with unfamiliar libraries (and the objects they return), and dig into **operator overloading**. ImportsSo far we've talked about types and functions that are built into the language. But one of the best things about Python (especially if you're a data scientist) is the vast number of high-quality custom libraries that have been written for it. Some of these libraries are in the "standard library", meaning you can find them anywhere you run Python. Other libraries can be easily added, even if they aren't always shipped with Python.Either way, we'll access this code with **imports**.We'll start our example by importing `math` from the standard library.
###Code
import math
print("It's math! It has type {}".format(type(math)))
###Output
It's math! It has type <class 'module'>
###Markdown
`math` is a module. A module is just a collection of variables (a *namespace*, if you like) defined by someone else. We can see all the names in `math` using the built-in function `dir()`.
###Code
print(dir(math))
###Output
['__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2', 'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'tau', 'trunc']
###Markdown
We can access these variables using dot syntax. Some of them refer to simple values, like `math.pi`:
###Code
print("pi to 4 significant digits = {:.4}".format(math.pi))
###Output
pi to 4 significant digits = 3.142
###Markdown
But most of what we'll find in the module are functions, like `math.log`:
###Code
math.log(32, 2)
###Output
_____no_output_____
###Markdown
Of course, if we don't know what `math.log` does, we can call `help()` on it:
###Code
help(math.log)
###Output
Help on built-in function log in module math:
log(...)
log(x, [base=math.e])
Return the logarithm of x to the given base.
If the base not specified, returns the natural logarithm (base e) of x.
###Markdown
We can also call `help()` on the module itself. This will give us the combined documentation for *all* the functions and values in the module (as well as a high-level description of the module). Click the "output" button to see the whole `math` help page.
###Code
help(math)
###Output
Help on module math:
NAME
math
MODULE REFERENCE
https://docs.python.org/3.7/library/math
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
DESCRIPTION
This module provides access to the mathematical functions
defined by the C standard.
FUNCTIONS
acos(x, /)
Return the arc cosine (measured in radians) of x.
acosh(x, /)
Return the inverse hyperbolic cosine of x.
asin(x, /)
Return the arc sine (measured in radians) of x.
asinh(x, /)
Return the inverse hyperbolic sine of x.
atan(x, /)
Return the arc tangent (measured in radians) of x.
atan2(y, x, /)
Return the arc tangent (measured in radians) of y/x.
Unlike atan(y/x), the signs of both x and y are considered.
atanh(x, /)
Return the inverse hyperbolic tangent of x.
ceil(x, /)
Return the ceiling of x as an Integral.
This is the smallest integer >= x.
copysign(x, y, /)
Return a float with the magnitude (absolute value) of x but the sign of y.
On platforms that support signed zeros, copysign(1.0, -0.0)
returns -1.0.
cos(x, /)
Return the cosine of x (measured in radians).
cosh(x, /)
Return the hyperbolic cosine of x.
degrees(x, /)
Convert angle x from radians to degrees.
erf(x, /)
Error function at x.
erfc(x, /)
Complementary error function at x.
exp(x, /)
Return e raised to the power of x.
expm1(x, /)
Return exp(x)-1.
This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x.
fabs(x, /)
Return the absolute value of the float x.
factorial(x, /)
Find x!.
Raise a ValueError if x is negative or non-integral.
floor(x, /)
Return the floor of x as an Integral.
This is the largest integer <= x.
fmod(x, y, /)
Return fmod(x, y), according to platform C.
x % y may differ.
frexp(x, /)
Return the mantissa and exponent of x, as pair (m, e).
m is a float and e is an int, such that x = m * 2.**e.
If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.
fsum(seq, /)
Return an accurate floating point sum of values in the iterable seq.
Assumes IEEE-754 floating point arithmetic.
gamma(x, /)
Gamma function at x.
gcd(x, y, /)
greatest common divisor of x and y
hypot(x, y, /)
Return the Euclidean distance, sqrt(x*x + y*y).
isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0)
Determine whether two floating point numbers are close in value.
rel_tol
maximum difference for being considered "close", relative to the
magnitude of the input values
abs_tol
maximum difference for being considered "close", regardless of the
magnitude of the input values
Return True if a is close in value to b, and False otherwise.
For the values to be considered close, the difference between them
must be smaller than at least one of the tolerances.
-inf, inf and NaN behave similarly to the IEEE 754 Standard. That
is, NaN is not close to anything, even itself. inf and -inf are
only close to themselves.
isfinite(x, /)
Return True if x is neither an infinity nor a NaN, and False otherwise.
isinf(x, /)
Return True if x is a positive or negative infinity, and False otherwise.
isnan(x, /)
Return True if x is a NaN (not a number), and False otherwise.
ldexp(x, i, /)
Return x * (2**i).
This is essentially the inverse of frexp().
lgamma(x, /)
Natural logarithm of absolute value of Gamma function at x.
log(...)
log(x, [base=math.e])
Return the logarithm of x to the given base.
If the base not specified, returns the natural logarithm (base e) of x.
log10(x, /)
Return the base 10 logarithm of x.
log1p(x, /)
Return the natural logarithm of 1+x (base e).
The result is computed in a way which is accurate for x near zero.
log2(x, /)
Return the base 2 logarithm of x.
modf(x, /)
Return the fractional and integer parts of x.
Both results carry the sign of x and are floats.
pow(x, y, /)
Return x**y (x to the power of y).
radians(x, /)
Convert angle x from degrees to radians.
remainder(x, y, /)
Difference between x and the closest integer multiple of y.
Return x - n*y where n*y is the closest integer multiple of y.
In the case where x is exactly halfway between two multiples of
y, the nearest even value of n is used. The result is always exact.
sin(x, /)
Return the sine of x (measured in radians).
sinh(x, /)
Return the hyperbolic sine of x.
sqrt(x, /)
Return the square root of x.
tan(x, /)
Return the tangent of x (measured in radians).
tanh(x, /)
Return the hyperbolic tangent of x.
trunc(x, /)
Truncates the Real x to the nearest Integral toward 0.
Uses the __trunc__ magic method.
DATA
e = 2.718281828459045
inf = inf
nan = nan
pi = 3.141592653589793
tau = 6.283185307179586
FILE
/opt/conda/lib/python3.7/lib-dynload/math.cpython-37m-x86_64-linux-gnu.so
###Markdown
Other import syntaxIf we know we'll be using functions in `math` frequently we can import it under a shorter alias to save some typing (though in this case "math" is already pretty short).
###Code
import math as mt
mt.pi
###Output
_____no_output_____
###Markdown
> You may have seen code that does this with certain popular libraries like Pandas, Numpy, Tensorflow, or Matplotlib. For example, it's a common convention to `import numpy as np` and `import pandas as pd`. The `as` simply renames the imported module. It's equivalent to doing something like:
###Code
import math
mt = math
###Output
_____no_output_____
###Markdown
Wouldn't it be great if we could refer to all the variables in the `math` module by themselves? i.e. if we could just refer to `pi` instead of `math.pi` or `mt.pi`? Good news: we can do that.
###Code
from math import *
print(pi, log(32, 2))
###Output
3.141592653589793 5.0
###Markdown
`import *` makes all the module's variables directly accessible to you (without any dotted prefix).Bad news: some purists might grumble at you for doing this.Worse: they kind of have a point.
###Code
from math import *
from numpy import *
print(pi, log(32, 2))
###Output
_____no_output_____
###Markdown
What has happened? It worked before!These kinds of "star imports" can occasionally lead to weird, difficult-to-debug situations.The problem in this case is that the `math` and `numpy` modules both have functions called `log`, but they have different semantics. Because we import from `numpy` second, its `log` overwrites (or "shadows") the `log` variable we imported from `math`.A good compromise is to import only the specific things we'll need from each module:
###Code
from math import log, pi
from numpy import asarray
###Output
_____no_output_____
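###Markdown
To make the clash concrete, here is a small illustrative sketch (not part of the original lesson): `math.log` takes the logarithm base as its second argument, while the second positional argument of `numpy.log` is an output array, so `numpy.log(32, 2)` raises a `TypeError` (the exact message depends on your NumPy version).
###Code
import math
import numpy
print(math.log(32, 2))   # 5.0 -- the second argument is the base
print(numpy.log(32))     # natural logarithm only; numpy.log(32, 2) would raise a TypeError
###Output
_____no_output_____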
###Markdown
SubmodulesWe've seen that modules contain variables which can refer to functions or values. Something to be aware of is that they can also have variables referring to *other modules*.
###Code
import numpy
print("numpy.random is a", type(numpy.random))
print("it contains names such as...",
dir(numpy.random)[-15:]
)
###Output
numpy.random is a <class 'module'>
it contains names such as... ['seed', 'set_state', 'shuffle', 'standard_cauchy', 'standard_exponential', 'standard_gamma', 'standard_normal', 'standard_t', 'test', 'triangular', 'uniform', 'vonmises', 'wald', 'weibull', 'zipf']
###Markdown
So if we import `numpy` as above, then calling a function in the `random` "submodule" will require *two* dots.
###Code
# Roll 10 dice (randint's `high` bound is exclusive, so high=7 is needed to allow sixes)
rolls = numpy.random.randint(low=1, high=7, size=10)
rolls
###Output
_____no_output_____
###Markdown
Oh the places you'll go, oh the objects you'll seeSo after 6 lessons, you're a pro with ints, floats, bools, lists, strings, and dicts (right?). Even if that were true, it doesn't end there. As you work with various libraries for specialized tasks, you'll find that they define their own types which you'll have to learn to work with. For example, if you work with the graphing library `matplotlib`, you'll be coming into contact with objects it defines which represent Subplots, Figures, TickMarks, and Annotations. `pandas` functions will give you DataFrames and Series. In this section, I want to share with you a quick survival guide for working with strange types. Three tools for understanding strange objectsIn the cell above, we saw that calling a `numpy` function gave us an "array". We've never seen anything like this before (not in this course anyways). But don't panic: we have three familiar builtin functions to help us here.**1: `type()`** (what is this thing?)
###Code
type(rolls)
###Output
_____no_output_____
###Markdown
**2: `dir()`** (what can I do with it?)
###Code
print(dir(rolls))
# If I want the average roll, the "mean" method looks promising...
rolls.mean()
# Or maybe I just want to turn the array into a list, in which case I can use "tolist"
rolls.tolist()
###Output
_____no_output_____
###Markdown
**3: `help()`** (tell me more)
###Code
# That "ravel" attribute sounds interesting. I'm a big classical music fan.
help(rolls.ravel)
# Okay, just tell me everything there is to know about numpy.ndarray
# (Click the "output" button to see the novel-length output)
help(rolls)
###Output
Help on ndarray object:
class ndarray(builtins.object)
| ndarray(shape, dtype=float, buffer=None, offset=0,
| strides=None, order=None)
|
| An array object represents a multidimensional, homogeneous array
| of fixed-size items. An associated data-type object describes the
| format of each element in the array (its byte-order, how many bytes it
| occupies in memory, whether it is an integer, a floating point number,
| or something else, etc.)
|
| Arrays should be constructed using `array`, `zeros` or `empty` (refer
| to the See Also section below). The parameters given here refer to
| a low-level method (`ndarray(...)`) for instantiating an array.
|
| For more information, refer to the `numpy` module and examine the
| methods and attributes of an array.
|
| Parameters
| ----------
| (for the __new__ method; see Notes below)
|
| shape : tuple of ints
| Shape of created array.
| dtype : data-type, optional
| Any object that can be interpreted as a numpy data type.
| buffer : object exposing buffer interface, optional
| Used to fill the array with data.
| offset : int, optional
| Offset of array data in buffer.
| strides : tuple of ints, optional
| Strides of data in memory.
| order : {'C', 'F'}, optional
| Row-major (C-style) or column-major (Fortran-style) order.
|
| Attributes
| ----------
| T : ndarray
| Transpose of the array.
| data : buffer
| The array's elements, in memory.
| dtype : dtype object
| Describes the format of the elements in the array.
| flags : dict
| Dictionary containing information related to memory use, e.g.,
| 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
| flat : numpy.flatiter object
| Flattened version of the array as an iterator. The iterator
| allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
| assignment examples; TODO).
| imag : ndarray
| Imaginary part of the array.
| real : ndarray
| Real part of the array.
| size : int
| Number of elements in the array.
| itemsize : int
| The memory use of each array element in bytes.
| nbytes : int
| The total number of bytes required to store the array data,
| i.e., ``itemsize * size``.
| ndim : int
| The array's number of dimensions.
| shape : tuple of ints
| Shape of the array.
| strides : tuple of ints
| The step-size required to move from one element to the next in
| memory. For example, a contiguous ``(3, 4)`` array of type
| ``int16`` in C-order has strides ``(8, 2)``. This implies that
| to move from element to element in memory requires jumps of 2 bytes.
| To move from row-to-row, one needs to jump 8 bytes at a time
| (``2 * 4``).
| ctypes : ctypes object
| Class containing properties of the array needed for interaction
| with ctypes.
| base : ndarray
| If the array is a view into another array, that array is its `base`
| (unless that array is also a view). The `base` array is where the
| array data is actually stored.
|
| See Also
| --------
| array : Construct an array.
| zeros : Create an array, each element of which is zero.
| empty : Create an array, but leave its allocated memory unchanged (i.e.,
| it contains "garbage").
| dtype : Create a data-type.
|
| Notes
| -----
| There are two modes of creating an array using ``__new__``:
|
| 1. If `buffer` is None, then only `shape`, `dtype`, and `order`
| are used.
| 2. If `buffer` is an object exposing the buffer interface, then
| all keywords are interpreted.
|
| No ``__init__`` method is needed because the array is fully initialized
| after the ``__new__`` method.
|
| Examples
| --------
| These examples illustrate the low-level `ndarray` constructor. Refer
| to the `See Also` section above for easier ways of constructing an
| ndarray.
|
| First mode, `buffer` is None:
|
| >>> np.ndarray(shape=(2,2), dtype=float, order='F')
| array([[0.0e+000, 0.0e+000], # random
| [ nan, 2.5e-323]])
|
| Second mode:
|
| >>> np.ndarray((2,), buffer=np.array([1,2,3]),
| ... offset=np.int_().itemsize,
| ... dtype=int) # offset = 1*itemsize, i.e. skip first element
| array([2, 3])
|
| Methods defined here:
|
| __abs__(self, /)
| abs(self)
|
| __add__(self, value, /)
| Return self+value.
|
| __and__(self, value, /)
| Return self&value.
|
| __array__(...)
| a.__array__([dtype], /) -> reference if type unchanged, copy otherwise.
|
| Returns either a new reference to self if dtype is not given or a new array
| of provided data type if dtype is different from the current dtype of the
| array.
|
| __array_function__(...)
|
| __array_prepare__(...)
| a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
|
| __array_ufunc__(...)
|
| __array_wrap__(...)
| a.__array_wrap__(obj) -> Object of same type as ndarray object a.
|
| __bool__(self, /)
| self != 0
|
| __complex__(...)
|
| __contains__(self, key, /)
| Return key in self.
|
| __copy__(...)
| a.__copy__()
|
| Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
|
| Equivalent to ``a.copy(order='K')``.
|
| __deepcopy__(...)
| a.__deepcopy__(memo, /) -> Deep copy of array.
|
| Used if :func:`copy.deepcopy` is called on an array.
|
| __delitem__(self, key, /)
| Delete self[key].
|
| __divmod__(self, value, /)
| Return divmod(self, value).
|
| __eq__(self, value, /)
| Return self==value.
|
| __float__(self, /)
| float(self)
|
| __floordiv__(self, value, /)
| Return self//value.
|
| __format__(...)
| Default object formatter.
|
| __ge__(self, value, /)
| Return self>=value.
|
| __getitem__(self, key, /)
| Return self[key].
|
| __gt__(self, value, /)
| Return self>value.
|
| __iadd__(self, value, /)
| Return self+=value.
|
| __iand__(self, value, /)
| Return self&=value.
|
| __ifloordiv__(self, value, /)
| Return self//=value.
|
| __ilshift__(self, value, /)
| Return self<<=value.
|
| __imatmul__(self, value, /)
| Return self@=value.
|
| __imod__(self, value, /)
| Return self%=value.
|
| __imul__(self, value, /)
| Return self*=value.
|
| __index__(self, /)
| Return self converted to an integer, if self is suitable for use as an index into a list.
|
| __int__(self, /)
| int(self)
|
| __invert__(self, /)
| ~self
|
| __ior__(self, value, /)
| Return self|=value.
|
| __ipow__(self, value, /)
| Return self**=value.
|
| __irshift__(self, value, /)
| Return self>>=value.
|
| __isub__(self, value, /)
| Return self-=value.
|
| __iter__(self, /)
| Implement iter(self).
|
| __itruediv__(self, value, /)
| Return self/=value.
|
| __ixor__(self, value, /)
| Return self^=value.
|
| __le__(self, value, /)
| Return self<=value.
|
| __len__(self, /)
| Return len(self).
|
| __lshift__(self, value, /)
| Return self<<value.
|
| __lt__(self, value, /)
| Return self<value.
|
| __matmul__(self, value, /)
| Return self@value.
|
| __mod__(self, value, /)
| Return self%value.
|
| __mul__(self, value, /)
| Return self*value.
|
| __ne__(self, value, /)
| Return self!=value.
|
| __neg__(self, /)
| -self
|
| __or__(self, value, /)
| Return self|value.
|
| __pos__(self, /)
| +self
|
| __pow__(self, value, mod=None, /)
| Return pow(self, value, mod).
|
| __radd__(self, value, /)
| Return value+self.
|
| __rand__(self, value, /)
| Return value&self.
|
| __rdivmod__(self, value, /)
| Return divmod(value, self).
|
| __reduce__(...)
| a.__reduce__()
|
| For pickling.
|
| __reduce_ex__(...)
| Helper for pickle.
|
| __repr__(self, /)
| Return repr(self).
|
| __rfloordiv__(self, value, /)
| Return value//self.
|
| __rlshift__(self, value, /)
| Return value<<self.
|
| __rmatmul__(self, value, /)
| Return value@self.
|
| __rmod__(self, value, /)
| Return value%self.
|
| __rmul__(self, value, /)
| Return value*self.
|
| __ror__(self, value, /)
| Return value|self.
|
| __rpow__(self, value, mod=None, /)
| Return pow(value, self, mod).
|
| __rrshift__(self, value, /)
| Return value>>self.
|
| __rshift__(self, value, /)
| Return self>>value.
|
| __rsub__(self, value, /)
| Return value-self.
|
| __rtruediv__(self, value, /)
| Return value/self.
|
| __rxor__(self, value, /)
| Return value^self.
|
| __setitem__(self, key, value, /)
| Set self[key] to value.
|
| __setstate__(...)
| a.__setstate__(state, /)
|
| For unpickling.
|
| The `state` argument must be a sequence that contains the following
| elements:
|
| Parameters
| ----------
| version : int
| optional pickle version. If omitted defaults to 0.
| shape : tuple
| dtype : data-type
| isFortran : bool
| rawdata : string or list
| a binary string with the data (or a list if 'a' is an object array)
|
| __sizeof__(...)
| Size of object in memory, in bytes.
|
| __str__(self, /)
| Return str(self).
|
| __sub__(self, value, /)
| Return self-value.
|
| __truediv__(self, value, /)
| Return self/value.
|
| __xor__(self, value, /)
| Return self^value.
|
| all(...)
| a.all(axis=None, out=None, keepdims=False)
|
| Returns True if all elements evaluate to True.
|
| Refer to `numpy.all` for full documentation.
|
| See Also
| --------
| numpy.all : equivalent function
|
| any(...)
| a.any(axis=None, out=None, keepdims=False)
|
| Returns True if any of the elements of `a` evaluate to True.
|
| Refer to `numpy.any` for full documentation.
|
| See Also
| --------
| numpy.any : equivalent function
|
| argmax(...)
| a.argmax(axis=None, out=None)
|
| Return indices of the maximum values along the given axis.
|
| Refer to `numpy.argmax` for full documentation.
|
| See Also
| --------
| numpy.argmax : equivalent function
|
| argmin(...)
| a.argmin(axis=None, out=None)
|
| Return indices of the minimum values along the given axis of `a`.
|
| Refer to `numpy.argmin` for detailed documentation.
|
| See Also
| --------
| numpy.argmin : equivalent function
|
| argpartition(...)
| a.argpartition(kth, axis=-1, kind='introselect', order=None)
|
| Returns the indices that would partition this array.
|
| Refer to `numpy.argpartition` for full documentation.
|
| .. versionadded:: 1.8.0
|
| See Also
| --------
| numpy.argpartition : equivalent function
|
| argsort(...)
| a.argsort(axis=-1, kind=None, order=None)
|
| Returns the indices that would sort this array.
|
| Refer to `numpy.argsort` for full documentation.
|
| See Also
| --------
| numpy.argsort : equivalent function
|
| astype(...)
| a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
|
| Copy of the array, cast to a specified type.
|
| Parameters
| ----------
| dtype : str or dtype
| Typecode or data-type to which the array is cast.
| order : {'C', 'F', 'A', 'K'}, optional
| Controls the memory layout order of the result.
| 'C' means C order, 'F' means Fortran order, 'A'
| means 'F' order if all the arrays are Fortran contiguous,
| 'C' order otherwise, and 'K' means as close to the
| order the array elements appear in memory as possible.
| Default is 'K'.
| casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
| Controls what kind of data casting may occur. Defaults to 'unsafe'
| for backwards compatibility.
|
| * 'no' means the data types should not be cast at all.
| * 'equiv' means only byte-order changes are allowed.
| * 'safe' means only casts which can preserve values are allowed.
| * 'same_kind' means only safe casts or casts within a kind,
| like float64 to float32, are allowed.
| * 'unsafe' means any data conversions may be done.
| subok : bool, optional
| If True, then sub-classes will be passed-through (default), otherwise
| the returned array will be forced to be a base-class array.
| copy : bool, optional
| By default, astype always returns a newly allocated array. If this
| is set to false, and the `dtype`, `order`, and `subok`
| requirements are satisfied, the input array is returned instead
| of a copy.
|
| Returns
| -------
| arr_t : ndarray
| Unless `copy` is False and the other conditions for returning the input
| array are satisfied (see description for `copy` input parameter), `arr_t`
| is a new array of the same shape as the input array, with dtype, order
| given by `dtype`, `order`.
|
| Notes
| -----
| .. versionchanged:: 1.17.0
| Casting between a simple data type and a structured one is possible only
| for "unsafe" casting. Casting to multiple fields is allowed, but
| casting from multiple fields is not.
|
| .. versionchanged:: 1.9.0
| Casting from numeric to string types in 'safe' casting mode requires
| that the string dtype length is long enough to store the max
| integer/float value converted.
|
| Raises
| ------
| ComplexWarning
| When casting from complex to float or int. To avoid this,
| one should use ``a.real.astype(t)``.
|
| Examples
| --------
| >>> x = np.array([1, 2, 2.5])
| >>> x
| array([1. , 2. , 2.5])
|
| >>> x.astype(int)
| array([1, 2, 2])
|
| byteswap(...)
| a.byteswap(inplace=False)
|
| Swap the bytes of the array elements
|
| Toggle between low-endian and big-endian data representation by
| returning a byteswapped array, optionally swapped in-place.
| Arrays of byte-strings are not swapped. The real and imaginary
| parts of a complex number are swapped individually.
|
| Parameters
| ----------
| inplace : bool, optional
| If ``True``, swap bytes in-place, default is ``False``.
|
| Returns
| -------
| out : ndarray
| The byteswapped array. If `inplace` is ``True``, this is
| a view to self.
|
| Examples
| --------
| >>> A = np.array([1, 256, 8755], dtype=np.int16)
| >>> list(map(hex, A))
| ['0x1', '0x100', '0x2233']
| >>> A.byteswap(inplace=True)
| array([ 256, 1, 13090], dtype=int16)
| >>> list(map(hex, A))
| ['0x100', '0x1', '0x3322']
|
| Arrays of byte-strings are not swapped
|
| >>> A = np.array([b'ceg', b'fac'])
| >>> A.byteswap()
| array([b'ceg', b'fac'], dtype='|S3')
|
| ``A.newbyteorder().byteswap()`` produces an array with the same values
| but different representation in memory
|
| >>> A = np.array([1, 2, 3])
| >>> A.view(np.uint8)
| array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
| 0, 0], dtype=uint8)
| >>> A.newbyteorder().byteswap(inplace=True)
| array([1, 2, 3])
| >>> A.view(np.uint8)
| array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
| 0, 3], dtype=uint8)
|
| choose(...)
| a.choose(choices, out=None, mode='raise')
|
| Use an index array to construct a new array from a set of choices.
|
| Refer to `numpy.choose` for full documentation.
|
| See Also
| --------
| numpy.choose : equivalent function
|
| clip(...)
| a.clip(min=None, max=None, out=None, **kwargs)
|
| Return an array whose values are limited to ``[min, max]``.
| One of max or min must be given.
|
| Refer to `numpy.clip` for full documentation.
|
| See Also
| --------
| numpy.clip : equivalent function
|
| compress(...)
| a.compress(condition, axis=None, out=None)
|
| Return selected slices of this array along given axis.
|
| Refer to `numpy.compress` for full documentation.
|
| See Also
| --------
| numpy.compress : equivalent function
|
| conj(...)
| a.conj()
|
| Complex-conjugate all elements.
|
| Refer to `numpy.conjugate` for full documentation.
|
| See Also
| --------
| numpy.conjugate : equivalent function
|
| conjugate(...)
| a.conjugate()
|
| Return the complex conjugate, element-wise.
|
| Refer to `numpy.conjugate` for full documentation.
|
| See Also
| --------
| numpy.conjugate : equivalent function
|
| copy(...)
| a.copy(order='C')
|
| Return a copy of the array.
|
| Parameters
| ----------
| order : {'C', 'F', 'A', 'K'}, optional
| Controls the memory layout of the copy. 'C' means C-order,
| 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
| 'C' otherwise. 'K' means match the layout of `a` as closely
| as possible. (Note that this function and :func:`numpy.copy` are very
| similar, but have different default values for their order=
| arguments.)
|
| See also
| --------
| numpy.copy
| numpy.copyto
|
| Examples
| --------
| >>> x = np.array([[1,2,3],[4,5,6]], order='F')
|
| >>> y = x.copy()
|
| >>> x.fill(0)
|
| >>> x
| array([[0, 0, 0],
| [0, 0, 0]])
|
| >>> y
| array([[1, 2, 3],
| [4, 5, 6]])
|
| >>> y.flags['C_CONTIGUOUS']
| True
|
| cumprod(...)
| a.cumprod(axis=None, dtype=None, out=None)
|
| Return the cumulative product of the elements along the given axis.
|
| Refer to `numpy.cumprod` for full documentation.
|
| See Also
| --------
| numpy.cumprod : equivalent function
|
| cumsum(...)
| a.cumsum(axis=None, dtype=None, out=None)
|
| Return the cumulative sum of the elements along the given axis.
|
| Refer to `numpy.cumsum` for full documentation.
|
| See Also
| --------
| numpy.cumsum : equivalent function
|
| diagonal(...)
| a.diagonal(offset=0, axis1=0, axis2=1)
|
| Return specified diagonals. In NumPy 1.9 the returned array is a
| read-only view instead of a copy as in previous NumPy versions. In
| a future version the read-only restriction will be removed.
|
| Refer to :func:`numpy.diagonal` for full documentation.
|
| See Also
| --------
| numpy.diagonal : equivalent function
|
| dot(...)
| a.dot(b, out=None)
|
| Dot product of two arrays.
|
| Refer to `numpy.dot` for full documentation.
|
| See Also
| --------
| numpy.dot : equivalent function
|
| Examples
| --------
| >>> a = np.eye(2)
| >>> b = np.ones((2, 2)) * 2
| >>> a.dot(b)
| array([[2., 2.],
| [2., 2.]])
|
| This array method can be conveniently chained:
|
| >>> a.dot(b).dot(b)
| array([[8., 8.],
| [8., 8.]])
|
| dump(...)
| a.dump(file)
|
| Dump a pickle of the array to the specified file.
| The array can be read back with pickle.load or numpy.load.
|
| Parameters
| ----------
| file : str or Path
| A string naming the dump file.
|
| .. versionchanged:: 1.17.0
| `pathlib.Path` objects are now accepted.
|
| dumps(...)
| a.dumps()
|
| Returns the pickle of the array as a string.
| pickle.loads or numpy.loads will convert the string back to an array.
|
| Parameters
| ----------
| None
|
| fill(...)
| a.fill(value)
|
| Fill the array with a scalar value.
|
| Parameters
| ----------
| value : scalar
| All elements of `a` will be assigned this value.
|
| Examples
| --------
| >>> a = np.array([1, 2])
| >>> a.fill(0)
| >>> a
| array([0, 0])
| >>> a = np.empty(2)
| >>> a.fill(1)
| >>> a
| array([1., 1.])
|
| flatten(...)
| a.flatten(order='C')
|
| Return a copy of the array collapsed into one dimension.
|
| Parameters
| ----------
| order : {'C', 'F', 'A', 'K'}, optional
| 'C' means to flatten in row-major (C-style) order.
| 'F' means to flatten in column-major (Fortran-
| style) order. 'A' means to flatten in column-major
| order if `a` is Fortran *contiguous* in memory,
| row-major order otherwise. 'K' means to flatten
| `a` in the order the elements occur in memory.
| The default is 'C'.
|
| Returns
| -------
| y : ndarray
| A copy of the input array, flattened to one dimension.
|
| See Also
| --------
| ravel : Return a flattened array.
| flat : A 1-D flat iterator over the array.
|
| Examples
| --------
| >>> a = np.array([[1,2], [3,4]])
| >>> a.flatten()
| array([1, 2, 3, 4])
| >>> a.flatten('F')
| array([1, 3, 2, 4])
|
| getfield(...)
| a.getfield(dtype, offset=0)
|
| Returns a field of the given array as a certain type.
|
| A field is a view of the array data with a given data-type. The values in
| the view are determined by the given type and the offset into the current
| array in bytes. The offset needs to be such that the view dtype fits in the
| array dtype; for example an array of dtype complex128 has 16-byte elements.
| If taking a view with a 32-bit integer (4 bytes), the offset needs to be
| between 0 and 12 bytes.
|
| Parameters
| ----------
| dtype : str or dtype
| The data type of the view. The dtype size of the view can not be larger
| than that of the array itself.
| offset : int
| Number of bytes to skip before beginning the element view.
|
| Examples
| --------
| >>> x = np.diag([1.+1.j]*2)
| >>> x[1, 1] = 2 + 4.j
| >>> x
| array([[1.+1.j, 0.+0.j],
| [0.+0.j, 2.+4.j]])
| >>> x.getfield(np.float64)
| array([[1., 0.],
| [0., 2.]])
|
| By choosing an offset of 8 bytes we can select the complex part of the
| array for our view:
|
| >>> x.getfield(np.float64, offset=8)
| array([[1., 0.],
| [0., 4.]])
|
| item(...)
| a.item(*args)
|
| Copy an element of an array to a standard Python scalar and return it.
|
| Parameters
| ----------
| \*args : Arguments (variable number and type)
|
| * none: in this case, the method only works for arrays
| with one element (`a.size == 1`), which element is
| copied into a standard Python scalar object and returned.
|
| * int_type: this argument is interpreted as a flat index into
| the array, specifying which element to copy and return.
|
| * tuple of int_types: functions as does a single int_type argument,
| except that the argument is interpreted as an nd-index into the
| array.
|
| Returns
| -------
| z : Standard Python scalar object
| A copy of the specified element of the array as a suitable
| Python scalar
|
| Notes
| -----
| When the data type of `a` is longdouble or clongdouble, item() returns
| a scalar array object because there is no available Python scalar that
| would not lose information. Void arrays return a buffer object for item(),
| unless fields are defined, in which case a tuple is returned.
|
| `item` is very similar to a[args], except, instead of an array scalar,
| a standard Python scalar is returned. This can be useful for speeding up
| access to elements of the array and doing arithmetic on elements of the
| array using Python's optimized math.
|
| Examples
| --------
| >>> np.random.seed(123)
| >>> x = np.random.randint(9, size=(3, 3))
| >>> x
| array([[2, 2, 6],
| [1, 3, 6],
| [1, 0, 1]])
| >>> x.item(3)
| 1
| >>> x.item(7)
| 0
| >>> x.item((0, 1))
| 2
| >>> x.item((2, 2))
| 1
|
| itemset(...)
| a.itemset(*args)
|
| Insert scalar into an array (scalar is cast to array's dtype, if possible)
|
| There must be at least 1 argument, and define the last argument
| as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
| than ``a[args] = item``. The item should be a scalar value and `args`
| must select a single item in the array `a`.
|
| Parameters
| ----------
| \*args : Arguments
| If one argument: a scalar, only used in case `a` is of size 1.
| If two arguments: the last argument is the value to be set
| and must be a scalar, the first argument specifies a single array
| element location. It is either an int or a tuple.
|
| Notes
| -----
| Compared to indexing syntax, `itemset` provides some speed increase
| for placing a scalar into a particular location in an `ndarray`,
| if you must do this. However, generally this is discouraged:
| among other problems, it complicates the appearance of the code.
| Also, when using `itemset` (and `item`) inside a loop, be sure
| to assign the methods to a local variable to avoid the attribute
| look-up at each loop iteration.
|
| Examples
| --------
| >>> np.random.seed(123)
| >>> x = np.random.randint(9, size=(3, 3))
| >>> x
| array([[2, 2, 6],
| [1, 3, 6],
| [1, 0, 1]])
| >>> x.itemset(4, 0)
| >>> x.itemset((2, 2), 9)
| >>> x
| array([[2, 2, 6],
| [1, 0, 6],
| [1, 0, 9]])
|
| max(...)
| a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
|
| Return the maximum along a given axis.
|
| Refer to `numpy.amax` for full documentation.
|
| See Also
| --------
| numpy.amax : equivalent function
|
| mean(...)
| a.mean(axis=None, dtype=None, out=None, keepdims=False)
|
| Returns the average of the array elements along given axis.
|
| Refer to `numpy.mean` for full documentation.
|
| See Also
| --------
| numpy.mean : equivalent function
|
| min(...)
| a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
|
| Return the minimum along a given axis.
|
| Refer to `numpy.amin` for full documentation.
|
| See Also
| --------
| numpy.amin : equivalent function
|
| newbyteorder(...)
| arr.newbyteorder(new_order='S')
|
| Return the array with the same data viewed with a different byte order.
|
| Equivalent to::
|
| arr.view(arr.dtype.newbytorder(new_order))
|
| Changes are also made in all fields and sub-arrays of the array data
| type.
|
|
|
| Parameters
| ----------
| new_order : string, optional
| Byte order to force; a value from the byte order specifications
| below. `new_order` codes can be any of:
|
| * 'S' - swap dtype from current to opposite endian
| * {'<', 'L'} - little endian
| * {'>', 'B'} - big endian
| * {'=', 'N'} - native order
| * {'|', 'I'} - ignore (no change to byte order)
|
| The default value ('S') results in swapping the current
| byte order. The code does a case-insensitive check on the first
| letter of `new_order` for the alternatives above. For example,
| any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
|
|
| Returns
| -------
| new_arr : array
| New array object with the dtype reflecting given change to the
| byte order.
|
| nonzero(...)
| a.nonzero()
|
| Return the indices of the elements that are non-zero.
|
| Refer to `numpy.nonzero` for full documentation.
|
| See Also
| --------
| numpy.nonzero : equivalent function
|
| partition(...)
| a.partition(kth, axis=-1, kind='introselect', order=None)
|
| Rearranges the elements in the array in such a way that the value of the
| element in kth position is in the position it would be in a sorted array.
| All elements smaller than the kth element are moved before this element and
| all equal or greater are moved behind it. The ordering of the elements in
| the two partitions is undefined.
|
| .. versionadded:: 1.8.0
|
| Parameters
| ----------
| kth : int or sequence of ints
| Element index to partition by. The kth element value will be in its
| final sorted position and all smaller elements will be moved before it
| and all equal or greater elements behind it.
| The order of all elements in the partitions is undefined.
| If provided with a sequence of kth it will partition all elements
| indexed by kth of them into their sorted position at once.
| axis : int, optional
| Axis along which to sort. Default is -1, which means sort along the
| last axis.
| kind : {'introselect'}, optional
| Selection algorithm. Default is 'introselect'.
| order : str or list of str, optional
| When `a` is an array with fields defined, this argument specifies
| which fields to compare first, second, etc. A single field can
| be specified as a string, and not all fields need to be specified,
| but unspecified fields will still be used, in the order in which
| they come up in the dtype, to break ties.
|
| See Also
| --------
| numpy.partition : Return a parititioned copy of an array.
| argpartition : Indirect partition.
| sort : Full sort.
|
| Notes
| -----
| See ``np.partition`` for notes on the different algorithms.
|
| Examples
| --------
| >>> a = np.array([3, 4, 2, 1])
| >>> a.partition(3)
| >>> a
| array([2, 1, 3, 4])
|
| >>> a.partition((1, 3))
| >>> a
| array([1, 2, 3, 4])
|
| prod(...)
| a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
|
| Return the product of the array elements over the given axis
|
| Refer to `numpy.prod` for full documentation.
|
| See Also
| --------
| numpy.prod : equivalent function
|
| ptp(...)
| a.ptp(axis=None, out=None, keepdims=False)
|
| Peak to peak (maximum - minimum) value along a given axis.
|
| Refer to `numpy.ptp` for full documentation.
|
| See Also
| --------
| numpy.ptp : equivalent function
|
| put(...)
| a.put(indices, values, mode='raise')
|
| Set ``a.flat[n] = values[n]`` for all `n` in indices.
|
| Refer to `numpy.put` for full documentation.
|
| See Also
| --------
| numpy.put : equivalent function
|
| ravel(...)
| a.ravel([order])
|
| Return a flattened array.
|
| Refer to `numpy.ravel` for full documentation.
|
| See Also
| --------
| numpy.ravel : equivalent function
|
| ndarray.flat : a flat iterator on the array.
|
| repeat(...)
| a.repeat(repeats, axis=None)
|
| Repeat elements of an array.
|
| Refer to `numpy.repeat` for full documentation.
|
| See Also
| --------
| numpy.repeat : equivalent function
|
| reshape(...)
| a.reshape(shape, order='C')
|
| Returns an array containing the same data with a new shape.
|
| Refer to `numpy.reshape` for full documentation.
|
| See Also
| --------
| numpy.reshape : equivalent function
|
| Notes
| -----
| Unlike the free function `numpy.reshape`, this method on `ndarray` allows
| the elements of the shape parameter to be passed in as separate arguments.
| For example, ``a.reshape(10, 11)`` is equivalent to
| ``a.reshape((10, 11))``.
|
| resize(...)
| a.resize(new_shape, refcheck=True)
|
| Change shape and size of array in-place.
|
| Parameters
| ----------
| new_shape : tuple of ints, or `n` ints
| Shape of resized array.
| refcheck : bool, optional
| If False, reference count will not be checked. Default is True.
|
| Returns
| -------
| None
|
| Raises
| ------
| ValueError
| If `a` does not own its own data or references or views to it exist,
| and the data memory must be changed.
| PyPy only: will always raise if the data memory must be changed, since
| there is no reliable way to determine if references or views to it
| exist.
|
| SystemError
| If the `order` keyword argument is specified. This behaviour is a
| bug in NumPy.
|
| See Also
| --------
| resize : Return a new array with the specified shape.
|
| Notes
| -----
| This reallocates space for the data area if necessary.
|
| Only contiguous arrays (data elements consecutive in memory) can be
| resized.
|
| The purpose of the reference count check is to make sure you
| do not use this array as a buffer for another Python object and then
| reallocate the memory. However, reference counts can increase in
| other ways so if you are sure that you have not shared the memory
| for this array with another Python object, then you may safely set
| `refcheck` to False.
|
| Examples
| --------
| Shrinking an array: array is flattened (in the order that the data are
| stored in memory), resized, and reshaped:
|
| >>> a = np.array([[0, 1], [2, 3]], order='C')
| >>> a.resize((2, 1))
| >>> a
| array([[0],
| [1]])
|
| >>> a = np.array([[0, 1], [2, 3]], order='F')
| >>> a.resize((2, 1))
| >>> a
| array([[0],
| [2]])
|
| Enlarging an array: as above, but missing entries are filled with zeros:
|
| >>> b = np.array([[0, 1], [2, 3]])
| >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
| >>> b
| array([[0, 1, 2],
| [3, 0, 0]])
|
| Referencing an array prevents resizing...
|
| >>> c = a
| >>> a.resize((1, 1))
| Traceback (most recent call last):
| ...
| ValueError: cannot resize an array that references or is referenced ...
|
| Unless `refcheck` is False:
|
| >>> a.resize((1, 1), refcheck=False)
| >>> a
| array([[0]])
| >>> c
| array([[0]])
|
| round(...)
| a.round(decimals=0, out=None)
|
| Return `a` with each element rounded to the given number of decimals.
|
| Refer to `numpy.around` for full documentation.
|
| See Also
| --------
| numpy.around : equivalent function
|
| searchsorted(...)
| a.searchsorted(v, side='left', sorter=None)
|
| Find indices where elements of v should be inserted in a to maintain order.
|
| For full documentation, see `numpy.searchsorted`
|
| See Also
| --------
| numpy.searchsorted : equivalent function
|
| setfield(...)
| a.setfield(val, dtype, offset=0)
|
| Put a value into a specified place in a field defined by a data-type.
|
| Place `val` into `a`'s field defined by `dtype` and beginning `offset`
| bytes into the field.
|
| Parameters
| ----------
| val : object
| Value to be placed in field.
| dtype : dtype object
| Data-type of the field in which to place `val`.
| offset : int, optional
| The number of bytes into the field at which to place `val`.
|
| Returns
| -------
| None
|
| See Also
| --------
| getfield
|
| Examples
| --------
| >>> x = np.eye(3)
| >>> x.getfield(np.float64)
| array([[1., 0., 0.],
| [0., 1., 0.],
| [0., 0., 1.]])
| >>> x.setfield(3, np.int32)
| >>> x.getfield(np.int32)
| array([[3, 3, 3],
| [3, 3, 3],
| [3, 3, 3]], dtype=int32)
| >>> x
| array([[1.0e+000, 1.5e-323, 1.5e-323],
| [1.5e-323, 1.0e+000, 1.5e-323],
| [1.5e-323, 1.5e-323, 1.0e+000]])
| >>> x.setfield(np.eye(3), np.int32)
| >>> x
| array([[1., 0., 0.],
| [0., 1., 0.],
| [0., 0., 1.]])
|
| setflags(...)
| a.setflags(write=None, align=None, uic=None)
|
| Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
| respectively.
|
| These Boolean-valued flags affect how numpy interprets the memory
| area used by `a` (see Notes below). The ALIGNED flag can only
| be set to True if the data is actually aligned according to the type.
| The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
| to True. The flag WRITEABLE can only be set to True if the array owns its
| own memory, or the ultimate owner of the memory exposes a writeable buffer
| interface, or is a string. (The exception for string is made so that
| unpickling can be done without copying memory.)
|
| Parameters
| ----------
| write : bool, optional
| Describes whether or not `a` can be written to.
| align : bool, optional
| Describes whether or not `a` is aligned properly for its type.
| uic : bool, optional
| Describes whether or not `a` is a copy of another "base" array.
|
| Notes
| -----
| Array flags provide information about how the memory area used
| for the array is to be interpreted. There are 7 Boolean flags
| in use, only four of which can be changed by the user:
| WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
|
| WRITEABLE (W) the data area can be written to;
|
| ALIGNED (A) the data and strides are aligned appropriately for the hardware
| (as determined by the compiler);
|
| UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
|
| WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
| by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
| called, the base array will be updated with the contents of this array.
|
| All flags can be accessed using the single (upper case) letter as well
| as the full name.
|
| Examples
| --------
| >>> y = np.array([[3, 1, 7],
| ... [2, 0, 0],
| ... [8, 5, 9]])
| >>> y
| array([[3, 1, 7],
| [2, 0, 0],
| [8, 5, 9]])
| >>> y.flags
| C_CONTIGUOUS : True
| F_CONTIGUOUS : False
| OWNDATA : True
| WRITEABLE : True
| ALIGNED : True
| WRITEBACKIFCOPY : False
| UPDATEIFCOPY : False
| >>> y.setflags(write=0, align=0)
| >>> y.flags
| C_CONTIGUOUS : True
| F_CONTIGUOUS : False
| OWNDATA : True
| WRITEABLE : False
| ALIGNED : False
| WRITEBACKIFCOPY : False
| UPDATEIFCOPY : False
| >>> y.setflags(uic=1)
| Traceback (most recent call last):
| File "<stdin>", line 1, in <module>
| ValueError: cannot set WRITEBACKIFCOPY flag to True
|
| sort(...)
| a.sort(axis=-1, kind=None, order=None)
|
| Sort an array in-place. Refer to `numpy.sort` for full documentation.
|
| Parameters
| ----------
| axis : int, optional
| Axis along which to sort. Default is -1, which means sort along the
| last axis.
| kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
| Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
| and 'mergesort' use timsort under the covers and, in general, the
| actual implementation will vary with datatype. The 'mergesort' option
| is retained for backwards compatibility.
|
| .. versionchanged:: 1.15.0.
| The 'stable' option was added.
|
| order : str or list of str, optional
| When `a` is an array with fields defined, this argument specifies
| which fields to compare first, second, etc. A single field can
| be specified as a string, and not all fields need be specified,
| but unspecified fields will still be used, in the order in which
| they come up in the dtype, to break ties.
|
| See Also
| --------
| numpy.sort : Return a sorted copy of an array.
| numpy.argsort : Indirect sort.
| numpy.lexsort : Indirect stable sort on multiple keys.
| numpy.searchsorted : Find elements in sorted array.
| numpy.partition: Partial sort.
|
| Notes
| -----
| See `numpy.sort` for notes on the different sorting algorithms.
|
| Examples
| --------
| >>> a = np.array([[1,4], [3,1]])
| >>> a.sort(axis=1)
| >>> a
| array([[1, 4],
| [1, 3]])
| >>> a.sort(axis=0)
| >>> a
| array([[1, 3],
| [1, 4]])
|
| Use the `order` keyword to specify a field to use when sorting a
| structured array:
|
| >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
| >>> a.sort(order='y')
| >>> a
| array([(b'c', 1), (b'a', 2)],
| dtype=[('x', 'S1'), ('y', '<i8')])
|
| squeeze(...)
| a.squeeze(axis=None)
|
| Remove single-dimensional entries from the shape of `a`.
|
| Refer to `numpy.squeeze` for full documentation.
|
| See Also
| --------
| numpy.squeeze : equivalent function
|
| std(...)
| a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
|
| Returns the standard deviation of the array elements along given axis.
|
| Refer to `numpy.std` for full documentation.
|
| See Also
| --------
| numpy.std : equivalent function
|
| sum(...)
| a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
|
| Return the sum of the array elements over the given axis.
|
| Refer to `numpy.sum` for full documentation.
|
| See Also
| --------
| numpy.sum : equivalent function
|
| swapaxes(...)
| a.swapaxes(axis1, axis2)
|
| Return a view of the array with `axis1` and `axis2` interchanged.
|
| Refer to `numpy.swapaxes` for full documentation.
|
| See Also
| --------
| numpy.swapaxes : equivalent function
|
| take(...)
| a.take(indices, axis=None, out=None, mode='raise')
|
| Return an array formed from the elements of `a` at the given indices.
|
| Refer to `numpy.take` for full documentation.
|
| See Also
| --------
| numpy.take : equivalent function
|
| tobytes(...)
| a.tobytes(order='C')
|
| Construct Python bytes containing the raw data bytes in the array.
|
| Constructs Python bytes showing a copy of the raw contents of
| data memory. The bytes object can be produced in either 'C' or 'Fortran',
| or 'Any' order (the default is 'C'-order). 'Any' order means C-order
| unless the F_CONTIGUOUS flag in the array is set, in which case it
| means 'Fortran' order.
|
| .. versionadded:: 1.9.0
|
| Parameters
| ----------
| order : {'C', 'F', None}, optional
| Order of the data for multidimensional arrays:
| C, Fortran, or the same as for the original array.
|
| Returns
| -------
| s : bytes
| Python bytes exhibiting a copy of `a`'s raw data.
|
| Examples
| --------
| >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
| >>> x.tobytes()
| b'\x00\x00\x01\x00\x02\x00\x03\x00'
| >>> x.tobytes('C') == x.tobytes()
| True
| >>> x.tobytes('F')
| b'\x00\x00\x02\x00\x01\x00\x03\x00'
|
| tofile(...)
| a.tofile(fid, sep="", format="%s")
|
| Write array to a file as text or binary (default).
|
| Data is always written in 'C' order, independent of the order of `a`.
| The data produced by this method can be recovered using the function
| fromfile().
|
| Parameters
| ----------
| fid : file or str or Path
| An open file object, or a string containing a filename.
|
| .. versionchanged:: 1.17.0
| `pathlib.Path` objects are now accepted.
|
| sep : str
| Separator between array items for text output.
| If "" (empty), a binary file is written, equivalent to
| ``file.write(a.tobytes())``.
| format : str
| Format string for text file output.
| Each entry in the array is formatted to text by first converting
| it to the closest Python type, and then using "format" % item.
|
| Notes
| -----
| This is a convenience function for quick storage of array data.
| Information on endianness and precision is lost, so this method is not a
| good choice for files intended to archive data or transport data between
| machines with different endianness. Some of these problems can be overcome
| by outputting the data as text files, at the expense of speed and file
| size.
|
| When fid is a file object, array contents are directly written to the
| file, bypassing the file object's ``write`` method. As a result, tofile
| cannot be used with files objects supporting compression (e.g., GzipFile)
| or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
|
| tolist(...)
| a.tolist()
|
| Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
|
| Return a copy of the array data as a (nested) Python list.
| Data items are converted to the nearest compatible builtin Python type, via
| the `~numpy.ndarray.item` function.
|
| If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
| not be a list at all, but a simple Python scalar.
|
| Parameters
| ----------
| none
|
| Returns
| -------
| y : object, or list of object, or list of list of object, or ...
| The possibly nested list of array elements.
|
| Notes
| -----
| The array may be recreated via ``a = np.array(a.tolist())``, although this
| may sometimes lose precision.
|
| Examples
| --------
| For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
| except that ``tolist`` changes numpy scalars to Python scalars:
|
| >>> a = np.uint32([1, 2])
| >>> a_list = list(a)
| >>> a_list
| [1, 2]
| >>> type(a_list[0])
| <class 'numpy.uint32'>
| >>> a_tolist = a.tolist()
| >>> a_tolist
| [1, 2]
| >>> type(a_tolist[0])
| <class 'int'>
|
| Additionally, for a 2D array, ``tolist`` applies recursively:
|
| >>> a = np.array([[1, 2], [3, 4]])
| >>> list(a)
| [array([1, 2]), array([3, 4])]
| >>> a.tolist()
| [[1, 2], [3, 4]]
|
| The base case for this recursion is a 0D array:
|
| >>> a = np.array(1)
| >>> list(a)
| Traceback (most recent call last):
| ...
| TypeError: iteration over a 0-d array
| >>> a.tolist()
| 1
|
| tostring(...)
| a.tostring(order='C')
|
| A compatibility alias for `tobytes`, with exactly the same behavior.
|
| Despite its name, it returns `bytes` not `str`\ s.
|
| .. deprecated:: 1.19.0
|
| trace(...)
| a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
|
| Return the sum along diagonals of the array.
|
| Refer to `numpy.trace` for full documentation.
|
| See Also
| --------
| numpy.trace : equivalent function
|
| transpose(...)
| a.transpose(*axes)
|
| Returns a view of the array with axes transposed.
|
| For a 1-D array this has no effect, as a transposed vector is simply the
| same vector. To convert a 1-D array into a 2D column vector, an additional
| dimension must be added. `np.atleast2d(a).T` achieves this, as does
| `a[:, np.newaxis]`.
| For a 2-D array, this is a standard matrix transpose.
| For an n-D array, if axes are given, their order indicates how the
| axes are permuted (see Examples). If axes are not provided and
| ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
| ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
|
| Parameters
| ----------
| axes : None, tuple of ints, or `n` ints
|
| * None or no argument: reverses the order of the axes.
|
| * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
| `i`-th axis becomes `a.transpose()`'s `j`-th axis.
|
| * `n` ints: same as an n-tuple of the same ints (this form is
| intended simply as a "convenience" alternative to the tuple form)
|
| Returns
| -------
| out : ndarray
| View of `a`, with axes suitably permuted.
|
| See Also
| --------
| ndarray.T : Array property returning the array transposed.
| ndarray.reshape : Give a new shape to an array without changing its data.
|
| Examples
| --------
| >>> a = np.array([[1, 2], [3, 4]])
| >>> a
| array([[1, 2],
| [3, 4]])
| >>> a.transpose()
| array([[1, 3],
| [2, 4]])
| >>> a.transpose((1, 0))
| array([[1, 3],
| [2, 4]])
| >>> a.transpose(1, 0)
| array([[1, 3],
| [2, 4]])
|
| var(...)
| a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
|
| Returns the variance of the array elements, along given axis.
|
| Refer to `numpy.var` for full documentation.
|
| See Also
| --------
| numpy.var : equivalent function
|
| view(...)
| a.view([dtype][, type])
|
| New view of array with the same data.
|
| .. note::
| Passing None for ``dtype`` is different from omitting the parameter,
| since the former invokes ``dtype(None)`` which is an alias for
| ``dtype('float_')``.
|
| Parameters
| ----------
| dtype : data-type or ndarray sub-class, optional
| Data-type descriptor of the returned view, e.g., float32 or int16.
| Omitting it results in the view having the same data-type as `a`.
| This argument can also be specified as an ndarray sub-class, which
| then specifies the type of the returned object (this is equivalent to
| setting the ``type`` parameter).
| type : Python type, optional
| Type of the returned view, e.g., ndarray or matrix. Again, omission
| of the parameter results in type preservation.
|
| Notes
| -----
| ``a.view()`` is used two different ways:
|
| ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
| of the array's memory with a different data-type. This can cause a
| reinterpretation of the bytes of memory.
|
| ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
| returns an instance of `ndarray_subclass` that looks at the same array
| (same shape, dtype, etc.) This does not cause a reinterpretation of the
| memory.
|
| For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
| bytes per entry than the previous dtype (for example, converting a
| regular array to a structured array), then the behavior of the view
| cannot be predicted just from the superficial appearance of ``a`` (shown
| by ``print(a)``). It also depends on exactly how ``a`` is stored in
| memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
| defined as a slice or transpose, etc., the view may give different
| results.
|
|
| Examples
| --------
| >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
|
| Viewing array data using a different type and dtype:
|
| >>> y = x.view(dtype=np.int16, type=np.matrix)
| >>> y
| matrix([[513]], dtype=int16)
| >>> print(type(y))
| <class 'numpy.matrix'>
|
| Creating a view on a structured array so it can be used in calculations
|
| >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
| >>> xv = x.view(dtype=np.int8).reshape(-1,2)
| >>> xv
| array([[1, 2],
| [3, 4]], dtype=int8)
| >>> xv.mean(0)
| array([2., 3.])
|
| Making changes to the view changes the underlying array
|
| >>> xv[0,1] = 20
| >>> x
| array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
|
| Using a view to convert an array to a recarray:
|
| >>> z = x.view(np.recarray)
| >>> z.a
| array([1, 3], dtype=int8)
|
| Views share data:
|
| >>> x[0] = (9, 10)
| >>> z[0]
| (9, 10)
|
| Views that change the dtype size (bytes per entry) should normally be
| avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
|
| >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
| >>> y = x[:, 0:2]
| >>> y
| array([[1, 2],
| [4, 5]], dtype=int16)
| >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
| Traceback (most recent call last):
| ...
| ValueError: To change to a dtype of a different size, the array must be C-contiguous
| >>> z = y.copy()
| >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
| array([[(1, 2)],
| [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
|
| ----------------------------------------------------------------------
| Static methods defined here:
|
| __new__(*args, **kwargs) from builtins.type
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| T
| The transposed array.
|
| Same as ``self.transpose()``.
|
| Examples
| --------
| >>> x = np.array([[1.,2.],[3.,4.]])
| >>> x
| array([[ 1., 2.],
| [ 3., 4.]])
| >>> x.T
| array([[ 1., 3.],
| [ 2., 4.]])
| >>> x = np.array([1.,2.,3.,4.])
| >>> x
| array([ 1., 2., 3., 4.])
| >>> x.T
| array([ 1., 2., 3., 4.])
|
| See Also
| --------
| transpose
|
| __array_finalize__
| None.
|
| __array_interface__
| Array protocol: Python side.
|
| __array_priority__
| Array priority.
|
| __array_struct__
| Array protocol: C-struct side.
|
| base
| Base object if memory is from some other object.
|
| Examples
| --------
| The base of an array that owns its memory is None:
|
| >>> x = np.array([1,2,3,4])
| >>> x.base is None
| True
|
| Slicing creates a view, whose memory is shared with x:
|
| >>> y = x[2:]
| >>> y.base is x
| True
|
| ctypes
| An object to simplify the interaction of the array with the ctypes
| module.
|
| This attribute creates an object that makes it easier to use arrays
| when calling shared libraries with the ctypes module. The returned
| object has, among others, data, shape, and strides attributes (see
| Notes below) which themselves return ctypes objects that can be used
| as arguments to a shared library.
|
| Parameters
| ----------
| None
|
| Returns
| -------
| c : Python object
| Possessing attributes data, shape, strides, etc.
|
| See Also
| --------
| numpy.ctypeslib
|
| Notes
| -----
| Below are the public attributes of this object which were documented
| in "Guide to NumPy" (we have omitted undocumented public attributes,
| as well as documented private attributes):
|
| .. autoattribute:: numpy.core._internal._ctypes.data
| :noindex:
|
| .. autoattribute:: numpy.core._internal._ctypes.shape
| :noindex:
|
| .. autoattribute:: numpy.core._internal._ctypes.strides
| :noindex:
|
| .. automethod:: numpy.core._internal._ctypes.data_as
| :noindex:
|
| .. automethod:: numpy.core._internal._ctypes.shape_as
| :noindex:
|
| .. automethod:: numpy.core._internal._ctypes.strides_as
| :noindex:
|
| If the ctypes module is not available, then the ctypes attribute
| of array objects still returns something useful, but ctypes objects
| are not returned and errors may be raised instead. In particular,
| the object will still have the ``as_parameter`` attribute which will
| return an integer equal to the data attribute.
|
| Examples
| --------
| >>> import ctypes
| >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
| >>> x
| array([[0, 1],
| [2, 3]], dtype=int32)
| >>> x.ctypes.data
| 31962608 # may vary
| >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
| <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
| >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
| c_uint(0)
| >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
| c_ulong(4294967296)
| >>> x.ctypes.shape
| <numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary
| >>> x.ctypes.strides
| <numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary
|
| data
| Python buffer object pointing to the start of the array's data.
|
| dtype
| Data-type of the array's elements.
|
| Parameters
| ----------
| None
|
| Returns
| -------
| d : numpy dtype object
|
| See Also
| --------
| numpy.dtype
|
| Examples
| --------
| >>> x
| array([[0, 1],
| [2, 3]])
| >>> x.dtype
| dtype('int32')
| >>> type(x.dtype)
| <type 'numpy.dtype'>
|
| flags
| Information about the memory layout of the array.
|
| Attributes
| ----------
| C_CONTIGUOUS (C)
| The data is in a single, C-style contiguous segment.
| F_CONTIGUOUS (F)
| The data is in a single, Fortran-style contiguous segment.
| OWNDATA (O)
| The array owns the memory it uses or borrows it from another object.
| WRITEABLE (W)
| The data area can be written to. Setting this to False locks
| the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
| from its base array at creation time, but a view of a writeable
| array may be subsequently locked while the base array remains writeable.
| (The opposite is not true, in that a view of a locked array may not
| be made writeable. However, currently, locking a base object does not
| lock any views that already reference it, so under that circumstance it
| is possible to alter the contents of a locked array via a previously
| created writeable view onto it.) Attempting to change a non-writeable
| array raises a RuntimeError exception.
| ALIGNED (A)
| The data and all elements are aligned appropriately for the hardware.
| WRITEBACKIFCOPY (X)
| This array is a copy of some other array. The C-API function
| PyArray_ResolveWritebackIfCopy must be called before deallocating
| to the base array will be updated with the contents of this array.
| UPDATEIFCOPY (U)
| (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
| When this array is
| deallocated, the base array will be updated with the contents of
| this array.
| FNC
| F_CONTIGUOUS and not C_CONTIGUOUS.
| FORC
| F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
| BEHAVED (B)
| ALIGNED and WRITEABLE.
| CARRAY (CA)
| BEHAVED and C_CONTIGUOUS.
| FARRAY (FA)
| BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
|
| Notes
| -----
| The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
| or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
| names are only supported in dictionary access.
|
| Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
| changed by the user, via direct assignment to the attribute or dictionary
| entry, or by calling `ndarray.setflags`.
|
| The array flags cannot be set arbitrarily:
|
| - UPDATEIFCOPY can only be set ``False``.
| - WRITEBACKIFCOPY can only be set ``False``.
| - ALIGNED can only be set ``True`` if the data is truly aligned.
| - WRITEABLE can only be set ``True`` if the array owns its own memory
| or the ultimate owner of the memory exposes a writeable buffer
| interface or is a string.
|
| Arrays can be both C-style and Fortran-style contiguous simultaneously.
| This is clear for 1-dimensional arrays, but can also be true for higher
| dimensional arrays.
|
| Even for contiguous arrays a stride for a given dimension
| ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
| or the array has no elements.
| It does *not* generally hold that ``self.strides[-1] == self.itemsize``
| for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
| Fortran-style contiguous arrays is true.
|
| flat
| A 1-D iterator over the array.
|
| This is a `numpy.flatiter` instance, which acts similarly to, but is not
| a subclass of, Python's built-in iterator object.
|
| See Also
| --------
| flatten : Return a copy of the array collapsed into one dimension.
|
| flatiter
|
| Examples
| --------
| >>> x = np.arange(1, 7).reshape(2, 3)
| >>> x
| array([[1, 2, 3],
| [4, 5, 6]])
| >>> x.flat[3]
| 4
| >>> x.T
| array([[1, 4],
| [2, 5],
| [3, 6]])
| >>> x.T.flat[3]
| 5
| >>> type(x.flat)
| <class 'numpy.flatiter'>
|
| An assignment example:
|
| >>> x.flat = 3; x
| array([[3, 3, 3],
| [3, 3, 3]])
| >>> x.flat[[1,4]] = 1; x
| array([[3, 1, 3],
| [3, 1, 3]])
|
| imag
| The imaginary part of the array.
|
| Examples
| --------
| >>> x = np.sqrt([1+0j, 0+1j])
| >>> x.imag
| array([ 0. , 0.70710678])
| >>> x.imag.dtype
| dtype('float64')
|
| itemsize
| Length of one array element in bytes.
|
| Examples
| --------
| >>> x = np.array([1,2,3], dtype=np.float64)
| >>> x.itemsize
| 8
| >>> x = np.array([1,2,3], dtype=np.complex128)
| >>> x.itemsize
| 16
|
| nbytes
| Total bytes consumed by the elements of the array.
|
| Notes
| -----
| Does not include memory consumed by non-element attributes of the
| array object.
|
| Examples
| --------
| >>> x = np.zeros((3,5,2), dtype=np.complex128)
| >>> x.nbytes
| 480
| >>> np.prod(x.shape) * x.itemsize
| 480
|
| ndim
| Number of array dimensions.
|
| Examples
| --------
| >>> x = np.array([1, 2, 3])
| >>> x.ndim
| 1
| >>> y = np.zeros((2, 3, 4))
| >>> y.ndim
| 3
|
| real
| The real part of the array.
|
| Examples
| --------
| >>> x = np.sqrt([1+0j, 0+1j])
| >>> x.real
| array([ 1. , 0.70710678])
| >>> x.real.dtype
| dtype('float64')
|
| See Also
| --------
| numpy.real : equivalent function
|
| shape
| Tuple of array dimensions.
|
| The shape property is usually used to get the current shape of an array,
| but may also be used to reshape the array in-place by assigning a tuple of
| array dimensions to it. As with `numpy.reshape`, one of the new shape
| dimensions can be -1, in which case its value is inferred from the size of
| the array and the remaining dimensions. Reshaping an array in-place will
| fail if a copy is required.
|
| Examples
| --------
| >>> x = np.array([1, 2, 3, 4])
| >>> x.shape
| (4,)
| >>> y = np.zeros((2, 3, 4))
| >>> y.shape
| (2, 3, 4)
| >>> y.shape = (3, 8)
| >>> y
| array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
| [ 0., 0., 0., 0., 0., 0., 0., 0.],
| [ 0., 0., 0., 0., 0., 0., 0., 0.]])
| >>> y.shape = (3, 6)
| Traceback (most recent call last):
| File "<stdin>", line 1, in <module>
| ValueError: total size of new array must be unchanged
| >>> np.zeros((4,2))[::2].shape = (-1,)
| Traceback (most recent call last):
| File "<stdin>", line 1, in <module>
| AttributeError: Incompatible shape for in-place modification. Use
| `.reshape()` to make a copy with the desired shape.
|
| See Also
| --------
| numpy.reshape : similar function
| ndarray.reshape : similar method
|
| size
| Number of elements in the array.
|
| Equal to ``np.prod(a.shape)``, i.e., the product of the array's
| dimensions.
|
| Notes
| -----
| `a.size` returns a standard arbitrary precision Python integer. This
| may not be the case with other methods of obtaining the same value
| (like the suggested ``np.prod(a.shape)``, which returns an instance
| of ``np.int_``), and may be relevant if the value is used further in
| calculations that may overflow a fixed size integer type.
|
| Examples
| --------
| >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
| >>> x.size
| 30
| >>> np.prod(x.shape)
| 30
|
| strides
| Tuple of bytes to step in each dimension when traversing an array.
|
| The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
| is::
|
| offset = sum(np.array(i) * a.strides)
|
| A more detailed explanation of strides can be found in the
| "ndarray.rst" file in the NumPy reference guide.
|
| Notes
| -----
| Imagine an array of 32-bit integers (each 4 bytes)::
|
| x = np.array([[0, 1, 2, 3, 4],
| [5, 6, 7, 8, 9]], dtype=np.int32)
|
| This array is stored in memory as 40 bytes, one after the other
| (known as a contiguous block of memory). The strides of an array tell
| us how many bytes we have to skip in memory to move to the next position
| along a certain axis. For example, we have to skip 4 bytes (1 value) to
| move to the next column, but 20 bytes (5 values) to get to the same
| position in the next row. As such, the strides for the array `x` will be
| ``(20, 4)``.
|
| See Also
| --------
| numpy.lib.stride_tricks.as_strided
|
| Examples
| --------
| >>> y = np.reshape(np.arange(2*3*4), (2,3,4))
| >>> y
| array([[[ 0, 1, 2, 3],
| [ 4, 5, 6, 7],
| [ 8, 9, 10, 11]],
| [[12, 13, 14, 15],
| [16, 17, 18, 19],
| [20, 21, 22, 23]]])
| >>> y.strides
| (48, 16, 4)
| >>> y[1,1,1]
| 17
| >>> offset=sum(y.strides * np.array((1,1,1)))
| >>> offset/y.itemsize
| 17
|
| >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
| >>> x.strides
| (32, 4, 224, 1344)
| >>> i = np.array([3,5,2,2])
| >>> offset = sum(i * x.strides)
| >>> x[3,5,2,2]
| 813
| >>> offset / x.itemsize
| 813
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| __hash__ = None
###Markdown
(Of course, you might also prefer to check out [the online docs](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.ndarray.html).) Operator overloading: What's the value of the below expression?
###Code
[3, 4, 1, 2, 2, 1] + 10
###Output
_____no_output_____
###Markdown
What a silly question. Of course it's an error. But what about...
###Code
rolls + 10
###Output
_____no_output_____
###Markdown
We might think that Python strictly polices how pieces of its core syntax behave, such as `+`, `<`, `in`, `==`, or square brackets for indexing and slicing. But in fact, it takes a very hands-off approach. When you define a new type, you can choose how addition works for it, or what it means for an object of that type to be equal to something else. The designers of lists decided that adding them to numbers wasn't allowed. The designers of `numpy` arrays went a different way (adding the number to each element of the array). Here are a few more examples of how `numpy` arrays interact unexpectedly with Python operators (or at least differently from lists).
###Code
# At which indices are the dice less than or equal to 3?
rolls <= 3
xlist = [[1,2,3],[2,4,6],]
# Create a 2-dimensional array
x = numpy.asarray(xlist)
print("xlist = {}\nx =\n{}".format(xlist, x))
# Get the last element of the second row of our numpy array
x[1,-1]
# Get the last element of the second sublist of our nested list?
xlist[1,-1]
###Output
_____no_output_____
###Markdown
numpy's `ndarray` type is specialized for working with multi-dimensional data, so it defines its own logic for indexing, allowing us to index by a tuple to specify the index at each dimension. When does 1 + 1 not equal 2? Things can get weirder than this. You may have heard of (or even used) tensorflow, a Python library popularly used for deep learning. It makes extensive use of operator overloading.
###Code
import tensorflow as tf
# Create two constants, each with value 1
a = tf.constant(1)
b = tf.constant(1)
# Add them together to get...
a + b
###Output
_____no_output_____
###Markdown
`a + b` isn't 2, it is (to quote tensorflow's documentation)...> a symbolic handle to one of the outputs of an `Operation`. It does not hold the values of that operation's output, but instead provides a means of computing those values in a TensorFlow `tf.Session`. It's important just to be aware of the fact that this sort of thing is possible and that libraries will often use operator overloading in non-obvious or magical-seeming ways. Understanding how Python's operators work when applied to ints, strings, and lists is no guarantee that you'll be able to immediately understand what they do when applied to a tensorflow `Tensor`, or a numpy `ndarray`, or a pandas `DataFrame`. Once you've had a little taste of DataFrames, for example, an expression like the one below starts to look appealingly intuitive:
```python
# Get the rows with population over 1m in South America
df[(df['population'] > 10**6) & (df['continent'] == 'South America')]
```
But why does it work? The example above features something like **5** different overloaded operators. What's each of those operations doing? It can help to know the answer when things start going wrong. Curious how it all works? Have you ever called `help()` or `dir()` on an object and wondered what the heck all those names with the double-underscores were?
###Code
print(dir(list))
###Output
['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__', 'append', 'clear', 'copy', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort']
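###Markdown
Those double-underscore ("dunder") names are exactly where operator overloading lives. As a minimal sketch (the `DiceRolls` class below is a toy invented here for illustration, not part of any library), defining `__add__` and `__contains__` is enough to make `+` and `in` work on your own type:
###Code
class DiceRolls:
    def __init__(self, rolls):
        self.rolls = list(rolls)

    def __add__(self, other):
        # '+' adds a number to every roll, mimicking numpy's broadcasting behaviour
        return DiceRolls(r + other for r in self.rolls)

    def __contains__(self, value):
        # 'in' checks membership among the rolls
        return value in self.rolls

    def __repr__(self):
        return f"DiceRolls({self.rolls})"

d = DiceRolls([3, 4, 1, 2, 2, 1])
print(d + 10)   # DiceRolls([13, 14, 11, 12, 12, 11])
print(4 in d)   # True
###Output
_____no_output_____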
|
Qunova Computing/test.ipynb | ###Markdown
Continuing from the result of the [training](./training.ipynb), we now test the model.
###Code
import numpy as np
from utils import load_data, bf
from QVC import PyquilVariationalClassifier
from pyquil_circuits import PauliFeatureMap, VariationalCircuit
###Output
_____no_output_____
###Markdown
As already shown in the [training script](./training.ipynb), we declare the model.
###Code
# Define the quantum feature map
qfm = PauliFeatureMap(num_qubits=3, rep=2)
# Define the variational (trainable) circuit
vc = VariationalCircuit(num_qubits=3, rep=2)
# Combine the feature map and variational circuit into the classifier
qvc = PyquilVariationalClassifier(qfm, vc, bool_ftn=bf, use_bias=False)
###Output
_____no_output_____
###Markdown
Also load the data and the trained parameters.
###Code
train_data = np.load('./npy_files/TrainData_zzpfmc12_pyquil.npy')
test_data = np.load('./npy_files/TestData_zzpfmc12_pyquil.npy')
train_labels = np.load('./npy_files/TrainLabels_zzpfmc12_pyquil.npy')
test_labels = np.load('./npy_files/TestLabels_zzpfmc12_pyquil.npy')
opt_param = np.load('./npy_files/Optimal_param_zzpfmc12_qiskit.npy')
qvc.optimal_params = opt_param
###Output
_____no_output_____
###Markdown
Get the test and training accuracies.
###Code
# (-1)**label maps the {0, 1} labels to the {+1, -1} values expected by the classifier
test_acc, _, test_correct = qvc.test(test_data, (-1)**test_labels, verbose=True)
train_acc, _, train_correct = qvc.test(train_data, (-1)**train_labels, verbose=True)
print(f"Test accuracy = {test_acc}")
print(f"Training accuracy = {train_acc}")
###Output
Test accuracy = 0.6190476190476191
Training accuracy = 0.8271604938271605
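###Markdown
To see whether the errors are balanced across the two classes, a small optional sketch of a per-class breakdown; it assumes `test_correct` is a per-sample boolean array, as its use as a mask in the plotting loops below suggests.
###Code
import numpy as np

# Per-class accuracy computed from the per-sample correctness mask
test_correct_arr = np.asarray(test_correct, dtype=bool)
for cls in (0, 1):
    mask = test_labels == cls
    print(f"class {cls}: accuracy {test_correct_arr[mask].mean():.3f} over {mask.sum()} samples")
###Output
_____no_output_____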
###Markdown
Plot the classification result of test and training data.
###Code
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
colors = ['tab:blue', 'tab:red', 'tab:gray']
correct_dp_c0 = list()
correct_dp_c1 = list()
false_dp = list()
for i, (dp, l) in enumerate(zip(train_data, train_correct)):
if l :
if train_labels[i] == 0:
correct_dp_c0.append(dp)
elif train_labels[i] == 1:
correct_dp_c1.append(dp)
else:
print("something went wrong")
raise ValueError
else:
false_dp.append(dp)
correct_dp_c0 = np.array([np.array(d) for d in correct_dp_c0]).swapaxes(0,1)
correct_dp_c1 = np.array([np.array(d) for d in correct_dp_c1]).swapaxes(0,1)
false_dp = np.array([np.array(d) for d in false_dp]).swapaxes(0,1)
ax.scatter(correct_dp_c0[0], correct_dp_c0[1], correct_dp_c0[2],
c = colors[0],
label='correct_cls0')
ax.scatter(correct_dp_c1[0], correct_dp_c1[1], correct_dp_c1[2],
c = colors[1],
label='correct_cls1')
ax.scatter(false_dp[0], false_dp[1], false_dp[2],
c = colors[2],
label='false')
ax.set_xlabel('pc1')
ax.set_ylabel('pc2')
ax.set_zlabel('pc3')
fig.legend(bbox_to_anchor=(1,1))
plt.savefig('./figures/zzzpfm_c12_class_train.png')
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
colors = ['tab:blue', 'tab:red', 'tab:gray']
correct_dp_c0 = list()
correct_dp_c1 = list()
false_dp = list()
for i, (dp, l) in enumerate(zip(test_data, test_correct)):
if l :
if test_labels[i] == 0:
correct_dp_c0.append(dp)
elif test_labels[i] == 1:
correct_dp_c1.append(dp)
else:
print("something went wrong")
raise ValueError
else:
false_dp.append(dp)
correct_dp_c0 = np.array([np.array(d) for d in correct_dp_c0]).swapaxes(0,1)
correct_dp_c1 = np.array([np.array(d) for d in correct_dp_c1]).swapaxes(0,1)
false_dp = np.array([np.array(d) for d in false_dp]).swapaxes(0,1)
ax.scatter(correct_dp_c0[0], correct_dp_c0[1], correct_dp_c0[2],
c = colors[0],
label='correct_cls0')
ax.scatter(correct_dp_c1[0], correct_dp_c1[1], correct_dp_c1[2],
c = colors[1],
label='correct_cls1')
ax.scatter(false_dp[0], false_dp[1], false_dp[2],
c = colors[2],
label='false')
ax.set_xlabel('pc1')
ax.set_ylabel('pc2')
ax.set_zlabel('pc3')
fig.legend(bbox_to_anchor=(1,1))
plt.savefig('./figures/zzzpfm_c12_class_test.png')
###Output
_____no_output_____ |
workflow/notebooks/descriptive_stats.ipynb | ###Markdown
Data import
###Code
# pandas, matplotlib and seaborn are used throughout this notebook
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

dtypes = dict(
Tumor_Sample_Barcode=str,
Chromosome="category",
Start_Position=int,
Reference_Allele="category",
Tumor_Seq_Allele2="category",
Hugo_Symbol=str,
Consequence="category",
Variant_Classification="category",
DNA_VAF=float,
Substitution="category"
)
mutations = pd.read_table("results/mutations_with_context.txt", dtype=dtypes, usecols=dtypes.keys())
mutational_catalogues = mutations.groupby(["Tumor_Sample_Barcode","Substitution"]).size().unstack(fill_value=0)
assert len(mutational_catalogues.columns) == 96
info_clinica = pd.read_table(
"resources/pancan_pcawg_2020/data_clinical_sample.txt",
index_col="SAMPLE_ID",
comment="#")
histology = info_clinica.loc[mutational_catalogues.index, "HISTOLOGY_ABBREVIATION"].astype("category")
###Output
_____no_output_____
###Markdown
Tumor types Table
###Code
(
histology
.str.split("-", expand=True)
.rename(columns={0:"Site",1:"Type"})
.groupby("Site")
.value_counts()
.rename("Samples")
.to_frame()
)
(
histology
.str.split("-", expand=True)
.rename(columns={0:"Site",1:"Type"})
.groupby("Type")
.value_counts()
.rename("Samples")
.to_frame()
)
###Output
_____no_output_____
###Markdown
Plots
###Code
plt.figure(figsize=(5,10))
sns.countplot(y=histology.str.split("-").str[0], order=histology.str.split("-").str[0].value_counts().index)
plt.ylabel("")
plt.xlabel("Available samples")
sns.despine()
plt.figure(figsize=(5,10))
sns.countplot(y=histology.str.split("-").str[1], order=histology.str.split("-").str[1].value_counts().index)
plt.ylabel("")
plt.xlabel("Available samples")
sns.despine()
plt.figure(figsize=(5,10))
sns.countplot(y=histology, order=histology.value_counts().index)
plt.ylabel("")
plt.xlabel("Available samples")
sns.despine()
###Output
_____no_output_____
###Markdown
SNV counts
###Code
plt.figure(figsize=(5,10))
sns.boxplot(
x=mutational_catalogues.sum(axis=1),
y=histology,
order = mutational_catalogues.sum(axis=1).groupby(histology).median().sort_values(ascending=False).index)
plt.xscale("log")
plt.xlabel("Whole genome SNV counts")
plt.ylabel("")
sns.despine()
plt.figure(figsize=(5,7))
sns.boxplot(
x=mutational_catalogues.sum(axis=1),
y=histology.str.split("-").str[0],
order = mutational_catalogues.sum(axis=1).groupby(histology.str.split("-").str[0]).median().sort_values(ascending=False).index)
plt.xscale("log")
plt.xlabel("Whole genome SNV counts")
plt.ylabel("")
sns.despine()
###Output
_____no_output_____ |
Ch06/06_01/06_01.ipynb | ###Markdown
___ Chapter 6 - Cluster Analysis. Segment 1 - K-means method. Setting up for clustering analysis
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import scale
import sklearn.metrics as sm
from sklearn import datasets
from sklearn.metrics import confusion_matrix, classification_report
%matplotlib inline
plt.figure(figsize=(7,4))
iris = datasets.load_iris()
X = scale(iris.data)
y = pd.DataFrame(iris.target)
variable_names = iris.feature_names
X[0:10,]
###Output
_____no_output_____
###Markdown
Building and running your model
###Code
clustering = KMeans(n_clusters=3, random_state=5)
clustering.fit(X)
###Output
_____no_output_____
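###Markdown
Before plotting, it can help to glance at what was learned; a quick optional check using the fitted estimator's standard scikit-learn attributes:
###Code
# Cluster centres in the scaled feature space, one row per cluster
print(clustering.cluster_centers_)
# Number of samples assigned to each cluster
print(np.bincount(clustering.labels_))
###Output
_____no_output_____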
###Markdown
Plotting your model outputs
###Code
iris_df = pd.DataFrame(iris.data)
iris_df.columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']
y.columns = ['Targets']
color_theme = np.array(['darkgray', 'lightsalmon', 'powderblue'])
plt.subplot(1,2,1)
plt.scatter(x=iris_df.Petal_Length,y=iris_df.Petal_Width, c=color_theme[iris.target], s=50)
plt.title('Ground Truth Classification')
plt.subplot(1,2,2)
plt.scatter(x=iris_df.Petal_Length,y=iris_df.Petal_Width, c=color_theme[clustering.labels_], s=50)
plt.title('K-Means Classification')
# Remap the arbitrary K-means cluster indices so they line up with the iris target labels
relabel = np.choose(clustering.labels_, [2, 0, 1]).astype(np.int64)
plt.subplot(1,2,1)
plt.scatter(x=iris_df.Petal_Length,y=iris_df.Petal_Width, c=color_theme[iris.target], s=50)
plt.title('Ground Truth Classification')
plt.subplot(1,2,2)
plt.scatter(x=iris_df.Petal_Length,y=iris_df.Petal_Width, c=color_theme[relabel], s=50)
plt.title('K-Means Classification')
###Output
_____no_output_____
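###Markdown
Since `confusion_matrix` was imported above but not yet used, a quick optional look at how the remapped cluster labels line up with the ground truth before the classification report below (rows are true classes, columns are remapped cluster labels):
###Code
# Agreement between ground-truth classes and remapped K-means labels
print(confusion_matrix(y.values.ravel(), relabel))
###Output
_____no_output_____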
###Markdown
Evaluate your clustering results
###Code
print(classification_report(y, relabel))
###Output
precision recall f1-score support
0 1.00 1.00 1.00 50
1 0.74 0.78 0.76 50
2 0.77 0.72 0.74 50
avg / total 0.83 0.83 0.83 150
|
Computer_Vision_for_Fashion_Dataset.ipynb | ###Markdown
Import Libraries
###Code
%tensorflow_version 2.x
import tensorflow as tf
import numpy as np
from tensorflow import keras
###Output
_____no_output_____
###Markdown
Simple Computer Vision using a Neural Network. Get the Fashion dataset: TensorFlow includes the Fashion-MNIST dataset, which has pictures and labels of various clothing items.
###Code
mnist = tf.keras.datasets.fashion_mnist
###Output
_____no_output_____
###Markdown
The dataset has a built-in method to split into training and testing sets:
###Code
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
###Output
_____no_output_____
###Markdown
Print sample image from dataset:
###Code
import matplotlib.pyplot as plt
item = 100
plt.imshow(training_images[item])
print("Item Category: {}".format(training_labels[item])) #category is 8 here : bag
print(training_images[item]) # input values below represent a 28 x 28 grayscale image
###Output
Item Category: 8
[[ 0 0 0 0 0 0 0 0 0 1 0 0 18 107 119 103 9 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 99 155 113 61 118 173 117
0 0 3 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 1 0 110 136 0 0 0 0 0 167
159 0 0 2 0 0 0 0 0 0]
[ 0 0 0 0 0 1 0 6 0 72 147 0 0 5 0 2 0 0
174 118 0 5 0 0 0 0 0 0]
[ 0 0 0 0 0 1 5 0 0 174 4 0 4 0 0 0 0 0
0 204 44 0 4 0 0 0 0 0]
[ 0 0 0 0 0 0 8 0 125 128 0 9 0 0 0 0 0 2
0 107 152 0 6 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 184 9 0 4 0 0 0 0 0 2
0 0 190 0 0 2 0 0 0 0]
[ 0 0 0 1 0 2 0 73 158 0 0 0 0 1 0 0 0 0
4 0 157 79 0 3 1 0 0 0]
[ 0 0 2 5 1 0 0 157 98 0 2 0 0 1 0 0 0 0
5 0 121 145 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 231 84 0 2 1 1 0 0 0 0 0
1 0 92 223 16 0 7 4 1 0]
[ 0 0 0 0 0 0 38 183 81 0 0 0 2 2 1 2 1 0
1 0 120 226 57 0 0 0 0 0]
[ 0 0 173 216 193 213 183 164 167 98 16 0 0 0 0 0 0 0
0 0 107 141 111 91 90 108 50 0]
[ 0 0 185 221 217 210 202 222 200 206 202 204 109 27 12 17 61 136
180 221 202 225 208 214 223 240 162 0]
[ 0 0 183 220 205 200 193 184 189 182 173 194 215 216 205 207 207 195
185 194 194 203 212 183 193 211 153 0]
[ 0 0 190 233 203 206 214 216 195 183 176 159 175 189 202 195 186 186
182 186 195 209 235 203 202 215 136 0]
[ 0 0 180 228 201 202 188 183 178 192 186 181 181 177 204 217 175 179
184 176 166 169 175 185 152 203 107 0]
[ 0 0 167 235 200 207 207 209 207 203 198 189 182 178 186 189 178 192
197 195 192 185 184 205 187 255 61 0]
[ 0 0 152 250 208 214 209 202 200 202 205 204 200 192 198 204 195 205
206 198 199 203 214 207 179 249 55 0]
[ 0 0 126 255 211 215 210 206 203 203 203 206 204 199 206 207 200 206
201 199 202 208 215 195 174 211 15 0]
[ 0 0 107 255 212 211 210 211 208 206 206 207 208 208 217 214 205 206
204 208 211 211 220 197 182 225 0 0]
[ 0 0 63 232 211 216 212 214 212 212 214 212 211 212 223 219 210 214
213 212 211 209 217 208 168 190 0 0]
[ 0 0 14 255 218 221 215 218 218 217 215 213 211 215 228 220 216 222
217 217 216 218 212 213 189 143 0 0]
[ 0 0 0 230 227 214 217 218 216 217 217 214 216 222 226 218 218 220
219 218 217 218 217 215 216 113 0 0]
[ 0 0 0 177 233 216 223 222 213 213 215 212 220 226 221 220 221 218
222 220 220 221 217 212 219 52 0 0]
[ 0 0 0 10 223 222 220 211 213 215 220 222 226 226 221 223 222 222
221 218 220 219 220 223 174 0 0 0]
[ 0 0 0 0 126 241 208 210 214 214 216 216 220 220 214 213 212 212
210 215 217 218 215 236 42 0 1 0]
[ 0 0 0 0 0 188 237 230 233 255 255 255 255 255 255 255 255 255
255 255 231 229 239 161 0 0 0 0]
[ 0 0 0 0 0 0 63 95 108 105 100 103 104 105 102 100 98 97
96 90 80 83 60 0 0 0 0 0]]
###Markdown
Normalize the input data: pixel values are between 0 and 255 (grayscale), so we scale them to the range [0, 1] by dividing by 255.
###Code
training_images = training_images/255.0
test_images = test_images/255.0
###Output
_____no_output_____
###Markdown
Build and Train a neural network
###Code
model = tf.keras.Sequential([tf.keras.layers.Flatten(), #Flatten 28*28 to a 1*784 feature column
tf.keras.layers.Dense(128, activation = tf.nn.relu), #Layer 1 with ReLu activation function giving max(0,val)
                             tf.keras.layers.Dense(10, activation = tf.nn.softmax)]) #Output layer with 10 labels and class probabilities using the softmax activation function
model.compile(optimizer = tf.optimizers.Adam(), #using adaptive gradient descent
loss="sparse_categorical_crossentropy", #using sparse as targets in integer format
metrics=['accuracy']) #set desired metric
model.fit(training_images, training_labels, epochs=5) #fit model and set no. of epochs
###Output
Epoch 1/5
1875/1875 [==============================] - 3s 2ms/step - loss: 0.4987 - accuracy: 0.8233
Epoch 2/5
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3803 - accuracy: 0.8619
Epoch 3/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.3384 - accuracy: 0.8769
Epoch 4/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.3160 - accuracy: 0.8847
Epoch 5/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.2974 - accuracy: 0.8909
###Markdown
Evaluate the performance of the model on the test set: loss 0.3699, accuracy 86.97%.
###Code
model.evaluate(test_images, test_labels)
###Output
313/313 [==============================] - 0s 1ms/step - loss: 0.3699 - accuracy: 0.8697
###Markdown
Make Prediction
###Code
classifications = model.predict(test_images)
print(classifications[45])
print("Test image category: {}".format(test_labels[45])) #category 7: Sneaker
plt.imshow(test_images[45])
classifications = model.predict(test_images)
print(classifications[0])
print("Test image category: {}".format(test_labels[0])) #category 9: Ankle Boot
plt.imshow(test_images[0])
###Output
[2.1132342e-05 2.1673056e-08 1.5013877e-06 3.5350766e-08 4.5658157e-06
1.7161107e-02 1.1936325e-05 7.8070223e-02 1.3326171e-05 9.0471607e-01]
Test image category: 9
###Markdown
Number of layers: adding one more hidden layer to the model gives test loss 0.3627 (slightly lower) and test accuracy 87.07% (slightly higher).
###Code
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation = tf.nn.relu),
tf.keras.layers.Dense(128, activation = tf.nn.relu),
tf.keras.layers.Dense(10, activation = tf.nn.softmax )])
model.compile(optimizer = tf.optimizers.Adam(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
###Output
Epoch 1/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.4826 - accuracy: 0.8269
Epoch 2/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.3640 - accuracy: 0.8667
Epoch 3/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.3318 - accuracy: 0.8771
Epoch 4/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.3059 - accuracy: 0.8862
Epoch 5/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.2905 - accuracy: 0.8916
313/313 [==============================] - 0s 1ms/step - loss: 0.3627 - accuracy: 0.8707
###Markdown
Number of neurons: using more neurons (256 instead of 128) gives test loss 0.3460 (lower) and test accuracy 87.51% (higher).
###Code
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(256, activation = tf.nn.relu),
tf.keras.layers.Dense(10, activation = tf.nn.softmax )])
model.compile(optimizer = tf.optimizers.Adam(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
###Output
Epoch 1/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.4857 - accuracy: 0.8275
Epoch 2/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.3649 - accuracy: 0.8651
Epoch 3/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.3293 - accuracy: 0.8789
Epoch 4/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.3044 - accuracy: 0.8880
Epoch 5/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.2868 - accuracy: 0.8935
313/313 [==============================] - 0s 1ms/step - loss: 0.3460 - accuracy: 0.8752
###Markdown
Epochs: training for more epochs (10 instead of 5) gives test loss 0.3267 (lower) and test accuracy 88.99% (higher).
###Code
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)] )
model.compile(optimizer = tf.optimizers.Adam(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=10)
model.evaluate(test_images, test_labels)
###Output
Epoch 1/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.4988 - accuracy: 0.8247
Epoch 2/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3755 - accuracy: 0.8643
Epoch 3/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3373 - accuracy: 0.8774
Epoch 4/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3148 - accuracy: 0.8848
Epoch 5/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2959 - accuracy: 0.8911
Epoch 6/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2805 - accuracy: 0.8953
Epoch 7/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2681 - accuracy: 0.9000
Epoch 8/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2559 - accuracy: 0.9054
Epoch 9/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2470 - accuracy: 0.9080
Epoch 10/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2377 - accuracy: 0.9108
313/313 [==============================] - 0s 1ms/step - loss: 0.3267 - accuracy: 0.8899
###Markdown
Callback: implementing a callback function to stop training once the model reaches 90% training accuracy saves time and computation.
###Code
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('accuracy')>0.9):
print("\nReached 90% accuracy so cancelling training")
self.model.stop_training = True
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = tf.optimizers.Adam(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=10, callbacks=[myCallback()])
###Output
Epoch 1/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.4963 - accuracy: 0.8263
Epoch 2/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3720 - accuracy: 0.8667
Epoch 3/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3355 - accuracy: 0.8776
Epoch 4/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.3137 - accuracy: 0.8837
Epoch 5/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2952 - accuracy: 0.8913
Epoch 6/10
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2800 - accuracy: 0.8964
Epoch 7/10
1875/1875 [==============================] - ETA: 0s - loss: 0.2666 - accuracy: 0.9011
Reached 90% accuracy so cancelling training
1875/1875 [==============================] - 3s 2ms/step - loss: 0.2666 - accuracy: 0.9011
|
notebooks/03_forecasting.ipynb | ###Markdown
###Code
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python packages:
# category_encoders, version >= 2.0
# pandas-profiling, version >= 2.0
# plotly, version >= 4.0
!pip install --upgrade category_encoders pandas-profiling plotly
import category_encoders as ce
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import seaborn as sns
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.max_columns', 500)
# read in features
gdp = pd.read_csv('https://raw.githubusercontent.com/pragmatizt/deforestation_app/master/raw_data/features/API_NY.GDP.MKTP.KD.ZG_DS2_en_csv_v2_382358.csv', skiprows=3)
urban_population = pd.read_csv('https://raw.githubusercontent.com/pragmatizt/deforestation_app/master/raw_data/features/API_SP.URB.TOTL_DS2_en_csv_v2_385764.csv', skiprows=3)
agriculture = pd.read_csv('https://raw.githubusercontent.com/pragmatizt/deforestation_app/master/raw_data/features/API_AG.LND.AGRI.ZS_DS2_en_csv_v2_383732.csv', skiprows=3)
livestock = pd.read_csv('https://raw.githubusercontent.com/pragmatizt/deforestation_app/master/raw_data/features/API_AG.PRD.LVSK.XD_DS2_en_csv_v2_384749.csv', skiprows=3)
electricity = pd.read_csv('https://raw.githubusercontent.com/pragmatizt/deforestation_app/master/raw_data/features/API_EG.USE.ELEC.KH.PC_DS2_en_csv_v2_383985.csv', skiprows=3)
mining = pd.read_csv('https://raw.githubusercontent.com/pragmatizt/deforestation_app/master/raw_data/features/API_TX.VAL.MMTL.ZS.UN_DS2_en_csv_v2_382594.csv', skiprows=3)
###Output
_____no_output_____
###Markdown
Define Wrangle, train dataframe
###Code
def wrangle(df):
'''
A function that reformats the World Bank data
for feature engineering.
'''
# drop years between 1960-1989
df.drop(df.iloc[:, 3:34], inplace=True, axis=1)
# drop 2017-2018 as NANs
df = df.drop(columns=['2017', '2018'])
# deal with nulls
for col in df.select_dtypes(include=np.number):
df[col] = df[col].fillna(df[col].median())
# melt
year = map(str, range(1990, 2017))
feature = df.iloc[0][2]
df = pd.concat([pd.melt(df, id_vars=['Country Name', 'Country Code'], value_vars=val, var_name='Year', value_name=feature) for val in year])
return(df)
###Output
_____no_output_____
###Markdown
Target
###Code
# import target dataframe
forest_pct = pd.read_csv('https://raw.githubusercontent.com/pragmatizt/BW_Unit3_deforestation/master/01_forest_pct_land_area.csv')
forest_pct = wrangle(forest_pct)
###Output
_____no_output_____
###Markdown
Train Dataframe
###Code
mining = wrangle(mining)
livestock = wrangle(livestock)
agriculture = wrangle(agriculture)
urban_population = wrangle(urban_population)
gdp = wrangle(gdp)
electricity = wrangle(electricity)
# create list of dfs
feature_dfs = [mining, livestock, agriculture, urban_population, gdp, electricity]
def merge_features(list_dfs):
'''
A function that takes tidy dataframes across all features
and merges them.
Returns in tidy format.
'''
# use as base for merging
train = list_dfs.pop(0)
for df in list_dfs:
train = train.merge(df, on=['Country Name', 'Country Code', 'Year'])
return(train)
features = merge_features(feature_dfs)
# merge features and forest_pct (target)
train = features.merge(forest_pct, on=['Country Name', 'Country Code', 'Year'])
train.head()
###Output
_____no_output_____
###Markdown
Predictions
###Code
def extend_df(df):
'''
A function that takes wrangled data in tidy format and extends predictions
of the wrangled dataframe from 2017-2025.
'''
# Preparing linear regression to make predictions for each country...
model = LinearRegression()
# Getting list of country codes...
codes = df['Country Code'].unique()
# Getting list of years where we will predict forest coverage...
years = [year for year in range(2017, 2026)]
# For-loop to make predictions for each country with first dataset...
rows = []
feature = df.columns.tolist()[3]
for code in codes:
dictionary = {'Country Code': code}
model.fit(df[df['Country Code'] == code][['Year']],
df[df['Country Code'] == code][feature])
for year in years:
prediction = model.predict([[year]])
dictionary[str(year)] = prediction[0]
rows.append(dictionary)
# Making a new dataframe out of the predictions for the first dataset...
df_predictions = pd.DataFrame(rows)
# Reordering the columns in the dataframe for the first dataset...
df_predictions = df_predictions[
['Country Code'] + [str(year) for year in years]]
# melt df_predictions to tidy format
year = map(str, range(2017, 2026))
df_predictions = pd.concat([pd.melt(df_predictions, id_vars=['Country Code'], value_vars=val, var_name='Year', value_name=feature) for val in year])
return(df_predictions)
mining_pred = extend_df(mining)
livestock_pred = extend_df(livestock)
agriculture_pred = extend_df(agriculture)
urban_population_pred = extend_df(urban_population)
gdp_pred = extend_df(gdp)
electricity_pred = extend_df(electricity)
# create list of dfs
feature_dfs_pred = [mining_pred, livestock_pred, agriculture_pred, urban_population_pred, gdp_pred, electricity_pred]
def merge_pred_features(list_dfs_pred):
'''
A function that takes predicted dataframes across all features
and merges them.
Returns a dataframe in tidy format.
'''
# create base df for merging, call it "test"
test = list_dfs_pred.pop(0)
for df in list_dfs_pred:
test = test.merge(df, on=['Country Code', 'Year'])
return(test)
test = merge_pred_features(feature_dfs_pred)
###Output
_____no_output_____
###Markdown
Train model first on 1990-2016
###Code
model = LinearRegression()
features = test.columns.tolist()
target = 'Forest area (% of land area)'
X_train = train[features]
y_train = train[target]
X_train.Year = X_train.Year.astype(int)
import category_encoders as ce
encoder = ce.OneHotEncoder(use_cat_names=True)
X_train = encoder.fit_transform(X_train)
X_train
X_train.head()
model.fit(X_train, y_train)
###Output
_____no_output_____
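###Markdown
Before forecasting, a quick optional sanity check of the in-sample fit using scikit-learn's built-in R² score; this only measures how well the model fits 1990-2016, not forecast accuracy.
###Code
# Coefficient of determination on the training data (in-sample only)
print("Training R^2:", model.score(X_train, y_train))
###Output
_____no_output_____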
###Markdown
Test (Get Predictions)
###Code
X_test = test[features]
X_test.Year = X_test.Year.astype(int)
X_test = encoder.transform(X_test)
y_pred = model.predict(X_test)
y_pred
test['Forest area (% of land area)'] = pd.Series(y_pred)
test.head(1000)
# download predictions result as a csv
from google.colab import files
test.to_csv('predictions.csv')
files.download('predictions.csv')
###Output
_____no_output_____
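###Markdown
One optional post-processing step worth considering: the target is a percentage of land area, so it is bounded between 0 and 100, but a linear extrapolation can step outside that range. A minimal sketch of clipping the forecasts, applied to a copy so the file saved above is unchanged:
###Code
# Forest area is a percentage of land area, so keep forecasts within [0, 100]
test_clipped = test.copy()
test_clipped['Forest area (% of land area)'] = test_clipped['Forest area (% of land area)'].clip(0, 100)
test_clipped['Forest area (% of land area)'].describe()
###Output
_____no_output_____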
###Markdown
Robustness checks
###Code
ax = sns.distplot(train['Forest area (% of land area)'])
ax = sns.distplot(test['Forest area (% of land area)'])
###Output
_____no_output_____ |
SI/FonctionsLogiquesComplet.ipynb | ###Markdown
Logic of the information chain: Setting the scene: We are going to discover the fundamental logic used to **process** the information of a system, whose behaviour we will describe using several different means. > *To illustrate this, we will first study the case of a simple **[two-way switching ("va-et-vient")](https://fr.wikipedia.org/wiki/Montage_va-et-vient) system**, which lets the lighting of a room be controlled from two different locations.*
###Code
%%HTML
<center>
<iframe width="600" height="340" src="https://www.brun-videographie.com/iframe/VA-ET-VIENT/VA-ET-VIENT.html"></iframe>
</center>
###Output
_____no_output_____
###Markdown
Definition: A logic function is a relation established between a logic output variable and one or more logic input variables. These input and output variables are called logic variables because they can only take two distinct, non-simultaneous values (levels or states): either **True = $1$** or **False = $0$**. They are binary variables, of **Boolean** type. To study these functions of binary variables, we use the algebra of [George BOOLE](https://fr.wikipedia.org/wiki/George_Boole) (a 19th-century British mathematician), which made the development of automation and computer science possible... > *In our example of the **two-way-switch lighting system**:*>> *Whether the lamp (L) lighting a room is on or off depends on the state of a switch (a) located at one entrance to the room and on that of a switch (b) located at the other entrance.*>> Do it yourself - Complete the inputs and output of the "process" function of the two-way switch: > > Describing a logic function. Literal logic proposition: A logic function can be described by a sentence establishing a cause-and-effect (if...then) relation between the inputs and the output. > *For our example of the **two-way-switch lighting system**:*>> *IF switch a is NOT pressed AND switch b is pressed, OR IF switch a is pressed AND switch b is NOT pressed, THEN lamp L is on.* Logic equation (Boolean equation): To lighten the notation and make it possible to apply Boolean algebra, the logic proposition is written as an equation. AND is written $\bullet$, OR is written $+$, THEN is written $=$, and NOT is written as a bar over the $\overline{variable}$. > *For our example of the **two-way-switch lighting system**:*>> Do it yourself - Write the Boolean equation of the two-way switch:>> $L =$>> $L =\overline{a}\bullet b + a\bullet\overline{b}$ Further reading: [the properties of Boolean algebra](https://fr.wikiversity.org/wiki/Logique_de_base/Alg%C3%A8bre_de_Boole) Truth table: To describe the result of a logic function as a function of the state of its inputs, we draw up an ordered table called a truth table. It lists every possible combination; the number of combinations is $2^n$, where $n$ is the number of inputs. > *For our example of the **two-way-switch lighting system**:*>> Do it yourself - Complete the truth table of the two-way switch:>> > Inventory table of the main logic functions:> Do it yourself:>>**Complete** the table above using a web search and by simulating the behaviour of each basic logic gate at https://logic.ly/demo > Going further:>>**Simulate** and then **experiment** with the behaviour of each basic logic gate on a BBC micro:bit board at [https://fr.vittascience.com](https://fr.vittascience.com/microbit/?lang=fr&mode=mixed) or with http://2si.si.lycee.ecmorlaix.fr/Robotique/BBCmicrobit.html>> The A and B push-buttons will be used as inputs, and the output will be shown on the LED matrix, with one symbol for true and another for false.
Application exercise - Answer the following questions: To whom was Google paying tribute on 2 November 2015 by putting the doodle below on the home page of its search engine? Hint: clicking on the image opened [this page of search results...](https://www.google.fr/search?q=George+Boole&oi=ddle&ct=george-booles-200th-birthday-5636122663190528&hl=fr) What were the reasons for this tribute from Google?> .......................................................What is a Boolean variable?> .......................................................What are the main Boolean functions (operations)?> .......................................................By observing the animated GIF of the doodle above, explain the conditions for:- the G to light up in blue:> .......................................................- the second o to light up in yellow:> .......................................................- the l to light up in green:> .......................................................- the e to light up in red:> .......................................................- the first o to light up in red:> ....................................................... Describing a logic function (continued): Contact (ladder) diagram: This represents the logic function as a contact diagram from electrical technology. A normally open (NO) contact represents an input variable whose true state (=1) is required, a normally closed (NC) contact represents an input variable whose false state (=0) is required, and a load (coil or lamp) represents the output variable; the AND function is obtained by placing contacts in series, and the OR function by placing contacts in parallel. > *For our example of the **two-way-switch lighting system**:*>> Do it yourself - Draw a contact diagram for the two-way switch:>> > Logic diagram: A logic function can also be represented as a logic diagram from electronic (or pneumatic) technology. A logic function can be decomposed into basic functions (logic operators), for which two standardized graphical representations exist, one European and one American. The logic diagram is the graphical representation of a logic function obtained by combining the symbols of the logic operators that make it up. > *For our example of the **two-way-switch lighting system**:*>> Do it yourself - Complete the logic diagram of the two-way switch:> > >> Timing diagram: A timing diagram describes how a system behaves over time, so a logic function can also be represented by a timing diagram. It is a graph showing, as a function of time, the logic state of the outputs given the logic states taken by the inputs over time. > *For our example of the **two-way-switch lighting system**:*>> Do it yourself - Complete the timing diagram of the two-way switch:>> > Algorithm: An algorithm is a set of rigorous operating rules instructing a particular processor to execute, in a determined order, a finite number of elementary operations in order to fulfil a given function. An algorithm is implemented in a computer program. The algorithm is a general methodological tool and must not be confused with the program itself.
An algorithm can be represented: - either graphically, using a flowchart (also called an organigram or ordinogram), following a formal set of symbols and structure; - or literally, using an algorithmic language that follows a writing formalism (pseudo-code). Flowchart: This is a graphical representation of the algorithm using symbols defined by standards (NF Z 67-010 and ISO 5807). > *For our example of the **two-way-switch lighting system**:*>> Do it yourself - Complete the flowchart of the two-way-switch function:> > > Pseudo-code: Pseudo-code is a way of describing an algorithm in near-natural language, without reference to any particular programming language.> *For our example of the **two-way-switch lighting system**:*>> Do it yourself - Complete the pseudo-code of the two-way-switch function:> > ````pseudo-code> WHILE True> DO > READ a> READ b> IF ....................... = True> THEN> DO> L <- ....> ELSE> DO> L <- ....> END IF> WRITE L> END WHILE> ```` Python implementation: In Python, a Boolean variable is either `True` or `False`, and the Boolean operators are the keywords: - `and` for AND - `or` for OR - `not` for NOT.> *For our example of the **two-way-switch lighting system**:*>> Do it yourself - Complete the Python script of the two-way-switch function:>> Note: the `bool()` function returns `True` for any non-empty `input()` entry and `False` otherwise.
###Code
while True:
a = bool(input())
b = bool(input())
if not a and b or a and not b == True:
L = True
else:
L = False
print(L)
while True:
a = bool(input())
b = bool(input())
if ............................. == True:
L = .....
else:
L = .....
print(L)
###Output
_____no_output_____
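###Markdown
As a quick check of the truth table discussed above, a small sketch that evaluates the Boolean expression $L =\overline{a}\bullet b + a\bullet\overline{b}$ for every input combination; the helper name `va_et_vient` is only used for this check and is not part of the exercise.
###Code
from itertools import product

def va_et_vient(a, b):
    # Boolean expression of the two-way switch: L = (NOT a AND b) OR (a AND NOT b)
    return (not a and b) or (a and not b)

# Enumerate all combinations of the two inputs to reproduce the truth table
for a, b in product([False, True], repeat=2):
    print(f"a={a!s:5} b={b!s:5} -> L={va_et_vient(a, b)}")
###Output
_____no_output_____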
###Markdown
> We can see that the `input()` function, which returns a character string, is not well suited to handling Boolean inputs, even with the `bool()` type-conversion function. Moreover, using an infinite loop is not very practical...>> To remedy this, we can organize our code in a function VaEtVient(a, b) and use the ``ipywidgets`` module, which displays an interactive graphical interface in a Jupyter notebook:>> Do it yourself - Complete the Boolean expression of the VaEtVient(a, b) function in this new Python script:
###Code
# Import the interact() function from the ipywidgets module
from ipywidgets import interact
# Definition of the logic function to simulate
def VaEtVient(a, b):
    # Boolean expression of the function in Python
    L = ...........................
    # Interactive result to display
    return (print(f"If a = {a} and b = {b} then lamp L = {L}"))
# Call interact(), which calls the logic function to simulate
interact(VaEtVient, a = False, b = False)
# Import the interact() function from the ipywidgets module
from ipywidgets import interact
# Definition of the logic function to simulate
def VaEtVient(a, b):
    # Boolean expression of the function in Python
    L = not a and b or a and not b
    # Interactive result to display
    return (print(f"If a = {a} and b = {b} then lamp L = {L}"))
# Call the interact() function
interact(VaEtVient, a = False, b = False)
###Output
_____no_output_____ |
notebooks/quality_assessment/decoding.ipynb | ###Markdown
Quality Assessment - Decoding. Import modules
###Code
from __future__ import print_function
import numpy as np
import caiman as cm
import os
from ipywidgets import interact, interactive, fixed, interact_manual, FloatSlider,Dropdown, Checkbox, IntSlider
import ipywidgets as widgets
import warnings
warnings.filterwarnings('ignore', module = 'paramiko')
import logging
from IPython.display import display, Image
import matplotlib.pyplot as plt
%matplotlib notebook
import src.config
os.chdir(os.environ['PROJECT_DIR'])
import src.pipeline
from src.quality_assessment import *
import src.steps.decoding
import src.steps.cropping
step = 'decoding'
# The LOCAL environment variable must be set to a non-empty value when running on the local machine
if not bool(os.environ['LOCAL']):
    raise Exception('This notebook can only be used on the local machine!')
master_df = src.pipeline.open_master_file_list()
analysis_state_widgets = get_analysis_state_widgets(step, master_df)
analysis_state_widgets_vbox = VBox(list(analysis_state_widgets.values()))
###Output
_____no_output_____
###Markdown
Select data
###Code
analysis_state_widgets_vbox
###Output
_____no_output_____
###Markdown
Loading row, parameters and output
###Code
index = tuple([widget.value for widget in list(analysis_state_widgets.values())])
row = master_df.loc[index].iloc[0]
output = eval(row.loc[f'{step}_output'])
print('index: ',index)
print('output: ', output)
###Output
index: (56166, 2, 1, 0, 3, 0, 0, 0, 0, 0)
output: {'main': 'data/interim/decoding/main/mouse_56166_session_2_trial_1_v3.tif', 'meta': {'analysis': {'analyst': 'Casper', 'date': '06-09-2019', 'time': '13:42:20'}, 'metrics': {'session_wise': {'min_max_mean': 'data/interim/decoding/meta/metrics/trial_wise/min_mean_max/mouse_56166_session_2_trial_1_v3.pkl'}, 'min_max_mean': 'data/interim/decoding/meta/metrics/trial_wise/min_mean_max/mouse_56166_session_2_trial_1_v3.pkl'}}}
###Markdown
Movie: play the decoded movie. * Check for abnormalities (e.g. lens flare) * Determine where the spatial cropping points should be globally
###Code
m = cm.load(src.pipeline.get_file(output['main']))
print('movie shape: ', m.shape)
get_movie_player(m)
###Output
_____no_output_____
###Markdown
Session-wise: inspect the quality session-wise for a quick overview
###Code
#fig = src.steps.decoding.get_fig_session_wise_min_mean_max(src.pipeline.select('cropping',56166,2))
#get_save_fig_button(fig, f'data/interim/decoding/meta/figures/session_wise/min_mean_max/mouse_{mouse}_session_{session}.png')
###Output
_____no_output_____
###Markdown
Comments
Report your insights gained in quality assessment here.
###Code
comments = row.loc[f'{step}_comments']
comments_widget = widgets.Textarea(
value=comments if type(comments) == str else '',
placeholder='Enter comments here',
description='comments:'
)
comments_widget.layout.width = '20cm' ; comments_widget.layout.height = '5cm'
display(comments_widget)
save_comments_widget = widgets.Button(description = 'Save comments', icon = 'save') ; display(save_comments_widget)
def save_comments(click):
row.loc[f'{step}_comments'] = comments_widget.value
# Get a fresh copy of the master file list
master_df = src.pipeline.open_master_file_list()
# Append the newly created analysis state if the analysis state is
# new or merge it
master_df = src.pipeline.append_to_or_merge_with_master_file_list(master_df, row)
# Save the master file list
src.pipeline.save_master_file_list(master_df)
print('Saved comments')
save_comments_widget.on_click(save_comments)
###Output
_____no_output_____
###Markdown
Preview next step: Cropping
For cropping we have to set two parameters:
* Spatial cropping points: for cropping out absolutely unusable spatial regions, caused by e.g. the microendoscope or a blood clot. For session-wise alignment it is advised to be conservative.
* Temporal cropping points: for cropping out absolutely unusable temporal regions, caused by e.g. a lens flare.
Current cropping parameters
View the parameters that are currently set for the next step: cropping.
###Code
cropping_parameters = src.pipeline.get_parameters('cropping', mouse = mouse, session = session, trial = trial,
is_rest = is_rest)
print('parameters: ', cropping_parameters)
###Output
parameters: {'crop_spatial': True, 'cropping_points_spatial': [20, 410, 320, 640], 'crop_temporal': False, 'cropping_points_temporal': []}
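###Markdown
As a point of reference, the four spatial cropping points are interpreted as pixel boundaries of the retained region. The sketch below shows how such points could be applied to the movie array; the ordering convention used here ([x1, x2, y1, y2]) is an assumption and should be checked against `src.steps.cropping`.
###Code
# Hypothetical illustration of how the spatial cropping points could be applied.
# The ordering [x1, x2, y1, y2] is assumed; the real cropping step may differ.
x1, x2, y1, y2 = cropping_parameters['cropping_points_spatial']
m_cropped_preview = m[:, y1:y2, x1:x2]
print('original shape:', m.shape, '-> cropped shape:', m_cropped_preview.shape)
###Output
_____no_output_____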
###Markdown
Set cropping parameters
Iteratively try out different parameters and view a (partial) result of the next step: cropping. These parameters are updated with respect to the defaults and can optionally be stored in the master parameters file afterwards.
###Code
# enter your update here:
parameters_update = {}
cropping_parameters = src.pipeline.get_parameters('cropping', mouse = mouse, session = session, trial = trial,
is_rest = is_rest)
cropping_parameters.update(parameters_update)
print('parameters: ', cropping_parameters)
###Output
parameters: {'crop_spatial': True, 'cropping_points_spatial': [20, 410, 320, 640], 'crop_temporal': False, 'cropping_points_temporal': []}
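###Markdown
For illustration, a non-empty update could look like the sketch below. The values are only an example (they mirror the update shown in the stored-parameters prompt at the end of this notebook); re-run the cell above with such a dictionary to preview its effect.
###Code
# Example update (illustrative values): tighten the spatial cropping borders.
parameters_update = {'cropping_points_spatial': [80, 450, 210, 680]}
###Output
_____no_output_____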
###Markdown
Preview result
View a (partial) result of the next step: cropping.
Spatial cropping
We want to crop out absolutely unusable spatial regions, caused by e.g. the microendoscope or a blood clot. For session-wise alignment it is advised to be conservative.
Screenshot with borders
View a screenshot of the movie with a drawn box as an overlay representing the cropping borders.
###Code
fig = src.steps.cropping.make_figure_cropping_points(index, m[0], cropping_parameters['cropping_points_spatial'])
def save_fig(click):
fname = f'data/interim/cropping/meta/figures/frame_borders/{src.pipeline.create_file_name(1, index)}.png'
fig.savefig(fname)
print(f'Saving figure as {fname}')
save_fig_widget = widgets.Button(description = 'Save figure') ; display(save_fig_widget)
save_fig_widget.on_click(save_fig)
###Output
_____no_output_____
###Markdown
View movie with borders
View a movie with a drawn box as an overlay representing the cropping borders.
###Code
m_with_borders = src.quality_assessment.preview_parameters_cropping_cropping_points_spatial_get_movie_with_borders(m.copy(), cropping_parameters['cropping_points_spatial'])
get_movie_player(m_with_borders)
m_with_borders[:400].save('/home/sebastian/Desktop/test.avi')
###Output
_____no_output_____
###Markdown
Temporal cropping
We want to crop out absolutely unusable temporal regions, caused by e.g. a lens flare. Use the video players below to check the currently selected clips for cropping.
###Code
n = len(cropping_parameters['cropping_points_temporal'])
print(f'There are {n} cut-out clips')
for i, cropping_points_temporal in enumerate(cropping_parameters['cropping_points_temporal']):
preview_parameters_cropping_cropping_points_temporal_get_cutout_clip_player(m, i + 1, cropping_points_temporal)
###Output
There are 0 cut-out clips
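###Markdown
For reference, each temporal cropping entry is expected to be a (start, end) frame range to cut out. The sketch below shows the assumed shape of such an entry and how it could be previewed; the [start_frame, end_frame] convention is an assumption and the snippet does not modify the stored parameters.
###Code
# Hypothetical example of a temporal cropping entry (frame range to cut out).
example_cropping_points_temporal = [[1000, 1200]]
for i, (start, end) in enumerate(example_cropping_points_temporal):
    print(f'Clip {i + 1}: frames {start} to {end} would be cut out ({end - start} frames)')
###Output
_____no_output_____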
###Markdown
Store new parameters
If needed, store your newly set parameters for actual use in the next step: cropping.
###Code
preview_parameters_get_store_parameters_buttons('cropping', index, parameters_update)
###Output
Do you want to set the following new parameters:
{'cropping_points_spatial': [80, 450, 210, 680]}
specific for the following data?