CMIP5-MakeFigs.ipynb | ###Markdown
Re-create the IPCC AR5 Figure 12.5 from the CMIP5 ts data: [IPCC-AR5 Figure 12.5](http://www.climatechange2013.org/images/figures/WGI_AR5_Fig12-5.jpg)

Method: load the monthly CMIP5 ts (surface temperature) files, do some data cleaning and plot the figure.
* This notebook is what I use for general multi-model statistics, not just global means, so the models are regridded to a common 2x2 degree grid and the global mean is only computed when making the figure.
* The raw CMIP5 netcdf files were concatenated on our home machine using xarray.open_mfdataset for each model and scenario (historical/rcp45/rcp85),
* saved in zarr format using to_zarr,
* and uploaded to Google Cloud Storage.
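The zarr preprocessing described above is not part of this notebook; here is a minimal sketch of what it might have looked like, where the file pattern, model name and output path are hypothetical (the notebook later reads a `start_date` attribute from each store, so something like the attribute line below is assumed):

```python
import xarray as xr

# Concatenate the raw monthly netcdf files for one model and scenario along time
ds = xr.open_mfdataset('raw/MODEL/historical/ts_Amon_*.nc')

# Record the first time stamp so the notebook can rebuild its "months since 1960" axis
ds.attrs['start_date'] = str(ds.time.values[0])[:10]

# Write a zarr store; the stores were then uploaded to Google Cloud Storage
ds.to_zarr('CMIP5-ts/MODEL/historical')
```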
###Code
import xarray as xr
import numpy as np
%matplotlib inline
from glob import glob
from os import system
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import rcParams
from pathlib import Path
#import xesmf as xe
xr.set_options(enable_cftimeindex=True)
cloud = False
###Output
_____no_output_____
###Markdown
In this next cell are the basic functions (all in one cell so you can collapse it on first read)
###Code
if cloud:
def listd(fs,path):
return fs.ls(path)
def openzarr(path):
return xr.open_zarr(gcsfs.GCSMap(path))
else:
def listd(fs,path):
return glob(path+'/*/')
def openzarr(path):
return xr.open_zarr(path)
def find_models(data_fs,base_path,var,scenario):
"""
Search for all files in path matching the variable and scenario
returns:
models: list of model names for given scenario
"""
allmodels = listd(data_fs,base_path)
fmodels = []
for model in allmodels:
run = listd(data_fs,model)
for sce in run:
if scenario in sce:
model = sce.split("/")[-3]
fmodels += [model]
umodels = sorted(fmodels)
paths = []
for model in umodels:
path = base_path + model + '/' + scenario
paths += [path]
return umodels, paths
def get_datasets(var, umodels, paths, toprint=True):
"""
Load all datasets
returns:
ds : list of all models for given scenario
"""
ds = []
for idx, model in enumerate(umodels):
path = paths[idx]
dss = openzarr(path)
start_date = dss.attrs['start_date']
nt = dss.time.shape[0]
dss['time'] = to_enso(start_date,nt)
ds += [dss[var]]
if toprint:
fstr = '{:2g}: {:18} , {:12} , nt={:5g},{:5.0f}Mb'
print(fstr.format(idx,model,start_date,nt,dss.nbytes/ 1e6))
return ds
def find_short(ds, century, umodels, toprint=True):
"""
Identify models which are useful, finding those which do not span the interval
returns:
bad_models: list of the bad models
"""
slist = century.split('-')
[start_year, stop_year] = list(map(int, slist))
bad_models =[]
for idx, dss in enumerate(ds):
model = umodels[idx]
tfirst = enso2date(dss.time[0].values)
tlast = enso2date(dss.time[-1].values)
if (int(str(tfirst)[0:4]) > start_year):
print('trouble with model',model,'since start date is past',start_year)
bad_models += [model]
if (int(str(tlast)[0:4]) < stop_year):
print('trouble with model',model,'since stop date is before',stop_year)
bad_models += [model]
if toprint:
fstr = '{:2g}: {:18} , {:12} to {:12}'
print(fstr.format(idx,model,tfirst,tlast,))
return bad_models
def regrid_all(ds,umodels):
"""
Define common grid and use xESMF to regrid all datasets
returns:
data_2x2: a list of datasets on the common grid
"""
# regrid all lon,lat data to a common 2x2 grid
import xesmf as xe
ds_out = xr.Dataset({'lat': (['lat'], np.arange(-89,89, 2)),
'lon': (['lon'], np.arange(-179,179,2)),
})
data_2x2 =[]
for model,dss in zip(umodels,ds):
#print(model,'nt=',dss.time.shape[0])
regridder = xe.Regridder(dss, ds_out, 'bilinear', periodic=True, reuse_weights=True )
data_2x2 += [regridder(dss)]
return data_2x2
def concat_all(ds_2x2,umodels):
"""
Concatenates all of the good models into one DataArray
"""
dsall = xr.concat(ds_2x2,dim='model') #,coords=['time','lat','lon'])
dsall['names'] = ('model',umodels)
return dsall
def compute_global_mean(ds):
"""
Weights each grid point by the cos(latitude), computes global mean, normalizing by global mean of the weights
returns:
list of DataArrays: global mean model by model
"""
coslat = np.cos(np.deg2rad(ds.lat))
d_ones = xr.ones_like(ds)
weight_mean = (d_ones*coslat).mean(['lat','lon'])
ds_globalmean = ((ds * coslat).mean(['lat','lon'])/weight_mean).compute()
return ds_globalmean
# N.B. Once cftime is working properly the following functions could be replaced
# (we need to be able to use resample ...)
def monthly2yearly(century,ds):
"""
converts a DataArray on a monthly grid to one on a yearly grid, replacing the time grid
returns:
list of DataArrays: yearly mean model by model
"""
slist = century.split('-')
[start_year, stop_year] = list(map(int, slist))
start = to_enso(str(start_year)+'-01-16')[0]
stop = to_enso(str(stop_year)+'-12-16')[0]
ds_yearly=[]
for idx, dss in enumerate(ds):
print('year:',idx)
dss = dss.sel(time=slice(start, stop))
num_of_bins = dss.time.shape[0]/12
dnew = dss.groupby_bins('time', num_of_bins).mean('time').compute()
dyearly = dnew.rename({'time_bins':'time'})
dyearly['time'] = start_year + np.arange(dyearly.time.shape[0])
ds_yearly += [dyearly]
return ds_yearly
def to_enso(start_time,nt=1):
"""
Parse the time grid of a Dataset and replace by an enso time grid (months since 1960).
"""
import numpy as np
# get the reference year from start_time
ryear,rmonth,rday = start_time[0:10].split('-')
return (int(ryear)-1960)*12 + int(rmonth) - 0.5 + np.arange(0,nt)
def enso2date(T0,ryear=1960,leap=True):
"""
Print the date corresponding to an enso-time (months since 1960). The reference year can be changed.
"""
norm = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
iy = ryear + int(T0/12)
if T0 < 0:
iy = iy - 1
res = T0 - (iy - ryear)*12
im = int(res) + 1
if im == 13:
im = 1
iy = iy + 1
if leap & (im == 2) & (iy % 4 == 0 ):
id = 1 + int(29 * (res - int(res)))
else:
id = 1 + int(norm[im-1] * (res - int(res)))
return str(iy)+'/'+str(im)+'/'+str(id)
###Output
_____no_output_____
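###Markdown
A quick sanity check of the enso time helpers defined above (this cell is an addition for illustration and was not part of the original run): `to_enso` maps a date string to fractional months since 1960, and `enso2date` maps back.
###Code
# Round trip through the enso time grid (illustrative only)
assert to_enso('1960-01-16')[0] == 0.5   # mid-January 1960 -> 0.5 months since 1960
assert enso2date(0.5) == '1960/1/16'     # and back to a date string
###Output
_____no_output_____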
###Markdown
Connect to Dask Distributed Cluster when done debugging
###Code
#from dask.distributed import Client, progress
#from dask_kubernetes import KubeCluster
#cluster = KubeCluster(n_workers=10)
#cluster
#client = Client(cluster)
#client
###Output
_____no_output_____
###Markdown
Let's get started. We need to specify where our data lives: in the Google Cloud file system, or on local disk when `cloud = False`.
###Code
if cloud:
import gcsfs
base_path = 'pangeo-data/CMIP5-ts/'
data_fs = gcsfs.GCSFileSystem(project='pangeo-181919', token='anon', access='read_only')
else:
base_path = '/d1/nhn2/zarr/CMIP5-ts/'
data_fs = ''
###Output
_____no_output_____
###Markdown
For each scenario and time interval:
* make a list of available models
* load the datasets for all models
* eliminate the models which do not have the full time interval
* calculate the annual means
* regrid to a global 2x2 degree grid (until we have xesmf, just calculate the global mean)
* concatenate the models
###Code
%%time
var = 'ts'
recompute_all = True
plot_global = True
plot_nino34 = False
save_netcdf = True
if recompute_all:
# Each of the following scenarios and time periods has a different subset of CMIP5 models available
# These subsets are different for each variable chosen
# YOU CAN PICK AND CHOOSE WHICH TO CALCULATE
all = []
all += [['historical','1861-2005']]
all += [['historical','1850-1860']]
all += [['rcp45','2100-2300']]
all += [['rcp45','2006-2099']]
all += [['rcp85','2006-2099']]
all += [['rcp85','2100-2300']]
ds_master = [] # list of datasets for each scenario,century
scenario_last = ''
for scenario,century in all:
print('SCENARIO=',scenario,'TIME RANGE',century)
if scenario in scenario_last:
print('\n same scenario, re-use ds \n')
ds = ds2; models = models2
else:
models, paths = find_models(data_fs,base_path,var,scenario)
print('total number of models available:',len(models),'\n',models)
ds = get_datasets(var, models, paths, toprint=False)
ds2 = ds.copy(); models2 = models.copy()
scenario_last = scenario
bad_models = find_short(ds, century, models, toprint=False)
bad_models = sorted(list(set(bad_models)))
for model in bad_models:
idx = models.index(model)
del models[idx],ds[idx]
print('\n number of good models with data in the specified time range:',len(models),'\n',models)
print('\n calculating annual means')
ds_yearly = monthly2yearly(century,ds)
print('\n regridding to 2x2 grid')
ds_temp = regrid_all(ds_yearly,models)
print('\n concatenating time series')
dsall = concat_all(ds_temp,models)
sctype = scenario+':'+century
if save_netcdf:
dsall.to_netcdf('ts-'+sctype+'.nc',encoding={'time':{'dtype':'float32'},'lon':{'dtype':'float32'},'lat':{'dtype':'float32'}})
dsall.attrs = [('sctype',sctype)]
ds_master += [dsall.to_dataset(name=var)]
###Output
SCENARIO= historical TIME RANGE 1861-2005
total number of models available: 49
['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 'CESM1-CAM5', 'CESM1-CAM5-1-FV2', 'CESM1-FASTCHEM', 'CESM1-WACCM', 'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5-2', 'CSIRO-Mk3-6-0', 'CSIRO-Mk3L-1-2', 'CanCM4', 'CanESM2', 'FGOALS-g2', 'FGOALS-s2', 'FIO-ESM', 'GFDL-CM2p1', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC4h', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MPI-ESM-P', 'MRI-CGCM3', 'MRI-ESM1', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4']
trouble with model CanCM4 since start date is past 1861
trouble with model MIROC4h since start date is past 1861
number of good models with data in the specified time range: 47
['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 'CESM1-CAM5', 'CESM1-CAM5-1-FV2', 'CESM1-FASTCHEM', 'CESM1-WACCM', 'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5-2', 'CSIRO-Mk3-6-0', 'CSIRO-Mk3L-1-2', 'CanESM2', 'FGOALS-g2', 'FGOALS-s2', 'FIO-ESM', 'GFDL-CM2p1', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MPI-ESM-P', 'MRI-CGCM3', 'MRI-ESM1', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4']
calculating annual means
year: 0
year: 1
year: 2
year: 3
year: 4
year: 5
year: 6
year: 7
year: 8
year: 9
year: 10
year: 11
year: 12
year: 13
year: 14
year: 15
year: 16
year: 17
year: 18
year: 19
year: 20
year: 21
year: 22
year: 23
year: 24
year: 25
year: 26
year: 27
year: 28
year: 29
year: 30
year: 31
year: 32
year: 33
year: 34
year: 35
year: 36
year: 37
year: 38
year: 39
year: 40
year: 41
year: 42
year: 43
year: 44
year: 45
year: 46
regridding to 2x2 grid
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_48x96_89x179_peri.nc
Reuse existing file: bilinear_240x480_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_56x64_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_60x128_89x179_peri.nc
Reuse existing file: bilinear_108x128_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_73x96_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_143x144_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_120x180_89x179_peri.nc
concatenating time series
SCENARIO= historical TIME RANGE 1850-1860
same scenario, re-use ds
trouble with model CSIRO-Mk3L-1-2 since start date is past 1850
trouble with model CanCM4 since start date is past 1850
trouble with model GFDL-CM2p1 since start date is past 1850
trouble with model GFDL-CM3 since start date is past 1850
trouble with model GFDL-ESM2G since start date is past 1850
trouble with model GFDL-ESM2M since start date is past 1850
trouble with model HadCM3 since start date is past 1850
trouble with model HadGEM2-AO since start date is past 1850
trouble with model HadGEM2-CC since start date is past 1850
trouble with model HadGEM2-ES since start date is past 1850
trouble with model MIROC4h since start date is past 1850
trouble with model MRI-ESM1 since start date is past 1850
number of good models with data in the specified time range: 37
['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 'CESM1-CAM5', 'CESM1-CAM5-1-FV2', 'CESM1-FASTCHEM', 'CESM1-WACCM', 'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5-2', 'CSIRO-Mk3-6-0', 'CanESM2', 'FGOALS-g2', 'FGOALS-s2', 'FIO-ESM', 'GISS-E2-H', 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MPI-ESM-P', 'MRI-CGCM3', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4']
calculating annual means
year: 0
year: 1
year: 2
year: 3
year: 4
year: 5
year: 6
year: 7
year: 8
year: 9
year: 10
year: 11
year: 12
year: 13
year: 14
year: 15
year: 16
year: 17
year: 18
year: 19
year: 20
year: 21
year: 22
year: 23
year: 24
year: 25
year: 26
year: 27
year: 28
year: 29
year: 30
year: 31
year: 32
year: 33
year: 34
year: 35
year: 36
regridding to 2x2 grid
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_48x96_89x179_peri.nc
Reuse existing file: bilinear_240x480_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_60x128_89x179_peri.nc
Reuse existing file: bilinear_108x128_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_143x144_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_120x180_89x179_peri.nc
concatenating time series
SCENARIO= rcp45 TIME RANGE 2100-2300
total number of models available: 43
['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 'CESM1-CAM5', 'CESM1-CAM5-1-FV2', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'CSIRO-Mk3L-1-2', 'CanCM4', 'CanESM2', 'FGOALS-g2', 'FIO-ESM', 'GFDL-CM2p1', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC4h', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4']
trouble with model ACCESS1-0 since stop date is before 2300
trouble with model ACCESS1-3 since stop date is before 2300
trouble with model BNU-ESM since stop date is before 2300
trouble with model CCSM4 since stop date is before 2300
trouble with model CESM1-BGC since stop date is before 2300
trouble with model CESM1-CAM5-1-FV2 since stop date is before 2300
trouble with model CMCC-CM since stop date is before 2300
trouble with model CMCC-CMS since stop date is before 2300
trouble with model CSIRO-Mk3-6-0 since stop date is before 2300
trouble with model CanCM4 since stop date is before 2300
trouble with model FGOALS-g2 since stop date is before 2300
trouble with model FIO-ESM since stop date is before 2300
trouble with model GFDL-CM2p1 since stop date is before 2300
trouble with model GFDL-CM3 since stop date is before 2300
trouble with model GFDL-ESM2G since stop date is before 2300
trouble with model GFDL-ESM2M since stop date is before 2300
trouble with model GISS-E2-H-CC since stop date is before 2300
trouble with model GISS-E2-R-CC since stop date is before 2300
trouble with model HadCM3 since stop date is before 2300
trouble with model HadGEM2-AO since stop date is before 2300
trouble with model HadGEM2-CC since stop date is before 2300
trouble with model HadGEM2-ES since stop date is before 2300
trouble with model IPSL-CM5A-MR since stop date is before 2300
trouble with model IPSL-CM5B-LR since stop date is before 2300
trouble with model MIROC-ESM since stop date is before 2300
trouble with model MIROC-ESM-CHEM since stop date is before 2300
trouble with model MIROC4h since stop date is before 2300
trouble with model MIROC5 since stop date is before 2300
trouble with model MPI-ESM-MR since stop date is before 2300
trouble with model MRI-CGCM3 since stop date is before 2300
trouble with model NorESM1-ME since stop date is before 2300
trouble with model bcc-csm1-1 since stop date is before 2300
trouble with model bcc-csm1-1-m since stop date is before 2300
trouble with model inmcm4 since stop date is before 2300
number of good models with data in the specified time range: 9
['CESM1-CAM5', 'CNRM-CM5', 'CSIRO-Mk3L-1-2', 'CanESM2', 'GISS-E2-H', 'GISS-E2-R', 'IPSL-CM5A-LR', 'MPI-ESM-LR', 'NorESM1-M']
calculating annual means
year: 0
year: 1
year: 2
year: 3
year: 4
year: 5
year: 6
year: 7
year: 8
regridding to 2x2 grid
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_56x64_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
concatenating time series
SCENARIO= rcp45 TIME RANGE 2006-2099
same scenario, re-use ds
trouble with model CanCM4 since stop date is before 2099
trouble with model GFDL-CM2p1 since stop date is before 2099
trouble with model HadCM3 since stop date is before 2099
trouble with model MIROC4h since stop date is before 2099
number of good models with data in the specified time range: 39
['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 'CESM1-CAM5', 'CESM1-CAM5-1-FV2', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'CSIRO-Mk3L-1-2', 'CanESM2', 'FGOALS-g2', 'FIO-ESM', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4']
calculating annual means
year: 0
year: 1
year: 2
year: 3
year: 4
year: 5
year: 6
year: 7
year: 8
year: 9
year: 10
year: 11
year: 12
year: 13
year: 14
year: 15
year: 16
year: 17
year: 18
year: 19
year: 20
year: 21
year: 22
year: 23
year: 24
year: 25
year: 26
year: 27
year: 28
year: 29
year: 30
year: 31
year: 32
year: 33
year: 34
year: 35
year: 36
year: 37
year: 38
regridding to 2x2 grid
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_240x480_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_56x64_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_60x128_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_143x144_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_120x180_89x179_peri.nc
concatenating time series
SCENARIO= rcp85 TIME RANGE 2006-2099
total number of models available: 40
['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 'CESM1-CAM5', 'CESM1-CAM5-1-FV2', 'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'CanESM2', 'FGOALS-g2', 'FGOALS-s2', 'FIO-ESM', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'MRI-ESM1', 'NorESM1-M', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4']
number of good models with data in the specified time range: 40
['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 'CESM1-CAM5', 'CESM1-CAM5-1-FV2', 'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'CanESM2', 'FGOALS-g2', 'FGOALS-s2', 'FIO-ESM', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'MRI-ESM1', 'NorESM1-M', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4']
calculating annual means
year: 0
year: 1
year: 2
year: 3
year: 4
year: 5
year: 6
year: 7
year: 8
year: 9
year: 10
year: 11
year: 12
year: 13
year: 14
year: 15
year: 16
year: 17
year: 18
year: 19
year: 20
year: 21
year: 22
year: 23
year: 24
year: 25
year: 26
year: 27
year: 28
year: 29
year: 30
year: 31
year: 32
year: 33
year: 34
year: 35
year: 36
year: 37
year: 38
year: 39
regridding to 2x2 grid
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_192x288_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_48x96_89x179_peri.nc
Reuse existing file: bilinear_240x480_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_60x128_89x179_peri.nc
Reuse existing file: bilinear_108x128_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_143x144_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_96x144_89x179_peri.nc
Reuse existing file: bilinear_64x128_89x179_peri.nc
Reuse existing file: bilinear_160x320_89x179_peri.nc
Reuse existing file: bilinear_120x180_89x179_peri.nc
concatenating time series
SCENARIO= rcp85 TIME RANGE 2100-2300
same scenario, re-use ds
trouble with model ACCESS1-0 since stop date is before 2300
trouble with model ACCESS1-3 since stop date is before 2300
trouble with model BNU-ESM since stop date is before 2300
trouble with model CCSM4 since stop date is before 2300
trouble with model CESM1-BGC since stop date is before 2300
trouble with model CESM1-CAM5 since stop date is before 2300
trouble with model CESM1-CAM5-1-FV2 since stop date is before 2300
trouble with model CMCC-CESM since stop date is before 2300
trouble with model CMCC-CM since stop date is before 2300
trouble with model CMCC-CMS since stop date is before 2300
trouble with model CanESM2 since stop date is before 2300
trouble with model FGOALS-g2 since stop date is before 2300
trouble with model FGOALS-s2 since stop date is before 2300
trouble with model FIO-ESM since stop date is before 2300
trouble with model GFDL-CM3 since stop date is before 2300
trouble with model GFDL-ESM2G since stop date is before 2300
trouble with model GFDL-ESM2M since stop date is before 2300
trouble with model GISS-E2-H-CC since stop date is before 2300
trouble with model GISS-E2-R-CC since stop date is before 2300
trouble with model HadGEM2-AO since stop date is before 2300
trouble with model HadGEM2-CC since stop date is before 2300
trouble with model IPSL-CM5A-MR since stop date is before 2300
trouble with model IPSL-CM5B-LR since stop date is before 2300
trouble with model MIROC-ESM since stop date is before 2300
trouble with model MIROC-ESM-CHEM since stop date is before 2300
trouble with model MIROC5 since stop date is before 2300
trouble with model MPI-ESM-MR since stop date is before 2300
trouble with model MRI-CGCM3 since stop date is before 2300
trouble with model MRI-ESM1 since stop date is before 2300
trouble with model NorESM1-M since stop date is before 2300
trouble with model bcc-csm1-1 since stop date is before 2300
trouble with model bcc-csm1-1-m since stop date is before 2300
trouble with model inmcm4 since stop date is before 2300
number of good models with data in the specified time range: 7
['CNRM-CM5', 'CSIRO-Mk3-6-0', 'GISS-E2-H', 'GISS-E2-R', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'MPI-ESM-LR']
calculating annual means
year: 0
year: 1
year: 2
year: 3
year: 4
year: 5
year: 6
regridding to 2x2 grid
Reuse existing file: bilinear_128x256_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_90x144_89x179_peri.nc
Reuse existing file: bilinear_145x192_89x179_peri.nc
Reuse existing file: bilinear_96x96_89x179_peri.nc
Reuse existing file: bilinear_96x192_89x179_peri.nc
concatenating time series
CPU times: user 11min 14s, sys: 24min 53s, total: 36min 8s
Wall time: 2min 46s
###Markdown
Plot figure
###Code
if plot_global:
l = ['historical:1850-1860','historical:1861-2005','rcp45:2006-2099','rcp45:2100-2300','rcp85:2006-2099','rcp85:2100-2300']
sctypelist = l; markerlist = l.copy(); colorlist = l.copy(); tnamelist = l.copy(); alphalist = l.copy()
for idx,sctype in enumerate(sctypelist):
markerlist[idx] = '-'; alphalist[idx] = 0.6
if 'hist' in sctype:
colorlist[idx] = 'black'; alphalist[idx] = 0.45; tnamelist[idx] = 'Historical'
if '1850-1860' in sctype:
markerlist[idx] = '-.'
if 'rcp45' in sctype:
colorlist[idx] = '#99CCFF'; tnamelist[idx] = 'RCP4.5'
if 'rcp85' in sctype:
colorlist[idx] = '#CC3333'; tnamelist[idx] = 'RCP8.5'
if '2100-2300' in sctype:
markerlist[idx] = '-.'
# find the climatology dataset:
for ds in ds_master:
if 'historical:1861-2005' in ds[var].attrs['sctype']:
hvar = compute_global_mean(ds).compute()
hmodels = hvar.names.values.tolist()
# for each model, compute the time mean from the climatology interval: 1986-2005
tgm = hvar.sel(time=slice(1986,2005)).mean('time').load()
# calculate the model mean tgm, for use in models which do not have a climatological reference run
tgm0 = tgm[var].mean('model')
plt.figure(figsize=(10,6))
rcParams.update({'font.size': 16})
for idx,sctype in enumerate(sctypelist):
tname = tnamelist[idx];marker=markerlist[idx];color=colorlist[idx];alpha=alphalist[idx]
data_exists = False
for ds in ds_master:
if sctype in ds[var].attrs['sctype']:
tvar = compute_global_mean(ds).load()
data_exists = True
if data_exists:
year = tvar.time.values
#find the climatology for each models in this scenario:century
tclimo = 0*tvar[var].mean('time')
for idx,model in enumerate(tvar.names.values):
if model in hmodels:
hidx = hmodels.index(model)
tclimo[idx] = tgm[var][hidx]
else:
#print('using model mean climo data:',model)
tclimo[idx] = tgm0
num_models = tvar[var].shape[0]
range5 = 1.64*(tvar - tclimo)[var].std('model') # use std = 1.64 to give 95% and 5% of values
tvar_mean = (tvar - tclimo)[var].mean('model')
tvar_95 = tvar_mean + range5
tvar_05 = tvar_mean - range5
label = tname+' ('+str(num_models)+' models)'
plt.plot(year, tvar_mean, marker, color=color, label=label)
plt.fill_between(year, tvar_05, tvar_95, color=color, alpha=alpha)
plt.plot((1861, 1861), (-2, 6.2), 'k-', linewidth=1.5, alpha=0.75)
plt.plot((2006, 2006), (-2, 6.2), 'k-', linewidth=1.5, alpha=0.75)
plt.plot((2100, 2100), (-2, 12), 'k-', linewidth=1.5, alpha=0.75)
plt.plot((2200, 2200), (-2, 12), 'k-', linewidth=1.5, alpha=0.75)
plt.ylim(-2,12)
plt.xlim(1850,2300)
vtitle = r'CMIP5 global surface air temperature change $^\degree C$'
if var == 'ts':
vtitle = r'CMIP5 global surface temperature change $^\degree C$'
plt.title(vtitle,fontsize=16)
plt.legend(loc='upper left',fontsize='small')
figfile = 'global_' + var + '.png'
#plt.savefig(figfile)
###Output
_____no_output_____
Kernel SHAP vs LIME.ipynb | ###Markdown
Random draws from 8 Gaussian random variables
###Code
np.random.seed(2)  # the original `np.random.seed=2` overwrote the function instead of seeding the RNG
X = np.random.randn(8, 1000)
###Output
_____no_output_____
###Markdown
Generating the target from a linear model of the drawn samples (true coefficients 1, 2, -3, -4, 5, 6, 7, 10)
###Code
Y=1*X[0,] + 2*X[1,] - 3*X[2,] - 4*X[3,] + 5*X[4,] + 6*X[5,] + 7*X[6,] + 10 * X[7,]
plt.plot(X[7,],Y,'o')
###Output
_____no_output_____
###Markdown
Splitting the data into train, dev and test sets
###Code
# scikit-learn imports, presumably defined in an omitted earlier cell of this notebook
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

sc = StandardScaler()
X_std=sc.fit_transform(X.T)
df_X_std=pd.DataFrame(data=X_std)
test_size=0.30
rand_state=11
q0=-100
q1=np.percentile(Y,25.0)
q2=np.percentile(Y,50.0)
q3=np.percentile(Y,75.0)
q4=100
bins=[q0,q1,q2,q3,q4]
y_binned = np.digitize(Y, bins=bins,right=True)
X_train, X_dev_test, y_train, y_dev_test = train_test_split(df_X_std,Y,stratify=y_binned,
test_size=test_size, shuffle=True,random_state=rand_state)
X_dev, X_test, y_dev, y_test = train_test_split(X_dev_test,y_dev_test,
test_size=0.5, shuffle=False,random_state=rand_state)
###Output
_____no_output_____
###Markdown
Deep model
###Code
# Keras imports, presumably defined in an omitted earlier cell (standalone Keras API)
from keras import backend as K
from keras import optimizers
from keras.layers import Input, Dense, Dropout
from keras.initializers import glorot_uniform
from keras.models import Model

def coeff_determination(y_true, y_pred):
SS_res = K.sum(K.square( y_true-y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
def model(input_shape):
X_input=Input(shape=input_shape)
X=Dense(64,name='layer_1',activation='tanh',kernel_initializer = glorot_uniform(seed=0))(X_input)
X=Dropout(0.25)(X)
X=Dense(64,name='layer_2',activation='tanh',kernel_initializer = glorot_uniform(seed=0))(X)
X=Dropout(0.25)(X)
# X=Dense(64,name='layer_3',activation='tanh',kernel_initializer = glorot_uniform(seed=0))(X)
# X=Dropout(0.25)(X)
# X=Dense(64,name='layer_4',activation='tanh',kernel_initializer = glorot_uniform(seed=0))(X)
# X=Dropout(0.25)(X)
X=Dense(1 ,name='output',activation='linear')(X)
model = Model(inputs = X_input, outputs = X, name='PhaseModel')
model.summary()
return model
model1=model((X_std.shape[1],))
Adam=optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model1.compile(optimizer=Adam,
loss='mean_squared_error',
metrics=[coeff_determination])
###Output
_____no_output_____
###Markdown
Training
###Code
R2_train=[]
R2_dev=[]
R2_test=[]
for i in range(300):
model1.fit(X_train, y_train,
batch_size=1000,
epochs=50,
validation_data=(X_dev, y_dev),
shuffle=False,
verbose=0)
R2_train.append(model1.evaluate(X_train,y_train)[1])
R2_dev.append(model1.evaluate(X_dev,y_dev)[1])
R2_test.append(model1.evaluate(X_test,y_test)[1])
###Output
_____no_output_____
###Markdown
Results $R^2$
###Code
n_epochs=np.arange(len(R2_train))
plt.figure(figsize=(18,12))
plt.plot(n_epochs,R2_train,'--',label='Train')
plt.plot(n_epochs[-1],R2_dev[-1],'o',markersize=12,label='Dev')
plt.plot(n_epochs[-1],R2_test[-1],'x',markersize=12,label='Test')
#plt.ylim((0.5,1))
plt.ylabel(r'$R^2$')
plt.xlabel('#Epochs')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Predicted vs true values
###Code
y_pred_test=model1.predict(X_test)
plt.plot(y_pred_test, y_test, 'o')
plt.xlabel(r'$Y_{predicted}$', fontsize=12)  # labels fixed: the x-axis holds the predictions
plt.ylabel(r'$Y_{true}$', fontsize=12)
plt.show()
###Output
_____no_output_____
###Markdown
Feature importance SHAP explainer
###Code
import shap  # presumably imported in an omitted earlier cell

# background = shap.kmeans(X_train, 10)  # a summarized background would make KernelExplainer faster
background = X_train
explainer = shap.KernelExplainer(model1.predict, background)
shap_values = explainer.shap_values(X_test.iloc[0:50])
df_shap_values=pd.DataFrame(data=shap_values[0],
columns=['var_'+str(i) for i in range(8)],
index=['sample_'+str(i) for i in range(shap_values[0].shape[0])])
###Output
_____no_output_____
###Markdown
Local SHAP values (for a single instance)
###Code
idx=7 ## Index of the desired sample instance
local_shap_vs_true_coef=pd.DataFrame(index=df_shap_values.columns)
local_shap_vs_true_coef['SHAP']=df_shap_values.iloc[idx].values/df_shap_values.iloc[idx,0]
local_shap_vs_true_coef['True coef']=[1,2,-3,-4,5,6,7,10]
local_shap_vs_true_coef.plot(kind='bar',figsize=(18,12))
plt.show()
###Output
_____no_output_____
###Markdown
Global SHAP (averaged over X_test)
###Code
Global_shap_vs_true_coef=pd.DataFrame(index=df_shap_values.columns)
Global_shap_vs_true_coef['SHAP']=df_shap_values.mean(axis=0)
Global_shap_vs_true_coef['SHAP']=Global_shap_vs_true_coef['SHAP']/Global_shap_vs_true_coef['SHAP'].iloc[0]
Global_shap_vs_true_coef['True coef']=[1,2,-3,-4,5,6,7,10]
Global_shap_vs_true_coef.plot(kind='bar',figsize=(18,12))
###Output
_____no_output_____
###Markdown
Global Absolute SHAP (averaged over X_test)
###Code
Global_abs_shap_vs_true_coef=pd.DataFrame(index=df_shap_values.columns)
Global_abs_shap_vs_true_coef['SHAP']=np.abs(df_shap_values).mean(axis=0)
Global_abs_shap_vs_true_coef['SHAP']=Global_abs_shap_vs_true_coef['SHAP']/Global_abs_shap_vs_true_coef['SHAP'].iloc[0]
Global_abs_shap_vs_true_coef['True coef']=[1,2,-3,-4,5,6,7,10]
Global_abs_shap_vs_true_coef.plot(kind='bar',figsize=(18,12))
###Output
_____no_output_____
###Markdown
Deep SHAP
###Code
Deep_background=X_train
Deep_exp=shap.DeepExplainer(model1, Deep_background)
Deep_shap_values = Deep_exp.shap_values(X_test.values)
df_Deep_shap_values=pd.DataFrame(data=Deep_shap_values[0],
columns=X_test.columns,
index=X_test.index)
###Output
_____no_output_____
###Markdown
local Deep SHAP
###Code
idx=7 ## Index of the desired sample instance
local_shap_vs_true_coef=pd.DataFrame(index=df_Deep_shap_values.columns)
local_shap_vs_true_coef['Deep SHAP']=df_Deep_shap_values.iloc[idx].values/df_Deep_shap_values.iloc[idx,0]
local_shap_vs_true_coef['True coef']=[1,2,-3,-4,5,6,7,10]
local_shap_vs_true_coef.plot(kind='bar',figsize=(18,12))
plt.show()
###Output
_____no_output_____
###Markdown
GLobal Deep SHAP
###Code
Global_shap_vs_true_coef=pd.DataFrame(index=df_Deep_shap_values.columns)
Global_shap_vs_true_coef['Deep SHAP']=df_Deep_shap_values.mean(axis=0)
Global_shap_vs_true_coef['Deep SHAP']=Global_shap_vs_true_coef['Deep SHAP']/Global_shap_vs_true_coef['Deep SHAP'].iloc[0]
Global_shap_vs_true_coef['True coef']=[1,2,-3,-4,5,6,7,10]
Global_shap_vs_true_coef.plot(kind='bar',figsize=(18,12))
###Output
_____no_output_____
###Markdown
Global absolute Deep SHAP
###Code
Global_abs_shap_vs_true_coef=pd.DataFrame(index=df_Deep_shap_values.columns)
Global_abs_shap_vs_true_coef['Deep SHAP']=np.abs(df_Deep_shap_values).mean(axis=0)
Global_abs_shap_vs_true_coef['Deep SHAP']=Global_abs_shap_vs_true_coef['Deep SHAP']/Global_abs_shap_vs_true_coef['Deep SHAP'].iloc[0]
Global_abs_shap_vs_true_coef['True coef']=[1,2,-3,-4,5,6,7,10]
Global_abs_shap_vs_true_coef.plot(kind='bar',figsize=(18,12))
# def lime_pred(x,model=model1):
# out=model.predict(x)
# return out.squeeze()
# lime_explainer = lime.lime_tabular.LimeTabularExplainer(X_train, feature_names=list(df_X_std.columns),
# #class_names=y_train,
# verbose=False,
# mode='regression',
# discretize_continuous=False)
# y_true=[]
# y_lime=[]
# for i in range(0,Y.shape[0],10):
# lime_exp = lime_explainer.explain_instance(df_X_std.iloc[i].values, lime_pred)
# y_true.append(Y[i])
# y_lime.append(lime_exp.local_pred[0])
# x_dummy=np.arange(-50,50,0.1)
# plt.figure(figsize=(18,12))
# plt.plot(y_true,y_lime,'o',alpha=0.5)
# plt.plot(x_dummy,x_dummy,'--')
# plt.ylabel(r'$Y_{LIME}$')
# plt.xlabel(r'$Y_{True}$')
# plt.show()
###Output
_____no_output_____
###Markdown
Local LIME values (local:for a single data instance)
###Code
# exp = lime_explainer.explain_instance(X_test.iloc[3,:], lime_pred)
# exp.show_in_notebook(show_table=True)
# lime_vals=[val[1] for val in exp.as_list()]
# lime_vals.reverse()
# lime_vs_true_coef=pd.DataFrame(index=df_shap_values.columns)
# lime_vs_true_coef['LIME']=lime_vals
# lime_vs_true_coef['True coef']=[1,2,-3,-4,5,6,7,10]
# lime_vs_true_coef.plot(kind='bar',figsize=(18,12))
###Output
_____no_output_____
###Markdown
SKATER
###Code
# skater_pred_model = InMemoryModel(model1.predict)
# interpreter = Interpretation()
# interpreter.load_data(df_X_std)
# imp=interpreter.feature_importance.feature_importance(skater_pred_model)
# plt.bar(n,imp/imp[0])
###Output
_____no_output_____
tests/011_pandapower_speed_test/template.ipynb | ###Markdown
Create the connection mapping
###Code
connections = pandas.DataFrame(columns=['fmu1_id', 'fmu1_path',
'fmu2_id', 'fmu2_path',
'fmu1_parameters',
'fmu2_parameters',
'fmu1_output',
'fmu2_input'])
# Connection for each customer
nodes = [7, 9, 24]
for index in nodes:
connections = connections.append(
{'fmu1_id': 'PV' + str(index),
'fmu1_path': pv_inverter_path,
'fmu2_id': 'pandapower',
'fmu2_path': pandapower_path,
'fmu1_parameters': pv_inverter_parameters,
'fmu2_parameters': pandapower_parameter,
'fmu1_output': 'P',
'fmu2_input': 'KW_' + str(index)},
ignore_index=True)
connections = connections.append(
{'fmu1_id': 'PV' + str(index),
'fmu1_path': pv_inverter_path,
'fmu2_id': 'pandapower',
'fmu2_path': pandapower_path,
'fmu1_parameters': pv_inverter_parameters,
'fmu2_parameters': pandapower_parameter,
'fmu1_output': 'Q',
'fmu2_input': 'KVAR_' + str(index)},
ignore_index=True)
connections = connections.append(
{'fmu1_id': 'pandapower',
'fmu1_path': pandapower_path,
'fmu2_id': 'PV' + str(index),
'fmu2_path': pv_inverter_path,
'fmu1_parameters': pandapower_parameter,
'fmu2_parameters': pv_inverter_parameters,
'fmu1_output': 'Vpu_' + str(index),
'fmu2_input': 'v'},
ignore_index=True)
def _sanitize_name(name):
"""
Make a Modelica valid name.
In Modelica, a variable name:
Can contain any of the characters {a-z,A-Z,0-9,_}.
Cannot start with a number.
:param name(str): Variable name to be sanitized.
:return: Sanitized variable name.
"""
# Check if variable has a length > 0
assert(len(name) > 0), 'Require a non-null variable name.'
# If variable starts with a number add 'f_'.
if(name[0].isdigit()):
name = 'f_' + name
# Replace all illegal characters with an underscore.
g_rexBadIdChars = re.compile(r'[^a-zA-Z0-9_]')
name = g_rexBadIdChars.sub('_', name)
return name
connections['fmu1_output'] = connections['fmu1_output'].apply(lambda x: _sanitize_name(x))
connections['fmu2_input'] = connections['fmu2_input'].apply(lambda x: _sanitize_name(x))
print(tabulate(connections[
['fmu1_id', 'fmu2_id', 'fmu1_output', 'fmu2_input']].head(),
headers='keys', tablefmt='psql'))
print(tabulate(connections[
['fmu1_id', 'fmu2_id', 'fmu1_output', 'fmu2_input']].tail(),
headers='keys', tablefmt='psql'))
connections.to_excel(connections_filename, index=False)
###Output
+----+------------+------------+---------------+--------------+
| | fmu1_id | fmu2_id | fmu1_output | fmu2_input |
|----+------------+------------+---------------+--------------|
| 0 | PV7 | pandapower | P | KW_7 |
| 1 | PV7 | pandapower | Q | KVAR_7 |
| 2 | pandapower | PV7 | Vpu_7 | v |
| 3 | PV9 | pandapower | P | KW_9 |
| 4 | PV9 | pandapower | Q | KVAR_9 |
+----+------------+------------+---------------+--------------+
+----+------------+------------+---------------+--------------+
| | fmu1_id | fmu2_id | fmu1_output | fmu2_input |
|----+------------+------------+---------------+--------------|
| 4 | PV9 | pandapower | Q | KVAR_9 |
| 5 | pandapower | PV9 | Vpu_9 | v |
| 6 | PV24 | pandapower | P | KW_24 |
| 7 | PV24 | pandapower | Q | KVAR_24 |
| 8 | pandapower | PV24 | Vpu_24 | v |
+----+------------+------------+---------------+--------------+
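###Markdown
As a quick illustration of `_sanitize_name` above (this cell is an addition, not part of the original template): names starting with a digit get an `f_` prefix, and any character outside `a-z`, `A-Z`, `0-9`, `_` becomes an underscore.
###Code
# Illustrative only; the expected values follow directly from the function definition above
assert _sanitize_name('7-Vpu') == 'f_7_Vpu'    # digit prefix added, '-' replaced by '_'
assert _sanitize_name('Vpu_24') == 'Vpu_24'    # already a valid Modelica name, unchanged
###Output
_____no_output_____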
###Markdown
Launch FMU simulation
###Code
if run_simulation:
import shlex, subprocess
cmd = ("C:/JModelica.org-2.1/setenv.bat && " +
" cd " + pandapower_folder + " && "
"cyderc " +
" --path ./"
" --name pandapower" +
" --io pandapower.xlsx" +
" --fmu_struc python" +
" --path_to_simulatortofmu C:/Users/cyder/Desktop/" +
"SimulatorToFMU/simulatortofmu/parser/SimulatorToFMU.py")
args = shlex.split(cmd)
process = subprocess.Popen(args, bufsize=1, universal_newlines=True)
process.wait()
process.kill()
if run_simulation:
import os
import signal
import shlex, subprocess
cmd = ("C:/JModelica.org-2.1/setenv.bat && " +
"cyders " +
" --start " + str(start_s) +
" --end " + str(end_s) +
" --connections " + connections_filename +
" --nb_steps 25" +
" --solver " + solver_name +
" --rtol " + str(solver_relative_tolerance) +
" --atol " + str(solver_absolute_tolerance) +
" --result " + 'results/' + result_filename + '.csv')
args = shlex.split(cmd)
process = subprocess.Popen(args, bufsize=1, universal_newlines=True,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
process.wait()
process.send_signal(signal.CTRL_BREAK_EVENT)
process.kill()
print('Killed')
###Output
Killed
###Markdown
Plot results
###Code
# Load results
results = pandas.read_csv('results/' + result_filename + '.csv')
epoch = datetime.datetime.utcfromtimestamp(0)
begin_since_epoch = (begin_dt - epoch).total_seconds()
results['datetime'] = results['time'].apply(
lambda x: datetime.datetime.utcfromtimestamp(begin_since_epoch + x))
results.set_index('datetime', inplace=True, drop=False)
print('COLUMNS=')
print(results.columns)
print('START=')
print(results.head(1).index[0])
print('END=')
print(results.tail(1).index[0])
# Plot sum of all PVs for P and P curtailled and Q
cut = '2016-06-17 01:00:00'
fig, axes = plt.subplots(1, 1, figsize=(11, 3))
plt.title('PV generation')
for node in nodes:
plt.plot(results['datetime'],
results['pandapower.KW_' + str(node)] / 1000,
linewidth=3, alpha=0.7, label='node ' + str(node))
plt.legend(loc=0)
plt.ylabel('PV active power [MW]')
plt.xlabel('Time')
plt.xlim([cut, end])
plt.show()
fig, axes = plt.subplots(1, 1, figsize=(11, 3))
plt.title('Inverter reactive power')
for node in nodes:
plt.plot(results['datetime'],
results['pandapower.KVAR_' + str(node)] / 1000,
linewidth=3, alpha=0.7, label='node ' + str(node))
plt.legend(loc=0)
plt.ylabel('PV reactive power [MVAR]')
plt.xlabel('Time')
plt.xlim([cut, end])
plt.show()
fig, axes = plt.subplots(1, 1, figsize=(11, 3))
plt.title('PV voltage')
for node in nodes:
plt.plot(results['datetime'],
results['pandapower.Vpu_' + str(node)],
linewidth=3, alpha=0.7, label='node ' + str(node))
plt.legend(loc=0)
plt.ylabel('PV voltage [p.u.]')
plt.xlabel('Time')
plt.xlim([cut, end])
plt.ylim([0.95, results[['pandapower.Vpu_' + str(node)
for node in nodes]].max().max()])
plt.show()
# Load results
debug = pandas.read_csv('debug.csv', parse_dates=[1])
epoch = datetime.datetime.utcfromtimestamp(0)
begin_since_epoch = (begin_dt - epoch).total_seconds()
debug['datetime'] = debug['sim_time'].apply(
lambda x: datetime.datetime.utcfromtimestamp(begin_since_epoch + x))
debug.set_index('datetime', inplace=True, drop=False)
print('COLUMNS=')
print(debug.columns)
print('START=')
print(debug.head(1).index[0])
print('END=')
print(debug.tail(1).index[0])
# Plot time/voltage
import matplotlib.dates as mdates
print('Number of evaluation=' + str(len(debug)))
fig, axes = plt.subplots(1, 1, figsize=(11, 8))
plt.plot(debug['clock'],
debug['datetime'],
linewidth=3, alpha=0.7)
plt.ylabel('Simulation time')
plt.xlabel('Computer clock')
plt.gcf().autofmt_xdate()
myFmt = mdates.DateFormatter('%H:%M')
plt.gca().xaxis.set_major_formatter(myFmt)
plt.show()
fig, axes = plt.subplots(1, 1, figsize=(11, 3))
plt.plot(debug['clock'],
debug['KW_7'],
linewidth=3, alpha=0.7)
plt.plot(debug['clock'],
debug['KW_9'],
linewidth=3, alpha=0.7)
plt.plot(debug['clock'],
debug['KW_24'],
linewidth=3, alpha=0.7)
plt.ylabel('KW')
plt.xlabel('Computer clock')
plt.legend([17, 31, 24], loc=0)
plt.show()
fig, axes = plt.subplots(1, 1, figsize=(11, 3))
plt.plot(debug['clock'],
debug['Vpu_7'],
linewidth=3, alpha=0.7)
plt.plot(debug['clock'],
debug['Vpu_9'],
linewidth=3, alpha=0.7)
plt.plot(debug['clock'],
debug['Vpu_24'],
linewidth=3, alpha=0.7)
plt.ylabel('Vpu')
plt.xlabel('Computer clock')
plt.show()
fig, axes = plt.subplots(1, 1, figsize=(11, 3))
plt.plot(debug['clock'],
debug['Vpu_7'].diff(),
linewidth=3, alpha=0.7)
plt.plot(debug['clock'],
debug['Vpu_9'].diff(),
linewidth=3, alpha=0.7)
plt.plot(debug['clock'],
debug['Vpu_24'].diff(),
linewidth=3, alpha=0.7)
plt.ylabel('Vpu Diff')
plt.xlabel('Computer clock')
plt.show()
fig, axes = plt.subplots(1, 1, figsize=(11, 3))
plt.plot(debug['clock'],
debug['KVAR_7'],
linewidth=3, alpha=0.7)
plt.plot(debug['clock'],
debug['KVAR_9'],
linewidth=3, alpha=0.7)
plt.plot(debug['clock'],
debug['KVAR_24'],
linewidth=3, alpha=0.7)
plt.ylabel('KVAR')
plt.xlabel('Computer clock')
plt.show()
###Output
Number of evaluation=33959
tutorial10.ipynb | ###Markdown
This chapter is about dictionaries. Dictionaries have keys and values. The keys are used to find the values. Here is an interactive mode demonstration of creating a phone numbers dictionary:
###Code
#Create a dictionary
numbers = {}
#Add some numbers
numbers["Joe"] = "545-4464"
numbers["Jill"] = "979-4654"
#Look up a number
numbers["Joe"]
###Output
_____no_output_____
###Markdown
Here is an example of a program that makes a phone numbers dictionary:
###Code
def print_menu():
print('1. Print Phone Numbers')
print('2. Add a Phone Number')
print('3. Remove a Phone Number')
print('4. Lookup a Phone Number')
print('5. Quit')
print()
numbers = {}
menu_choice = 0
print_menu()
while menu_choice != 5:
menu_choice = int(input("Type in a number (1-5):"))
if menu_choice == 1:
print("Telephone Numbers:")
for x in sorted(numbers.keys()):
print("Name: ", x, " \tNumber: ", numbers[x])
print()
elif menu_choice == 2:
print("Add Name and Number")
name = input("Name:")
phone = input("Number:")
numbers[name] = phone
elif menu_choice == 3:
print("Remove Name and Number")
name = input("Name:")
if name in numbers:
del numbers[name]
else:
print(name, " was not found")
elif menu_choice == 4:
print("Lookup Number")
name = input("Name:")
if name in numbers:
print("The number is", numbers[name])
else:
print(name, " was not found")
elif menu_choice != 5:
print_menu()
###Output
_____no_output_____
###Markdown
And here is my output:
###Code
1. Print Phone Numbers
2. Add a Phone Number
3. Remove a Phone Number
4. Lookup a Phone Number
5. Quit
Type in a number (1-5):2
Add Name and Number
Name:Joe
Number:545-4464
Type in a number (1-5):2
Add Name and Number
Name:Jill
Number:979-4654
Type in a number (1-5):2
Add Name and Number
Name:Fred
Number:132-9874
Type in a number (1-5):1
Telephone Numbers:
Name: Fred Number: 132-9874
Name: Jill Number: 979-4654
Name: Joe Number: 545-4464
Type in a number (1-5):4
Lookup Number
Name:Joe
The number is 545-4464
Type in a number (1-5):3
Remove Name and Number
Name:Fred
Type in a number (1-5):1
Telephone Numbers:
Name: Jill Number: 979-4654
Name: Joe Number: 545-4464
Type in a number (1-5):5
###Output
_____no_output_____
###Markdown
This program is similar to the name list earlier in the chapter on lists. Here's how the program works. First, the function `print_menu` is defined. `print_menu` just prints a menu that is later used twice in the program. Next comes the funny looking line `numbers = {}`. All that line does is create an empty dictionary and call it numbers. The next few lines just make the menu work. The lines:
###Code
for x in sorted(numbers.keys()):
print("Name: ", x, " \tNumber: ", numbers[x])
###Output
_____no_output_____
###Markdown
go through the dictionary and print all the information. The method `numbers.keys()` returns a view of the keys that is then used by the for loop. The keys are not in any particular order, so if you want them in alphabetic order they must be sorted, as is done here with the `sorted` function. Similar to lists, the statement `numbers[x]` is used to access a specific member of the dictionary; of course, in this case x is a string. Next, the line `numbers[name] = phone` adds a name and phone number to the dictionary. If `name` had already been in the dictionary, phone would replace whatever was there before. Next, the lines:
###Code
if name in numbers:
del numbers[name]
###Output
_____no_output_____
###Markdown
see if a name is in the dictionary and remove it if it is. The expression `name in numbers` returns True if name is a key in numbers and otherwise returns False. The line `del numbers[name]` removes the key name and the value associated with that key. The lines:
###Code
if name in numbers:
print("The number is", numbers[name])
###Output
_____no_output_____
###Markdown
check to see if the dictionary has a certain key and if it does prints out the number associated with it. Lastly, if the menu choice is invalid it reprints the menu for your viewing pleasure.

A recap: Dictionaries have keys and values. Keys can be strings or numbers. Keys point to values. Values can be any type of variable (including lists or even dictionaries (those dictionaries or lists of course can contain dictionaries or lists themselves (scary right? :)))). Here is an example of using a list in a dictionary:
###Code
max_points = [25, 25, 50, 25, 100]
assignments = ['hw ch 1', 'hw ch 2', 'quiz ', 'hw ch 3', 'test']
students = {'#Max':max_points}
def print_menu():
print("1. Add student")
print("2. Remove student")
print("3. Print grades")
print("4. Record grade")
print("5. Print Menu")
print("6. Exit")
def print_all_grades():
print('\t', end=' ')
for i in range(len(assignments)):
print(assignments[i], '\t', end=' ')
print()
keys = list(students.keys())
keys.sort()
for x in keys:
print(x, '\t', end=' ')
grades = students[x]
print_grades(grades)
def print_grades(grades):
for i in range(len(grades)):
print(grades[i], '\t\t', end=' ')
print()
print_menu()
menu_choice = 0
while menu_choice != 6:
print()
menu_choice = int(input("Menu Choice (1-6):"))
if menu_choice == 1:
name = input("Student to add:")
students[name] = [0]*len(max_points)
elif menu_choice == 2:
name = input("Student to remove:")
if name in students:
del students[name]
else:
print("Student: ", name, " not found")
elif menu_choice == 3:
print_all_grades()
elif menu_choice == 4:
print("Record Grade")
name = input("Student:")
if name in students:
grades = students[name]
print("Type in the number of the grade to record")
print("Type a 0 (zero) to exit")
for i in range(len(assignments)):
print(i+1, ' ', assignments[i], '\t', end=' ')
print()
print_grades(grades)
which = 1234
while which != -1:
which = int(input("Change which Grade:"))
which = which-1
if 0 <= which < len(grades):
grade = int(input("Grade:"))
grades[which] = grade
elif which != -1:
print("Invalid Grade Number")
else:
print("Student not found")
elif menu_choice != 6:
print_menu()
###Output
_____no_output_____
###Markdown
and here is a sample output:
###Code
1. Add student
2. Remove student
3. Print grades
4. Record grade
5. Print Menu
6. Exit
Menu Choice (1-6):3
hw ch 1 hw ch 2 quiz hw ch 3 test
#Max 25 25 50 25 100
###Output
_____no_output_____
###Markdown
###Code
Menu Choice (1-6):5
1. Add student
2. Remove student
3. Print grades
4. Record grade
5. Print Menu
6. Exit
Menu Choice (1-6):1
Student to add:Bill
###Output
_____no_output_____
###Markdown
###Code
Menu Choice (1-6):4
Record Grade
Student:Bill
Type in the number of the grade to record
Type a 0 (zero) to exit
1 hw ch 1 2 hw ch 2 3 quiz 4 hw ch 3 5 test
0 0 0 0 0
Change which Grade:1
Grade:25
Change which Grade:2
Grade:24
Change which Grade:3
Grade:45
Change which Grade:4
Grade:23
Change which Grade:5
Grade:95
Change which Grade:0
###Output
_____no_output_____
###Markdown
###Code
Menu Choice (1-6):3
hw ch 1 hw ch 2 quiz hw ch 3 test
#Max 25 25 50 25 100
Bill 25 24 45 23 95
Menu Choice (1-6):6
###Output
_____no_output_____
###Markdown
Here's how the program works. Basically, the variable students is a dictionary with the keys being the names of the students and the values being their grades. The first two lines just create two lists. The next line `students = {'#Max':max_points}` creates a new dictionary with the key `#Max` and the value is set to be [25, 25, 50, 25, 100] (since that's what `max_points` was when the assignment is made) (I use the key `#Max` since `#` is sorted ahead of any alphabetic characters). Next, `print_menu` is defined. Then, the `print_all_grades` function is defined in the lines:
###Code
def print_all_grades():
print('\t', end=' ')
for i in range(len(assignments)):
print(assignments[i], '\t', end=' ')
print()
keys = list(students.keys())
keys.sort()
for x in keys:
print(x, '\t', end=' ')
grades = students[x]
print_grades(grades)
###Output
_____no_output_____ |
notebooks/00_quick_start/naml_synthetic.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.

NAML: Neural News Recommendation with Attentive Multi-View Learning

NAML \[1\] is a multi-view news recommendation approach. The core of NAML is a news encoder and a user encoder. The news encoder is composed of a title encoder, a body encoder, a vert encoder and a subvert encoder. The CNN-based title encoder and body encoder learn title and body representations by capturing the words' semantic information. After getting news title, body, vert and subvert representations, an attention network is used to aggregate those vectors. In the user encoder, we learn representations of users from their browsed news. Besides, we apply additive attention to learn more informative news and user representations by selecting important words and news.

Properties of NAML:
- NAML is a multi-view neural news recommendation approach.
- It uses news title, news body, news vert and news subvert to get news representations. And it uses user historical behaviors to learn user representations.
- NAML uses additive attention to learn informative news and user representations by selecting important words and news.

Data format:

train data

One simple example: `1 0 0 0 0 Impression:0 User:502 CandidateTitle0:17917,36557,47926,32224,24113,48923,19086,5636,3703,0... CandidateBody0:17024,53305,8832,29800,9787,4068,48731,48923,19086,38699,5766,22487,38336,29800,8548,39128,33457,7789,30543,7482,8548,49004,53305,22999,32747,21103,11799,5766,4868,17115,7482,15118,48731,2025,7789,23336,7789,48731,19086,10630,11128,36557,3703,47354,611,7789,19086,5636,51521,30706... CandidateVert0:14... CandidateSubvert0:219... ClickedTitle0:48,33405,35198,5969,5636,35845,850,48731,46799,24113... ClickedBody0:36557,67,34519,24113,8548,48,33405,35198,5969,14340,7053,850,8823,9498,46799,24113,12506,32747,31130,3074,48731,20869,14264,38289,37310,7789,36557,34967,48731,36916,23321,3595,48731,47354,4868,15719,7482,12771,50693,47354,17523,48,20918,17900,35198,48731,20869,1220,14264,7789... ClickedVert0:14... ClickedSubvert0:99... `

In general, each line in the data file represents one positive instance and n negative instances in the same impression. The format is like: `[label0] ... [labeln] [Impression:i] [User:u] [CandidateTitle0:w1,w2,w3,...] ... [CandidateBody0:w1,w2 ..] ... [CandidateVert0:v] ... [CandidateSubvert0:s] ... [ClickedTitle0:w1,w2,w3,...] ... [ClickedBody0:w1,w2,w3,...] ... [ClickedVert0:v] ... [ClickedSubvert0:s] ...`

It contains several parts separated by space, i.e. the label part, the Impression part (`Impression:i`), the User part (`User:u`), the CandidateNews part and the ClickedHistory part. The CandidateNews part describes the target news article we are going to score in this instance. It is represented by (aligned) title words, body words, news vertical index and subvertical index. To take a quick example, a news title may be: `Trump to deliver State of the Union address next week`, then the title words value may be `CandidateTitlei:34,45,334,23,12,987,3456,111,456,432`. ClickedHistory describes the k-th news article the user ever clicked, and the format is the same as for candidate news. Every clicked news item has title words, body words, vertical and subvertical. We use a fixed length to describe an article; if the title or body is shorter than the fixed length, we just pad it with zeros.
test data

One simple example: `1 Impression:0 User:1529 CandidateTitle0:5327,18658,13846,6439,611,50611,0,0,0,0 CandidateBody0:13846,3197,27902,225,5327,45008,29145,7789,509,7395,11502,36557,13846,23680,26492,38072,20507,5636,4247,32747,50132,7482,41049,32747,43022,50611,35979,7789,1191,36557,52870,21622,48148,42737,48731,36557,13846,23680,13173,7482,13848,38072,20507,7789,41675,36875,51461,12348,21045,42160 CandidateVert0:14 CandidateSubvert0:19 ClickedTitle0:9079,3703,32747,8546,19377,50184,32747,24026,40010,49754 ... ClickedBody0:26061,48731,8576,7789,8683,9079,5636,45084,46805,3703,509,43036,11502,28883,9498,18450,32747,8546,33405,35647,50184,7482,41143,8220,43618,38072,35198,43390,28057,32552,45245,10764,16247,4221,41038,36557,43683,46805,7789,29727,2179,51003,34797,897,21045,12974,23382,46287,48731,15206 ... ClickedVert0:14 ... ClickedSubvert0:219 ...`

In general, each line in the data file represents one instance. The format is like: `[label] [Impression:i] [User:u] [CandidateTitle0:w1,w2,w3,...] [CandidateBody0:w1,w2,w3,...] [CandidateVert0:v] [CandidateSubvert0:s] [ClickedTitle0:w1,w2,w3,...] ... [ClickedBody0:w1,w2,w3,...] ... [ClickedVert0:v] ... [ClickedSubvert0:s] ...`

Global settings and imports
###Code
import sys
sys.path.append("../../")
from reco_utils.recommender.deeprec.deeprec_utils import download_deeprec_resources
from reco_utils.recommender.newsrec.newsrec_utils import prepare_hparams
from reco_utils.recommender.newsrec.models.naml import NAMLModel
from reco_utils.recommender.newsrec.io.naml_iterator import NAMLIterator
import papermill as pm
from tempfile import TemporaryDirectory
import tensorflow as tf
import os
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
tmpdir = TemporaryDirectory()
###Output
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/data/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
###Markdown
Download and load data
###Code
data_path = tmpdir.name
yaml_file = os.path.join(data_path, r'naml.yaml')
train_file = os.path.join(data_path, r'train.txt')
valid_file = os.path.join(data_path, r'test.txt')
wordEmb_file = os.path.join(data_path, r'embedding.npy')
if not os.path.exists(yaml_file):
download_deeprec_resources(r'https://recodatasets.blob.core.windows.net/newsrec/', data_path, 'naml.zip')
###Output
100%|██████████| 72.6k/72.6k [00:04<00:00, 18.0kKB/s]
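###Markdown
Before moving on, we can quickly sanity-check the record layout described at the top of this notebook. The cell below is only an illustrative sketch (it assumes the downloaded `train.txt` follows the documented space-separated `Name:value` layout) and is not part of the NAML pipeline itself.
###Code
# Peek at the first training record and split it into click labels and named fields.
with open(train_file, 'r') as f:
    first_line = f.readline().strip()

tokens = first_line.split(' ')
labels = [t for t in tokens if ':' not in t]                  # leading 1/0 click labels
fields = dict(t.split(':', 1) for t in tokens if ':' in t)    # e.g. 'CandidateTitle0' -> '17917,36557,...'
print('labels:', labels)
print('CandidateTitle0 word ids:', fields.get('CandidateTitle0'))
###Output
_____no_output_____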
###Markdown
Create hyper-parameters
###Code
epochs=5
seed=42
hparams = prepare_hparams(yaml_file, wordEmb_file=wordEmb_file, epochs=epochs)
print(hparams)
iterator = NAMLIterator
###Output
_____no_output_____
###Markdown
Train the NAML model
###Code
model = NAMLModel(hparams, iterator, seed=seed)
print(model.run_eval(valid_file))
model.fit(train_file, valid_file)
res_syn = model.run_eval(valid_file)
print(res_syn)
pm.record("res_syn", res_syn)
###Output
{'group_auc': 0.5599, 'mean_mrr': 0.2027, 'ndcg@5': 0.2065, 'ndcg@10': 0.268}
|
Labs/Lab2/Romil/grad_desc_scratch.ipynb | ###Markdown
Preprocessing of data
###Code
# NOTE: X_new, y and m (the number of samples) are assumed to come from earlier cells of this notebook.
import numpy as np
from sklearn import preprocessing

X_new = preprocessing.scale(X_new)  # standardize the feature
X_b = np.concatenate((np.ones((m,1)),X_new),axis = 1)  # prepend a bias column of ones
y.shape
X_b.shape
###Output
_____no_output_____
###Markdown
Function for gradient descent
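For reference, the cell below implements the standard batch gradient-descent update for linear regression, $\theta \leftarrow \theta - \frac{\alpha}{m} X^T (X\theta - y)$, where $\alpha$ is the learning rate and $m$ is the number of samples; `theta_history` and `cost_history` simply record the trajectory at every iteration.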
###Code
def gradient_descent (X,y,theta,learning_rate=0.01,iterations=100):
m = len(y)
cost_history = np.zeros(iterations)
theta_history = np.zeros((iterations,1+1))
for it in range(iterations):
prediction = np.dot(X,theta)
theta = theta -(1/m)*learning_rate*( X.T.dot((prediction - y)))
theta_history[it,:] =theta.T
cost_history[it] = cal_cost(theta,X,y)
return theta, cost_history, theta_history
###Output
_____no_output_____
###Markdown
Loss function
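The cost used here is the usual squared-error objective $J(\theta) = \frac{1}{2m}\sum_{i=1}^{m}\left(x^{(i)}\theta - y^{(i)}\right)^2$, whose gradient is exactly the $\frac{1}{m}X^T(X\theta - y)$ term used in `gradient_descent` above.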
###Code
def cal_cost(theta,X,y):
m = len(y)
predictions = X.dot(theta)
    cost = (1/(2*m)) * np.sum(np.square(predictions-y))  # 1/(2m), not (1/2)*m
return cost
###Output
_____no_output_____
###Markdown
Visualization
###Code
def plot_graph(slope,c):
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(X_new, y)
x_min, x_max = ax.get_xlim()
    y_min, y_max = c + slope*x_min, c + slope*x_max  # evaluate the fitted line at the plot limits
ax.plot([x_min, x_max], [y_min, y_max], color = 'r')
ax.set_xlim([x_min, x_max])
lr =0.01
n_iter = 1000
theta = np.random.randn(2,1)
theta,cost_history,theta_history = gradient_descent(X_b,y,theta,lr,n_iter)
print('Theta0: {},\nTheta1: {}'.format(theta[0][0],theta[1][0]))
print('Final cost/MSE: {}'.format(cost_history[-1]))
intercept,coeff = theta[0][0],theta[1][0]
plot_graph(coeff,intercept)
plt.plot(cost_history)
###Output
_____no_output_____ |
notebooks/train_nn.ipynb | ###Markdown
[Open in Colab](https://colab.research.google.com/github/alexanderlarin/3dgnn/blob/master/notebooks/train_nn.ipynb)
###Code
from google.colab import drive
drive.mount('/content/gdrive')
!mkdir gdrive/My\ Drive/3dgnn
!mkdir gdrive/My\ Drive/3dgnn/datasets
!mkdir gdrive/My\ Drive/3dgnn/models
!mkdir gdrive/My\ Drive/3dgnn/log
!curl http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat --output gdrive/My\ Drive/3dgnn/datasets/nyu_depth_v2_labeled.mat
!rm -rf 3dgnn
!git clone https://github.com/alexanderlarin/3dgnn.git
!cd 3dgnn && git pull
import sys
import logging
sys.path.append('3dgnn')
logging.basicConfig(level=logging.INFO,
format='[%(levelname)s][%(asctime)s][%(name)s] %(message)s',
datefmt="%H:%M:%S",
handlers=[logging.FileHandler('gdrive/My Drive/3dgnn/log/3dgnn.txt'), logging.StreamHandler()])
from extract_hha import patch_hha_dataset
# If you don't have extracted hha images for the dataset
# but now it's strongly recommended do this in your local machine
# and upload to gdrive results of calculation
# patch_hha_dataset('gdrive/My Drive/3dgnn/datasets/nyu_depth_v2_labeled.mat',
# 'gdrive/My Drive/3dgnn/datasets/nyu_depth_v2_patch.mat', mp_chunk_size=16)
from train_nn import config, train_nn
config.use_gpu = True
train_nn('gdrive/My Drive/3dgnn/datasets/nyu_depth_v2_labeled.mat',
'gdrive/My Drive/3dgnn/datasets/hha', 'gdrive/My Drive/3dgnn/models',
from_last_check_point=True, check_point_prefix='checkpoint', num_epochs=250,
notebook=True)
###Output
_____no_output_____ |
Dacon_airline_0217_ver1.ipynb | ###Markdown
Label Encoding
- Since the missing values are coded as 0, ordinal encoding cannot be used.
- Instead of explicitly handling the missing values, we can expect them to be treated like NaN.
###Code
le = preprocessing.LabelEncoder()
for f in cat_features:
    # NOTE: fit_transform is run separately on train and test, so the same category string
    # can receive different integer codes in the two frames -- see the sketch after this cell.
    df_train[f] = le.fit_transform(df_train[f].astype(str))
    df_test[f] = le.fit_transform(df_test[f].astype(str))
print(df_train.shape)
print(df_test.shape)
df_train.head(2)
###Output
(3000, 42)
(2000, 41)
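###Markdown
As noted in the comment above, calling `fit_transform` separately on train and test can map the same category to different codes. The cell below is only an illustrative sketch of a more consistent alternative (it is meant to replace the encoding cell above, not to be run in addition to it).
###Code
# Fit each encoder on the union of train and test values, then transform both consistently.
for f in cat_features:
    le = preprocessing.LabelEncoder()
    le.fit(pd.concat([df_train[f], df_test[f]]).astype(str))
    df_train[f] = le.transform(df_train[f].astype(str))
    df_test[f] = le.transform(df_test[f].astype(str))
###Output
_____no_output_____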
###Markdown
Checking the correlation coefficients
- Focus on the last row, which contains the target.
- Gate location and Flight Distance have correlations close to 0 -> consider dropping these features?
###Code
tmp_feature_dict = {i: 0 for i in selected_features}
del tmp_feature_dict['target']
selected_features = [f for f in tmp_feature_dict.keys()]
print(len(selected_features))
corr = df_train[selected_features + ['target']].corr() # Compute the correlation matrix
mask = np.triu(np.ones_like(corr, dtype=bool)) # Generate a mask for the upper triangle
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1., vmax=1., center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.show()
remove_features = []
CORR_THRESHOLD = 0.0
for f in corr.columns[:-1]:
if np.abs(corr[f]['target']) < CORR_THRESHOLD:
remove_features.append(f)
print(f'Correlation under {CORR_THRESHOLD}: {len(remove_features)}\n{remove_features}')
tmp_features = {k: 0 for k in selected_features}
for f in remove_features:
if f in selected_features:
del tmp_features[f]
final_features = [f for f in tmp_features.keys()]
del tmp_features
print(len(final_features), final_features)
train_set = df_train[final_features + ['target']]
print(train_set.shape)
train_set.head(2)
def set_seed(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
SEED = 7777
N_SPLITS = 5
N_ESTIMATORS = 5000
params = {
'n_estimators': N_ESTIMATORS,
'n_jobs': -1,
'random_state': SEED,
}
cbt_params = {
'silent': True,
'n_estimators': N_ESTIMATORS,
'learning_rate': 0.015,
'random_state': SEED,
#'depth': 8,
#'min_data_in_leaf': 4,
#'cat_features': selected_cat_features
}
models = []
set_seed(SEED)
folds = StratifiedKFold(n_splits=N_SPLITS, shuffle=True)
for fold, (train_idx, valid_idx) in enumerate(folds.split(train_set[final_features], train_set['target'])):
X_train = train_set.loc[train_idx][final_features]
y_train = train_set.loc[train_idx]['target']
X_valid = train_set.loc[valid_idx][final_features]
y_valid = train_set.loc[valid_idx]['target']
#model = RandomForestClassifier(**params)
model = CatBoostClassifier(**cbt_params)
model.fit(X_train, y_train)
models.append(model)
p_valid = model.predict(X_valid)
acc = accuracy_score(y_valid, p_valid)
print(f'#{fold} Accuracy: {acc}')
# grid_param = {
# 'n_estimators': [5000],
# 'learning_rate': [0.015],
# 'depth': [4, 6, 8],
# #'min_data_in_leaf': [4, 8, 12, 16],
# #'num_leaves': [4, 8, 12, 16],
# #'l2_leaf_reg': [1, 3, 5, 7]
# }
# cbt_clf = CatBoostClassifier(silent = True)
# grid_search = GridSearchCV(
# estimator = cbt_clf,
# param_grid = grid_param,
# cv = 5,
# n_jobs = 1,
# verbose = 3,
# scoring='accuracy',
# )
# grid_search.fit(train_set[final_features], train_set['target'])
# print(grid_search.best_params_)
###Output
_____no_output_____
###Markdown
###Code
probs = []
for model in models:
prob = model.predict_proba(df_test[final_features])
probs.append(prob)
pred = sum(probs) / len(probs)
pred = np.argmax(pred, axis=1)
df_submit = pd.read_csv('sample_submission.csv')
df_submit['target'] = pred
df_submit.to_csv('/content/gdrive/MyDrive/Dacon/airline/0217_ver1.csv', index=False)
###Output
_____no_output_____ |
notebooks/Key words in thread_name exploration.ipynb | ###Markdown
Exploration of key word occurrences in thread names notebook
###Code
import os
import sys
from src import data_prepare
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
post, thread=data_prepare.load_train_data()
label_map=data_prepare.load_label_map()
thread.head()
key_words=pd.Series(label_map.index)
key_words
key_words[1]="beginner"
key_words[6]="king"
thread["name"]=thread["thread_name"].apply(lambda x: data_prepare.clean(x)).values  # .values instead of the deprecated .as_matrix()
###Output
_____no_output_____
###Markdown
Let's just try to calculate how efficient it is to look for names themselves in thread names for different classes
###Code
num_classes=len(key_words)
correct=pd.Series(np.zeros(num_classes),index=label_map.index)
over=pd.Series(np.zeros(num_classes),index=label_map.index)
under=pd.Series(np.zeros(num_classes),index=label_map.index)
al=pd.Series(np.zeros(num_classes),index=label_map.index)
for item in thread.itertuples():
label=item.thread_label_id
if key_words[label] in item.name:
correct[label]+=1
else:
under[label]+=1
for index,s in enumerate(key_words):
if index==label:
continue
if s in item.name:
over[index]+=1
break
al[label]+=1
x = pd.DataFrame({'all':al,'correct':correct,'under': under,'over':over,
'per cent correct':correct/al,'per cent under':under/al,
'per cent over':over/al},
index=correct.index)
x
x["efficiency"]=(x["correct"]-x["over"])/x['all']
x
###Output
_____no_output_____
###Markdown
Not the best metric for efficiency, but it seems like it makes sense to use this kind of raw prediction for classes that score more than 40-50%, especially if they don't have a lot of representatives in the dataset, like cybrid or supernatural
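Such a raw keyword rule could look like the sketch below -- purely illustrative, with the set of "trusted" classes (here cybrid and supernatural) left as an assumption to be tuned.
###Code
def predict_by_keyword(name, trusted=('cybrid', 'supernatural')):
    """Return the label id of the single trusted key word found in the thread name, else None."""
    matches = [idx for idx, word in key_words.items() if word in trusted and word in name]
    return matches[0] if len(matches) == 1 else None

predict_by_keyword(thread["name"].iloc[0])
###Output
_____no_output_____
###Markdown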
###Code
post_test, thread_test=data_prepare.load_test_data()
test_stat=pd.Series(np.zeros(13),index=label_map.index)
thread_test["name"]=thread_test["thread_name"].apply(lambda x: data_prepare.clean(x)).values  # .values instead of the deprecated .as_matrix()
for item in thread_test.itertuples():
for index,s in enumerate(key_words):
if s in item.name:
test_stat[index]+=1
test_stat
###Output
_____no_output_____ |
day4_sign.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
import os
import datetime
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
from skimage import color, exposure
from sklearn.metrics import accuracy_score
%load_ext tensorboard
cd "/content/drive/My Drive/Colab Notebooks/matrix_three_road_signs/"
train = pd.read_pickle('data/train.p')
test = pd.read_pickle('data/test.p')
x_train, y_train = train['features'], train['labels']
x_test, y_test = test['features'], test['labels']
if y_train.ndim == 1: y_train = to_categorical(y_train)
if y_test.ndim == 1: y_test = to_categorical(y_test)
input_shape = x_train.shape[1:]
num_classes = y_train.shape[1]
model = Sequential([
Conv2D(filters=64, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Flatten(),
Dense(num_classes, activation='softmax'),
])
#model.summary()
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(x_train, y_train)
def get_cnn_v1(input_shape, num_classes):
return Sequential([
Conv2D(filters=64, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Flatten(),
Dense(num_classes, activation='softmax'),
])
def train_model(model, x_train, y_train, params_fit={}):
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
model.fit(
x_train,
y_train,
batch_size=params_fit.get('batch_size', 128),
epochs=params_fit.get('epochs', 5),
verbose=params_fit.get('verbose', 1),
validation_data=params_fit.get('validation_data', (x_train, y_train)),
callbacks=[tensorboard_callback]
)
return model
model = get_cnn_v1(input_shape, num_classes)
model_trained = train_model(model, x_train, y_train)
df = pd.read_csv('data/signnames.csv')
labels_dict = df.to_dict()['b']
y_pred_prob = model_trained.predict(x_test)
y_pred_prob
y_pred_prob[400]
np.argmax( y_pred_prob[400] )
plt.bar(range(43), y_pred_prob[400])
plt.imshow(x_test[400])
def predict(model_trained, x_test, y_test, scoring=accuracy_score):
y_test_norm = np.argmax(y_test,axis=1)
y_pred_prob = model_trained.predict(x_test)
y_pred = np.argmax(y_pred_prob, axis=1)
return scoring(y_test_norm, y_pred)
predict(model_trained, x_test, y_test)
def train_and_predict(model):
model_trained = train_model(model, x_train, y_train)
return predict(model_trained, x_test, y_test)
def get_cnn_v2(input_shape, num_classes):
return Sequential([
Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
MaxPool2D(),
Dropout(0.3),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
MaxPool2D(),
Dropout(0.3),
Flatten(),
Dense(1024, activation='relu'),
Dropout(0.3),
Dense(num_classes, activation='softmax'),
])
train_and_predict( get_cnn_v2(input_shape, num_classes))
def get_cnn_v3(input_shape, num_classes):
return Sequential([
Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
MaxPool2D(),
Dropout(0.3),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
MaxPool2D(),
Dropout(0.3),
Flatten(),
Dense(1024, activation='relu'),
Dropout(0.3),
Dense(num_classes, activation='softmax'),
])
train_and_predict( get_cnn_v3(input_shape, num_classes))
def get_cnn_v4(input_shape, num_classes):
return Sequential([
Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same'),
MaxPool2D(),
Dropout(0.3),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
MaxPool2D(),
Dropout(0.3),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
MaxPool2D(),
Dropout(0.3),
Flatten(),
Dense(1024, activation='relu'),
Dropout(0.3),
Dense(num_classes, activation='softmax'),
])
get_cnn_v4(input_shape, num_classes).summary()
train_and_predict( get_cnn_v4(input_shape, num_classes))
def get_cnn_v5(input_shape, num_classes):
return Sequential([
Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same'),
MaxPool2D(),
Dropout(0.3),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
MaxPool2D(),
Dropout(0.3),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
MaxPool2D(),
Dropout(0.3),
Flatten(),
Dense(1024, activation='relu'),
Dropout(0.3),
Dense(1024, activation='relu'),
Dropout(0.3),
Dense(num_classes, activation='softmax'),
])
train_and_predict( get_cnn_v5(input_shape, num_classes))
x_train[0].shape
color.rgb2gray(x_train[0]).shape
plt.imshow(color.rgb2gray(x_train[0]))
x_train_gray = color.rgb2gray(x_train).reshape(-1,32,32,1)
x_test_gray = color.rgb2gray(x_test).reshape(-1,32,32,1)
model = get_cnn_v5((32,32,1), num_classes)
model_trained = train_model(model, x_train_gray, y_train, params_fit={})
predict(model_trained,x_test_gray,y_test)
plt.imshow(color.rgb2gray(x_train[0]),cmap=plt.get_cmap('gray'))
###Output
_____no_output_____ |
Projects/Project5/CNN_LeNet.ipynb | ###Markdown
Load Dataset
###Code
X = []
y = []
with open("X.pkl", 'rb') as picklefile:
X = pickle.load(picklefile)
with open("y.pkl", 'rb') as picklefile:
y = pickle.load(picklefile)
# set folder path
folderpath = 'Images/Train/Undistorted/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
X.append(np.asarray(img))
y.append(0)
else:
print filename, 'not a pic'
import pickle
with open('undistorted_X.pkl', 'wb') as picklefile:
pickle.dump(X, picklefile)
with open('undistorted_y.pkl', 'wb') as picklefile:
pickle.dump(y, picklefile)
# set folder path
folderpath = 'Images/Train/DigitalBlur2/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
X.append(np.asarray(img))
y.append(1)
else:
print filename, 'not a pic'
len(y)
with open('X.pkl', 'wb') as picklefile:
pickle.dump(X, picklefile)
with open('y.pkl', 'wb') as picklefile:
pickle.dump(y, picklefile)
X_stacked = np.stack(X)
X_norm = X_stacked/255.
y_cat = to_categorical(y)
X_train, X_test, y_train, y_test = train_test_split(X_norm, y_cat, train_size=2500, random_state=42)
# Data augmenter
dg = image.ImageDataGenerator(horizontal_flip=True, vertical_flip=True)
###Output
_____no_output_____
###Markdown
Load initial model weights
###Code
model.load_weights('lenet_weights.h5')
###Output
_____no_output_____
###Markdown
Model
###Code
cb_es = EarlyStopping(monitor='val_acc', patience=2, verbose=1)
cb_mc = ModelCheckpoint('lenet_weights2.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)
# Fit generator
# play with samples/epoch, nb_epoch, val_samples.
model.fit_generator(dg.flow(X_train, y_train), samples_per_epoch=3000, nb_epoch=30, validation_data=dg.flow(X_test, y_test), nb_val_samples=300, callbacks=[cb_es, cb_mc])
###Output
Epoch 1/30
2980/3000 [============================>.] - ETA: 2s - loss: 0.1901 - acc: 0.9460
###Markdown
Test on real pics
###Code
clean_pics = []
blurry_pics = []
backBlur_pics = []
# set folder path
folderpath = 'Images/clearSamples/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
clean_pics.append(np.asarray(img))
else:
print filename, 'not a pic'
# set folder path
folderpath = 'Images/natblurSamples/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
blurry_pics.append(np.asarray(img))
else:
print filename, 'not a pic'
# set folder path
folderpath = 'Images/backBlurSamples/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
backBlur_pics.append(np.asarray(img))
else:
print filename, 'not a pic'
len(backBlur_pics)
backBlur_pics_array = np.stack(backBlur_pics)/255.
len(blurry_pics)
clean_pics_array = np.stack(clean_pics)/255.
blurry_pics_array = np.stack(blurry_pics)/255.
blurry_pics_array.shape
model.predict_classes(clean_pics_array)
model.predict_proba(clean_pics_array)
model.predict_classes(blurry_pics_array)
model.predict_proba(blurry_pics_array)
model.predict_proba(backBlur_pics_array)
plt.imshow(blurry_pics_array[0])
plt.show()
model.save('lenet_3rdPass.h5')
model.save_weights('test_weights.h5')
###Output
_____no_output_____
###Markdown
Test the classifier on the "Background blur only" set
###Code
backBlur_pics = []
backBlur_filenames = []
# set folder path
folderpath = 'Images/backBlurAll_longIter/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
backBlur_filenames.append(filename)
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
backBlur_pics.append(np.asarray(img))
else:
print filename, 'not a pic'
df_backBlur = pd.DataFrame(backBlur_filenames, columns=['filename'])
backBlur_pics_array = np.stack(backBlur_pics)/255.
df_backBlur['blur_class'] = model.predict_classes(backBlur_pics_array)
if not os.path.exists(folderpath+'blurry'):
os.mkdir(folderpath+'blurry')
for index, row in df_backBlur.iterrows():
if row['blur_class'] == 1:
oldpath = folderpath + row['filename']
newpath = folderpath + 'blurry/' + row['filename']
os.rename(oldpath, newpath)
###Output
_____no_output_____
###Markdown
Test the classifier on the Naturally Blurred set
###Code
natBlur_pics = []
natBlur_filenames = []
# set folder path
folderpath = 'Images/natBlurAll_longIter/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
natBlur_filenames.append(filename)
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
natBlur_pics.append(np.asarray(img))
else:
print filename, 'not a pic'
df_natBlur = pd.DataFrame(natBlur_filenames, columns=['filename'])
natBlur_pics_array = np.stack(natBlur_pics)/255.
df_natBlur['blur_class'] = model.predict_classes(natBlur_pics_array)
if not os.path.exists(folderpath+'blurry'):
os.mkdir(folderpath+'blurry')
for index, row in df_natBlur.iterrows():
if row['blur_class'] == 1:
oldpath = folderpath + row['filename']
newpath = folderpath + 'blurry/' + row['filename']
os.rename(oldpath, newpath)
###Output
_____no_output_____
###Markdown
Test classifier on M3 pics
###Code
m3Blur_pics = []
m3Blur_filenames = []
# set folder path
folderpath = 'Images/M3Samples/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
m3Blur_filenames.append(filename)
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
m3Blur_pics.append(np.asarray(img))
else:
print filename, 'not a pic'
df_m3Blur = pd.DataFrame(m3Blur_filenames, columns=['filename'])
m3Blur_pics_array = np.stack(m3Blur_pics)/255.
df_m3Blur['blur_class'] = model.predict_classes(m3Blur_pics_array)
if not os.path.exists(folderpath+'blurry'):
os.mkdir(folderpath+'blurry')
for index, row in df_m3Blur.iterrows():
if row['blur_class'] == 1:
oldpath = folderpath + row['filename']
newpath = folderpath + 'blurry/' + row['filename']
os.rename(oldpath, newpath)
df_m3Blur
###Output
_____no_output_____
###Markdown
Test my clear samples
###Code
clearSample_pics = []
clearSample_filenames = []
# set folder path
folderpath = 'Images/clearSamples/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
clearSample_filenames.append(filename)
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
clearSample_pics.append(np.asarray(img))
else:
print filename, 'not a pic'
df_clearSample = pd.DataFrame(clearSample_filenames, columns=['filename'])
clearSample_pics_array = np.stack(clearSample_pics)/255.
df_clearSample['blur_class'] = model.predict_classes(clearSample_pics_array)
if not os.path.exists(folderpath+'blurry'):
os.mkdir(folderpath+'blurry')
for index, row in df_clearSample.iterrows():
if row['blur_class'] == 1:
oldpath = folderpath + row['filename']
newpath = folderpath + 'blurry/' + row['filename']
os.rename(oldpath, newpath)
###Output
_____no_output_____
###Markdown
Test my bad samples
###Code
blurSample_pics = []
blurSample_filenames = []
# set folder path
folderpath = 'Images/natBlurSamples/'
# load image arrays
for filename in os.listdir(folderpath):
if filename != '.DS_Store':
blurSample_filenames.append(filename)
imagepath = folderpath + filename
img = image.load_img(imagepath, target_size=(192,192))
blurSample_pics.append(np.asarray(img))
else:
print filename, 'not a pic'
df_blurSample = pd.DataFrame(blurSample_filenames, columns=['filename'])
blurSample_pics_array = np.stack(blurSample_pics)/255.
df_blurSample['blur_class'] = model.predict_classes(blurSample_pics_array)
if not os.path.exists(folderpath+'blurry'):
os.mkdir(folderpath+'blurry')
for index, row in df_blurSample.iterrows():
if row['blur_class'] == 1:
oldpath = folderpath + row['filename']
newpath = folderpath + 'blurry/' + row['filename']
os.rename(oldpath, newpath)
img = image.load_img('Images/NaturalBlurSet.xlsx', target_size=(192,192))
'.JPG'.lower()
###Output
_____no_output_____ |
en/01_quadratic/quadratic_pizza_task.ipynb | ###Markdown
Exercise of buying a pizza
* topics: quadratic equation, function definition in Python, `fsolve()`

Task
* You go for pizza with your friend.
* The menu is clear: the smaller pizza costs 100, the bigger one, whose diameter is larger by 10 cm, costs 200.

Questions
* At which diameter d does it pay off to buy 2 smaller pizzas rather than 1 bigger one?
* How does the solution change if you do not care about a 1 cm dry edge of the pizza?

------
In case this is your first Jupyter Notebook:
* Every cell is executed with `Shift-Enter`, once your cursor is in it.
* After a successful run, a serial number of the execution will appear on the left of the cell.
* For cell execution and creation of a new cell below, use `Alt-Enter`.
* Any text after a `#` symbol is a comment (to annotate your code) and it is ignored by Python.
* Caution: If you execute a cell which has a hint in the output, the hint will disappear, therefore it is better to use `Alt-Enter`.
------
###Code
# import of classical modules as in the introduction
import numpy as np
import matplotlib.pyplot as plt
# advanced feature (save to ignore), which enables to set parameters
# for ALL the plots in the notebook at once
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = [8,6] # graph size
mpl.rcParams['lines.linewidth'] = 3 # line width
mpl.rcParams['lines.markersize'] = 10 # size of point markers
mpl.rcParams['xtick.labelsize'] = 12 # font size of x axis ticks
mpl.rcParams['ytick.labelsize'] = 12 # font size of y axis ticks
mpl.rcParams['axes.labelsize'] = 'larger' # font size of the axes labels
# uncomment the command below to find out all possible parameters of graph you can change/set.
# mpl.rcParams.keys()
# Our variable (x axis) is the pizza diameter (d)
# define an array of reasonable d values
###Output
[ 0. 0.5 1. 1.5 2. 2.5 3. 3.5 4. 4.5 5. 5.5 6. 6.5
7. 7.5 8. 8.5 9. 9.5 10. 10.5 11. 11.5 12. 12.5 13. 13.5
14. 14.5 15. 15.5 16. 16.5 17. 17.5 18. 18.5 19. 19.5 20. 20.5
21. 21.5 22. 22.5 23. 23.5 24. 24.5 25. 25.5 26. 26.5 27. 27.5
28. 28.5 29. 29.5 30. 30.5 31. 31.5 32. 32.5 33. 33.5 34. 34.5
35. 35.5 36. 36.5 37. 37.5 38. 38.5 39. 39.5 40. 40.5 41. 41.5
42. 42.5 43. 43.5 44. 44.5 45. 45.5 46. 46.5 47. 47.5 48. 48.5
49. 49.5 50. ]
###Markdown
The condition we are solving for is when the area of 2 smaller pizzas is larger than the area of 1 bigger pizza: $2S_{small}{\gt}S_{big}$
* Hopefully the pizzas are circular: $ 2{\cdot}\pi\left(\dfrac{d}{2}\right)^2 \gt \pi\left(\dfrac{d+10}{2}\right)^2$
* After arranging everything on one side, we solve for when the expression is > 0.
* Note that $\pi$ in Python is written as np.pi.
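For reference, the algebra behind the plot and the `fsolve()` computation below: dividing the inequality by $\pi/4$ gives $2d^2 \gt (d+10)^2$, i.e. $d^2 - 20d - 100 \gt 0$, whose positive root is $d = 10 + 10\sqrt{2} \approx 24.1$ cm.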
###Code
# Calculate functional values for range of d you picked
f =
# Plot the function, together with x axis as below
plt.xlabel('Pizza diameter, d [cm]')
plt.ylabel('Area difference (2 smaller - 1 bigger) [cm2]')
plt.show()
###Output
_____no_output_____
###Markdown
From a diameter of roughly 24 cm on, it always pays off to buy 2 smaller pizzas.
---
Let's calculate the intercept exactly. We have at least two options:
1. In the quadratic function intro, we have a method for the case when we know the parameters `a,b,c`, therefore we would need to rearrange our expression on paper first.
2. That is not practical for more complicated expressions, therefore Python has the function `fsolve()`.
* `fsolve()` solves equations numerically, using an iterative algorithm that progressively approaches the correct solution.
###Code
# For using fsolve, we need to import it from scipy.optimize module
from scipy.optimize import fsolve
## Into fsolve, we need to pass a function of some variable/variables
## We can do that in the following way:
# we define (def) function which calculates area difference (what we plot on y axis) based on input d
def area_diff(d): # area_diff is only dependent on d
# fill in the expression for area difference
diff =
return diff
# test the function for d=10
###Output
_____no_output_____
###Markdown
This means that for a diameter of 10 cm, the area of 2 smaller pizzas is smaller by $157\,cm^2$ than that of 1 larger one.
---
Try different inputs of d on your own
* By trial you can find out when the difference becomes positive.
* And that is the moment when you want to buy 2 smaller pizzas instead of one bigger one.
###Code
# Let's use FSOLVE by inputing our area_diff to find exact solution.
fsolve(area_diff)
## What does this error mean?
# FSOLVE needs one required argument x0: which is our first estimate of a solution.
# writing fsolve and pressing Shift-Tab, all arguments of the FSOLVE function should show up
# try fsolve again and better
###Output
_____no_output_____
###Markdown
FSOLVE is a mighty instrument
* Works on any type of equation, not only quadratic.
* Solves systems of equations too.

Unfortunately we cannot blindly trust every return (try yourselves):
* If your initial guess `x0` is closer to the other intercept, `fsolve` will return the second root of the quadratic equation, which is illogical (a negative pizza diameter) for our purposes.
* If `x0` is close to the APEX of the parabola (`x0=10`), `fsolve()` will be confused whether to go right or left and is ultimately going to fail.
* If you are reasonably close with your `x0`, you are safe.
---
If you are hungry by now, good job and good appetite. If not, try to extend your solution by considering the dry edge of a pizza...
* What if you do not care about the dry edge of the pizza, so that you want to discard it from your calculations?
* How is it going to affect your decision about 2 vs 1 pizzas?
###Code
# The easiest way is to adapt our already written functions for area difference.
# We do it by adding one free parameter which value stands from thickness of the edge
def area_diff(d, edge=0): # edge=0, if we do not provide edge parameter to the function, Python will use predefined value of 0 set by a smart programmer.
# fill in the expression for area difference, now dependent on edge as well
diff =
return diff
# Repeat fsolve(), we pass edge parameter as args=1
# Plot both graphs, the original and the one with the edge discarded
plt.axhline(0, color='k', lw=0.5)
plt.axvline(0, color='k', lw=0.5)
plt.xlabel('Pizza diameter, d [cm]')
plt.ylabel('Area difference (2 smaller - 1 bigger) [cm2]')
plt.legend()
plt.show()
###Output
_____no_output_____ |
Trainer-Collaboratories/Fine_Tuning/MobileNetV2/Fine_tuning_MobileNetV2(GAP_256_0,25).ipynb | ###Markdown
**Import Google Drive**
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
###Markdown
**Import Library**
###Code
import glob
import numpy as np
import os
import shutil
np.random.seed(42)
from sklearn.preprocessing import LabelEncoder
import cv2
import tensorflow as tf
import keras
import shutil
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
###Output
Using TensorFlow backend.
/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
###Markdown
**Load Data**
###Code
os.chdir('/content/drive/My Drive/Colab Notebooks/DATA RD/')
Train = glob.glob('/content/drive/My Drive/Colab Notebooks/DATA RD/DATASETS/Data Split/Train/*')
Val=glob.glob('/content/drive/My Drive/Colab Notebooks/DATA RD/DATASETS/Data Split/Validation/*')
Test=glob.glob('/content/drive/My Drive/Colab Notebooks/DATA RD/DATASETS/Data Split/Test/*')
import matplotlib.image as mpimg
for ima in Train[600:601]:
img=mpimg.imread(ima)
imgplot = plt.imshow(img)
plt.show()
###Output
_____no_output_____
###Markdown
**Data Preparation**
###Code
nrows = 224
ncolumns = 224
channels = 3
def read_and_process_image(list_of_images):
X = [] # images
y = [] # labels
for image in list_of_images:
X.append(cv2.resize(cv2.imread(image, cv2.IMREAD_COLOR), (nrows,ncolumns), interpolation=cv2.INTER_CUBIC)) #Read the image
#get the labels
if 'Normal' in image:
y.append(0)
elif 'Mild' in image:
y.append(1)
elif 'Moderate' in image:
y.append(2)
elif 'Severe' in image:
y.append(3)
return X, y
X_train, y_train = read_and_process_image(Train)
X_val, y_val = read_and_process_image(Val)
X_test, y_test = read_and_process_image(Test)
import seaborn as sns
import gc
gc.collect()
#Convert list to numpy array
X_train = np.array(X_train)
y_train= np.array(y_train)
X_val = np.array(X_val)
y_val= np.array(y_val)
X_test = np.array(X_test)
y_test= np.array(y_test)
print('Train:',X_train.shape,y_train.shape)
print('Val:',X_val.shape,y_val.shape)
print('Test',X_test.shape,y_test.shape)
sns.countplot(y_train)
plt.title('Total Data Training')
sns.countplot(y_val)
plt.title('Total Data Validasi')
sns.countplot(y_test)
plt.title('Total Data Test')
y_train_ohe = pd.get_dummies(y_train)
y_val_ohe=pd.get_dummies(y_val)
y_test_ohe=pd.get_dummies(y_test)
y_train_ohe.shape,y_val_ohe.shape,y_test_ohe.shape
###Output
_____no_output_____
###Markdown
**Model Parameters**
###Code
batch_size = 16
EPOCHS = 100
WARMUP_EPOCHS = 2
LEARNING_RATE = 0.001
WARMUP_LEARNING_RATE = 1e-3
HEIGHT = 224
WIDTH = 224
CANAL = 3
N_CLASSES = 4
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5
###Output
_____no_output_____
###Markdown
**Data Generator**
###Code
train_datagen =tf.keras.preprocessing.image.ImageDataGenerator(
rotation_range=360,
horizontal_flip=True,
vertical_flip=True)
test_datagen=tf.keras.preprocessing.image.ImageDataGenerator()
train_generator = train_datagen.flow(X_train, y_train_ohe, batch_size=batch_size)
val_generator = test_datagen.flow(X_val, y_val_ohe, batch_size=batch_size)
test_generator = test_datagen.flow(X_test, y_test_ohe, batch_size=batch_size)
###Output
_____no_output_____
###Markdown
**Define Model**
###Code
IMG_SHAPE = (224, 224, 3)
base_model =tf.keras.applications.MobileNetV2(weights='imagenet',
include_top=False,
input_shape=IMG_SHAPE)
x =tf.keras.layers.GlobalAveragePooling2D()(base_model.output)
x =tf.keras.layers.Dropout(0.25)(x)
x =tf.keras.layers.Dense(256, activation='relu')(x)
x =tf.keras.layers.Dropout(0.25)(x)
final_output =tf.keras.layers.Dense(N_CLASSES, activation='softmax', name='final_output')(x)
model =tf.keras.models.Model(inputs=base_model.inputs,outputs=final_output)
###Output
_____no_output_____
###Markdown
**Train Top Layers**
###Code
for layer in model.layers:
layer.trainable = False
for i in range(-5, 0):
model.layers[i].trainable = True
metric_list = ["accuracy"]
optimizer =tf.keras.optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=metric_list)
model.summary()
import time
start = time.time()
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = val_generator.n//val_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=val_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
verbose=1).history
end = time.time()
print('Waktu Training:', end - start)
###Output
WARNING:tensorflow:From <ipython-input-17-42947d619a66>:13: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
Please use Model.fit, which supports generators.
Epoch 1/2
375/375 [==============================] - 67s 178ms/step - loss: 1.0370 - accuracy: 0.5305 - val_loss: 1.0771 - val_accuracy: 0.5397
Epoch 2/2
375/375 [==============================] - 66s 176ms/step - loss: 0.9336 - accuracy: 0.5530 - val_loss: 0.9844 - val_accuracy: 0.5860
Waktu Training: 137.82756233215332
###Markdown
**Train Fine Tuning**
###Code
for layer in model.layers:
layer.trainable = True
es =tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
rlrop =tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=DECAY_DROP, min_lr=1e-6, verbose=1)
callback_list = [es]
optimizer =tf.keras.optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=metric_list)
model.summary()
history_finetunning = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=val_generator,
validation_steps=STEP_SIZE_VALID,
epochs=EPOCHS,
callbacks=callback_list,
verbose=1).history
###Output
Epoch 1/100
375/375 [==============================] - 69s 184ms/step - loss: 0.7880 - accuracy: 0.6743 - val_loss: 11.8788 - val_accuracy: 0.2520
Epoch 2/100
375/375 [==============================] - 68s 182ms/step - loss: 0.6365 - accuracy: 0.7373 - val_loss: 5.4351 - val_accuracy: 0.3683
Epoch 3/100
375/375 [==============================] - 68s 182ms/step - loss: 0.5762 - accuracy: 0.7635 - val_loss: 1.6380 - val_accuracy: 0.5343
Epoch 4/100
375/375 [==============================] - 68s 181ms/step - loss: 0.5586 - accuracy: 0.7777 - val_loss: 2.3704 - val_accuracy: 0.4731
Epoch 5/100
375/375 [==============================] - 68s 181ms/step - loss: 0.5625 - accuracy: 0.7740 - val_loss: 4.1205 - val_accuracy: 0.4751
Epoch 6/100
375/375 [==============================] - 69s 183ms/step - loss: 0.5386 - accuracy: 0.7882 - val_loss: 1.4705 - val_accuracy: 0.5659
Epoch 7/100
375/375 [==============================] - 68s 183ms/step - loss: 0.5261 - accuracy: 0.7857 - val_loss: 1.6054 - val_accuracy: 0.6082
Epoch 8/100
375/375 [==============================] - 68s 181ms/step - loss: 0.5157 - accuracy: 0.7917 - val_loss: 1.5991 - val_accuracy: 0.5195
Epoch 9/100
375/375 [==============================] - 69s 183ms/step - loss: 0.5026 - accuracy: 0.7978 - val_loss: 2.3484 - val_accuracy: 0.3972
Epoch 10/100
375/375 [==============================] - 68s 183ms/step - loss: 0.4797 - accuracy: 0.8100 - val_loss: 1.9325 - val_accuracy: 0.4664
Epoch 11/100
375/375 [==============================] - ETA: 0s - loss: 0.5003 - accuracy: 0.8013Restoring model weights from the end of the best epoch.
375/375 [==============================] - 68s 182ms/step - loss: 0.5003 - accuracy: 0.8013 - val_loss: 2.9917 - val_accuracy: 0.4832
Epoch 00011: early stopping
###Markdown
**Model Graph**
###Code
history = {'loss': history_warmup['loss'] + history_finetunning['loss'],
'val_loss': history_warmup['val_loss'] + history_finetunning['val_loss'],
'acc': history_warmup['accuracy'] + history_finetunning['accuracy'],
'val_acc': history_warmup['val_accuracy'] + history_finetunning['val_accuracy']}
sns.set_style("whitegrid")
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=(20, 18))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()
###Output
_____no_output_____
###Markdown
**Evaluate Model**
###Code
loss_Val, acc_Val = model.evaluate(X_val, y_val_ohe,batch_size=1, verbose=1)
print("Validation: accuracy = %f ; loss_v = %f" % (acc_Val, loss_Val))
lastFullTrainPred = np.empty((0, N_CLASSES))
lastFullTrainLabels = np.empty((0, N_CLASSES))
lastFullValPred = np.empty((0, N_CLASSES))
lastFullValLabels = np.empty((0, N_CLASSES))
for i in range(STEP_SIZE_TRAIN+1):
im, lbl = next(train_generator)
scores = model.predict(im, batch_size=train_generator.batch_size)
lastFullTrainPred = np.append(lastFullTrainPred, scores, axis=0)
lastFullTrainLabels = np.append(lastFullTrainLabels, lbl, axis=0)
for i in range(STEP_SIZE_VALID+1):
im, lbl = next(val_generator)
scores = model.predict(im, batch_size=val_generator.batch_size)
lastFullValPred = np.append(lastFullValPred, scores, axis=0)
lastFullValLabels = np.append(lastFullValLabels, lbl, axis=0)
lastFullComPred = np.concatenate((lastFullTrainPred, lastFullValPred))
lastFullComLabels = np.concatenate((lastFullTrainLabels, lastFullValLabels))
complete_labels = [np.argmax(label) for label in lastFullComLabels]
train_preds = [np.argmax(pred) for pred in lastFullTrainPred]
train_labels = [np.argmax(label) for label in lastFullTrainLabels]
validation_preds = [np.argmax(pred) for pred in lastFullValPred]
validation_labels = [np.argmax(label) for label in lastFullValLabels]
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe']
train_cnf_matrix = confusion_matrix(train_labels, train_preds)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax2).set_title('Validation')
plt.show()
###Output
_____no_output_____ |
Chapter6/starspace.ipynb | ###Markdown
StarSpace

StarSpace [[1]](fn1) is an entity embedding approach which uses a similarity function between entities to construct a prediction task for a neural network. It maps objects of different types into a common vector space where they can be compared to each other. StarSpace can learn word, sentence and document level embeddings, ranking, text classification, embedding graphs, image classification, etc. We will follow the official documentation of StarSpace and implement simple text classification.

This notebook requires a working StarSpace program, which can be built on any modern Linux or Windows machine as described in the building instructions in the [GitHub repository](https://github.com/facebookresearch/StarSpace). Here, we use the Linux toolchain to build the StarSpace executable. If you run this notebook on Windows, you can use either Visual Studio or tools such as [MinGW with MSYS](http://www.mingw.org/) or [Cygwin](https://www.cygwin.com/) to compile StarSpace.

-----
[1] Ledell Yu Wu, Adam Fisch, Sumit Chopra, Keith Adams, Antoine Bordes, and Jason Weston. Starspace: Embed all the things! In Proceedings of the 32nd AAAI Conference on Artificial Intelligence, pages 5569–5577, 2018.
----

First of all, we need to ensure that all the required libraries are available. The `-q` parameter is used to suppress long installation reports produced by `pip`.
###Code
!pip -q install gensim==3.8.3
!pip -q install matplotlib==3.3.3
!pip -q install scikit-learn==0.23.2
!pip -q install pandas==1.1.4
###Output
_____no_output_____
###Markdown
We clone the source code repository and compile the starspace binary.
###Code
import os
class StopExecution(Exception):
def _render_traceback_(self):
pass
if os.name == 'nt':
print('ERROR: you are running this notebook on a Windows system. Please open the StarSpace Visual Studio solution file (https://github.com/facebookresearch/StarSpace/blob/master/MVS/StarSpace.sln) and build the project.')
raise StopExecution
else:
!git clone [email protected]:facebookresearch/StarSpace.git && cd StarSpace && make
###Output
Cloning into 'StarSpace'...
remote: Enumerating objects: 5, done.[K
remote: Counting objects: 100% (5/5), done.[K
remote: Compressing objects: 100% (5/5), done.[K
remote: Total 873 (delta 0), reused 0 (delta 0), pack-reused 868[K
Receiving objects: 100% (873/873), 3.05 MiB | 5.39 MiB/s, done.
Resolving deltas: 100% (567/567), done.
g++ -pthread -std=gnu++11 -O3 -funroll-loops -g -c src/utils/normalize.cpp
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/dict.cpp
g++ -pthread -std=gnu++11 -O3 -funroll-loops -g -c src/utils/args.cpp
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/proj.cpp
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/parser.cpp -o parser.o
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/data.cpp -o data.o
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/model.cpp
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/starspace.cpp
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/doc_parser.cpp -o doc_parser.o
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/doc_data.cpp -o doc_data.o
g++ -pthread -std=gnu++11 -O3 -funroll-loops -I/usr/local/bin/boost_1_63_0/ -g -c src/utils/utils.cpp -o utils.o
g++ -pthread -std=gnu++11 -O3 -funroll-loops normalize.o dict.o args.o proj.o parser.o data.o model.o starspace.o doc_parser.o doc_data.o utils.o -I/usr/local/bin/boost_1_63_0/ -g src/main.cpp -o starspace
###Markdown
The executable is now available as `StarSpace/starspace`. The original bash script (classification_ag_news.sh) for the text classification example is available in the [Starspace GitHub repository](https://github.com/facebookresearch/Starspace/blob/master/examples/classification_ag_news.sh). We reimplement it as a Jupyter notebook. The data is based on [Antonio Gulli's corpus (AG)](http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which is a collection of more than 1 million news articles. From this collection, Zhang et al. [[2]](fn2) constructed a smaller corpus containing only the four largest news categories from the original corpus. Each category (i.e. class value) contains 30,000 training instances and 1,900 testing instances. The total number of training samples is 120,000, while 7,600 samples are reserved for testing. We download, unpack and inspect the corpus.

----
[2] Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks for Text Classification. Advances in Neural Information Processing Systems 28 (NIPS 2015).
----
###Code
import tarfile
import requests
import os
request = requests.get('https://dl.fbaipublicfiles.com/starspace/ag_news_csv.tar.gz')
with open("data/ag_news_csv.tar.gz", "wb") as file:
file.write(request.content)
with tarfile.open('data/ag_news_csv.tar.gz', 'r:gz') as tar:
tar.extractall(path='data')
print(os.listdir('data/ag_news_csv'))
###Output
['classes.txt', 'test.csv', 'readme.txt', 'train.csv']
###Markdown
There are four classes, and each news item in the train and test set is labelled with the line number of its class in `classes.txt`. The training data looks as follows.
###Code
import pandas as pd
pd.set_option('display.max_colwidth', 30)
print(pd.read_csv('data/ag_news_csv/classes.txt', names=['categories']))
print(pd.read_csv('data/ag_news_csv/train.csv', names=['category', 'title', 'body']).iloc[:5])
###Output
categories
0 World
1 Sports
2 Business
3 Sci/Tech
category title body
0 3 Wall St. Bears Claw Back I... Reuters - Short-sellers, W...
1 3 Carlyle Looks Toward Comme... Reuters - Private investme...
2 3 Oil and Economy Cloud Stoc... Reuters - Soaring crude pr...
3 3 Iraq Halts Oil Exports fro... Reuters - Authorities have...
4 3 Oil prices soar to all-tim... AFP - Tearaway world oil p...
###Markdown
We read the data into a pandas DataFrame object and preprocess the text by converting it to lowercase and replacing a number of characters. The category is prefixed with `__label__`, as required by the fastText file format. The transformed data is randomly shuffled and written into a fastText-compatible text file. The four news categories are balanced in the train as well as in the test data.
###Code
from pprint import pprint
idx2category = {1: '__label__world',2: '__label__sports', 3:'__label__business', 4:'__label__scitech'}
def preprocess(df):
df = df.replace({'category': idx2category})
df['text'] = df['title'] + ' ' + df['body']
df = df.drop(labels=['title', 'body'], axis=1)
df['text'] = df['text'].str.lower()
for s, rep in [("'"," ' "),
('"',''),
('.',' . '),
('<br />',' '),
(',',' , '),
('(',' ( '),
(')',' ) '),
('!',' ! '),
('?',' ? '),
(';',' '),
(':',' '),
('\\',''),
(' ',' ')
]:
df['text'] = df['text'].str.replace(s, rep)
df = df.sample(frac=1, random_state=42)
return df
for filename in ['data/ag_news_csv/train.csv','data/ag_news_csv/test.csv']:
df = pd.read_csv(filename, names=['category', 'title', 'body'])
df = preprocess(df)
print('File {}'.format(os.path.split(filename)[1]))
pprint(df['category'].value_counts().to_dict())
with open('{}.pp'.format(os.path.splitext(filename)[0]), 'w') as fp:
for row in df.itertuples():
fp.write('{} {}\n'.format(row.category, row.text))
###Output
File train.csv
{'__label__business': 30000,
'__label__scitech': 30000,
'__label__sports': 30000,
'__label__world': 30000}
File test.csv
{'__label__business': 1900,
'__label__scitech': 1900,
'__label__sports': 1900,
'__label__world': 1900}
###Markdown
We can now run StarSpace on the preprocessed files. The set of parameters is the same as in the example from the StarSpace repository. The combination of `trainMode=0` and `fileFormat='fastText'` defines the mode where the labels are individual words, i.e. the classification task.
###Code
!./StarSpace/starspace train \
-trainFile "data/ag_news_csv/train.pp" \
-model "data/ag_news_csv/model" \
-initRandSd 0.01 \
-adagrad false \
-ngrams 1 \
-lr 0.01 \
-epoch 5 \
-thread 20 \
-dim 10 \
-negSearchLimit 5 \
-trainMode 0 \
-label "__label__" \
-similarity "dot" \
-verbose false
###Output
Arguments:
lr: 0.01
dim: 10
epoch: 5
maxTrainTime: 8640000
validationPatience: 10
saveEveryEpoch: 0
loss: hinge
margin: 0.05
similarity: dot
maxNegSamples: 10
negSearchLimit: 5
batchSize: 5
thread: 20
minCount: 1
minCountLabel: 1
label: __label__
label: __label__
ngrams: 1
bucket: 2000000
adagrad: 0
trainMode: 0
fileFormat: fastText
normalizeText: 0
dropoutLHS: 0
dropoutRHS: 0
useWeight: 0
weightSep: :
Start to initialize starspace model.
Build dict from input file : data/ag_news_csv/train.pp
Read 5M words
Number of words in dictionary: 94698
Number of labels in dictionary: 4
Loading data from file : data/ag_news_csv/train.pp
Total number of examples loaded : 120000
Training epoch 0: 0.01 0.002
Epoch: 100.0% lr: 0.008117 loss: 0.035385 eta: <1min tot: 0h0m0s (20.0%)
---+++ Epoch 0 Train error : 0.03201538 +++--- ☃
Training epoch 1: 0.008 0.002
Epoch: 100.0% lr: 0.006000 loss: 0.018529 eta: <1min tot: 0h0m0s (40.0%)
---+++ Epoch 1 Train error : 0.01761493 +++--- ☃
Training epoch 2: 0.006 0.002
Epoch: 100.0% lr: 0.004183 loss: 0.014683 eta: <1min tot: 0h0m1s (60.0%)
---+++ Epoch 2 Train error : 0.01478347 +++--- ☃
Training epoch 3: 0.004 0.002
Epoch: 100.0% lr: 0.002000 loss: 0.012871 eta: <1min tot: 0h0m1s (80.0%)
---+++ Epoch 3 Train error : 0.01287099 +++--- ☃
Training epoch 4: 0.002 0.002
Epoch: 100.0% lr: -0.000000 loss: 0.011717 eta: <1min tot: 0h0m2s (100.0%)
---+++ Epoch 4 Train error : 0.01133779 +++--- ☃
Saving model to file : data/ag_news_csv/model
Saving model in tsv format : data/ag_news_csv/model.tsv
###Markdown
The resulting StarSpace model embeds the input into a common 10-dimensional space (set by the `-dim 10` option). We load it into a dataframe and inspect it. As shown in the table below, the model embeds everything into the same space: the words that appear in documents as well as the categories (the last four rows). In this way, we can compare entities of different kinds.
###Code
pd.read_csv('data/ag_news_csv/model.tsv', sep='\t', header=None, keep_default_na=False)
###Output
_____no_output_____
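###Markdown
To make "comparing entities of different kinds" concrete, here is a minimal sketch that scores a word vector against the four category vectors with a dot product (the similarity chosen at training time). It assumes the `model.tsv` layout shown above: the first column holds the token, the remaining columns its embedding.
###Code
import numpy as np
emb = pd.read_csv('data/ag_news_csv/model.tsv', sep='\t', header=None, keep_default_na=False).set_index(0)
label_rows = [t for t in emb.index if str(t).startswith('__label__')]
def closest_category(token):
    # dot product between the token vector and each category vector
    scores = {lab: float(np.dot(emb.loc[token], emb.loc[lab])) for lab in label_rows}
    return max(scores, key=scores.get)
print(closest_category('oil'))
###Output
_____no_output_____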
###Markdown
We now compute predictions and measure the performance. In test mode, StarSpace reports the hit@k evaluation metric, which tells us how often the correct answer is among the top k predictions. We are interested in the most probable category, therefore we use the hit@1 metric (in general, assignment of categories to text can be viewed as a multi-label classification problem). StarSpace achieves the score $hit@1=0.46$, which means that in 46% of test cases the model's first prediction is the correct answer.
###Code
!./StarSpace/starspace test \
-model "data/ag_news_csv/model" \
-testFile "data/ag_news_csv/test.pp" \
-ngrams 1 \
-dim 10 \
-label "__label__" \
-thread 10 \
-similarity "dot" \
-trainMode 0 \
-verbose false \
-predictionFile "data/ag_news_csv/test.y"
###Output
Arguments:
lr: 0.01
dim: 10
epoch: 5
maxTrainTime: 8640000
validationPatience: 10
saveEveryEpoch: 0
loss: hinge
margin: 0.05
similarity: dot
maxNegSamples: 10
negSearchLimit: 50
batchSize: 5
thread: 10
minCount: 1
minCountLabel: 1
label: __label__
label: __label__
ngrams: 1
bucket: 2000000
adagrad: 1
trainMode: 0
fileFormat: fastText
normalizeText: 0
dropoutLHS: 0
dropoutRHS: 0
useWeight: 0
weightSep: :
Start to load a trained starspace model.
STARSPACE-2018-2
Model loaded.
Loading data from file : data/ag_news_csv/test.pp
Total number of examples loaded : 7600
Predictions use 4 known labels.
------Loaded model args:
Arguments:
lr: 0.01
dim: 10
epoch: 5
maxTrainTime: 8640000
validationPatience: 10
saveEveryEpoch: 0
loss: hinge
margin: 0.05
similarity: dot
maxNegSamples: 10
negSearchLimit: 5
batchSize: 5
thread: 10
minCount: 1
minCountLabel: 1
label: __label__
label: __label__
ngrams: 1
bucket: 2000000
adagrad: 1
trainMode: 0
fileFormat: fastText
normalizeText: 0
dropoutLHS: 0
dropoutRHS: 0
useWeight: 0
weightSep: :
Predictions use 4 known labels.
Evaluation Metrics :
hit@1: 0.464737 hit@10: 1 hit@20: 1 hit@50: 1 mean ranks : 1.70079 Total examples : 7600
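###Markdown
To make the hit@k definition concrete, a minimal self-contained sketch: given the true label and a ranked list of predicted labels per example, hit@k is the fraction of examples whose true label appears among the top k predictions; for single-label data, hit@1 coincides with accuracy.
###Code
def hit_at_k(y_true, ranked_preds, k=1):
    # fraction of examples whose true label is among the top-k ranked predictions
    hits = sum(1 for truth, ranked in zip(y_true, ranked_preds) if truth in ranked[:k])
    return hits / len(y_true)
print(hit_at_k(['__label__sports', '__label__world'],
               [['__label__sports', '__label__world'],
                ['__label__business', '__label__world']], k=1))
###Output
_____no_output_____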
###Markdown
This result was obtained using the parameters as specified by the authors in the [published example](https://github.com/facebookresearch/Starspace/blob/master/examples/classification_ag_news.sh). The performance (46.4%) differs significantly from the published results [[1]](fn1) where the authors report 91.6% accuracy on the test set for this task using the same number of dimensions (10). On the other hand, our implementation of the baseline classifier based on TF-IDF + SVM presented below shows similar performance (91%) to the BOW + multinomial logistic regression (88.8%) reported in the paper [[3]](fn3).--- [3] Zhang, X., and LeCun, Y. 2015. Text understanding from scratch. arXiv preprint arXiv:1502.01710. ----
###Code
import gensim
def to_tfidf(documents, dic=None, tfidf_model=None):
documents = [gensim.parsing.preprocessing.preprocess_string(doc) for doc in documents]
if dic is None:
dic = gensim.corpora.Dictionary(documents)
dic.filter_extremes()
bows = [dic.doc2bow(doc) for doc in documents]
if tfidf_model is None:
tfidf_model = gensim.models.tfidfmodel.TfidfModel(dictionary=dic)
tfidf_vectors = tfidf_model[bows]
return tfidf_vectors, dic, tfidf_model
train = pd.read_csv('data/ag_news_csv/train.csv', names=['category', 'title', 'body'])
X_train = [x.title + ' ' + x.body for x in train.itertuples()]
y_train = [x.category for x in train.itertuples()]
test = pd.read_csv('data/ag_news_csv/test.csv', names=['category', 'title', 'body'])
X_test = [x.title + ' ' + x.body for x in test.itertuples()]
y_test = [x.category for x in test.itertuples()]
X_train_tfidf, dic, tfidf_model = to_tfidf(X_train)
X_test_tfidf, _, __ = to_tfidf(X_test, dic, tfidf_model)
###Output
_____no_output_____
###Markdown
The TF-IDF weighting used with a linear SVM achieves an accuracy of 91%. Because this is a single-label multiclass classification problem, this metric is equivalent to the hit@1 reported by StarSpace.
###Code
from sklearn.svm import LinearSVC
from sklearn import metrics
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(y_train)
svc = LinearSVC()
svc.fit(gensim.matutils.corpus2csc(X_train_tfidf, num_terms=len(dic)).T, le.transform(y_train))
y_predicted = svc.predict(gensim.matutils.corpus2csc(X_test_tfidf, num_terms=len(dic)).T)
print('Accuracy: {:.3f}'.format(metrics.accuracy_score(le.transform(y_test), y_predicted)))
###Output
Accuracy: 0.910
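###Markdown
For a finer-grained view than the overall accuracy, we can also inspect per-class precision and recall; a minimal sketch reusing the fitted `le`, `svc` and the predictions computed above.
###Code
print(metrics.classification_report(le.transform(y_test), y_predicted,
                                    target_names=[str(c) for c in le.classes_]))
###Output
_____no_output_____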
###Markdown
We have embeddings for a large number of words, so we can run clustering to see if the embedding vectors can be used to partition words into four categories.
###Code
import numpy as np
from sklearn.cluster import KMeans
model = pd.read_csv('data/ag_news_csv/model.tsv', sep='\t', header=None, keep_default_na=False)
embeddings = model[model.columns[1:]]
kmeans = KMeans(n_clusters=4, random_state=12345).fit(embeddings)
###Output
_____no_output_____
###Markdown
The three smaller clusters closely match the topics Business, World, and Sci/Tech while the largest cluster is less specific and contains words from all topics.
###Code
words_array = model[0].to_numpy()
for ci in range(kmeans.n_clusters):
cluster_words = np.compress(kmeans.labels_==ci, words_array)
print('Cluster {} ({} instances)'.format(ci, len(cluster_words)))
print(cluster_words[:100])
print('')
###Output
Cluster 0 (1640 instances)
['us' 'company' 'oil' 'inc' 'yesterday' '?' 'corp' 'prices' 'years'
'group' 'season' 'deal' 'sales' 'business' 'billion' 'former'
'washington' 'profit' 'states' '/b>' 'b>' 'chief' 'american' 'shares'
'take' 'bank' 'third' 'federal' 'companies' 'co' 'maker' 'bid' 'largest'
'industry' 'big' 'giant' '5' 'growth' 'investor' '//www' 'href=http'
'/a>' 'trade' 'earnings' 'dollar' 'buy' 'gold' 'union' 'amp' 'stock'
'loss' 'agreed' 'months' 'aspx' 'com/fullquote'
'target=/stocks/quickinfo/fullquote>' 'like' 'firm' 'air' 'rose'
'executive' 'jobs' 'update' 'price' 'boston' 'economy' 'drug' 'ahead'
'pay' 'near' 'biggest' 'economic' 'peoplesoft' 'car' 'o' 'street' 'work'
'your' 'free' '2005' 'much' '6' 'presidential' 'workers' 'wins' 'america'
'nation' 'share' 'financial' 'fall' 'wall' 'fell' 'lower' 'september'
'crude' 'october' 'chicago' 'job' '11' 'consumer']
Cluster 1 (89619 instances)
['the' ',' 'to' 'a' 'of' 'in' 'and' 's' 'on' 'for' '#39' ')' 'that' 'with'
'as' 'at' 'is' 'its' 'new' 'it' 'said' 'has' 'from' 'an' 'his' 'will'
'after' 'was' 'be' 'over' 'have' 'their' 'are' 'up' 'quot' 'but' 'more'
'first' 'two' 'he' 'world' 'this' '--' 'monday' 'wednesday' 'tuesday'
'out' 'thursday' 'one' 'not' 'against' 'friday' 'into' 'they' 'about'
'last' 'year' 'than' 'who' 'no' 'were' 'been' 'million' 'week' 'had'
'united' 'when' 'could' 'three' 'today' 'time' 'may' 'percent' '1' 'off'
'team' 'next' 'back' 'saturday' 'or' 'can' 'some' 'second' 'state' 'all'
'top' 'day' 'down' 'n' 'international' 'most' 'record' 'victory'
'officials' 'report' 'open' 'end' 'plans' 'court' 'if']
Cluster 2 (1594 instances)
['.' '-' "'" 'iraq' 'york' 'president' 'says' 'sunday' 'would'
'government' 'people' 'which' 'afp' 'win' 'night' 'china' 'minister'
'bush' 'killed' 'city' 'stocks' 'european' 'talks' 'league' 'country'
'reported' 'british' 'japan' 'india' 'police' 'prime' 'iraqi' 'leader'
'hit' 'say' 'baghdad' 'expected' 'election' 'north' 'under' 'war'
'australia' 'military' 'cut' 'nuclear' 'higher' 'un' 'official'
'palestinian' 'sox' 'attack' 'troops' 'russia' 'israeli' 'gaza' 'press'
'west' 'including' 'general' 'man' 'iran' 'football' 'forces' 'athens'
'past' 'europe' 'investors' 'peace' 'canadian' 'six' 'russian' 'beat'
'pakistan' 'held' 'public' 'eu' 'where' 'foreign' 'bomb' 'attacks'
'israel' 'nations' 'championship' 'korea' 'australian' 'kerry' 'leaders'
'french' 'men' 'house' 'death' 'killing' 'darfur' 'leading' 'arafat'
'capital' 'army' 'japanese' 'campaign' 'trial']
Cluster 3 (1849 instances)
['' '(' 'by' 'reuters' 'ap' '<' 'u' 'microsoft' 't' 'game' 'security'
'software' 'internet' '2' 'market' 'announced' 'news' '2004' 'service'
'you' 'before' 'technology' 'com' 'search' 'computer' 'space' 'online'
'what' 'network' 'google' 'ibm' 'research' 'according' 'music' 'help'
'while' 'games' 'web' 'san' 'mobile' 'services' '4' 'quarter' 'wireless'
'system' 'data' 'i' 'phone' 'apple' 'oracle' 'windows' 'global' 'intel'
'found' 'users' 'reports' 'released' 'release' 'offer' 'case' 'use' 'uk'
'video' 'pc' 'systems' 'support' 'nasa' 'sun' 'launch' 'linux' 'called'
'digital' 'scientists' 'net' 'program' 'version' 'future' 'center' 'site'
'customers' 'study' 'chip' 'sony' 'management' 'california' 'such'
'making' 'department' 'using' 'grand' 'ceo' 'university' 'tv' 'launched'
'times' 'source' 'server' 'better' 'phones' 'desktop']
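###Markdown
A quick way to relate the clusters to the categories, as a minimal sketch reusing the objects defined above: check which cluster each `__label__` vector was assigned to.
###Code
for i, token in enumerate(words_array):
    if str(token).startswith('__label__'):
        print(token, '-> cluster', kmeans.labels_[i])
###Output
_____no_output_____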
|
GeneralExemplars/Coding Activities for Schools/National Higher/if_for_Higher.ipynb | ###Markdown
 If and for statements In blue, the instructions and goals are highlighted. In green, the information is highlighted. In yellow, the exercises are highlighted. In red, the error and alert messages are highlighted. Instructions Click Run on each cell to go through the code in each cell. This will take you through the cell and print out the results. If you wish to see all the outputs at once in the whole notebook, just click Cell and then Run All. Goals After this workshop, the student should be more familiar with the following topics: printing basic statements and commands in Jupyter Notebook; performing basic arithmetic calculations in Python; improving an existing model of the code; recognizing and checking variable types in Python; using the if and for statements for basic operations; working with prime numbers and writing functions in computer science. These objectives are in agreement with the Higher Scottish Curriculum for high-school students. Note This notebook is a revision of the concepts met in the Nat 3, Nat 4 and Nat 5 notebooks. If you feel uncomfortable with the exercises met here, go back to the previous notebooks and make sure you understand the notions. Explore Conditional if statement... Welcome to another session on the Jupyter Notebooks!! Today we will have a better look at the if statement and work with the for condition... we will take them one by one! Let us begin with a revision of the if statement. As the name suggests, this instruction runs code only when a certain condition is met: Exercise: Take a variable a and check whether it is even or odd
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Exercise: Check whether a number has one, two or three digits. To begin with, just use an if, an elif and an else condition:
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Exercise: For the above code, try it with some numbers and see what happens. On your model, try using a float number. Note: If the code still works, it means you have already predicted the next step of the notebook. Note: If the code has printed an error, it is because we only considered integer numbers, and not floating point numbers, in our code. Exercise: Inspect the code in the cell below, and check what happens when you run it.
###Code
if(a != int(a)):
print("You need to plug in an integer number")
if(0 < a < 10):
print("Integer a has only one digit")
elif(10 < a < 99):
print("Integer a has two digits")
else:
print("Integer a has three digits")
###Output
_____no_output_____
###Markdown
Exercise: Convince yourself that the whole expression could be rewritten as follows:
###Code
if(a != int(a)):
print("You need to plug in an integer number")
else:
if(0 < a < 10):
print("Integer a has only one digit")
elif(10 < a < 99):
print("Integer a has two digits")
else:
print("Integer a has three digits")
###Output
_____no_output_____
###Markdown
Exercise: Can you try and add more conditions for this problem? Hint: There is no correct answer here. The exercise is up to your imagination
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Exercise: Take a number and check if the number is divisible either by two or five:
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Exercise: We will now approach a new type of problem. Let us check whether a number is prime or not. Recall, first of all, what a prime number is. After the revision you have conducted on prime numbers, how can we instruct the computer to check if a number is prime or not? We should take each integer smaller than the number in question, and check if the division gives a zero remainder. Note: you can already spot the key words corresponding to the for and if statements. Exercise: Inspect the code lines below, debate this with your colleagues, and run the cell:
###Code
ok = True # boolean variable to check if the number is prime or not.
# if ok is True, then the number is prime
N = 28 # the variable of interest to be analysed
for i in range(2,N): # take all the numbers from 2 to N
if(N % i == 0): # check if the division is exact
ok = False # the number is no longer prime
###Output
_____no_output_____
###Markdown
Exercise: Now, the computer has already assigned a value to the boolean variable ok . We want to check whether it is equal to True, or False.
###Code
if(ok == 1):
print("The number " + str(N) + " is prime")
else:
print("The number " + str(N) + " is not prime")
###Output
The number 28 is not prime
###Markdown
Predict: What happens if I put the loop from 1 to N? Discuss the idea with your peers and Predict the outcome. Afterwards, just replace 2 with 1 in the code and check the results. Any number is divisible by 1, so all the numbers will be considered prime. Predict: How can I increase the performance of the algorithm described above? How many steps are made (how many variables are there from $ 2 $ to $ N $)? Do I really need to check if, for example, 38 is a multiple of 37, 36, or 29? Is there an integer which is divisible by a number greater than its half in the set of natural numbers? One variable has to be changed in the code above, in order to increase the performance of the algorithm (so that fewer steps have to be made in order to achieve the same valid result)
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Let's play a game: How well do you know your classmates? Talk to each other if you have not done it so far, work out what each person enjoys, and try to make a loop with the habits of your friends. It can look like the example below, but feel free to also come up with examples of your own.
###Code
for i in range(3):
print(input("The name of my friend is: "))
print(input("His/her favourite game is: "))
print(input("His/her favourite holiday place is: "))
###Output
_____no_output_____
###Markdown
Exercise: Add the following condition to the game: if the name of the person is the same as your name, print: "Wooah!!" . You can also add another condition here: for instance, does the person have another name? You can choose whether to use the elif statement or not. Once again, There is no correct answer to this exercise, the whole point is for you to get more familiarized with coding. Take-away This is it for today, and well done for managing to go through the material!! After this session, you should be more familiar with how simple sentences, numbers and conditional statements can be printed in Python. Moreover, ponder a bit on the for instruction, as it is heavily used in programming. Also, feel free to work more on this notebook using any commands you would like. Note: Always keep a back-up of the notebook, in case the original one is altered. For today's session, this should be enough! See you later!!
###Code
print("Bye bye! :D")
###Output
_____no_output_____ |
Closed-Lexical-Classes/.ipynb_checkpoints/2. Closed Classes Analysis-checkpoint.ipynb | ###Markdown
Goal: Investigate birth and death among closed classes of words 1. Load the \*\_CLOSED_CLASSES.json files 2. Separate the word from the part of speech and form JSON of form {unigram: {pos:'pos', max: max_usage, median_all: median_all, median_in_use:median_in_use, mean_all: mean_all, mean_in_use:mean_in_use, birth_years: [year1, year2, ...], death_years: [year1, year2, ...]} ...} Where - `max` is the maximum frequency of usage over the entire time period - `me(di)an_all` is the me(di)an of the frequencies of usage at all points in the time interval. - `me(di)an_in_use` is the me(di)an of the frequencies of usage only when actually in use (frequency >0) 3. Concatenate the final dictionaries 4. Save as a single JSON Available parts of speech: - _PRON_ pronoun - _DET_ determiner or article - _ADP_ an adposition: either a preposition or a postposition - _CONJ_ conjunction - _PRT_ particle Load the \*\_CLOSED_CLASSES.json files
###Code
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import re
#For the Google POS tagging
underscore = re.compile('_{1}')
import statistics
def open_json(directory,file_path):
with open(directory+file_path,'r') as f:
ngrams = json.load(f)
f.close()
return ngrams
def normalize(ngrams):
years = [str(i) for i in range(1800,2020)]
unigram_dict = dict()
for word in tqdm(ngrams.keys()):
match_count_by_year = []
for year in years:
if year in ngrams[word].keys():
match_count_by_year.append(ngrams[word][year])
else:
#Zeroes are necessary for smoothing
match_count_by_year.append(0)
unigram_dict[word] = match_count_by_year
return unigram_dict, years
def smoothing(unigram_dict, years, smoothing = 5):
df = pd.DataFrame.from_dict(unigram_dict #take in the dictionary
).rolling(smoothing,center=True #create frames of size 5 (smoothing value), and replace value in middle
).mean( #average accross those frames
).rename({i:years[i] for i in range(len(years))}, axis = 'index' #rename the indices to years
).dropna()
years_map = {i:int(year) for i, year in enumerate(df.index)}
ngrams = df.to_dict(orient = 'list')
return ngrams, years_map
def analyze_birth_and_death(ngrams,years_map):
ngrams_analyzed = {}
for unigram in tqdm(ngrams.keys()):
frequency_list = ngrams[unigram]
frequency_in_use_list = [f for f in frequency_list if f>0]
if frequency_in_use_list: #only proceed if there is some value that is greater than 0 in the frequency list
max_usage = max(frequency_list)
median_all = statistics.median(frequency_list)
median_in_use = statistics.median(frequency_in_use_list)
mean_all = statistics.mean(frequency_list)
mean_in_use = statistics.mean(frequency_in_use_list)
birth_years, death_years = [],[]
for i in range(len(frequency_list)-1):
#Birth
if frequency_list[i]==0 and frequency_list[i+1]!=0:
birth_years.append(years_map[i+1])
#Death
if frequency_list[i]!=0 and frequency_list[i+1]==0:
death_years.append(years_map[i])
#Disregarding death in the final year
if len(birth_years)+len(death_years)>0:
#Replace the tagged unigram with the word and place POS separately
word_pos = underscore.split(unigram)
ngrams_analyzed[word_pos[0]] = {'POS':word_pos[1],
'max_usage':max_usage,
'median_all':median_all,
'median_in_use':median_in_use,
'mean_all':mean_all,
'mean_in_use':mean_in_use,
'birth_years':birth_years,
'death_years':death_years}
else:
pass
#print(unigram,'had no instances of usage after smoothing.')
return ngrams_analyzed
def save_json(dictionary,directory,file_path):
output = file_path+'.json'
if len(dictionary)>0:
with open(directory+output, 'w') as f_out:
json.dump(dictionary, f_out)
print('SAVED: ',output,len(dictionary))
else:
print('unigram dict empty',output)
###Output
_____no_output_____
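###Markdown
As a quick sanity check of the birth/death rule, a minimal toy example (hypothetical frequencies and years) run through the function defined above: a series that is zero, becomes positive, then returns to zero should yield exactly one birth year and one death year.
###Code
toy_ngrams = {'unto_ADP': [0, 0, 3.0, 5.0, 0]}
toy_years_map = {i: 1940 + 10 * i for i in range(5)}
analyze_birth_and_death(toy_ngrams, toy_years_map)
###Output
_____no_output_____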
###Markdown
Run Everything
###Code
%%time
final_dict = {}
directory = '../Ngrams/unigram_data/'
files = os.listdir(directory)
for file_path in files:
if '_CLOSED_CLASSES.json' in file_path:
ngrams = open_json(directory,file_path)
print('Opened',file_path)
unigram_dict, years = normalize(ngrams)
print('Normalized')
del ngrams
ngrams, years_map = smoothing(unigram_dict, years)
print('Smoothed')
del unigram_dict
del years
ngrams_analyzed = analyze_birth_and_death(ngrams,years_map)
print('Analyzed birth and death')
del ngrams
del years_map
final_dict.update(ngrams_analyzed)
del ngrams_analyzed
save_json(final_dict,directory,'CLOSED_CLASSES_SORTABLE')
###Output
Opened 1-00006-of-00024_CLOSED_CLASSES.json
|
docs/source/multilabelembeddings.ipynb | ###Markdown
###Markdown
Multi-label embedding-based classification Multi-label embedding techniques emerged as a response to the need to cope with a large label space, but with the rise of computing power they became a method of improving classification quality. Typically, embedding-based multi-label classification starts with embedding the label matrix of the training set in some way, training a regressor to predict the embeddings of unseen samples, and training a classifier (sometimes a very simple one) to correct the regression error. Scikit-multilearn provides several multi-label embedders alongside a general regressor-classifier classification class. Currently available embedding strategies include: - Label Network Embeddings via the OpenNE network embedding library, as in the [LNEMLC paper](https://arxiv.org/abs/1812.02956) - Cost-Sensitive Label Embedding with Multidimensional Scaling, as in the [CLEMS paper](https://github.com/ej0cl6/csmlc) - scikit-learn based embeddings such as PCA or [manifold learning approaches](https://scikit-learn.org/stable/modules/manifold.html) Let's start with loading some data:
###Code
import numpy
import sklearn.metrics as metrics
from skmultilearn.dataset import load_dataset
X_train, y_train, feature_names, label_names = load_dataset('emotions', 'train')
X_test, y_test, _, _ = load_dataset('emotions', 'test')
###Output
emotions:train - exists, not redownloading
emotions:test - exists, not redownloading
###Markdown
Label Network Embeddings The label network embedding approach requires a working tensorflow installation and the OpenNE library. To install them, run the following code:
```bash
pip install networkx tensorflow
git clone https://github.com/thunlp/OpenNE/
pip install -e OpenNE/src
```
For an example we will use the LINE embedding method, one of the most efficient and well-performing state-of-the-art approaches; for the meaning of the parameters, consult the [OpenNE documentation](). We select `order = 3`, which means that the method will use both first- and second-order proximities between labels for the embedding. We select a dimension of 5 times the number of labels, as linear embeddings tend to need more dimensions for best performance, normalize the label weights to maintain normalized distances in the network, and aggregate label embeddings per sample by summation, which is a classical approach.
###Code
from skmultilearn.embedding import OpenNetworkEmbedder
from skmultilearn.cluster import LabelCooccurrenceGraphBuilder
graph_builder = LabelCooccurrenceGraphBuilder(weighted=True, include_self_edges=False)
openne_line_params = dict(batch_size=1000, order=3)
embedder = OpenNetworkEmbedder(
graph_builder,
'LINE',
dimension = 5*y_train.shape[1],
aggregation_function = 'add',
normalize_weights=True,
param_dict = openne_line_params
)
###Output
_____no_output_____
###Markdown
We now need to select a regressor and a classifier. We use random forest regressors with MLkNN, a combination that works well and is often used for multi-label embedding:
###Code
from skmultilearn.embedding import EmbeddingClassifier
from sklearn.ensemble import RandomForestRegressor
from skmultilearn.adapt import MLkNN
clf = EmbeddingClassifier(
embedder,
RandomForestRegressor(n_estimators=10),
MLkNN(k=5)
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
###Output
Pre-procesing for non-uniform negative sampling!
Pre-procesing for non-uniform negative sampling!
epoch:0 sum of loss:4.97153359652
epoch:0 sum of loss:5.11869335175
epoch:1 sum of loss:4.98133981228
epoch:1 sum of loss:4.97720247507
epoch:2 sum of loss:4.81723511219
epoch:2 sum of loss:5.05428689718
epoch:3 sum of loss:5.09079384804
epoch:3 sum of loss:4.72988516092
epoch:4 sum of loss:5.0347994566
epoch:4 sum of loss:4.95063251257
epoch:5 sum of loss:4.68008613586
epoch:5 sum of loss:4.9329983592
epoch:6 sum of loss:4.74205821753
epoch:6 sum of loss:4.68989795446
epoch:7 sum of loss:4.62912601233
epoch:7 sum of loss:4.81548637152
epoch:8 sum of loss:4.40033769608
epoch:8 sum of loss:4.73801320791
epoch:9 sum of loss:4.61178982258
epoch:9 sum of loss:4.91443294287
###Markdown
Cost-Sensitive Label Embedding with Multidimensional Scaling CLEMS is another well-performing method for multi-label embeddings. It uses weighted multi-dimensional scaling to embed a cost matrix of unique label combinations. The cost matrix contains the cost of mistaking a given label combination for another, so real-valued cost functions are a better choice than discrete ones. The `is_score` parameter tells the embedder whether the cost function is a score (the higher the better) or a loss (the lower the better). Additional params can also be passed to the weighted scaler. The most efficient setting for the number of dimensions is equal to the number of labels, and is thus enforced here.
###Code
from skmultilearn.embedding import CLEMS, EmbeddingClassifier
from sklearn.ensemble import RandomForestRegressor
from skmultilearn.adapt import MLkNN
dimensional_scaler_params = {'n_jobs': -1}
clf = EmbeddingClassifier(
CLEMS(metrics.jaccard_similarity_score, is_score=True, params=dimensional_scaler_params),
RandomForestRegressor(n_estimators=10, n_jobs=-1),
MLkNN(k=1),
regressor_per_dimension= True
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
###Output
_____no_output_____
###Markdown
Scikit-learn based embedders Any scikit-learn embedder can be used for multi-label classification embeddings with scikit-multilearn; just select one and try it. Here's a spectral embedding approach with a 10-dimensional embedding space:
###Code
from skmultilearn.embedding import SKLearnEmbedder, EmbeddingClassifier
from sklearn.manifold import SpectralEmbedding
from sklearn.ensemble import RandomForestRegressor
from skmultilearn.adapt import MLkNN
clf = EmbeddingClassifier(
SKLearnEmbedder(SpectralEmbedding(n_components = 10)),
RandomForestRegressor(n_estimators=10),
MLkNN(k=5)
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
###Output
_____no_output_____ |
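###Markdown
Whichever embedder is used, `predictions` is a standard label indicator matrix, so it can be scored with the usual multi-label metrics; a minimal sketch assuming the `y_test` and `predictions` from the cells above.
###Code
print('Hamming loss:', metrics.hamming_loss(y_test, predictions))
print('Subset accuracy:', metrics.accuracy_score(y_test, predictions))
###Output
_____no_output_____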
notebooks/figures_janelia.ipynb | ###Markdown
Figure 1 Overall AAN results, for digits only
###Code
# Init the figure
fig = plt.figure(figsize=(20, 1), constrained_layout=True)
grid = plt.GridSpec(nrows=26, ncols=10, wspace=-.40, hspace=0.4, figure=fig)
# Panel 1 - digits
plt.subplot(grid[0:18, 0])
medians = [np.median(exp151["correct"]), np.median(exp152["correct"])]
plt.scatter(x=np.repeat(0.25, 20), y=exp151["correct"], s=20, color="black", alpha=0.5, marker="o")
plt.scatter(x=np.repeat(0.75, 20), y=exp152["correct"], s=20, color="black", alpha=0.5, marker="o")
plt.scatter(x=[0.25, 0.75], y=medians, color="red", alpha=1, s=300, linewidth=3, marker="_")
plt.xticks(np.array([0.25, 0.75]), ('Astrocytes', 'Neurons'))
plt.ylim(0, 1)
plt.xlim(0, 1)
plt.xticks(rotation=90)
plt.ylabel("Correct")
# -------------------------------------------------
model_names = ["0.0","0.1", "0.2", "0.3", "0.4", "0.5"]
models = [exp155, exp157_s01, exp157_s02, exp158_s03, exp158_s04, exp157_s05, exp157_s06]
medians = [
np.median(exp155["correct"]),
np.median(exp157_s01["correct"]),
np.median(exp157_s02["correct"]),
np.median(exp158_s03["correct"]),
np.median(exp158_s04["correct"]),
np.median(exp157_s05["correct"]),
]
plt.subplot(grid[0:5, 3:8])
plt.scatter(x=model_names, y=medians, color="red", alpha=1, s=200, linewidth=3, marker="_")
for name, model in zip(model_names, models):
plt.scatter(x=np.repeat(name, 20), y=model["correct"], color="black", alpha=0.6, s=20)
plt.ylim(0, 1.1)
plt.ylabel("")
plt.xlabel("Diffusion (std dev)")
_ = sns.despine()
# -------------------------------------------------
model_names = ["0.0", "0.01", "0.05", "0.1", "0.2"]
models = [exp155, exp159_s01, exp159_s05, exp159_s1, exp159_s2]
medians = [
np.median(exp155["correct"]),
np.median(exp159_s01["correct"]),
np.median(exp159_s05["correct"]),
np.median(exp159_s1["correct"]),
np.median(exp159_s2["correct"]),
]
plt.subplot(grid[10:15, 3:8])
plt.scatter(x=model_names, y=medians, color="red", alpha=1, s=200, linewidth=3, marker="_")
for name, model in zip(model_names, models):
plt.scatter(x=np.repeat(name, 20), y=model["correct"], color="black", alpha=0.6, s=20)
plt.ylim(0, 1.1)
plt.ylabel("")
plt.xlabel("Noise (std dev)")
_ = sns.despine()
# -------------------------------------------------
model_names = ["0.0", "0.01", "0.05", "0.1", "0.2"]
models = [exp155, exp160_p01, exp160_p05, exp160_p1, exp160_p2]
medians = [
np.median(exp155["correct"]),
np.median(exp160_p01["correct"]),
np.median(exp160_p05["correct"]),
np.median(exp160_p1["correct"]),
np.median(exp160_p2["correct"]),
]
plt.subplot(grid[20:25, 3:8])
plt.scatter(x=model_names, y=medians, color="red", alpha=1, s=200, linewidth=3, marker="_")
for name, model in zip(model_names, models):
plt.scatter(x=np.repeat(name, 20), y=model["correct"], color="black", alpha=0.6, s=20)
plt.ylim(0, 1.1)
plt.ylabel("")
plt.xlabel("p(unstabel)")
_ = sns.despine()
plt.savefig("figure_janelia_digits.png", bbox_inches="tight")
###Output
_____no_output_____ |
1_EDA_DataPreprocess_v2.ipynb | ###Markdown
###Code
from google.colab import drive
drive.mount('/content/drive')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Data Loading
###Code
# Stock price data
TCS_stock = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockPrice/historical_stock_price_v2_TCS.csv")
HDFC_stock = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockPrice/historical_stock_price_v2_HDFC.csv")
HUL_stock = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockPrice/historical_stock_price_v2_HUL.csv")
MARUTI_stock = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockPrice/historical_stock_price_v2_MARUTI.csv")
# Tech Indicators price data
TCS_SMA = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/technical_indicator_TCS.BSE_SMA.csv")
HDFC_SMA = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/technical_indicator_HDFC.BSE_SMA.csv")
HUL_SMA = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/technical_indicator_HINDUNILVR.BSE_SMA.csv")
MARUTI_SMA = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/technical_indicator_MARUTI.BSE_SMA.csv")
TCS_EMA = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/technical_indicator_TCS.BSE_EMA.csv")
HDFC_EMA = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/technical_indicator_HDFC.BSE_EMA.csv")
HUL_EMA = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/technical_indicator_HINDUNILVR.BSE_EMA.csv")
MARUTI_EMA = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/technical_indicator_MARUTI.BSE_EMA.csv")
# Stock Indices
nifty_50 = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY 50_Data.csv')
bse = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/BSE.csv')
nifty_it = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY IT_Data.csv')
nifty_auto = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY AUTO_Data.csv')
nifty_finance = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY FINANCIAL SERVICES_Data.csv')
nifty_fmcg = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY FMCG_Data.csv')
###Output
_____no_output_____
###Markdown
EDA and Data Preprocessing of stock data
###Code
TCS_stock.head(), HDFC_stock.head(), HUL_stock.head(), MARUTI_stock.head()
TCS_stock.info(), HDFC_stock.info(), HUL_stock.info(), MARUTI_stock.info()
# Drop NULL rows
TCS_stock.dropna(inplace = True)
HUL_stock.dropna(inplace = True)
HDFC_stock.dropna(inplace = True)
MARUTI_stock.dropna(inplace = True)
# Shape after dropping null records
TCS_stock.shape, HUL_stock.shape, HDFC_stock.shape, MARUTI_stock.shape
TCS_stock['date'].describe()
# Change Dtype of Columns
TCS_stock["date"] = pd.to_datetime(TCS_stock["date"])
TCS_stock = TCS_stock.astype({"open": float, "volume": float})
HDFC_stock["date"] = pd.to_datetime(HDFC_stock["date"])
HDFC_stock = HDFC_stock.astype({"open": float, "volume": float})
HUL_stock["date"] = pd.to_datetime(HUL_stock["date"])
HUL_stock = HUL_stock.astype({"open": float, "volume": float})
MARUTI_stock["date"] = pd.to_datetime(MARUTI_stock["date"])
MARUTI_stock = MARUTI_stock.astype({"open": float, "volume": float})
# Sort the Database by Date
TCS_stock = TCS_stock.sort_values(by = 'date', ignore_index = True)
HDFC_stock = HDFC_stock.sort_values(by = 'date', ignore_index = True)
HUL_stock = HUL_stock.sort_values(by = 'date', ignore_index = True)
MARUTI_stock = MARUTI_stock.sort_values(by = 'date', ignore_index = True)
TCS_stock.describe(), HDFC_stock.describe(), HUL_stock.describe(), MARUTI_stock.describe()
TCS_stock['adj close'].tail(1), HDFC_stock['adj close'].tail(1), HUL_stock['adj close'].tail(1), MARUTI_stock['adj close'].tail(1)
Companies = [TCS_stock, HDFC_stock, HUL_stock, MARUTI_stock]
Companies_Title = ["TCS_stock","HDFC_stock","HUL_stock","MARUTI_stock"]
# Lets view historical view of the closing prices
plt.figure(figsize=(20, 12))
for index, company in enumerate(Companies):
plt.subplot(3, 2, index + 1)
plt.plot(company["date"], company["adj close"])
plt.title(Companies_Title[index])
plt.ylabel('Adj. Close')
# Now lets plot the total volume of stock being traded each day
plt.figure(figsize=(20, 12))
for index, company in enumerate(Companies):
plt.subplot(3, 2, index + 1)
plt.plot(company["date"], company["volume"])
plt.title(Companies_Title[index])
plt.ylabel('volume')
###Output
_____no_output_____
###Markdown
Now that we have seen the visualizations for the closing prices and the volume traded each day, let's go ahead and calculate the moving averages of the stocks. What was the moving average of the various stocks?
###Code
Moving_Average_Day = [10, 20, 50]
for Moving_Average in Moving_Average_Day:
for company in Companies:
column_name = f'Moving Average for {Moving_Average} days'
company[column_name] = company["adj close"].rolling(Moving_Average).mean()
plt.figure(figsize=(20, 12))
for index, company in enumerate(Companies):
plt.subplot(3, 2, index + 1)
plt.plot(company["date"], company["adj close"])
plt.plot(company["date"], company["Moving Average for 10 days"])
plt.plot(company["date"], company["Moving Average for 20 days"])
plt.plot(company["date"], company["Moving Average for 50 days"])
plt.title(Companies_Title[index])
plt.legend(("Adj. Close", "Moving Average for 10 days", "Moving Average for 20 days", "Moving Average for 50 days"))
###Output
_____no_output_____
###Markdown
What was the daily return of the stock on average? Now that we've done some baseline analysis, let's go ahead and dive a little deeper. We're now going to analyze the risk of the stock. In order to do so, we'll need to take a closer look at the daily changes of the stock, and not just its absolute value.
###Code
# pct_change() function calculates the percentage change between the current and a prior element.
# This function by default calculates the percentage change from the immediately previous row.
for company in Companies:
company["Daily Return"] = company["adj close"].pct_change()
plt.figure(figsize=(20, 12))
for index, company in enumerate(Companies):
plt.subplot(3, 2, index + 1)
plt.plot(company["date"], company["Daily Return"])
plt.title(Companies_Title[index])
plt.ylabel('Daily Return')
###Output
_____no_output_____
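###Markdown
To attach a simple number to the notion of risk, a minimal sketch using the daily returns computed above: the standard deviation of daily returns is a common volatility proxy.
###Code
for title, company in zip(Companies_Title, Companies):
    daily = company["Daily Return"].dropna()
    print(f'{title}: mean daily return = {daily.mean():.5f}, volatility (std) = {daily.std():.5f}')
###Output
_____no_output_____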
###Markdown
Now, let's get an overall look at the average daily return using a histogram.
###Code
# distplot is a deprecated function, so to ignore warnings, the filterwarnings function is used.
import warnings
warnings.filterwarnings('ignore')
plt.figure(figsize=(20, 15))
for index, company in enumerate(Companies):
plt.subplot(3, 2, index + 1)
sns.distplot(company["Daily Return"].dropna(), color = "purple")
plt.title(Companies_Title[index])
###Output
_____no_output_____
###Markdown
Kurtosis is a statistical measure that defines how heavily the tails of a distribution differ from the tails of a normal distribution. In other words kurtosis identifies whether the tails of a given distribution contain extreme values.
###Code
print("Kurtosis Value")
for index, company in enumerate(Companies):
print(f'{Companies_Title[index]}: {company["Daily Return"].kurtosis()}')
###Output
Kurtosis Value
TCS_stock: 4.606939097631171
HDFC_stock: 6.189191071931482
HUL_stock: 13.80131880548373
MARUTI_stock: 9.984329363446168
###Markdown
The above graphs and the positive excess kurtosis values indicate heavy tails: extreme daily returns are still rare in absolute terms, but they occur more often than a normal distribution would predict. EDA and Data Preprocessing of tech indicators data
###Code
TCS_SMA.shape, HDFC_SMA.shape, HUL_SMA.shape, MARUTI_SMA.shape
TCS_EMA.shape, HDFC_EMA.shape, HUL_EMA.shape, MARUTI_EMA.shape
TCS_SMA.columns
# Change Dtype of Columns
TCS_SMA["time"] = pd.to_datetime(TCS_SMA["time"])
HDFC_SMA["time"] = pd.to_datetime(HDFC_SMA["time"])
HUL_SMA["time"] = pd.to_datetime(HUL_SMA["time"])
MARUTI_SMA["time"] = pd.to_datetime(MARUTI_SMA["time"])
TCS_EMA["time"] = pd.to_datetime(TCS_EMA["time"])
HDFC_EMA["time"] = pd.to_datetime(HDFC_EMA["time"])
HUL_EMA["time"] = pd.to_datetime(HUL_EMA["time"])
MARUTI_EMA["time"] = pd.to_datetime(MARUTI_SMA["time"])
# Sort the Database by Date
TCS_SMA = TCS_SMA.sort_values(by = 'time', ignore_index = True)
HDFC_SMA = HDFC_SMA.sort_values(by = 'time', ignore_index = True)
HUL_SMA = HUL_SMA.sort_values(by = 'time', ignore_index = True)
MARUTI_SMA = MARUTI_SMA.sort_values(by = 'time', ignore_index = True)
# Sort the Database by Date
TCS_EMA = TCS_EMA.sort_values(by = 'time', ignore_index = True)
HDFC_EMA = HDFC_EMA.sort_values(by = 'time', ignore_index = True)
HUL_EMA = HUL_EMA.sort_values(by = 'time', ignore_index = True)
MARUTI_EMA = MARUTI_EMA.sort_values(by = 'time', ignore_index = True)
pd.to_datetime('2019-10-29').month_name()
TCS_SMA_sub = TCS_SMA[TCS_SMA["time"].isin(TCS_stock['date'])].copy()
HDFC_SMA_sub = HDFC_SMA[HDFC_SMA["time"].isin(HDFC_stock['date'])].copy()
HUL_SMA_sub = HUL_SMA[HUL_SMA["time"].isin(HUL_stock['date'])].copy()
MARUTI_SMA_sub = MARUTI_SMA[MARUTI_SMA["time"].isin(MARUTI_stock['date'])].copy()
TCS_EMA_sub = TCS_EMA[TCS_EMA["time"].isin(TCS_stock['date'])].copy()
HDFC_EMA_sub = HDFC_EMA[HDFC_EMA["time"].isin(HDFC_stock['date'])].copy()
HUL_EMA_sub = HUL_EMA[HUL_EMA["time"].isin(HUL_stock['date'])].copy()
MARUTI_EMA_sub = MARUTI_EMA[MARUTI_EMA["time"].isin(MARUTI_stock['date'])].copy()
TCS_SMA_sub['time'].values[:5]
(TCS_EMA_sub['time'].values==TCS_stock['date'].values).all()
TCS_stock['date'].values[:5]
TCS_SMA_sub.shape, HDFC_SMA_sub.shape, HUL_SMA_sub.shape, MARUTI_SMA_sub.shape
MARUTI_EMA_sub.columns
TCS_SMA_sub.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/TCS_SMA_PreProcessed.csv',index=False)
HDFC_SMA_sub.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/HDFC_SMA_PreProcessed.csv',index=False)
HUL_SMA_sub.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/HUL_SMA_PreProcessed.csv',index=False)
MARUTI_SMA_sub.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/MARUTI_SMA_PreProcessed.csv',index=False)
TCS_EMA_sub.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/TCS_EMA_PreProcessed.csv',index=False)
HDFC_EMA_sub.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators//HDFC_EMA_PreProcessed.csv',index=False)
HUL_EMA_sub.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/HUL_EMA_PreProcessed.csv',index=False)
MARUTI_EMA_sub.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/TechIndicators/MARUTI_EMA_PreProcessed.csv',index=False)
TCS_stock.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockPrice/TCS_stock_v2_PreProcessed.csv',index=False)
HDFC_stock.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockPrice/HDFC_stock_v2_PreProcessed.csv',index=False)
HUL_stock.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockPrice/HUL_stock_v2_PreProcessed.csv',index=False)
MARUTI_stock.to_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockPrice/MARUTI_stock_v2_PreProcessed.csv',index=False)
###Output
_____no_output_____
###Markdown
EDA and Data Preprocessing of stock indices data
###Code
# # Stock Indices
# nifty_50 = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY 50_Data.csv')
# bse = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/BSE.csv')
# nifty_it = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY IT_Data.csv')
# nifty_auto = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY AUTO_Data.csv')
# nifty_finance = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY FINANCIAL SERVICES_Data.csv')
# nifty_fmcg = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Datasets/Indian/StockIndices/NIFTY FMCG_Data.csv')
# nifty_50.shape, bse.shape, nifty_it.shape, nifty_auto.shape, nifty_finance.shape, nifty_fmcg.shape
# TCS_EMA.shape, HDFC_EMA.shape, HUL_EMA.shape, MARUTI_EMA.shape
###Output
_____no_output_____ |
Lab2_classification.ipynb | ###Markdown
Imports
###Code
from sentence_transformers import SentenceTransformer, InputExample, losses, models,evaluation
from transformers import BertTokenizer
from csv import QUOTE_NONE
from torch.utils.data import DataLoader,Dataset, TensorDataset
import pandas as pd
import torch
from torch.nn.utils.rnn import pad_sequence
import pickle
import os
###Output
_____no_output_____
###Markdown
Loading the data and explore it
###Code
multinli_train = pd.read_json("multinli_1.0/multinli_1.0_train.jsonl", lines=True)
multinli_test = pd.read_json("multinli_1.0/multinli_1.0_dev_matched.jsonl", lines=True)
# multinli_mismatched = pd.read_json("multinli_1.0/multinli_1.0_dev_mismatched.jsonl", lines=True)
multinli_train.head(5)
multinli_train_reduced = pd.concat([multinli_train[i] for i in ["gold_label", "sentence1","sentence2"]], axis=1)
multinli_test_reduced = pd.concat([multinli_test[i] for i in ["gold_label", "sentence1","sentence2"]], axis=1)
multinli_train_reduced.head(5)
print(multinli_train_reduced['gold_label'].unique())
print(multinli_test_reduced['gold_label'].unique())
print(multinli_test_reduced.shape)
multinli_test_reduced= multinli_test_reduced.loc[multinli_test_reduced["gold_label"] != '-']
multinli_test_reduced.shape
multinli_test_reduced.head(5)
###Output
_____no_output_____
###Markdown
Trying to train BERT in PyTorch (time consuming)
###Code
# '''
# This class is adapted from https://towardsdatascience.com/fine-tuning-pre-trained-transformer-models-for-sentence-entailment-d87caf9ec9db
# This step is done to tokenize the dataset such as [CLS],[SEP],etc. In addition, marking the position of each sentence. (WE ARE INTERESTED TO GET THE SENTENCE EMBEDDING FIRST -->
# then)
# '''
# class MNLIDataBert(Dataset):
# def __init__(self, train_df, val_df):
# self.label_dict = {'entailment': 0, 'contradiction': 1, 'neutral': 2}
# self.train_df = train_df
# self.val_df = val_df
# self.base_path = '/multinli_1.0/'
# self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) # Using a pre-trained BERT tokenizer to encode sentences
# self.train_data = None
# self.val_data = None
# self.init_data()
# def init_data(self):
# self.train_data = self.load_data(self.train_df)
# self.val_data = self.load_data(self.val_df)
# def load_data(self, df):
# MAX_LEN = 512
# token_ids = []
# mask_ids = []
# seg_ids = []
# y = []
# premise_list = df['sentence1'].to_list()
# hypothesis_list = df['sentence2'].to_list()
# label_list = df['gold_label'].to_list()
# for (premise, hypothesis, label) in zip(premise_list, hypothesis_list, label_list):
# premise_id = self.tokenizer.encode(premise, add_special_tokens = False)
# hypothesis_id = self.tokenizer.encode(hypothesis, add_special_tokens = False)
# pair_token_ids = [self.tokenizer.cls_token_id] + premise_id + [self.tokenizer.sep_token_id] + hypothesis_id + [self.tokenizer.sep_token_id]
# premise_len = len(premise_id)
# hypothesis_len = len(hypothesis_id)
# segment_ids = torch.tensor([0] * (premise_len + 2) + [1] * (hypothesis_len + 1)) # sentence 0 and sentence 1
# attention_mask_ids = torch.tensor([1] * (premise_len + hypothesis_len + 3)) # mask padded values
# token_ids.append(torch.tensor(pair_token_ids))
# seg_ids.append(segment_ids)
# mask_ids.append(attention_mask_ids)
# y.append(self.label_dict[label])
# token_ids = pad_sequence(token_ids, batch_first=True)
# mask_ids = pad_sequence(mask_ids, batch_first=True)
# seg_ids = pad_sequence(seg_ids, batch_first=True)
# y = torch.tensor(y)
# dataset = TensorDataset(token_ids, mask_ids, seg_ids, y)
# print(len(dataset))
# return dataset
# def get_data_loaders(self, batch_size=32, shuffle=True):
# train_loader = DataLoader(
# self.train_data,
# shuffle=shuffle,
# batch_size=batch_size
# )
# val_loader = DataLoader(
# self.val_data,
# shuffle=shuffle,
# batch_size=batch_size
# )
# return train_loader, val_loader
# mnli_dataset = MNLIDataBert(multinli_train_reduced.head(5), multinli_test_reduced.head(5)) #for pytorch
###Output
_____no_output_____
###Markdown
Preparing for training
###Code
label_dict = {'entailment': 0, 'contradiction': 1, 'neutral': 2}
encoding_dict= {"gold_label": {"entailment":0, "contradiction":1,"neutral":2}}
multinli_train_reduced2 = multinli_train_reduced.replace(encoding_dict)
multinli_test_reduced2 = multinli_test_reduced.replace(encoding_dict)
from sklearn.model_selection import train_test_split
multinli_train_reduced2, musltinli_val = train_test_split(multinli_train_reduced2, test_size=0.2)
#Define your train examples. You need more than just two examples...
sen1 = list(multinli_train_reduced2.sentence1)
sen2 = list(multinli_train_reduced2.sentence2)
resulting_list = []
for a,b,label in zip(sen1,sen2,list(multinli_train_reduced2.gold_label)):
resulting_list.append(InputExample(texts=[a, b], label=label))
#same for val set
#Define your train examples. You need more than just two examples...
sen1 = list(musltinli_val.sentence1)
sen2 = list(musltinli_val.sentence2)
resulting_list_val = []
for a,b,label in zip(sen1,sen2,list(musltinli_val.gold_label)):
resulting_list_val.append(InputExample(texts=[a, b], label=label))
###Output
_____no_output_____
###Markdown
Training (Bert+maxpooling+Softmax)
###Code
word_embedding_model = models.Transformer('bert-base-uncased', max_seq_length=256)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
#Define your train dataset, the dataloader and the train loss
train_dataloader = DataLoader(resulting_list, shuffle=True, batch_size=10)
val_dataloader= DataLoader(resulting_list_val, shuffle=True, batch_size=10)
train_loss = losses.SoftmaxLoss(model,num_labels=3,sentence_embedding_dimension=word_embedding_model.get_word_embedding_dimension())
test_evaluator= evaluation.LabelAccuracyEvaluator(val_dataloader,softmax_model=train_loss,name="classification_evaluation")
#Tune the model
model.fit(train_objectives=[(train_dataloader, train_loss)],evaluator=test_evaluator, epochs=1, warmup_steps=100, show_progress_bar=True)
###Output
Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertModel: ['cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight', 'cls.seq_relationship.weight', 'cls.predictions.decoder.weight', 'cls.predictions.transform.dense.bias']
- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Iteration: 100%|██████████| 1/1 [00:02<00:00, 2.75s/it]
Epoch: 100%|██████████| 1/1 [00:03<00:00, 3.09s/it]
###Markdown
Testing on the testing set
###Code
#Define your test examples
sen1 = list(multinli_test_reduced2.sentence1)
sen2 = list(multinli_test_reduced2.sentence2)
resulting_list_test = []
for a,b,label in zip(sen1,sen2,list(multinli_test_reduced2.gold_label)):
resulting_list_test.append(InputExample(texts=[a, b], label=label))
from sentence_transformers import SentenceTransformer, InputExample, losses, models, evaluation
test_dataloader= DataLoader(resulting_list_test, shuffle=True, batch_size=10)
evaluator = evaluation.LabelAccuracyEvaluator(test_dataloader,softmax_model=train_loss,name="classification_testing")
evaluator(model,output_path = "./eval_testset/")
###Output
_____no_output_____
###Markdown
END IS HERE >>>>>>>>>>>>>>>>> Start of combining the regression and classification objectives
###Code
locations = {
"train":"stsbenchmark/sts-train.csv",
"test":"stsbenchmark/sts-test.csv",
"valid":"stsbenchmark/sts-dev.csv"
}
df = pd.read_csv(locations["train"],sep='\t',header=None,usecols=[4, 5, 6], quoting=QUOTE_NONE,names=["label","sen1","sen2"])
df.label = ((df.label/5) - 0.5) * 2
df.describe()
#Define your train examples. You need more than just two examples...
def return_suitable_list(location: str, test_case=False): #this makes the dataset production ready
df = pd.read_csv(location,sep='\t',header=None,usecols=[4, 5, 6], quoting=QUOTE_NONE,names=["label","sen1","sen2"])
df.label = ((df.label/5) - 0.5) * 2
if test_case:
df = df[:5]
sen1 = list(df.sen1)
sen2 = list(df.sen2)
resulting_list = []
for a,b,label in zip(sen1,sen2,list(df.label)):
resulting_list.append(InputExample(texts=[a, b], label=label))
return resulting_list
###Output
_____no_output_____
###Markdown
After saving the model from above (classification objective trained on NLI), we train it again, this time with the regression objective. RUN THIS to get the model that combines both objectives (the final result is supposedly a better regression model).
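For reference, a minimal sketch of the save/reload step referred to in the commented line below — `model_save_path` is just a placeholder name, not a path used elsewhere in this notebook:
```python
from sentence_transformers import SentenceTransformer

model_save_path = './nli-softmax-model/'      # hypothetical output directory
model.save(model_save_path)                   # persist the classification-trained model
model = SentenceTransformer(model_save_path)  # reload it before the regression fine-tuning
```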
###Code
#model = SentenceTransformer(model_save_path) ... < saved model from above OR if you just trained the model from above, you can continue with this normally!
train_loss = losses.CosineSimilarityLoss(model)
resulting_list_sts_train = return_suitable_list(locations["train"], test_case=False)
resulting_list_sts_valid = return_suitable_list(locations["valid"], test_case=False)
valid_evaluator= evaluation.EmbeddingSimilarityEvaluator.from_input_examples(resulting_list_sts_valid,name="fine-tuning for 2.3")
train_dataloader = DataLoader(resulting_list_sts_train, shuffle=True, batch_size=10)
#Tune the model
model.fit(train_objectives=[(train_dataloader, train_loss)],evaluator=valid_evaluator, epochs=1, warmup_steps=100, show_progress_bar=True)
###Output
Iteration: 100%|██████████| 1/1 [00:02<00:00, 2.42s/it]
Epoch: 100%|██████████| 1/1 [00:02<00:00, 2.69s/it]
###Markdown
Testing the hypothesis by testing!
###Code
evaluator = evaluation.EmbeddingSimilarityEvaluator.from_input_examples(return_suitable_list(locations["test"], test_case=False),name="final testing of 2.3")
evaluator(model, output_path = "./eval_testset/")
###Output
_____no_output_____
###Markdown
END OF 2.1 >>>>>>>>>>>>>
###Code
#some code is from the docs, which you might want to find here -->
#https://www.sbert.net/docs/training/overview.html
areWeTesting = False
#Define the model. We do the basic bert + mean Pooling
word_embedding_model = models.Transformer('bert-base-uncased', max_seq_length=256)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
#Define your train dataset, the dataloader and the train loss
resulting_list = return_suitable_list(locations["train"], test_case=areWeTesting)
train_dataloader = DataLoader(resulting_list, shuffle=True, batch_size=10)
train_loss = losses.CosineSimilarityLoss(model)
#Tune the model
# model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=20, warmup_steps=100, output_path= "./", save_best_model= True, checkpoint_path = "./ckpts/", checkpoint_save_steps = 500)
#some code is from the docs, which you might want to find here -->
#https://www.sbert.net/docs/training/overview.html
#Define the model. We do the basic bert + mean Pooling
word_embedding_model = models.Transformer('bert-base-uncased', max_seq_length=256)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
#Define your train dataset, the dataloader and the train loss
train_loss = losses.CosineSimilarityLoss(model)
#Tune the model
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1, warmup_steps=100)
# #that's actually the sbert one
# from sentence_transformers import SentenceTransformer
# model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
# #Our sentences we like to encode
# sentences = ['This framework generates embeddings for each input sentence',
# 'Sentences are passed as a list of string.',
# 'The quick brown fox jumps over the lazy dog.']
# #Sentences are encoded by calling model.encode()
# embeddings = model.encode(sentences)
# #Print the embeddings
# for sentence, embedding in zip(sentences, embeddings):
# print("Sentence:", sentence)
# print("Embedding:", embedding)
# print("")
model
sts_train=pd.read_csv("stsbenchmark/sts-train.csv",sep='\t',header=None,usecols=[4, 5, 6], quoting=QUOTE_NONE,names=["label","sen1","sen2"])
#Unlike described in the assignment, the similarity is in [0,5], not [1,5]
sts_train.describe()
# from transformers import BertTokenizer, BertModel
# # tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# # model = BertModel.from_pretrained("bert-base-uncased")
# word_embedding_model = models.Transformer('bert-base-uncased', max_seq_length=256)
# pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
# model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# text = "This fucking thing better works."
# encoded_input = tokenizer(text, return_tensors='pt')
# print(type(encoded_input))
# output = model(**encoded_input)
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained("bert-base-uncased")
text = ["Dummy example, does this shit even work?","oimfowemfo","Replace me by any text you'd like.", "WTF is this"]#"Dummy example, does this shit even work?"
encoded_input = tokenizer(text, return_tensors='pt', padding=True)
output = model(**encoded_input)
output
print(type(output))
# #This here should hopefully make meaningful embeddings from BERT(not SBERT)
# from transformers import BertTokenizer, BertModel
# # tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# # model = BertModel.from_pretrained("bert-base-uncased")
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# model1 = BertModel.from_pretrained("bert-base-uncased")
# model2 = BertModel.from_pretrained("bert-base-uncased")
# #load datasets
# sts_train=pd.read_csv("stsbenchmark/sts-train.csv",sep='\t',header=None,usecols=[4, 5, 6], quoting=QUOTE_NONE,names=["label","sen1","sen2"])
# sts_test=pd.read_csv("stsbenchmark/sts-test.csv",sep='\t',header=None,usecols=[4, 5, 6], quoting=QUOTE_NONE,names=["label","sen1","sen2"])
# sts_dev=pd.read_csv("stsbenchmark/sts-dev.csv",sep='\t',header=None,usecols=[4, 5, 6], quoting=QUOTE_NONE,names=["label","sen1","sen2"])
# print("loading data ok")
# print("starting tokenizing the data")
# encoded_input1 = tokenizer(list(sts_train.sen1), return_tensors='pt', padding=True)
# output = model1(**encoded_input1)
# encoded_input2 = tokenizer(list(sts_train.sen2), return_tensors='pt', padding=True)
# output2 = model1(**encoded_input2)
# pooling_layer1= models.Pooling(200,pooling_mode_mean_tokens=True)
# pooling_layer2= models.Pooling(200,pooling_mode_mean_tokens=True)
# final_model1= SentenceTransformer(modules=[model1,pooling_layer1])
# embedding1=final_model1.encode(encoded_input1, batch_size=128, convert_to_numpy=True, show_progress_bar=True)
# final_model2= SentenceTransformer(modules=[model2,pooling_layer2])
# embedding2=final_model2.encode(encoded_input2, batch_size=128, convert_to_numpy=True, show_progress_bar=True)
# text = ['This fucking thing better works',"Hate this bla bla, what the fuck is this for"]
# encoded_input = tokenizer(text, return_tensors='pt', padding=True)
# output = final_model1(**encoded_input)
# train_sen1_encoding= tokenizer(list(sts_train["sen1"]), padding="max_length", truncation=True)
# train_sen2_encoding= tokenizer(list(sts_train["sen2"]), padding="max_length", truncation=True)
# sts_train.head(5) # we split the process up here so you can see the difference on the label
###Output
_____no_output_____ |
3. Natural Language Processing in TensorFlow/3. Sequence Models/assignment/C3W3_Assignment.ipynb | ###Markdown
Week 3: Exploring Overfitting in NLP. Welcome to this assignment! During this week you saw different ways to handle sequence-like data. You saw how some Keras layers such as `GRU`, `Conv` and `LSTM` can be used to tackle problems in this space. Now you will put this knowledge into practice by creating a model architecture that does not overfit. For this assignment you will be using a variation of the [Sentiment140 dataset](http://help.sentiment140.com/home), which contains 1.6 million tweets alongside their respective sentiment (0 for negative and 4 for positive). You will also need to create helper functions, very similar to the ones you coded in previous assignments, to pre-process the data and to tokenize sentences. However, the objective of the assignment is to find a model architecture that will not overfit. Let's get started!
###Code
import csv
import random
import pickle
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import matplotlib.pyplot as plt
from scipy.stats import linregress
###Output
_____no_output_____
###Markdown
Defining some useful global variables. Next you will define some global variables that will be used throughout the assignment.- `EMBEDDING_DIM`: Dimension of the dense embedding, will be used in the embedding layer of the model. Defaults to 100.- `MAXLEN`: Maximum length of all sequences. Defaults to 16.- `TRUNCATING`: Truncating strategy (truncate either before or after each sequence). Defaults to 'post'.- `PADDING`: Padding strategy (pad either before or after each sequence). Defaults to 'post'.- `OOV_TOKEN`: Token to replace out-of-vocabulary words during text_to_sequence calls. Defaults to `"<OOV>"`. - `MAX_EXAMPLES`: Max number of examples to use. Defaults to 160000 (10% of the original number of examples) - `TRAINING_SPLIT`: Proportion of data used for training. Defaults to 0.9 **For now leave them unchanged, but after submitting your assignment for grading you are encouraged to come back here and play with these parameters to see the impact they have on the classification process.**
###Code
EMBEDDING_DIM = 100
MAXLEN = 16
TRUNCATING = 'post'
PADDING = 'post'
OOV_TOKEN = "<OOV>"
MAX_EXAMPLES = 160000
TRAINING_SPLIT = 0.9
###Output
_____no_output_____
###Markdown
Explore the datasetThe dataset is provided in a csv file. Each row of this file contains the following values separated by commas:- target: the polarity of the tweet (0 = negative, 4 = positive)- ids: The id of the tweet- date: the date of the tweet- flag: The query. If there is no query, then this value is NO_QUERY.- user: the user that tweeted- text: the text of the tweetTake a look at the first two examples:
###Code
SENTIMENT_CSV = "./data/training_cleaned.csv"
with open(SENTIMENT_CSV, 'r') as csvfile:
print(f"First data point looks like this:\n\n{csvfile.readline()}")
print(f"Second data point looks like this:\n\n{csvfile.readline()}")
###Output
First data point looks like this:
"0","1467810369","Mon Apr 06 22:19:45 PDT 2009","NO_QUERY","_TheSpecialOne_","@switchfoot http://twitpic.com/2y1zl - Awww, that's a bummer. You shoulda got David Carr of Third Day to do it. ;D"
Second data point looks like this:
"0","1467810672","Mon Apr 06 22:19:49 PDT 2009","NO_QUERY","scotthamilton","is upset that he can't update his Facebook by texting it... and might cry as a result School today also. Blah!"
###Markdown
**Notice that this file does not have a header so you won't need to skip the first row when parsing the file.**For the task at hand you will only need the information of the target and the text, which are the first and last element of each row. Parsing the raw dataNow you need to read the data from the csv file. To do so, complete the `parse_data_from_file` function.A couple of things to note:- You should NOT omit the first line as the file does not contain headers.- There is no need to save the data points as numpy arrays, regular lists is fine.- To read from csv files use `csv.reader` by passing the appropriate arguments.- `csv.reader` returns an iterable that returns each row in every iteration. So the label can be accessed via `row[0]` and the text via `row[5]`.- The labels are originally encoded as strings ('0' representing negative and '4' representing positive). **You need to change this so that the labels are integers and 0 is used for representing negative, while 1 should represent positive.**
###Code
def parse_data_from_file(filename):
sentences = []
labels = []
with open(filename, 'r') as csvfile:
### START CODE HERE
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
labels.append(0 if row[0] == '0' else 1)  # labels are read as strings: '0' -> 0 (negative), '4' -> 1 (positive)
sentences.append(row[5])
### END CODE HERE
return sentences, labels
# Test your function
sentences, labels = parse_data_from_file(SENTIMENT_CSV)
print(f"dataset contains {len(sentences)} examples\n")
print(f"Text of second example should look like this:\n{sentences[1]}\n")
print(f"Text of fourth example should look like this:\n{sentences[3]}")
print(f"\nLabels of last 5 examples should look like this:\n{labels[-5:]}")
###Output
dataset contains 1600000 examples
Text of second example should look like this:
is upset that he can't update his Facebook by texting it... and might cry as a result School today also. Blah!
Text of fourth example should look like this:
my whole body feels itchy and like its on fire
Labels of last 5 examples should look like this:
[1, 1, 1, 1, 1]
###Markdown
***Expected Output:***```dataset contains 1600000 examplesText of second example should look like this:is upset that he can't update his Facebook by texting it... and might cry as a result School today also. Blah!Text of fourth example should look like this:my whole body feels itchy and like its on fire Labels of last 5 examples should look like this:[1, 1, 1, 1, 1]``` You might have noticed that this dataset contains a lot of examples. In order to keep a low execution time of this assignment you will be using only 10% of the original data. The next cell does this while also randomizing the data points that will be used:
###Code
# Bundle the two lists into a single one
sentences_and_labels = list(zip(sentences, labels))
# Perform random sampling
random.seed(42)
sentences_and_labels = random.sample(sentences_and_labels, MAX_EXAMPLES)
# Unpack back into separate lists
sentences, labels = zip(*sentences_and_labels)
print(f"There are {len(sentences)} sentences and {len(labels)} labels after random sampling\n")
###Output
There are 160000 sentences and 160000 labels after random sampling
###Markdown
***Expected Output:***```There are 160000 sentences and 160000 labels after random sampling``` Training - Validation Split. Now you will code the `train_val_split` function which, given the list of sentences, the list of labels, and the proportion of data for the training set, should return the training and validation sentences and labels:
###Code
def train_val_split(sentences, labels, training_split):
### START CODE HERE
# Compute the number of sentences that will be used for training (should be an integer)
train_size = int(len(sentences)*training_split)
# Split the sentences and labels into train/validation splits
train_sentences = sentences[:train_size]
train_labels = labels[:train_size]
validation_sentences = sentences[train_size:]
validation_labels = labels[train_size:]
### END CODE HERE
return train_sentences, validation_sentences, train_labels, validation_labels
# Test your function
train_sentences, val_sentences, train_labels, val_labels = train_val_split(sentences, labels, TRAINING_SPLIT)
print(f"There are {len(train_sentences)} sentences for training.\n")
print(f"There are {len(train_labels)} labels for training.\n")
print(f"There are {len(val_sentences)} sentences for validation.\n")
print(f"There are {len(val_labels)} labels for validation.")
###Output
There are 144000 sentences for training.
There are 144000 labels for training.
There are 16000 sentences for validation.
There are 16000 labels for validation.
###Markdown
***Expected Output:***```There are 144000 sentences for training.There are 144000 labels for training.There are 16000 sentences for validation.There are 16000 labels for validation.``` Tokenization - Sequences, truncating and paddingNow that you have sets for training and validation it is time for you to begin the tokenization process.Begin by completing the `fit_tokenizer` function below. This function should return a [Tokenizer](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text/Tokenizer) that has been fitted to the training sentences.
###Code
def fit_tokenizer(train_sentences, oov_token):
### START CODE HERE
# Instantiate the Tokenizer class, passing in the correct values for num_words and oov_token
tokenizer = Tokenizer(oov_token=oov_token)
# Fit the tokenizer to the training sentences
tokenizer.fit_on_texts(train_sentences)
### END CODE HERE
return tokenizer
# Test your function
tokenizer = fit_tokenizer(train_sentences, OOV_TOKEN)
word_index = tokenizer.word_index
VOCAB_SIZE = len(word_index)
print(f"Vocabulary contains {VOCAB_SIZE} words\n")
print("<OOV> token included in vocabulary" if "<OOV>" in word_index else "<OOV> token NOT included in vocabulary")
print(f"\nindex of word 'i' should be {word_index['i']}")
###Output
Vocabulary contains 128293 words
<OOV> token included in vocabulary
index of word 'i' should be 2
###Markdown
***Expected Output:***```Vocabulary contains 128293 words token included in vocabularyindex of word 'i' should be 2```
###Code
def seq_pad_and_trunc(sentences, tokenizer, padding, truncating, maxlen):
### START CODE HERE
# Convert sentences to sequences
sequences = tokenizer.texts_to_sequences(sentences)
# Pad the sequences using the correct padding, truncating and maxlen
pad_trunc_sequences = pad_sequences(sequences, maxlen=maxlen, padding=padding, truncating=truncating)
### END CODE HERE
return pad_trunc_sequences
# Test your function
train_pad_trunc_seq = seq_pad_and_trunc(train_sentences, tokenizer, PADDING, TRUNCATING, MAXLEN)
val_pad_trunc_seq = seq_pad_and_trunc(val_sentences, tokenizer, PADDING, TRUNCATING, MAXLEN)
print(f"Padded and truncated training sequences have shape: {train_pad_trunc_seq.shape}\n")
print(f"Padded and truncated validation sequences have shape: {val_pad_trunc_seq.shape}")
###Output
Padded and truncated training sequences have shape: (144000, 16)
Padded and truncated validation sequences have shape: (16000, 16)
###Markdown
***Expected Output:***```Padded and truncated training sequences have shape: (144000, 16)Padded and truncated validation sequences have shape: (16000, 16)``` Remember that the `pad_sequences` function returns numpy arrays, so your training and validation sequences are already in this format. However, the labels are still Python lists. Before going forward you should convert them to numpy arrays as well. You can do this by running the following cell:
###Code
train_labels = np.array(train_labels)
val_labels = np.array(val_labels)
###Output
_____no_output_____
###Markdown
Using pre-defined EmbeddingsThis time you will not be learning embeddings from your data but you will be using pre-trained word vectors.In particular you will be using the 100 dimension version of [GloVe](https://nlp.stanford.edu/projects/glove/) from Stanford.
###Code
# Define path to file containing the embeddings
GLOVE_FILE = './data/glove.6B.100d.txt'
# Initialize an empty embeddings index dictionary
GLOVE_EMBEDDINGS = {}
# Read file and fill GLOVE_EMBEDDINGS with its contents
with open(GLOVE_FILE) as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
GLOVE_EMBEDDINGS[word] = coefs
###Output
_____no_output_____
###Markdown
Now you have access to GloVe's pre-trained word vectors. Isn't that cool?Let's take a look at the vector for the word **dog**:
###Code
test_word = 'dog'
test_vector = GLOVE_EMBEDDINGS[test_word]
print(f"Vector representation of word {test_word} looks like this:\n\n{test_vector}")
###Output
Vector representation of word dog looks like this:
[ 0.30817 0.30938 0.52803 -0.92543 -0.73671 0.63475
0.44197 0.10262 -0.09142 -0.56607 -0.5327 0.2013
0.7704 -0.13983 0.13727 1.1128 0.89301 -0.17869
-0.0019722 0.57289 0.59479 0.50428 -0.28991 -1.3491
0.42756 1.2748 -1.1613 -0.41084 0.042804 0.54866
0.18897 0.3759 0.58035 0.66975 0.81156 0.93864
-0.51005 -0.070079 0.82819 -0.35346 0.21086 -0.24412
-0.16554 -0.78358 -0.48482 0.38968 -0.86356 -0.016391
0.31984 -0.49246 -0.069363 0.018869 -0.098286 1.3126
-0.12116 -1.2399 -0.091429 0.35294 0.64645 0.089642
0.70294 1.1244 0.38639 0.52084 0.98787 0.79952
-0.34625 0.14095 0.80167 0.20987 -0.86007 -0.15308
0.074523 0.40816 0.019208 0.51587 -0.34428 -0.24525
-0.77984 0.27425 0.22418 0.20164 0.017431 -0.014697
-1.0235 -0.39695 -0.0056188 0.30569 0.31748 0.021404
0.11837 -0.11319 0.42456 0.53405 -0.16717 -0.27185
-0.6255 0.12883 0.62529 -0.52086 ]
###Markdown
Feel free to change the `test_word` to see the vector representation of any word you can think of.Also, notice that the dimension of each vector is 100. You can easily double check this by running the following cell:
###Code
print(f"Each word vector has shape: {test_vector.shape}")
###Output
Each word vector has shape: (100,)
###Markdown
Represent the words in your vocabulary using the embeddings. Save the vector representation of each word in the vocabulary in a numpy array. A couple of things to notice:- If a word in your vocabulary is not present in `GLOVE_EMBEDDINGS`, the representation for that word is left as all zeros.- `word_index` starts counting at 1; because of this you will need an extra row at index 0 of the `EMBEDDINGS_MATRIX` array. This is the reason why you add 1 to `VOCAB_SIZE` in the cell below:
###Code
# Initialize an empty numpy array with the appropriate size
EMBEDDINGS_MATRIX = np.zeros((VOCAB_SIZE+1, EMBEDDING_DIM))
# Iterate all of the words in the vocabulary and if the vector representation for
# each word exists within GloVe's representations, save it in the EMBEDDINGS_MATRIX array
for word, i in word_index.items():
embedding_vector = GLOVE_EMBEDDINGS.get(word)
if embedding_vector is not None:
EMBEDDINGS_MATRIX[i] = embedding_vector
###Output
_____no_output_____
###Markdown
Now you have the pre-trained embeddings ready to use! Define a model that does not overfit. Now you need to define a model that will handle the problem at hand while not overfitting.A couple of things to note / hints:- The first layer is provided so you can see how the Embedding layer is configured when using pre-trained embeddings- You can try different combinations of layers covered in previous ungraded labs such as: - `Conv1D` - `Dropout` - `GlobalMaxPooling1D` - `MaxPooling1D` - `LSTM` - `Bidirectional(LSTM)`- The last two layers should be `Dense` layers.- There are multiple ways of solving this problem, so try an architecture that you think will not overfit; one possible shape is sketched below.- Try simpler architectures first to avoid long training times. Architectures that are able to solve this problem usually have around 3-4 layers (excluding the last two `Dense` ones)- Include at least one `Dropout` layer to mitigate overfitting.
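For illustration only, here is one possible shape such an architecture could take — a Bidirectional LSTM variant assembled from the layers listed above. It is a sketch, not the graded solution: the layer sizes are arbitrary, and it reuses `tf`, `VOCAB_SIZE`, `EMBEDDING_DIM`, `MAXLEN` and `EMBEDDINGS_MATRIX` from the earlier cells.
```python
# Hypothetical alternative architecture (not the graded solution)
example_model = tf.keras.Sequential([
    tf.keras.layers.Embedding(VOCAB_SIZE + 1, EMBEDDING_DIM, input_length=MAXLEN,
                              weights=[EMBEDDINGS_MATRIX], trainable=False),
    tf.keras.layers.Dropout(0.3),                             # mitigate overfitting
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),  # sequence modelling layer
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
example_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
```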
###Code
# GRADED FUNCTION: create_model
def create_model(vocab_size, embedding_dim, maxlen, embeddings_matrix):
### START CODE HERE
model = tf.keras.Sequential([
# This is how you need to set the Embedding layer when using pre-trained embeddings
tf.keras.layers.Embedding(vocab_size+1, embedding_dim, input_length=maxlen, weights=[embeddings_matrix], trainable=False),
# tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=maxlen),
tf.keras.layers.Dropout(0.2),
# tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
tf.keras.layers.Conv1D(32, 5, activation='relu'),
tf.keras.layers.GlobalMaxPooling1D(),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
### END CODE HERE
return model
# Create your untrained model
model = create_model(VOCAB_SIZE, EMBEDDING_DIM, MAXLEN, EMBEDDINGS_MATRIX)
# Train the model and save the training history
history = model.fit(train_pad_trunc_seq, train_labels, epochs=20, validation_data=(val_pad_trunc_seq, val_labels))
###Output
Model: "sequential_14"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_16 (Embedding) (None, 16, 100) 12829400
dropout_14 (Dropout) (None, 16, 100) 0
conv1d_12 (Conv1D) (None, 12, 32) 16032
global_max_pooling1d_9 (Glo (None, 32) 0
balMaxPooling1D)
dense_27 (Dense) (None, 32) 1056
dense_28 (Dense) (None, 1) 33
=================================================================
Total params: 12,846,521
Trainable params: 17,121
Non-trainable params: 12,829,400
_________________________________________________________________
Epoch 1/20
4500/4500 [==============================] - 17s 4ms/step - loss: 0.0014 - accuracy: 0.9997 - val_loss: 2.1434e-06 - val_accuracy: 1.0000
Epoch 2/20
4500/4500 [==============================] - 16s 4ms/step - loss: 6.7361e-07 - accuracy: 1.0000 - val_loss: 1.0282e-07 - val_accuracy: 1.0000
Epoch 3/20
4500/4500 [==============================] - 16s 4ms/step - loss: 3.8523e-08 - accuracy: 1.0000 - val_loss: 8.1522e-09 - val_accuracy: 1.0000
Epoch 4/20
4500/4500 [==============================] - 16s 4ms/step - loss: 3.3134e-09 - accuracy: 1.0000 - val_loss: 8.8144e-10 - val_accuracy: 1.0000
Epoch 5/20
4500/4500 [==============================] - 16s 4ms/step - loss: 4.5257e-10 - accuracy: 1.0000 - val_loss: 2.0377e-10 - val_accuracy: 1.0000
Epoch 6/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.3161e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 7/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0529e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 8/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0521e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 9/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0525e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 10/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0522e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 11/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0524e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 12/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0528e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 13/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0528e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 14/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0521e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 15/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0523e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 16/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0519e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 17/20
4500/4500 [==============================] - 16s 4ms/step - loss: 1.0522e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 18/20
4500/4500 [==============================] - 17s 4ms/step - loss: 1.0530e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 19/20
4500/4500 [==============================] - 17s 4ms/step - loss: 1.0524e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
Epoch 20/20
4500/4500 [==============================] - 17s 4ms/step - loss: 1.0524e-10 - accuracy: 1.0000 - val_loss: 1.0033e-10 - val_accuracy: 1.0000
###Markdown
**To pass this assignment your `val_loss` (validation loss) should either be flat or decreasing.** Although a flat `val_loss` and a lowering `train_loss` (or just `loss`) also indicate some overfitting, what you really want to avoid is having a lowering `train_loss` and an increasing `val_loss`.With this in mind, the following three curves will be acceptable solutions: While the following would not be able to pass the grading: Run the following cell to check your loss curves:
###Code
#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = [*range(20)]
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(epochs, loss, 'r')
plt.plot(epochs, val_loss, 'b')
plt.title('Training and validation loss')
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["Loss", "Validation Loss"])
plt.show()
###Output
_____no_output_____
###Markdown
If you wish, you can also check the training and validation accuracy of your model:
###Code
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
#------------------------------------------------
# Plot training and validation accuracy per epoch
#------------------------------------------------
plt.plot(epochs, acc, 'r')
plt.plot(epochs, val_acc, 'b')
plt.title('Training and validation accuracy')
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(["Accuracy", "Validation Accuracy"])
plt.show()
###Output
_____no_output_____
###Markdown
A more rigorous way of setting the passing threshold of this assignment is to use the slope of your `val_loss` curve.**To pass this assignment the slope of your `val_loss` curve should be 0.0005 at maximum.**
###Code
# Test the slope of your val_loss curve
slope, *_ = linregress(epochs, val_loss)
print(f"The slope of your validation loss curve is {slope:.5f}")
###Output
The slope of your validation loss curve is -0.00000
###Markdown
**If your model generated a validation loss curve that meets the criteria above, run the following cell and then submit your assignment for grading. Otherwise, try with a different architecture.**
###Code
with open('history.pkl', 'wb') as f:
pickle.dump(history.history, f)
###Output
_____no_output_____ |
examples/reporting/Allegro_Trains_logging_example.ipynb | ###Markdown
Allegro Trains logging example[](https://colab.research.google.com/github/allegroai/trains/blob/master/examples/reporting/Allegro_Trains_logging_example.ipynb)This example introduces Trains [Logger](https://allegro.ai/docs/logger.html) functionality. Logger is the Trains console log and metric interface.You can find more reporting examples [here](https://github.com/allegroai/trains/tree/master/examples/reporting).
###Code
!pip install trains
!pip install numpy
###Output
_____no_output_____
###Markdown
Create a new TaskCreate a new Task and get a Logger object for the Task.To create a new Task object, call the `Task.init` method providing it with `project_name` (the project name for the experiment) and `task_name` (the name of the experiment). When `Task.init` executes, a link to the Web UI Results page for the newly generated Task will be printed, and the Task will be updated in real time in the Trains demo server.You can read about the `Task` class in the docs [here](https://allegro.ai/docs/task.html).After the Task is created, get a Logger for it.
###Code
import numpy as np
from trains import Task
# Start a new task
task = Task.init(project_name="Colab notebooks", task_name="Explicit Logging")
# Get the task logger,
# You can also call Task.current_task().get_logger() from anywhere in your code.
logger = task.get_logger()
###Output
_____no_output_____
###Markdown
Explicit scalar loggingUse the [Logger.report_scalar](https://allegro.ai/docs/logger.htmltrains.logger.Logger.report_scalar) method to explicitly log scalars. Scalar plots appear in the Web UI, Results > Scalars tab.
###Code
# report two scalar series on the same graph
for i in range(10):
logger.report_scalar("unified graph", "series A", iteration=i, value=1./(i+1))
logger.report_scalar("unified graph", "series B", iteration=i, value=10./(i+1))
# report two scalar series on two different graphs
for i in range(10):
logger.report_scalar("graph A", "series A", iteration=i, value=1./(i+1))
logger.report_scalar("graph B", "series B", iteration=i, value=10./(i+1))
###Output
_____no_output_____
###Markdown
Explicit logging of other dataYou can log other data and report the data in a variety of plot types, including histograms, confusion matrices, 2D and 3D scatter diagrams, and surface diagrams. They appear in the Results > Plots tab.For information about the methods to report each type of plot, see the [Logger](https://allegro.ai/docs/logger.html) module.
###Code
iteration = 100
# report a single histogram
histogram = np.random.randint(10, size=10)
logger.report_histogram(
"single_histogram",
"random histogram",
iteration=iteration,
values=histogram,
xaxis="title x",
yaxis="title y",
)
# report a two histograms on the same plot
histogram1 = np.random.randint(13, size=10)
histogram2 = histogram * 0.75
logger.report_histogram(
"two_histogram",
"series 1",
iteration=iteration,
values=histogram1,
xaxis="title x",
yaxis="title y",
)
logger.report_histogram(
"two_histogram",
"series 2",
iteration=iteration,
values=histogram2,
xaxis="title x",
yaxis="title y",
)
# report confusion matrix
confusion = np.random.randint(10, size=(10, 10))
logger.report_matrix(
"example_confusion",
"ignored",
iteration=iteration,
matrix=confusion,
xaxis="title X",
yaxis="title Y",
)
scatter2d = np.hstack(
(np.atleast_2d(np.arange(0, 10)).T, np.random.randint(10, size=(10, 1)))
)
# report 2d scatter plot with markers
logger.report_scatter2d(
"example_scatter",
"series_lines+markers",
iteration=iteration,
scatter=scatter2d,
xaxis="title x",
yaxis="title y",
mode='lines+markers'
)
# report 3d surface
surface = np.random.randint(10, size=(10, 10))
logger.report_surface(
"example_surface",
"series1",
iteration=iteration,
matrix=surface,
xaxis="title X",
yaxis="title Y",
zaxis="title Z",
)
# report 3d scatter plot
scatter3d = np.random.randint(10, size=(10, 3))
logger.report_scatter3d(
"example_scatter_3d",
"series_xyz",
iteration=iteration,
scatter=scatter3d,
xaxis="title x",
yaxis="title y",
zaxis="title z",
)
###Output
_____no_output_____
###Markdown
Explicit debug samples reportingExplicitly report debug samples, including images, audio, and video. Downloading the filesWe use StorageManager to download a local copy of the files. You can use it immediately. Just provide the URL. Cache is enabled by default for all downloaded remote URLs/files.For more information, you can read about the storage manager [here](https://allegro.ai/docs/storage_manager_storagemanager.html).
###Code
from trains.storage import StorageManager
image_local_copy = StorageManager.get_local_copy(
remote_url="https://pytorch.org/tutorials/_static/img/neural-style/picasso.jpg",
name="picasso.jpg"
)
print("Image location: {}".format(image_local_copy))
video_local_copy = StorageManager.get_local_copy(
remote_url="https://test-videos.co.uk/vids/bigbuckbunny/mp4/h264/720/Big_Buck_Bunny_720_10s_1MB.mp4",
name="Big_Buck_Bunny_720_10s_1MB.mp4"
)
print("Video location: {}".format(video_local_copy))
audio_local_copy = StorageManager.get_local_copy(
remote_url="https://www2.cs.uic.edu/~i101/SoundFiles/PinkPanther30.wav",
name="PinkPanther30.wav"
)
print("Audio location: {}".format(audio_local_copy))
###Output
_____no_output_____
###Markdown
Report images and mediaUse [Logger.report_image](https://allegro.ai/docs/logger.html?highlight=report_imagetrains.logger.Logger.report_image) and [Logger.report_media](https://allegro.ai/docs/logger.html?highlight=report_mediatrains.logger.Logger.report_media) to report the downloaded samples. The debug samples appear in the Results > Debug Samples tab.
###Code
logger.report_image("image", "image from url", iteration=100, local_path=image_local_copy)
# Image can be uploaded via 'report_media' too
# report video, an already uploaded video media (url)
logger.report_media(
'video', 'big bunny', iteration=1, local_path=video_local_copy)
# This will actually use the cache and will not download the file again.
audio_local_copy_cache = StorageManager.get_local_copy(
remote_url="https://www2.cs.uic.edu/~i101/SoundFiles/PinkPanther30.wav",
name="PinkPanther30.wav"
)
# report audio, report an already uploaded audio media (url)
logger.report_media(
'audio', 'pink panther', iteration=1, local_path=audio_local_copy)
# reporting html from url to debug samples section
logger.report_media("html", "url_html", iteration=1, url="https://allegro.ai/docs/index.html")
###Output
_____no_output_____
###Markdown
Explicit text loggingUse [Logger.report_text](https://allegro.ai/docs/logger.html?highlight=report_texttrains.logger.Logger.report_text) to log text message. They appear in Results > Log.
###Code
# report text
logger.report_text("hello, this is plain text")
###Output
_____no_output_____
###Markdown
Flushing the reportsReports are flushed in the background every couple of seconds, and at the end of the process execution.Or, flush the Logger by calling [Logger.flush](https://allegro.ai/docs/logger.html?highlight=report_texttrains.logger.Logger.flush).
###Code
logger.flush()
###Output
_____no_output_____ |
Results_Iteration#2.ipynb | ###Markdown
Analysis of results of iteration 2 This is the notebook for the results of the second iteration of repository analysis. The documentation for this run can be found in the [RepoAnalysis](./RepoAnalysis.ipynb) notebook. In the second iteration, an approach that is based on commit-deltas (from git log) has been used which greatly increased performance and aimed at increasing data density (as no join over ght.raw_patches is necessary anymore).As this run was meant as a way to compare the different analysis methods (thus running on the same data set as the first one), the evaluation is shorter. We expect to see similar effects as in [the first result set](Results_Iteration1.ipynb), but the better data density might also lead to new results.The notebook is structured as follows: Before the data is evaluated, an [overview](General-overview-over-results) over it is gained and it is [joined with and aggregated on](Prepare-for-analysis) author information. The data is then [visualized](Visualization) with boxplots in order to manually compare it to the last run. Finally, [a Mann–Whitney U test](Statistical-Testing) is applied to check for significance between the experiment groups and comre those results to the previous run.All figures generated in this notebook can be found unter [./results/figures_run_2](./results/figures_run_2).Footnote: The result table for this run (`lb_results2`) has been lost after this notebook was created. Data for one repository was lost because of unavailability. This might make the given cardinalities a tiny bit inaccurate compared to the actual data, but should not change the overall results of the run.
###Code
%load_ext autoreload
%aimport dbUtils
import matplotlib.pyplot as pyplot
tableName = 'lb_results2'
###Output
_____no_output_____
###Markdown
General overview over results First let's get an overview over the structure of the result data before we get into the evaluation. How many tuples are there and how do they look like? There are 530k tuples. Additions and deletions are now embedded into the result table. This table actually is the counterpart for the `lb_deltas` table of result set 1, because it already includes delta information instead of absolute numbers. To compare, the other table includes 292k tuples. This however, cannot yet be fully compared, as the other table also eliminated fork duplicates.
###Code
dbUtils.runQuery('''
SELECT *
FROM crm20.'''+tableName+'''
''', mute=True)
###Output
Time used: 2.827662706375122
###Markdown
How many tuples can we attribute to experiment authors? There are 28k tuples that we can attribute to experiment authors (and thus use for evaluation). This is a lot more than the 16k from iteration 1, which shows that the methodological migration was a success.
###Code
dbUtils.runQuery('''
SELECT DISTINCT lb_results2.sha
FROM crm20.lb_results2, ght.commits
WHERE lb_results2.sha = commits.sha
AND author_id IN (SELECT author_id FROM crm20.lb_experimentusers)
''')
###Output
Time used: 3.4830398559570312
###Markdown
Prepare for analysis Add author information and filter for experiment authors Again, author information is joined in to attribute the commit changes to developers. This time, only the experiment users are taken. Note the `DISTINCT` which, together with the removal of the `repoId` column, eliminates duplicates originating from forks.
###Code
dbUtils.runQuery('''
DROP MATERIALIZED VIEW IF EXISTS crm20.lb_experimentset2;
CREATE MATERIALIZED VIEW crm20.lb_experimentset2 AS (
SELECT DISTINCT lb_results2.sha, lb_results2.timestamp, author_id, additions, deletions, additions + deletions AS changes, loc, cloc, file_count, num_methods, num_lambdas, num_comment_lines, num_reflection, num_snakes, total_indent
FROM crm20.lb_results2, ght.commits
WHERE lb_results2.sha = commits.sha
AND author_id IN (SELECT author_id FROM crm20.lb_experimentusers)
);
SELECT * FROM crm20.lb_experimentset2
''')
###Output
Time used: 3.6220011711120605
###Markdown
Create averages for authors As this analysis run will not use the lifecylce approach, averages for each author can safely be calculated. This allows to reflect over the overall code quality of each author and to compare authors of the two groups.
###Code
dbUtils.runQuery('''
DROP VIEW IF EXISTS crm20.lb_authoravgs2;
CREATE VIEW crm20.lb_authoravgs2 AS (
SELECT
author_id,
AVG(CAST(loc AS DECIMAL)) AS loc,
AVG(CAST(cloc AS DECIMAL)) AS cloc,
AVG(CAST(file_count AS DECIMAL)) AS filecount,
AVG(CAST(num_methods AS DECIMAL)/changes) AS methods,
AVG(CAST(num_lambdas AS DECIMAL)/changes) AS lambdas,
AVG(CAST(num_comment_lines AS DECIMAL)/changes) AS commentlines,
AVG(CAST(num_reflection AS DECIMAL)/changes) AS reflection,
AVG(CAST(num_snakes AS DECIMAL)/changes) AS snakes,
AVG(CAST(total_indent AS DECIMAL)/changes) AS indent
FROM crm20.lb_experimentset2
WHERE changes > 0
AND changes < 1000
GROUP BY author_id
);
SELECT * FROM crm20.lb_authoravgs2
''', mute=True)
###Output
Time used: 0.038674116134643555
###Markdown
Visualization Again to visualize effects, boxplots have been chosen. This is very similar to [Results_Iteration1](Results_Iteration1.ipynb). The following queries extract the respective data sets for both experiment groups
###Code
boxDataPolyglot = dbUtils.runQuery('''
SELECT *
FROM crm20.lb_authoravgs2
WHERE author_id IN (SELECT author_id FROM crm20.lb_polyglots)
''', mute=True)
display(boxDataPolyglot)
boxDataControlGroup = dbUtils.runQuery('''
SELECT *
FROM crm20.lb_authoravgs2
WHERE author_id IN (SELECT author_id FROM crm20.lb_controlgroup)
''', mute=True)
display(boxDataControlGroup)
###Output
Time used: 0.034987449645996094
###Markdown
When plotting the data and putting the plots side by side, the results look very similar: value ranges increased a bit, which is to be expected with a bigger dataset, but there were no notable changes.
###Code
for metric in boxDataPolyglot:
if metric == 'author_id': continue
pyplot.figure(figsize=(15, 5))
pyplot.title('Metric: '+metric)
pyplot.boxplot([boxDataPolyglot[metric], boxDataControlGroup[metric]], labels=['polyglot', 'control group'])
pyplot.savefig('figures/boxplot_'+metric+'.png')
###Output
_____no_output_____
###Markdown
Statistical Testing Again, a Mann–Whitney U test is applies. All metrics that were identified as significant before were identified again, most with better certainties (as would be expected with a strict data superset). One outlyer is comment line density, but this is inside uncertainty boundaries.Interestingly, compared to the first evaluation, snake case density has now been identified as significant. This is not surprising, as the last test already indicated this tendency. However, the next, scaled-up, run will need to confirm this result.
###Code
from scipy.stats import mannwhitneyu
for metric in boxDataPolyglot:
if metric == 'author_id': continue
pvalue = mannwhitneyu(boxDataPolyglot[metric], boxDataControlGroup[metric]).pvalue
print((metric+': ').ljust(15)+str(pvalue)+'\t '+str(pvalue < 0.05))
###Output
loc: 0.03300520224406583 True
cloc: 0.022058035759941794 True
filecount: 0.21545100634855147 False
methods: 0.0917505570894625 False
lambdas: 0.4067688390652851 False
commentlines: 0.02077806898728143 True
reflection: 0.42879115773327764 False
snakes: 0.029028346082374923 True
indent: 0.024817833855852296 True
|
Word_embeddings_colaboratory_8_2_2021.ipynb | ###Markdown
Loading packages
###Code
!pip install fastai --upgrade
!pip install dtreeviz
!pip install fastbook
import fastbook
fastbook.setup_book()
from fastbook import *
from pandas.api.types import is_string_dtype, is_numeric_dtype, is_categorical_dtype
from fastai.tabular.all import *
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from dtreeviz.trees import *
from IPython.display import Image, display_svg, SVG
pd.options.display.max_rows = 20
pd.options.display.max_columns = 8
###Output
_____no_output_____
###Markdown
Loading Data
###Code
path = "/content/gdrive/MyDrive/archivos_tfm/ch1_train_combination_and_monoTherapy.csv"
df = pd.read_csv(path, low_memory=False)
df.head(5)
df.columns
# A little bit of data analysis
df.describe()
dep_var = 'SYNERGY_SCORE'
procs = [Categorify, FillMissing]
# We shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
df[1:5]
# We will erase Combination ID as it offers no additional information
# We only want perfect samples, so only QA = 1
df_nocomb = df.drop(["COMBINATION_ID"], axis=1)
df_def = df_nocomb[df_nocomb['QA'] == 1]
df_def.describe()
# We create the train/validation splits
dataset_size = df_def.shape[0]
cutoff = int(dataset_size * 0.7)
train_idx = df_def.index[:cutoff]
valid_idx = df_def.index[cutoff:]
splits = (list(train_idx),list(valid_idx))
cont,cat = cont_cat_split(df_def, 1, dep_var=dep_var)
to = TabularPandas(df_nocomb, procs, cat, cont, y_names=dep_var, splits=splits)
len(to.train),len(to.valid)
xs,y = to.train.xs,to.train.y
valid_xs,valid_y = to.valid.xs,to.valid.y
###Output
_____no_output_____
###Markdown
Baseline model: mean and median
###Code
train_df = df_def[:cutoff]
train_df.describe()
###Output
_____no_output_____
###Markdown
Let's see how well a model performs when its only information is the mean or the median of the training labels
###Code
mean = np.mean(train_df["SYNERGY_SCORE"])
median = np.median(train_df["SYNERGY_SCORE"])
print(f" Median = {median} \n Mean = {mean}")
###Output
Median = 10.002977
Mean = 12.892230668515133
###Markdown
We create our metrics
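The metric defined in the next cell is the root-mean-squared error between predictions and targets:$$\mathrm{RMSE} = \sqrt{\frac{1}{N}\sum_{i=1}^{N}\left(\hat{y}_i - y_i\right)^2}$$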
###Code
def r_mse(pred,y): return round(math.sqrt(((pred-y)**2).mean()), 6)
def m_rmse(m, xs, y): return r_mse(m.predict(xs), y)
error_mean = r_mse(mean, valid_y)
error_median = r_mse(median, valid_y)
print(f" Error Median = {error_median} \n Error Mean = {error_mean}")
###Output
Error Median = 27.760743
Error Mean = 27.660492
###Markdown
Decision Trees
###Code
# Now that we have preprocessed our dataset, we build the tree
Tree = DecisionTreeRegressor(max_leaf_nodes=4)
Tree.fit(xs, y);
draw_tree(Tree, xs, size=10, leaves_parallel=True, precision=2)
samp_idx = np.random.permutation(len(y))[:500]
dtreeviz(Tree, xs.iloc[samp_idx], y.iloc[samp_idx], xs.columns, dep_var,
fontname='DejaVu Sans', scale=1.6, label_fontsize=10,
orientation='LR')
###Output
/usr/local/lib/python3.7/dist-packages/sklearn/base.py:451: UserWarning: X does not have valid feature names, but DecisionTreeRegressor was fitted with feature names
"X does not have valid feature names, but"
###Markdown
Let's now have the decision tree algorithm build a bigger tree. Here, we are not passing in any stopping criteria such as max_leaf_nodes:
###Code
m = DecisionTreeRegressor()
m.fit(xs, y);
# In the training set
m_rmse(m, xs, y)
###Output
_____no_output_____
###Markdown
This just means that the model fits well on the training dataset, but we have to check how well it generalizes to unseen data:
###Code
m_rmse(m, valid_xs, valid_y)
###Output
_____no_output_____
###Markdown
Now we will check for overfitting:
###Code
m.get_n_leaves(), len(xs)
###Output
_____no_output_____
###Markdown
We see that it has as many leaves as data points; let's see what happens if we restrict the model.
###Code
m = DecisionTreeRegressor(min_samples_leaf=25)
m.fit(to.train.xs, to.train.y)
m_rmse(m, xs, y), m_rmse(m, valid_xs, valid_y)
m.get_n_leaves()
###Output
_____no_output_____
###Markdown
**The RMSE is almost the same as the baseline model. That's not good, let's try some hyperparameter tuning.**
###Code
leafs = np.arange(500)+1
error_list = list()
for n_leafs in leafs:
m = DecisionTreeRegressor(min_samples_leaf=n_leafs)
m.fit(to.train.xs, to.train.y)
error_list.append( m_rmse(m, valid_xs, valid_y) )
error_list = np.asarray(error_list)
best_error = min(error_list)
best_leaf = leafs[error_list== min(error_list)][0]
print(f"Best number of leafs = {best_leaf} \n Error = {best_error}")
###Output
Best number of leafs = 179
Error = 26.609312
###Markdown
Not outstanding, barely better. We should try another algorithm: Random Forest
###Code
def rf(xs, y, n_estimators=100, max_samples=300,
max_features=0.5, min_samples_leaf=5, **kwargs):
return RandomForestRegressor(n_jobs=-1, n_estimators=n_estimators,
max_samples=max_samples, max_features=max_features,
min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y)
m = rf(xs, y);
m_rmse(m, xs, y), m_rmse(m, valid_xs, valid_y)
###Output
_____no_output_____
###Markdown
A little better than the Tree regressor, but not that great. Feature importance
###Code
def rf_feat_importance(m, df):
return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}
).sort_values('imp', ascending=False)
fi = rf_feat_importance(m, xs)
fi[:10]
def plot_fi(fi):
return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False)
plot_fi(fi[:30]);
###Output
_____no_output_____ |
day 6.ipynb | ###Markdown
day 6 assignment
###Code
class bank:
def __init__(self,ownername,balance):
self.ownername = ownername
self.balance = balance
def deposit(self,amount):
self.balance +=amount
print("your updated balance is :", self.balance)
def withdraw(self,amount):
if(self.balance>amount):
self.balance-=amount
print("your updated balance is :", self.balance)
else:
print("you don't have enough cradit in your account, see you have only",self.balance)
khashyap=bank("khashyap21",2000)
khashyap.deposit(2000)
khashyap.withdraw(4000)
khashyap.withdraw(3000)
###Output
your updated balance is : 1000
###Markdown
Question 2
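For reference, the class below implements the standard cone formulas for volume and total surface area (lateral surface plus base), with radius $r$ and height $h$:$$V = \frac{1}{3}\pi r^{2} h, \qquad A = \pi r\left(r + \sqrt{r^{2} + h^{2}}\right)$$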
###Code
import math
class cone:
def __init__(self,radius,height):
self.radius=radius
self.height=height
def volume(self):
vol = math.pi * (self.radius**2) * (self.height/3)
print("Volume of this cone is : ",vol)
def surfaceArea(self):
area = math.pi* self.radius *(self.radius+(math.sqrt((self.radius**2)+(self.height**2))))
print("Surface area of this cone is ",area)
con = cone(3,4)
con.volume()
con.surfaceArea()
###Output
Surface area of this cone is 75.39822368615503
|
bbdd/Flujo compra aleatorio datos nuevos.ipynb | ###Markdown
Creation and purchase of game keys
###Code
import mysql.connector
import pandas as pd
from mysql.connector import errorcode
import random
from datetime import datetime
try:
cnx = mysql.connector.connect(
host="localhost",
user="root",
database='stum_for_you',
passwd=""
)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
cursor = cnx.cursor()
for auxiliar in range(0,100):
# pick a random game
cursor.execute("SELECT COUNT(*) FROM juegos")
numjuegos = cursor.fetchone()[0] + 1
id_juego = random.randrange(1,numjuegos)
sql = """SELECT * FROM juegos WHERE id_juego = %s """ % (id_juego)
cursor.execute(sql)
juego = cursor.fetchone()
# pick a random supplier
cursor.execute("SELECT COUNT(*) FROM proveedor")
numprov = cursor.fetchone()[0] + 1
id_proveedor = random.randrange(1,numprov)
sql = """SELECT * FROM proveedor WHERE id_proveedor = %s """ % (id_proveedor)
cursor.execute(sql)
proveedor = cursor.fetchone()
# add a row to the purchase-transactions table
elem = random.randrange(1,100)
sql = "INSERT INTO transacciones_compra (precio_total, fecha_compra) VALUES (%s, %s)"
descuento = random.randrange(1,10) /10
print(descuento)
preciojuego=int(juego[3]*descuento )
# fecha = random purchase date
inicio = datetime(2017, 1, 30)
final = datetime(2020, 1, 1)
fecha = inicio + (final - inicio) * random.random()
val = (preciojuego * elem, fecha)
cursor.execute(sql,val)
cnx.commit()
cursor.execute("SELECT MAX(id_transaccion) FROM transacciones_compra")
id_transac = cursor.fetchone()[0]
# generate and add the keys
print(elem)
for i in range (0, elem) :
aux = True
while aux :
num1 = str(random.randrange(1,999999))
num2 = str(random.randrange(1,9999))
clave = num1.zfill(6) + '-' + num2.zfill(4)
sql = """SELECT * FROM claves_juegos WHERE clave = %s """ % (clave)
cursor.execute(sql)
if(len(cursor.fetchall()) == 0) :
aux = False
sql = "INSERT INTO claves_juegos (clave, fecha_anexion, id_juego) VALUES (%s, %s, %s)"
val = (clave, fecha, id_juego)
cursor.execute(sql,val)
cnx.commit()
cursor.execute("SELECT MAX(id_clave) FROM claves_juegos")
id_clave = cursor.fetchone()[0]
# add the purchase record
sql = "INSERT INTO compra_juegos (id_proveedor, id_transaccion, id_claves_juego, precio) VALUES (%s, %s, %s, %s)"
val = (id_proveedor, id_transac, id_clave, preciojuego)
cursor.execute(sql,val)
cnx.commit()
# print("proveedor :" , id_proveedor , "id_transac :" , id_transac ,"id_clave :" , id_clave , "preciojuegoD :"
# , preciojuego , "preciojuego :", juego[3])
###Output
0.7
50
0.9
18
0.7
98
0.5
96
0.7
74
0.5
23
0.3
93
0.4
14
0.5
75
0.8
16
0.7
30
0.2
49
0.4
15
0.5
69
0.5
12
0.7
23
0.6
16
0.8
61
0.1
39
0.8
41
0.9
27
0.8
79
0.6
61
0.5
57
0.4
61
0.5
91
0.6
16
0.6
50
0.6
51
0.7
46
0.4
87
0.3
45
0.3
3
0.2
21
0.7
26
0.2
36
0.7
44
0.9
35
0.8
86
0.1
94
0.5
89
0.1
43
0.4
7
0.4
58
0.5
86
0.1
37
0.5
85
0.9
69
0.2
80
0.8
60
0.8
46
0.8
52
0.2
57
0.8
74
0.5
38
0.2
35
0.1
94
0.9
16
0.7
30
0.7
30
0.1
91
0.8
94
0.8
35
0.9
3
0.6
5
0.7
19
0.9
97
0.9
17
0.4
31
0.4
47
0.9
31
0.5
47
0.8
42
0.2
40
0.5
1
0.8
96
0.9
15
0.4
29
0.8
44
0.6
12
0.3
47
0.9
77
0.2
51
0.2
80
0.4
57
0.6
73
0.9
53
0.7
98
0.3
1
0.7
85
0.1
57
0.9
69
0.5
71
0.9
4
0.4
64
0.7
23
0.2
86
0.7
3
0.9
85
0.9
92
###Markdown
Creation and purchase of DLC keys
###Code
import mysql.connector
import pandas as pd
from mysql.connector import errorcode
import random
from datetime import datetime
try:
cnx = mysql.connector.connect(
host="localhost",
user="root",
database='stum_for_you',
passwd=""
)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
cursor = cnx.cursor()
for auxiliar in range(0,70):
# pick a random DLC
cursor.execute("SELECT COUNT(*) FROM dlcs")
numdlcs = cursor.fetchone()[0] + 1
id_dlc = random.randrange(1,numdlcs)
sql = """SELECT * FROM dlcs WHERE id_dlc = %s """ % (id_dlc)
cursor.execute(sql)
dlc = cursor.fetchone()
# pick a random supplier
cursor.execute("SELECT COUNT(*) FROM proveedor")
numprov = cursor.fetchone()[0] + 1
id_proveedor = random.randrange(1,numprov)
sql = """SELECT * FROM proveedor WHERE id_proveedor = %s """ % (id_proveedor)
cursor.execute(sql)
proveedor = cursor.fetchone()
# add a row to the purchase transactions table
elem = random.randrange(1,100)
sql = "INSERT INTO transacciones_compra (precio_total, fecha_compra) VALUES (%s, %s)"
descuento = random.randrange(1,10) /10
#print(descuento)
preciodlc=int(dlc[1]*descuento)
# fecha = random purchase date
inicio = datetime(2017, 1, 30)
final = datetime(2020, 1, 1)
fecha = inicio + (final - inicio) * random.random()
val = (preciodlc * elem, fecha)
cursor.execute(sql,val)
cnx.commit()
cursor.execute("SELECT MAX(id_transaccion) FROM transacciones_compra")
id_transac = cursor.fetchone()[0]
# add the purchased keys
#print(elem)
for i in range (0, elem) :
aux = True
while aux :
num1 = str(random.randrange(1,999999))
num2 = str(random.randrange(1,9999))
clave = num1.zfill(6) + '#' + num2.zfill(4)
sql = """SELECT * FROM claves_dlc WHERE clave = %s """ % (clave)
cursor.execute(sql)
if(len(cursor.fetchall()) == 0) :
aux = False
sql = "INSERT INTO claves_dlc (clave, fecha_anexion, id_dlc) VALUES (%s, %s, %s)"
val = (clave, fecha, id_dlc)
cursor.execute(sql,val)
cnx.commit()
cursor.execute("SELECT MAX(id_clave) FROM claves_dlc")
id_clave = cursor.fetchone()[0]
# add the purchase record
sql = "INSERT INTO compra_dlcs (id_proveedor, id_transaccion, id_claves_dlc, precio) VALUES (%s, %s, %s, %s)"
val = (id_proveedor, id_transac, id_clave, preciodlc)
cursor.execute(sql,val)
cnx.commit()
#print("proveedor :" , id_proveedor , "id_transac :" , id_transac ,"id_clave :" , id_clave , "precioDLCD :"
# , preciodlc , "preciodlc :", dlc[3])
###Output
_____no_output_____ |
Coordinate Transformation Tutorial.ipynb | ###Markdown
Coordinate transformations and Error PropagationThe idea is to explore different options to propagate errors from the observables ($\alpha$, $\delta$, $\varpi$, $\mu_{\alpha*}$, $\mu_\delta$ and $V_r$) to Cartesian heliocentric velocities. Along the way, we shall also see transformations to intermediate coordinate systems (basically Galactic spherical coordinates).We shall see three ways:- Astropy- PyGaia- GalPy _(soon)_- Python Code __(¡¡WATCH OUT!! Parallax error -> the Jacobian is assuming that distance = 1/plx)__For each one, we will average a thousand executions using the _timeit_ package and obtain an estimated time cost.
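Both PyGaia's `transformCovarianceMatrix` and the `Jacobian` module used in section 3 implement first-order error propagation: if the new quantities are $y = f(x)$, the covariance matrix of the observables transforms as

$$
\Sigma_y = J\,\Sigma_x\,J^{\mathsf{T}}, \qquad J_{ij} = \frac{\partial f_i}{\partial x_j},
$$

which is exactly the `J4 @ cov @ J4.T` product evaluated at the end of the notebook.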
###Code
import timeit
import numpy as np
""" Test star coordinates & errors """
#J2000
ra=266.40506655 #right ascension in degrees
dec=-28.93616241 #declination in degrees
plx=4 #parallax in mas
pmra=2 #proper motion in alpha* in mas/yr
pmdec=3 #proper motion in delta in mas/yr
vr=0 #radial velocity in km/s
e_ra=0.1 #error in RA in mas
e_dec=0.1 #error in DEC in mas
e_plx=0.3 #error in plx in mas
e_pmra=0.7 #error in PMRA in mas/yr
e_pmdec=0.7 #error in PMDEC in mas/yr
e_vr=0 #error in Vr in km/s
""" Correct values based on NED calculator (ned.ipac.caltech.edu)
l=0 degrees
b=0 degrees
d=250 pc (1/plx)
"""
###Output
_____no_output_____
###Markdown
1) Astropy
###Code
from astropy import units as u
from astropy.coordinates import SkyCoord,Galactocentric
from astropy.coordinates import HeliocentricTrueEcliptic,Galactic,LSR,HCRS
star=SkyCoord(ra=ra*u.degree, dec=dec*u.degree,
distance=(plx*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=pmra*u.mas/u.yr,
pm_dec=pmdec*u.mas/u.yr,
radial_velocity=vr*u.km/u.s)
star
""" Part I: change of coordinates """
#A: ICRS to Galactic
star_GAL=star.transform_to(Galactic)
tAstro=timeit.timeit(stmt='star.transform_to(Galactic)',globals=globals(),number=1000)/1000
print('Astropy\n\tStar at ({} deg:{} deg) in ICRS ->\
({}:{}) in Gal.Coord.\n\nTime: {} seconds'.format(ra,dec,star_GAL.l,star_GAL.b,tAstro))
#B: ICRS to Galactocentric (http://docs.astropy.org/en/stable/generated/examples/coordinates/plot_galactocentric-frame.html)
star_cart = star.transform_to(Galactocentric)
tAstro=timeit.timeit(stmt='star.transform_to(Galactocentric)',globals=globals(),number=1000)/1000
print(star_cart.x,star_cart.y,star_cart.z)
print(star_cart.v_x,star_cart.v_y,star_cart.v_z)
print('Time: {}'.format(tAstro))
""" Part II: error propagation """
#As far as I know, not available -in a suitable way- in version 2.02
###Output
_____no_output_____
###Markdown
2) PyGaia
###Code
import pygaia.astrometry.vectorastrometry as vecast
from pygaia.astrometry.coordinates import CoordinateTransformation
from pygaia.astrometry.coordinates import Transformations
""" Part I: change of coordinates """
#A: ICRS to GAL
#define the transformation
ICRS2GAL=CoordinateTransformation(Transformations.ICRS2GAL)
#use the methods to transform: first the position
l,b=ICRS2GAL.transformSkyCoordinates(np.deg2rad(ra),np.deg2rad(dec))
tGaiaCoord=timeit.timeit(stmt='ICRS2GAL.transformSkyCoordinates(np.deg2rad(ra),np.deg2rad(dec))',
globals=globals(),number=1000)/1000
#then the proper motions
mul,mub=ICRS2GAL.transformProperMotions(np.deg2rad(ra),np.deg2rad(dec),pmra,pmdec)
tGaiaPM=timeit.timeit(stmt='ICRS2GAL.transformProperMotions(np.deg2rad(ra),np.deg2rad(dec),pmra,pmdec)',
globals=globals(),number=1000)/1000
print('PyGaia\n\tStar at ({} deg:{} deg) in ICRS -> ({} deg:{} deg) in Gal.Coord.\n\nTime: {} seconds'.format(
ra,dec,np.rad2deg(l),np.rad2deg(b),tGaiaCoord))
print('PyGaia\n\tStar at ({} mas/yr:{} mas/yr) in ICRS -> ({} mas/yr:{} mas/yr) in Gal.Coord.\n\nTime: {} seconds'.format(
pmra,pmdec,mul,mub,tGaiaPM))
#B: GAL to Helio-cartesian
#to change to cartesian, we use the module 'vecast'
x,y,z,u,v,w=vecast.astrometryToPhaseSpace(l,b,plx,mul,mub,vr)
tGaia=timeit.timeit(stmt='vecast.astrometryToPhaseSpace(l,b,plx,mul,mub,vr)',
globals=globals(),number=1000)/1000
print('PyGaia\n\tStar at ({} deg:{} deg:{} mas) in GAL -> ({} pc:{} pc:{} pc) in Heliocentric.Coord.\n\nTime:\
{} seconds'.format(
np.rad2deg(l),np.rad2deg(b),plx,x,y,z,tGaia))
#A+B:ICRS to Heliocentric Cartesian
#full transformation in one function
def pygaiachange(ra,dec,plx,pmra,pmdec,vr):
""" From observables in ICRS (angles in degrees, plx in mas, proper motion in mas/yr, los velocity in km/s)
returns X,Y,Z (in pc) and U,V,W (in km/s)."""
import pygaia.astrometry.vectorastrometry as vecast
from pygaia.astrometry.coordinates import CoordinateTransformation
from pygaia.astrometry.coordinates import Transformations
ICRS2GAL=CoordinateTransformation(Transformations.ICRS2GAL)
#GAL2ICRS=CoordinateTransformation(Transformations.GAL2ICRS)
l,b=ICRS2GAL.transformSkyCoordinates(np.deg2rad(ra),np.deg2rad(dec))
mul,mub=ICRS2GAL.transformProperMotions(np.deg2rad(ra),np.deg2rad(dec),pmra,pmdec)
return vecast.astrometryToPhaseSpace(l,b,plx,mul,mub,vr)
x,y,z,u,v,w=pygaiachange(ra,dec,plx,pmra,pmdec,vr)
tGaia=timeit.timeit(stmt='pygaiachange(ra,dec,plx,pmra,pmdec,vr)',
globals=globals(),number=1000)/1000
print('PyGaia\n\tStar at ({} deg:{} deg:{} mas) in ICRS -> ({} pc:{} pc:{} pc) in Heliocentric.Coord.\n\nTime:\
{} seconds'.format(
ra,dec,plx,x,y,z,tGaia))
print('PyGaia\n\tStar at ({} mas/yr:{} mas/yr:{} km/s) in ICRS -> ({} km/s:{} km/s:{} km/s) in Heliocentric.Coord.\n\nTime:\
{} seconds'.format(
pmra,pmdec,vr,u,v,w,tGaia))
""" Part II: error propagation (only rotations)"""
"""
Version 1.2 (December 2016)
++++++++++++++++++++
- Add method to CoordinateTransformation for the transformation of the full (5x5) covariance matrix of
the astrometric parameters.
- Add keyword to astrometric errors prediction functions that allows to specify an extended mission
lifetime.
+ def transformCovarianceMatrix(self, phi, theta, covmat):
+
+ Transform the astrometric covariance matrix to its representation in the new coordinate system.
+
+ Parameters
+ ----------
+
+ phi - The longitude-like angle of the position of the source (radians).
+ theta - The latitude-like angle of the position of the source (radians).
+ covmat - Covariance matrix (5x5) of the astrometric parameters.
+
+ Returns
+ -------
+
+ covmat_rot - Covariance matrix in its representation in the new coordinate system.
+
+
+ c, s = self._getJacobian(phi,theta)
+ jacobian = identity(5)
+ jacobian[0][0]=c
+ jacobian[1][1]=c
+ jacobian[3][3]=c
+ jacobian[4][4]=c
+ jacobian[0][1]=s
+ jacobian[1][0]=-s
+ jacobian[3][4]=s
+ jacobian[4][3]=-s
+
+ return dot( dot(jacobian, covmat), jacobian.transpose() )
+
def _getJacobian(self, phi, theta):
Calculates the Jacobian for the transformation of the position errors and proper motion errors
between coordinate systems. This Jacobian is also the rotation matrix for the transformation of
proper motions. See section 1.5.3 of the Hipparcos Explanatory Volume 1 (equation 1.5.20).
Parameters
----------
phi - The longitude-like angle of the position of the source (radians).
theta - The latitude-like angle of the position of the source (radians).
Returns
-------
jacobian - The Jacobian matrix corresponding to (phi, theta) and the currently desired coordinate
system transformation.
p, q, r = normalTriad(phi, theta)
# zRot = z-axis of new coordinate system expressed in terms of old system
zRot = self.rotationMatrix[2,:]
zRotAll = zRot
if (p.ndim == 2):
for i in range(p.shape[1]-1):
zRotAll = vstack((zRotAll,zRot))
pRot = cross(zRotAll, transpose(r))
if (p.ndim == 2):
normPRot = sqrt(diag(dot(pRot,transpose(pRot))))
for i in range(pRot.shape[0]):
pRot[i,:] = pRot[i,:]/normPRot[i]
else:
pRot = pRot/norm(pRot)
if (p.ndim == 2):
return diag(dot(pRot,p)), diag(dot(pRot,q))
else:
return dot(pRot,p), dot(pRot,q)
"""
#Since the transformation is nested inside the 'CoordinateTransformation' method, it is only available for
#changes of coordinates defined in the 'Transformations' object. That is: ICRS<->GAL<->Ecliptic
ICRS2GAL=CoordinateTransformation(Transformations.ICRS2GAL)
help(ICRS2GAL.transformCovarianceMatrix)
GALcovMatrix=ICRS2GAL.transformCovarianceMatrix(np.deg2rad(ra),np.deg2rad(dec),np.diag([e_ra,e_dec,e_plx,e_pmra,e_pmdec]))
print(GALcovMatrix)
###Output
[[ 0.1 0. 0. 0. 0. ]
[ 0. 0.1 0. 0. 0. ]
[ 0. 0. 0.3 0. 0. ]
[ 0. 0. 0. 0.7 0. ]
[ 0. 0. 0. 0. 0.7]]
###Markdown
3) Python Code
###Code
from Jacobian import *
""" Part I: change of coordinates """
#A: ICRS to Galactic
#position
l,b=radec2lb(np.deg2rad(ra),np.deg2rad(dec))
tPythonCoord=timeit.timeit(stmt='radec2lb(np.deg2rad(ra),np.deg2rad(dec))',
globals=globals(),number=1000)/1000
#proper motions
mul,mub=pmradec2lb(np.deg2rad(ra),np.deg2rad(dec),l,b,pmra,pmdec)
tPythonPM=timeit.timeit(stmt='pmradec2lb(np.deg2rad(ra),np.deg2rad(dec),l,b,pmra,pmdec)',
globals=globals(),number=1000)/1000
print('Python Code\n\tStar at ({} deg:{} deg) in ICRS -> ({} deg:{} deg) in Gal.Coord.\n\nTime: {} seconds'.format(
ra,dec,np.rad2deg(l),np.rad2deg(b),tPythonCoord))
print('Python\n\tStar at ({} mas/yr:{} mas/yr) in ICRS -> ({} mas/yr:{} mas/yr) in Gal.Coord.\n\nTime:\
{} seconds'.format(
pmra,pmdec,mul,mub,tPythonPM))
""" Part II: error propagation """
#From ra,dec,plx,pmra,pmdec,vr to l,b,plx,U,V,W
J6=Jacob([ra,dec,plx,pmra,pmdec,0])
J4=Jacob4([ra,dec,plx,pmra,pmdec,0])
print(J6)
print(J4)
tJ6=timeit.timeit(stmt='Jacob([ra,dec,plx,pmra,pmdec,0])',
globals=globals(),number=1000)/1000
tJ4=timeit.timeit(stmt='Jacob4([ra,dec,plx,pmra,pmdec,0])',
globals=globals(),number=1000)/1000
print('Time [s]: ',tJ6,'/',tJ4)
print('Time to process Error Propagation: ',tJ4)
print('\nOriginal Covariance Matrix: ')
cov=np.diag([e_plx,e_pmra,e_pmdec,e_vr])**2
print(cov)
print('\nPropagated Covariance Matrix: ')
new_cov=J4@[email protected]
print(np.round(new_cov,2))
###Output
Time to process Error Propagation: 9.28001142156063e-05
Original Covariance Matrix:
[[0.09 0. 0. 0. ]
[0. 0.49 0. 0. ]
[0. 0. 0.49 0. ]
[0. 0. 0. 0. ]]
Propagated Covariance Matrix:
[[ 0.09 0. -0.1 0. ]
[ 0. 0. -0. 0. ]
[-0.1 -0. 0.79 -0. ]
[ 0. 0. -0. 0.69]]
|
homework_3-RNN/hw3_1(20206080).ipynb | ###Markdown
Homework 3 Problem 1 In this homework, you'll learn how to model sentences with recurrent neural networks (RNNs). We'll provide you with basic skeleton code for preprocessing sequences and performing sentiment analysis with RNNs. However, the provided code can be improved with some simple modifications. The purpose of this homework is to implement several advanced techniques for improving the performance of vanilla RNNs.First, we'll import the required libraries.
###Code
!pip install torchtext
!pip install spacy
!python -m spacy download en
import random
import time
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext import data
from torchtext import datasets
###Output
Requirement already satisfied: torchtext in /usr/local/lib/python3.6/dist-packages (0.3.1)
Requirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (from torchtext) (1.5.0+cu101)
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from torchtext) (4.41.1)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from torchtext) (2.23.0)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torchtext) (1.18.4)
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch->torchtext) (0.16.0)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext) (2020.4.5.1)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext) (2.9)
Requirement already satisfied: spacy in /usr/local/lib/python3.6/dist-packages (2.2.4)
Requirement already satisfied: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (0.6.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from spacy) (46.3.0)
Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy) (2.0.3)
Requirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (1.18.4)
Requirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (2.23.0)
Requirement already satisfied: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy) (1.0.2)
Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy) (3.0.2)
Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (1.0.2)
Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (4.41.1)
Requirement already satisfied: thinc==7.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (7.4.0)
Requirement already satisfied: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from spacy) (1.0.0)
Requirement already satisfied: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.6/dist-packages (from spacy) (1.1.3)
Requirement already satisfied: blis<0.5.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (0.4.1)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy) (1.24.3)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy) (2.9)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy) (2020.4.5.1)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy) (3.0.4)
Requirement already satisfied: importlib-metadata>=0.20; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy) (1.6.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata>=0.20; python_version < "3.8"->catalogue<1.1.0,>=0.0.7->spacy) (3.1.0)
Requirement already satisfied: en_core_web_sm==2.2.5 from https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.5/en_core_web_sm-2.2.5.tar.gz#egg=en_core_web_sm==2.2.5 in /usr/local/lib/python3.6/dist-packages (2.2.5)
Requirement already satisfied: spacy>=2.2.2 in /usr/local/lib/python3.6/dist-packages (from en_core_web_sm==2.2.5) (2.2.4)
Requirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (2.23.0)
Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (3.0.2)
Requirement already satisfied: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.0.2)
Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (2.0.3)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (46.3.0)
Requirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.18.4)
Requirement already satisfied: blis<0.5.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (0.4.1)
Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.0.2)
Requirement already satisfied: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.1.3)
Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (4.41.1)
Requirement already satisfied: thinc==7.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (7.4.0)
Requirement already satisfied: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.0.0)
Requirement already satisfied: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (0.6.0)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy>=2.2.2->en_core_web_sm==2.2.5) (2020.4.5.1)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy>=2.2.2->en_core_web_sm==2.2.5) (1.24.3)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy>=2.2.2->en_core_web_sm==2.2.5) (2.9)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy>=2.2.2->en_core_web_sm==2.2.5) (3.0.4)
Requirement already satisfied: importlib-metadata>=0.20; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy>=2.2.2->en_core_web_sm==2.2.5) (1.6.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata>=0.20; python_version < "3.8"->catalogue<1.1.0,>=0.0.7->spacy>=2.2.2->en_core_web_sm==2.2.5) (3.1.0)
✔ Download and installation successful
You can now load the model via spacy.load('en_core_web_sm')
✔ Linking successful
/usr/local/lib/python3.6/dist-packages/en_core_web_sm -->
/usr/local/lib/python3.6/dist-packages/spacy/data/en
You can now load the model via spacy.load('en')
###Markdown
PreprocessingFor your convenience, we will provide you with the basic preprocessing steps for handling the IMDB movie dataset. For more information, see https://pytorch.org/text/
###Code
TEXT = data.Field(tokenize='spacy', include_lengths=True)
LABEL = data.LabelField(dtype=torch.float)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = train_data.split(random_state=random.seed(1234))
print('Number of training examples: {:d}'.format(len(train_data)))
print('Number of validation examples: {:d}'.format(len(valid_data)))
print('Number of testing examples: {:d}'.format(len(test_data)))
TEXT.build_vocab(train_data,
max_size=25000)
LABEL.build_vocab(train_data)
# Tokens include <unk> and <pad>
print('Unique tokens in text vocabulary: {:d}'.format(len(TEXT.vocab)))
# Label is either positive or negative
print('Unique tokens in label vocabulary: {:d}'.format(len(LABEL.vocab)))
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
batch_size = 64
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=batch_size,
sort_within_batch=False,
device=device)
print(device)
# Note that the sequence is padded with <PAD>(=1) tokens after the sequence ends.
for batch in train_iterator:
text, text_length = batch.text
break
print(text[:, -1])
print(text[-10:, -1])
print(text_length[-1])
# We will re-load dataset since we already loaded one batch in above cell.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
batch_size = 64
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=batch_size,
sort_within_batch=True,
device=device)
###Output
_____no_output_____
###Markdown
ProblemsWe will provide you with skeleton code for training RNNs below. Run this code and you'll notice that the training / validation performance is no better than random guessing (50\~60%). In this homework, you'll have to improve the performance of this network above 80% with several techniques commonly used in RNNs. **Please provide your answer in your report and attach the notebook file which contains the source code for the techniques below.**(a) (3pt) Look at the shapes of the tensors `hidden` and `embedded`. Have you noticed what the problem is? Explain the issue and report the test performance when you fix it. (Hint: This is related to the length of sequences. See how the sequence is padded. You may use `nn.utils.rnn.pack_padded_sequence`.)(b) (3pt) Use different architectures, such as LSTM or GRU, and report the test performance. "Do not" change hyperparameters from (a), such as batch_size, hidden_dim, ...Now, try to use the techniques below to further improve the performance of the provided source code. Compare the test performance with/without each component.(c) (1pt) For now, the number of layers in the RNN is 1. Try to stack more layers, up to 3.(d) (1pt) Use bidirectional RNNs.(e) (1pt) Use dropout for regularization with stacked layers (recommended: 3 layers and dropout rate 0.5).(f) (1pt) Finally, apply all techniques and take enough time to play with the introduced techniques (e.g., changing hyperparameters, training more epochs, trying other techniques you know, ...). Report the final test performance with your implementation and hyperparameter choice. Please note that this is not a competition assignment. We will not evaluate your assignment strictly! Simple RNN architecture
###Code
class SimpleRNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, pad_idx):
super(SimpleRNN, self).__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
self.rnn = nn.RNN(embedding_dim,
hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, text, text_lengths):
embedded = self.embedding(text)
output, hidden = self.rnn(embedded)
hidden = hidden[-1]
return self.fc(hidden.squeeze(0))
###Output
_____no_output_____
###Markdown
Simple fixed RNN architecture
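Before the class itself, a tiny standalone illustration of what `nn.utils.rnn.pack_padded_sequence` does to a padded batch (toy tensors, independent of the IMDB pipeline):

```python
import torch
import torch.nn as nn

# Two sequences of true lengths 3 and 1, zero-padded to length 3, shape [seq_len, batch].
padded = torch.tensor([[1, 4],
                       [2, 0],
                       [3, 0]])
lengths = torch.tensor([3, 1])
packed = nn.utils.rnn.pack_padded_sequence(padded, lengths, enforce_sorted=False)
print(packed.data)         # tensor([1, 4, 2, 3]) -> only the real tokens survive
print(packed.batch_sizes)  # tensor([2, 1, 1])   -> sequences still active at each step
```

Feeding the packed sequence to the RNN keeps the final hidden state from absorbing the padded positions.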
###Code
class FixedSimpleRNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, pad_idx):
super(FixedSimpleRNN, self).__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
self.rnn = nn.RNN(embedding_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, text, text_lengths):
# text.shape [padded_sentence_size, batch_size]
embedded = self.embedding(text) # embedded.shape [padded_sentence_size, batch_size, embedding_dim]
packed_seq = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, enforce_sorted=False)
output, hidden = self.rnn(packed_seq) # hidden.shape [num_layers * num_directions, batch_size, hidden_size], num_layers * num_directions = 1
return self.fc(hidden.squeeze(0)) # hidden.squeeze(0) [batch_size, hidden_size]
###Output
_____no_output_____
###Markdown
LSTM RNN architecture
###Code
class LSTMRNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, pad_idx):
super(LSTMRNN, self).__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
self.lstm = nn.LSTM(embedding_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, text, text_lengths):
embedded = self.embedding(text)
packed_seq = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, enforce_sorted=False)
output, (hn, cn) = self.lstm(packed_seq)
return self.fc(hn.squeeze(0))
###Output
_____no_output_____
###Markdown
GRU RNN architecture
###Code
class GRURNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, pad_idx):
super(GRURNN, self).__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
self.gru = nn.GRU(embedding_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, text, text_lengths):
embedded = self.embedding(text) #[sentect len,batch size,embedding dim]
packed_seq = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, enforce_sorted=False)
output, hidden = self.gru(packed_seq)
return self.fc(hidden.squeeze(0))
###Output
_____no_output_____
###Markdown
Stacked layers and Dropout RNN architecture
###Code
class StackedRNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, pad_idx, num_layers=1, dropout=0):
if num_layers<=0:
raise Exception('num_layers must be greater than 0')
super(StackedRNN, self).__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
self.rnn = nn.RNN(embedding_dim, hidden_dim, num_layers=num_layers, dropout=dropout)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, text, text_lengths):
embedded = self.embedding(text)
packed_seq = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, enforce_sorted=False)
output, hidden = self.rnn(packed_seq)
hidden = hidden[-1]
return self.fc(hidden)
###Output
_____no_output_____
###Markdown
Bidirectional RNN architecture
###Code
class BidirectionalRNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, pad_idx):
super(BidirectionalRNN, self).__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
self.rnn = nn.RNN(embedding_dim, hidden_dim, bidirectional=True)
self.fc = nn.Linear(hidden_dim*2, output_dim)
def forward(self, text, text_lengths):
embedded = self.embedding(text)
packed_seq = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, enforce_sorted=False)
output, hidden = self.rnn(packed_seq)
conc_hidden = torch.cat((hidden[0], hidden[1]), 1)
return self.fc(conc_hidden)
###Output
_____no_output_____
###Markdown
Custom RNN architecture
###Code
class CustomRNN(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, pad_idx):
super(CustomRNN, self).__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim, padding_idx=pad_idx)
self.gru = nn.GRU(embedding_dim, hidden_dim, bidirectional=True)
self.fc = nn.Linear(hidden_dim*2, output_dim)
def forward(self, text, text_lengths):
embedded = self.embedding(text) #[sentect len,batch size,embedding dim]
packed_seq = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, enforce_sorted=False)
output, hn = self.gru(packed_seq)
conc_hidden = torch.cat([hn[0], hn[1]], 1)
return self.fc(conc_hidden)
###Output
_____no_output_____
###Markdown
Train
###Code
def binary_accuracy(preds, y):
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float()
acc = correct.sum() / len(correct)
return acc
input_dim = len(TEXT.vocab)
embedding_dim = 100
hidden_dim = 128
output_dim = 1
num_epochs = 10
val_iter = 1
pad_idx = TEXT.vocab.stoi[TEXT.pad_token]
def train(model, pth_path, input_dim = input_dim, embedding_dim = embedding_dim, hidden_dim = hidden_dim, output_dim = output_dim, num_epochs = num_epochs, val_iter = val_iter, pad_idx = pad_idx, lr=0.001):
optimizer = optim.Adam(model.parameters(), lr=lr)
criterion = nn.BCEWithLogitsLoss().to(device)
model = model.to(device)
model.train()
train_loss = list()
train_acc = list()
val_loss = list()
val_acc = list()
best_valid_loss = float('inf')
for epoch in range(num_epochs):
running_loss = 0
running_acc = 0
start_time = time.time()
for batch in train_iterator:
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(-1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
running_acc += acc.item()
running_loss /= len(train_iterator)
running_acc /= len(train_iterator)
train_loss.append(running_loss)
train_acc.append(running_acc)
if epoch % val_iter == 0:
model.eval()
valid_loss = 0
valid_acc = 0
with torch.no_grad():
for batch in valid_iterator:
text, text_lengths = batch.text
eval_predictions = model(text, text_lengths).squeeze(1)
valid_loss += criterion(eval_predictions, batch.label).item()
valid_acc += binary_accuracy(eval_predictions, batch.label).item()
model.train()
valid_loss /= len(valid_iterator)
valid_acc /= len(valid_iterator)
val_loss.append(valid_loss)
val_acc.append(valid_acc)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), pth_path)
training_time = time.time() - start_time
print('#####################################')
print('Epoch {:d} | Training Time {:.1f}s'.format(epoch+1, training_time))
print('Train Loss: {:.4f}, Train Acc: {:.2f}%'.format(running_loss, running_acc*100))
if epoch % val_iter == 0:
print('Valid Loss: {:.4f}, Valid Acc: {:.2f}%'.format(valid_loss, valid_acc*100))
return train_loss, train_acc, val_loss, val_acc
def load_model_test_performance(model, model_path):
model.load_state_dict(torch.load(model_path))
criterion = nn.BCEWithLogitsLoss().to(device)
model.eval()
test_loss, test_acc = 0, 0
with torch.no_grad():
for batch in test_iterator:
text, text_lengths = batch.text
test_preds = model(text, text_lengths).squeeze(1)
test_loss += criterion(test_preds, batch.label).item()
test_acc += binary_accuracy(test_preds, batch.label).item()
test_loss /= len(test_iterator)
test_acc /= len(test_iterator)
print('Test Loss: {:.4f}, Test Acc: {:.2f}%'.format(test_loss, test_acc*100))
def plot(train_loss, train_acc, val_loss, val_acc):
plt.figure(figsize=(10, 5))
plt.title("Training plot")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.grid()
plt.plot(range(1, len(train_loss)+1), train_loss, label="training loss")
plt.plot(range(1, len(train_acc)+1), train_acc, label="training accuracy")
plt.plot(range(1, len(val_loss)+1), val_loss, label="validation loss")
plt.plot(range(1, len(val_acc)+1), val_acc, label="validation accuracy")
plt.legend()
plt.show()
model = SimpleRNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx)
train_loss, train_acc, val_loss, val_acc = train(model, './simplernn.pth')
plot(train_loss, train_acc, val_loss, val_acc)
load_model_test_performance(model, './simplernn.pth')
model = FixedSimpleRNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx)
train_loss, train_acc, val_loss, val_acc = train(model, './fixedsimplernn.pth')
plot(train_loss, train_acc, val_loss, val_acc)
load_model_test_performance(model, './fixedsimplernn.pth')
model = LSTMRNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx)
train_loss, train_acc, val_loss, val_acc = train(model, './lstmrnn.pth')
plot(train_loss, train_acc, val_loss, val_acc)
load_model_test_performance(model, './lstmrnn.pth')
model = GRURNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx)
train_loss, train_acc, val_loss, val_acc = train(model, './grurnn.pth')
plot(train_loss, train_acc, val_loss, val_acc)
load_model_test_performance(model, './grurnn.pth')
model = StackedRNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx, num_layers=3)
train_loss, train_acc, val_loss, val_acc = train(model, './stackedrnn.pth')
plot(train_loss, train_acc, val_loss, val_acc)
load_model_test_performance(model, './stackedrnn.pth')
model = BidirectionalRNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx)
train_loss, train_acc, val_loss, val_acc = train(model, './birnn.pth')
plot(train_loss, train_acc, val_loss, val_acc)
load_model_test_performance(model, './birnn.pth')
model = StackedRNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx, num_layers=3, dropout=0.5)
train_loss, train_acc, val_loss, val_acc = train(model, './droprnn.pth')
plot(train_loss, train_acc, val_loss, val_acc)
load_model_test_performance(model, './droprnn.pth')
model = CustomRNN(input_dim, embedding_dim, hidden_dim, output_dim, pad_idx)
train_loss, train_acc, val_loss, val_acc = train(model, './customrnn.pth', hidden_dim = 256, num_epochs = 20, lr=0.0005)
plot(train_loss, train_acc, val_loss, val_acc)
load_model_test_performance(model, './customrnn.pth')
###Output
#####################################
Epoch 1 | Training Time 23.0s
Train Loss: 0.6328, Train Acc: 62.62%
Valid Loss: 0.5320, Valid Acc: 73.54%
#####################################
Epoch 2 | Training Time 23.0s
Train Loss: 0.4859, Train Acc: 76.45%
Valid Loss: 0.4646, Valid Acc: 77.77%
#####################################
Epoch 3 | Training Time 22.8s
Train Loss: 0.3996, Train Acc: 82.08%
Valid Loss: 0.4029, Valid Acc: 82.12%
#####################################
Epoch 4 | Training Time 22.9s
Train Loss: 0.3056, Train Acc: 86.95%
Valid Loss: 0.4008, Valid Acc: 82.10%
#####################################
Epoch 5 | Training Time 22.9s
Train Loss: 0.2433, Train Acc: 90.11%
Valid Loss: 0.4393, Valid Acc: 81.55%
#####################################
Epoch 6 | Training Time 22.8s
Train Loss: 0.1941, Train Acc: 92.38%
Valid Loss: 0.3832, Valid Acc: 84.43%
#####################################
Epoch 7 | Training Time 23.0s
Train Loss: 0.1456, Train Acc: 94.64%
Valid Loss: 0.3815, Valid Acc: 85.38%
#####################################
Epoch 8 | Training Time 22.8s
Train Loss: 0.1063, Train Acc: 96.33%
Valid Loss: 0.4253, Valid Acc: 85.37%
#####################################
Epoch 9 | Training Time 22.8s
Train Loss: 0.0804, Train Acc: 97.35%
Valid Loss: 0.4920, Valid Acc: 85.18%
#####################################
Epoch 10 | Training Time 22.8s
Train Loss: 0.0442, Train Acc: 98.71%
Valid Loss: 0.5386, Valid Acc: 85.89%
#####################################
Epoch 11 | Training Time 22.9s
Train Loss: 0.0309, Train Acc: 99.14%
Valid Loss: 0.6312, Valid Acc: 85.98%
#####################################
Epoch 12 | Training Time 22.9s
Train Loss: 0.0231, Train Acc: 99.40%
Valid Loss: 0.7306, Valid Acc: 85.14%
#####################################
Epoch 13 | Training Time 22.8s
Train Loss: 0.0142, Train Acc: 99.65%
Valid Loss: 0.6875, Valid Acc: 86.03%
#####################################
Epoch 14 | Training Time 22.8s
Train Loss: 0.0322, Train Acc: 98.86%
Valid Loss: 0.6284, Valid Acc: 78.69%
#####################################
Epoch 15 | Training Time 22.9s
Train Loss: 0.0585, Train Acc: 97.89%
Valid Loss: 0.6660, Valid Acc: 84.27%
#####################################
Epoch 16 | Training Time 22.8s
Train Loss: 0.0294, Train Acc: 99.02%
Valid Loss: 0.6735, Valid Acc: 85.18%
#####################################
Epoch 17 | Training Time 22.8s
Train Loss: 0.0056, Train Acc: 99.93%
Valid Loss: 0.7342, Valid Acc: 86.49%
#####################################
Epoch 18 | Training Time 22.6s
Train Loss: 0.0016, Train Acc: 99.99%
Valid Loss: 0.7694, Valid Acc: 86.41%
#####################################
Epoch 19 | Training Time 22.7s
Train Loss: 0.0009, Train Acc: 100.00%
Valid Loss: 0.8229, Valid Acc: 86.51%
#####################################
Epoch 20 | Training Time 22.7s
Train Loss: 0.0007, Train Acc: 100.00%
Valid Loss: 0.8564, Valid Acc: 86.73%
|
content/docs/data-science-with-python/labs/python-basics/3-2-Loops.ipynb | ###Markdown
LOOPS IN PYTHON Table of ContentsFor LoopsWhile Loops Estimated Time Needed: 15 min For Loops Sometimes, you might want to repeat a given operation many times. Repeated executions like this are performed by **loops**. We will look at two types of loops, **for** loops and **while** loops.Before we discuss loops, let's discuss the **range** object. It is helpful to think of the range object as an ordered list. For now, let's look at the simplest case. If we would like to generate a sequence that contains three elements ordered from 0 to 2 we simply use the following command:
###Code
range(3)
###Output
_____no_output_____
###Markdown
Example of range function. The `for` loop The **for** loop enables you to execute a code block multiple times. For example, you would use this if you would like to print out every element in a list. Let's try to use a **for** loop to print all the years presented in the list **dates**: This can be done as follows:
###Code
dates = [1982,1980,1973]
N=len(dates)
for i in range(N):
print(dates[i])
###Output
_____no_output_____
###Markdown
The indented code is executed **N** times, and each time the value of **i** is increased by 1. The statement executed is to **print** out the value in the list at index **i**, as shown here: Example of printing out the elements of a list. In this example we can print out a sequence of numbers from 0 to 7:
###Code
for i in range(0,8):
print(i)
###Output
_____no_output_____
###Markdown
Write a for loop that prints out all the elements between -5 and 5 using the range function. Double-click __here__ for the solution.<!-- for i in range(-5,6): print(i) --> In Python we can directly access the elements in the list as follows:
###Code
for year in dates:
print(year)
###Output
_____no_output_____
###Markdown
For each iteration, the value of the variable **year** behaves like the value of **dates[i]** in the first example: Example of a for loop Print the elements of the following list:**Genres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']**Make sure you follow Python conventions. Double-click __here__ for the solution.<!-- Genres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']for Genre in Genres: print(Genre) --> We can change the elements in a list:
###Code
squares=['red','yellow','green','purple','blue ']
for i in range(0,5):
print("Before square ",i, 'is', squares[i])
squares[i]='white'
print("After square ",i, 'is', squares[i])
###Output
_____no_output_____
###Markdown
Write a for loop that prints out the following list: squares=['red','yellow','green','purple','blue ']: Double-click __here__ for the solution.<!-- squares=['red','yellow','green','purple','blue ']for square in squares: print(square) --> We can access the index and the elements of a list as follows:
###Code
squares=['red','yellow','green','purple','blue ']
for i,square in enumerate(squares):
print(i,square)
###Output
_____no_output_____
###Markdown
While Loops As you can see, the **for** loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The **while** loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a **False** boolean value. Let’s say we would like to iterate through list **dates** and stop at the year 1973, then print out the number of iterations. This can be done with the following block of code:
###Code
dates = [1982,1980,1973,2000]
i=0;
year=0
while(year!=1973):
year=dates[i]
i=i+1
print(year)
print("it took ", i ,"repetitions to get out of loop")
###Output
_____no_output_____
###Markdown
A while loop keeps iterating until the condition in its argument is no longer met. Write a while loop to display the values of the Rating of an album playlist stored in the list “PlayListRatings”. If the score is less than 6, exit the loop. The list “PlayListRatings” is given by: PlayListRatings = [10,9.5,10, 8,7.5, 5,10, 10]:
###Code
PlayListRatings = [10,9.5,10,8,7.5,5,10,10]
###Output
_____no_output_____
###Markdown
Double-click __here__ for the solution.<!-- PlayListRatings = [10,9.5,10, 8,7.5, 5,10, 10]i=0;Rating=100while(Rating>6): Rating=PlayListRatings[i] i=i+1 print(Rating) --> Write a while loop to copy the strings 'orange' from the list 'squares' to the list 'new_squares'. Stop and exit the loop if the value in the list is not 'orange':
###Code
squares=['orange','orange','purple','blue ','orange']
new_squares=[];
###Output
_____no_output_____ |
_notebooks/2021-11-11-nb_chef_recipe_cs_summarization.ipynb | ###Markdown
Chef Recipe | Extractive summarization with Azure Text Analytics> Use Chef to create summaries with Azure- toc: true- badges: true- comments: true- categories: [recipe, azuretextanalytics, azure, python, jupyter]- hide: false- image: images/social/recipe_cs_summary_text_horizontal.svg ---- Modules Chef{% gist 1bc116f05d09e598a1a2dcfbb0e2fc22 chef.py %} Ingredients{% gist 5c75b7cdea330d15dcd93adbb08648c3 az_cs_summarization.py %} Call graph ---- Configuration Prerequisites:- Add the .env file in the same folder of the notebook Parameters
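The `.env` file only needs the two Text Analytics settings that are read later through `os.environ`; a minimal sketch with placeholder values (substitute your own Azure resource):

```
CS_TEXTANALYTICS_ENDPOINT=https://<your-resource>.cognitiveservices.azure.com/
CS_TEXTANALYTICS_KEY=<your-key>
```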
###Code
gist_user = 'davidefornelli'
gist_chef_id = '1bc116f05d09e598a1a2dcfbb0e2fc22'
gist_ingredients_id = '5c75b7cdea330d15dcd93adbb08648c3'
ingredients_to_import = [
(gist_ingredients_id, 'az_cs_summarization.py')
]
texts = [
'''
A computer is a machine that can be programmed to carry out sequences of arithmetic or logical operations automatically. Modern computers can perform generic sets of operations known as programs. These programs enable computers to perform a wide range of tasks. A computer system is a "complete" computer that includes the hardware, operating system (main software), and peripheral equipment needed and used for "full" operation. This term may also refer to a group of computers that are linked and function together, such as a computer network or computer cluster.
A broad range of industrial and consumer products use computers as control systems. Simple special-purpose devices like microwave ovens and remote controls are included, as are factory devices like industrial robots and computer-aided design, as well as general-purpose devices like personal computers and mobile devices like smartphones. Computers power the Internet, which links hundreds of millions of other computers and users.
Early computers were meant to be used only for calculations. Simple manual instruments like the abacus have aided people in doing calculations since ancient times. Early in the Industrial Revolution, some mechanical devices were built to automate long tedious tasks, such as guiding patterns for looms. More sophisticated electrical machines did specialized analog calculations in the early 20th century. The first digital electronic calculating machines were developed during World War II. The first semiconductor transistors in the late 1940s were followed by the silicon-based MOSFET (MOS transistor) and monolithic integrated circuit (IC) chip technologies in the late 1950s, leading to the microprocessor and the microcomputer revolution in the 1970s. The speed, power and versatility of computers have been increasing dramatically ever since then, with transistor counts increasing at a rapid pace (as predicted by Moore's law), leading to the Digital Revolution during the late 20th to early 21st centuries.
Conventionally, a modern computer consists of at least one processing element, typically a central processing unit (CPU) in the form of a microprocessor, along with some type of computer memory, typically semiconductor memory chips. The processing element carries out arithmetic and logical operations, and a sequencing and control unit can change the order of operations in response to stored information. Peripheral devices include input devices (keyboards, mice, joystick, etc.), output devices (monitor screens, printers, etc.), and input/output devices that perform both functions (e.g., the 2000s-era touchscreen). Peripheral devices allow information to be retrieved from an external source and they enable the result of operations to be saved and retrieved.
'''
]
###Output
_____no_output_____
###Markdown
Configure environment
###Code
%pip install httpimport python-dotenv
###Output
Requirement already satisfied: httpimport in /home/daforne/repos/github/davidefornelli/cookbook/.venv/lib/python3.7/site-packages (0.7.2)
Requirement already satisfied: python-dotenv in /home/daforne/repos/github/davidefornelli/cookbook/.venv/lib/python3.7/site-packages (0.19.2)
Note: you may need to restart the kernel to use updated packages.
###Markdown
Import chef
###Code
import httpimport
with httpimport.remote_repo(
['chef'],
f"https://gist.githubusercontent.com/{gist_user}/{gist_chef_id}/raw"
):
import chef
###Output
_____no_output_____
###Markdown
Import ingredients
###Code
def ingredients_import(ingredients):
for ingredient in ingredients:
mod, package = chef.process_gist_ingredient(
gist_id=ingredient[0],
gist_file=ingredient[1],
gist_user=gist_user
)
globals()[package] = mod
ingredients_import(ingredients=ingredients_to_import)
###Output
_____no_output_____
###Markdown
Extract summaries
###Code
import os
from dotenv import load_dotenv
load_dotenv()
# Apply summarization
summary_text = az_cs_summarization.summarize(
texts=texts,
cs_endpoint=os.environ['CS_TEXTANALYTICS_ENDPOINT'],
cs_key=os.environ['CS_TEXTANALYTICS_KEY'],
language='en'
)
###Output
_____no_output_____
###Markdown
Results
###Code
for sx in summary_text:
for s in sx.sentences:
print(s.text)
###Output
A computer is a machine that can be programmed to carry out sequences of arithmetic or logical operations automatically.
These programs enable computers to perform a wide range of tasks.
A broad range of industrial and consumer products use computers as control systems.
|
macro_benchmark/SSD_Tensorflow/notebooks/ssd_notebook.ipynb | ###Markdown
SSD 300 ModelThe SSD 300 network takes 300x300 image inputs. In order to feed any image, the latter is resized to this input shape (i.e. `Resize.WARP_RESIZE`). Note that even though it may change the ratio width / height, the SSD model performs well on resized images (and it is the default behaviour in the original Caffe implementation).SSD anchors correspond to the default bounding boxes encoded in the network. The SSD net output provides offsets on the coordinates and dimensions of these anchors.
###Code
# Input placeholder.
net_shape = (300, 300)
data_format = 'NHWC'
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
# Evaluation pre-processing: resize to SSD net shape.
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)
# Define the SSD model.
reuse = True if 'ssd_net' in locals() else None
ssd_net = ssd_vgg_300.SSDNet()
with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)
# Restore SSD model.
ckpt_filename = '../checkpoints/ssd_300_vgg.ckpt'
# ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)
# SSD default anchor boxes.
ssd_anchors = ssd_net.anchors(net_shape)
###Output
_____no_output_____
###Markdown
Post-processing pipelineThe SSD outputs need to be post-processed to provide proper detections. Namely, we follow these common steps:* Select boxes above a classification threshold;* Clip boxes to the image shape;* Apply the Non-Maximum Suppression (NMS) algorithm: fuse together boxes whose Jaccard score > threshold;* If necessary, resize bounding boxes to the original image shape.
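The NMS step fuses boxes according to their Jaccard (IoU) overlap. As a reference, here is a tiny standalone sketch of the IoU computation for two boxes given in `[ymin, xmin, ymax, xmax]` order (independent of the `np_methods` helpers used below):

```python
def iou(box_a, box_b):
    """Jaccard overlap of two boxes given as [ymin, xmin, ymax, xmax]."""
    ymin, xmin = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ymax, xmax = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ymax - ymin) * max(0.0, xmax - xmin)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)

print(iou([0.1, 0.1, 0.5, 0.5], [0.3, 0.3, 0.7, 0.7]))  # ~0.14
```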
###Code
# Main image processing routine.
def process_image(img, select_threshold=0.5, nms_threshold=.45, net_shape=(300, 300)):
# Run SSD network.
rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
feed_dict={img_input: img})
# Get classes and bboxes from the net outputs.
rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(
rpredictions, rlocalisations, ssd_anchors,
select_threshold=select_threshold, img_shape=net_shape, num_classes=21, decode=True)
rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
# Resize bboxes to original image shape. Note: useless for Resize.WARP!
rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)
return rclasses, rscores, rbboxes
# Test on some demo image and visualize output.
path = '../demo/'
image_names = sorted(os.listdir(path))
img = mpimg.imread(path + image_names[-5])
rclasses, rscores, rbboxes = process_image(img)
# visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
visualization.plt_bboxes(img, rclasses, rscores, rbboxes)
###Output
_____no_output_____ |
ReproducingMLpipelines/Paper1/DataPreprocessing.ipynb | ###Markdown
Load and transform dataset _**(a)**_. Install Bioconductor biocLite package in order to access the golubEsets library. [golubEsets](https://bioconductor.org/packages/release/data/experiment/manuals/golubEsets/man/golubEsets.pdf) contains the raw data used by Todd Golub in the original paper.
###Code
## Most code is commented in this cell since it is unnecessary and time-consuming to run it everytime.
#options(repos='http://cran.rstudio.com/')
#source("http://bioconductor.org/biocLite.R")
#biocLite("golubEsets")
suppressMessages(library(golubEsets))
###Output
_____no_output_____
###Markdown
_**(b)**_. Load the training, testing data from library golubEsets. Also transpose the data to make observations as rows.
###Code
#Training data predictor and response
data(Golub_Train)
golub_train_p = t(exprs(Golub_Train))
golub_train_r =pData(Golub_Train)[, "ALL.AML"]
#Testing data predictor
data(Golub_Test)
golub_test_p = t(exprs(Golub_Test))
golub_test_r = pData(Golub_Test)[, "ALL.AML"]
#Show summary
rbind(Train = dim(golub_train_p), Test = dim(golub_test_p))
cbind(Train = table(golub_train_r),Test = table(golub_test_r))
###Output
_____no_output_____
###Markdown
_**(c)**_. Perform data preprocessing: thresholding, filtering, logarithmic transformation and normalization as in the paper. The number of predictors is reduced to 3051 after preprocessing.Most details of step 1(c) are not included in the original paper. We combine the information in paper 2, paper 9 and also a reproduction by [Robert Gentleman](http://dept.stat.lsa.umich.edu/~ionides/810/gentleman05.pdf), who confirmed in his work that the procedure of thresholding and filtering is the same as in the original paper. One also needs to note that we should use the mean and standard deviation of the training data to normalize the testing data, as mentioned in Appendix A of paper 2. At the end of this step, there are 3051 predictors left. The resulting dataset is the same as the $72\times 3051$ Golub dataset available online.
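In symbols, with $\mu_g$ and $\sigma_g$ the mean and standard deviation of the log-transformed training values of gene $g$, both training and testing expression values $x$ are standardized as

$$
x' = \frac{\log_{10}(x) - \mu_g}{\sigma_g},
$$

which matches the normalization block in the code below.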
###Code
# Thresholding
golub_train_pp = golub_train_p
golub_train_pp[golub_train_pp<100] = 100
golub_train_pp[golub_train_pp>16000] = 16000
# Filtering
golub_filter = function(x, r = 5, d=500){
minval = min(x)
maxval = max(x)
(maxval/minval>r)&&(maxval-minval>d)
}
index = apply(golub_train_pp, 2, golub_filter)
golub_index = (1:7129)[index]
golub_train_pp = golub_train_pp[, golub_index]
golub_test_pp = golub_test_p
golub_test_pp[golub_test_pp<100] = 100
golub_test_pp[golub_test_pp>16000] = 16000
golub_test_pp = golub_test_pp[, golub_index]
# Log Transformation
golub_train_p_trans = log10(golub_train_pp)
golub_test_p_trans = log10(golub_test_pp)
# Normalization
train_m = colMeans(golub_train_p_trans)
train_sd = apply(golub_train_p_trans, 2, sd)
golub_train_p_trans = t((t(golub_train_p_trans)-train_m)/train_sd)
golub_test_p_trans = t((t(golub_test_p_trans)-train_m)/train_sd)
golub_train_3051 = golub_train_p_trans
golub_train_response = golub_train_r
golub_test_3051 = golub_test_p_trans
golub_test_response = golub_test_r
save(golub_train_3051, golub_train_response, golub_test_3051, golub_test_response, file = "../transformed data/golub3051.rda")
###Output
_____no_output_____ |
.ipynb_checkpoints/Naive_Bayes_CountVectorizer-checkpoint.ipynb | ###Markdown
###Code
import pandas as pd
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import model_selection, naive_bayes, svm
from sklearn.metrics import accuracy_score
from collections import Counter
#[1] Importing dataset
dataset = pd.read_json(r"C:\Users\Panos\Desktop\Dissert\Code\Video_Games_5.json", lines=True, encoding='latin-1')
dataset = dataset[['reviewText','overall']]
#[2] Reduce number of classes
ratings = []
for index,entry in enumerate(dataset['overall']):
if entry == 1.0 or entry == 2.0:
ratings.append(-1)
elif entry == 3.0:
ratings.append(0)
elif entry == 4.0 or entry == 5.0:
ratings.append(1)
#[3] Cleaning the text
import re
import nltk
from nltk.corpus import stopwords
corpus = []
for i in range(0, len(dataset)):
review = re.sub('[^a-zA-Z]', ' ', dataset['reviewText'][i])
review = review.lower()
review = review.split()
review = [word for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
corpus.append(review)
#[4] Prepare Train and Test Data sets
Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split(corpus,ratings,test_size=0.3)
print(Counter(Train_Y).values()) # counts the elements' frequency
#[5] Encoding
Encoder = LabelEncoder()
Train_Y = Encoder.fit_transform(Train_Y)
Test_Y = Encoder.fit_transform(Test_Y)
#[6] Word Vectorization
Count_vect = CountVectorizer(max_features=10000)
Count_vect.fit(corpus)
Train_X_Count = Count_vect.transform(Train_X)
Test_X_Count = Count_vect.transform(Test_X)
# the vocabulary that it has learned from the corpus
#print(Count_vect.vocabulary_)
# the vectorized data
#print(Train_X_Count)
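# Optional variant (sketch, not used by the classifier below): the same pipeline
# also works with TF-IDF weighting instead of raw counts by swapping the vectorizer.
from sklearn.feature_extraction.text import TfidfVectorizer
Tfidf_vect = TfidfVectorizer(max_features=10000)
Tfidf_vect.fit(corpus)
Train_X_Tfidf = Tfidf_vect.transform(Train_X)
Test_X_Tfidf = Tfidf_vect.transform(Test_X)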
#[7] Use the Naive Bayes Algorithms to Predict the outcome
# fit the training dataset on the NB classifier
Naive = naive_bayes.MultinomialNB()
Naive.fit(Train_X_Count,Train_Y)
# predict the labels on validation dataset
predictions_NB = Naive.predict(Test_X_Count)
# Use accuracy_score function to get the accuracy
print("-----------------------Naive Bayes------------------------\n")
print("Naive Bayes Accuracy Score -> ",accuracy_score(predictions_NB, Test_Y)*100)
# Making the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Test_Y, predictions_NB)
print("\n",cm,"\n")
# Printing a classification report of different metrics
from sklearn.metrics import classification_report
my_tags = ['Negative','Neutral','Positive'] # order follows LabelEncoder's sorted classes (-1, 0, 1)
print(classification_report(Test_Y, predictions_NB,target_names=my_tags))
# Export reports to files for later visualizations
report_NB = classification_report(Test_Y, predictions_NB,target_names=my_tags, output_dict=True)
report_NB_df = pd.DataFrame(report_NB).transpose()
report_NB_df.to_csv(r'NB_report_CountVect.csv', index = True, float_format="%.3f")
###Output
-----------------------Naive Bayes------------------------
Naive Bayes Accuracy Score -> 76.33963241004402
[[ 5213 1521 1736]
[ 1742 2975 3825]
[ 3117 4511 44894]]
precision recall f1-score support
    Negative       0.52      0.62      0.56      8470
     Neutral       0.33      0.35      0.34      8542
    Positive       0.89      0.85      0.87     52522
accuracy 0.76 69534
macro avg 0.58 0.61 0.59 69534
weighted avg 0.78 0.76 0.77 69534
|
Model/09-27-xgb-reg02.ipynb | ###Markdown
XGBoosting. Reference notebook on interpretable ML with XGBoost (PDP/ICE plots): https://nbviewer.jupyter.org/github/jphall663/interpretable_machine_learning_with_python/blob/master/xgboost_pdp_ice.ipynb?flush_cache=true
###Code
import numpy as np # array, vector, matrix calculations
import pandas as pd # DataFrame handling
###Output
_____no_output_____
###Markdown
Import data and clean
###Code
df = pd.read_csv('credit_cards_dataset.csv')
X = df.drop(['ID', 'default.payment.next.month'], axis=1).values
Y = df['default.payment.next.month'].values
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
###Output
_____no_output_____
###Markdown
Setting Parameterhttps://xgboost.readthedocs.io/en/latest/python/python_intro.html
###Code
import xgboost as xgb # gradient boosting machines (GBMs)
# XGBoost Regressor
mod = xgb.XGBRegressor(
gamma=1,
learning_rate=0.01,
max_depth=3,
n_estimators=10000,
subsample=0.8,
random_state=42,
verbosity=2
)
mod.fit(X_train, Y_train)
mod.save_model('reg01.model')
ypred = mod.predict(X_test)
ypred[:3]
#ypred.reshape(-1,1)
import math
from sklearn.metrics import mean_squared_error, mean_absolute_error
rmse = math.sqrt(mean_squared_error(Y_test, ypred))
print('RMSE (L2 loss): {}'.format(rmse))
mae = mean_absolute_error(Y_test, ypred)
print('MAE (L1 loss) : {}'.format(mae))
predictions = np.rint(ypred)
#predictions.reshape(-1,1)
predictions.sum()
predictions.shape
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
accuracy = accuracy_score(Y_test, predictions)
cm = confusion_matrix(Y_test, predictions)
precision = precision_score(Y_test, predictions)
recall = recall_score(Y_test, predictions)
print(accuracy)
print(cm)
print(precision)
print(recall)
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.figure()
plot_confusion_matrix(cm, classes=['Non_Default','Default'], normalize=False,
title='Non Normalized confusion matrix')
#plt.show()
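# A minimal, manual partial-dependence sketch in the spirit of the PDP/ICE reference notebook linked
# above (an added illustration, not part of the original notebook). For one feature, the PDP averages
# the model's predictions over the data while sweeping that feature across a grid; feature index 0 is
# an arbitrary illustrative choice.
pdp_feature = 0
grid = np.linspace(X_test[:, pdp_feature].min(), X_test[:, pdp_feature].max(), 20)
pdp_values = []
for value in grid:
    X_mod = X_test.copy()
    X_mod[:, pdp_feature] = value                 # force the feature to a fixed value for every row
    pdp_values.append(mod.predict(X_mod).mean())  # average prediction = partial dependence at this value
plt.plot(grid, pdp_values)
plt.xlabel('feature %d' % pdp_feature)
plt.ylabel('average model prediction')
plt.title('Manual partial dependence sketch')
plt.show()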
###Output
_____no_output_____ |
notebooks/tf2-20ng-bert.ipynb | ###Markdown
20 newsgroup text classification with BERT finetuningIn this notebook, we'll use a pre-trained [BERT](https://arxiv.org/abs/1810.04805) model for text classification using TensorFlow 2 / Keras and HuggingFace's [Transformers](https://github.com/huggingface/transformers). This notebook is based on ["Predicting Movie Review Sentiment with BERT on TF Hub"](https://github.com/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb) by Google and ["BERT Fine-Tuning Tutorial with PyTorch"](https://mccormickml.com/2019/07/22/BERT-fine-tuning/) by Chris McCormick.**Note that using a GPU with this notebook is highly recommended.**First, the needed imports.
###Code
%matplotlib inline
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import TensorBoard
from transformers import BertTokenizer, TFBertForSequenceClassification
from transformers import __version__ as transformers_version
from distutils.version import LooseVersion as LV
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import io, sys, os, datetime
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
print('Using TensorFlow version:', tf.__version__,
'Keras version:', tf.keras.__version__,
'Transformers version:', transformers_version)
assert(LV(tf.__version__) >= LV("2.3.0"))
if len(tf.config.list_physical_devices('GPU')):
from tensorflow.python.client import device_lib
for d in device_lib.list_local_devices():
if d.device_type == 'GPU':
print('GPU:', d.physical_device_desc)
else:
print('No GPU, using CPU instead.')
###Output
_____no_output_____
###Markdown
20 Newsgroups data setNext we'll load the [20 Newsgroups](http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html) data set. The dataset contains 20000 messages collected from 20 different Usenet newsgroups (1000 messages from each group):|[]()|[]()|[]()|[]()|| --- | --- |--- | --- || alt.atheism | soc.religion.christian | comp.windows.x | sci.crypt | | talk.politics.guns | comp.sys.ibm.pc.hardware | rec.autos | sci.electronics | | talk.politics.mideast | comp.graphics | rec.motorcycles | sci.space | | talk.politics.misc | comp.os.ms-windows.misc | rec.sport.baseball | sci.med | | talk.religion.misc | comp.sys.mac.hardware | rec.sport.hockey | misc.forsale |
###Code
TEXT_DATA_DIR = "/media/data/20_newsgroup"
print('Processing text dataset')
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
args = {} if sys.version_info < (3,) else {'encoding': 'latin-1'}
with open(fpath, **args) as f:
t = f.read()
i = t.find('\n\n') # skip header
if 0 < i:
t = t[i:]
texts.append(t)
labels.append(label_id)
labels = np.array(labels)
print('Found %s texts.' % len(texts))
###Output
_____no_output_____
###Markdown
We split the data into training, validation, and test sets using scikit-learn's `train_test_split()`.
###Code
TEST_SET = 4000
(texts_train, texts_test,
labels_train, labels_test) = train_test_split(texts, labels,
test_size=TEST_SET,
shuffle=True, random_state=42)
(texts_train, texts_valid,
labels_train, labels_valid) = train_test_split(texts_train, labels_train,
shuffle=False,
test_size=0.1)
print('Length of training texts:', len(texts_train), 'labels:', len(labels_train))
print('Length of validation texts:', len(texts_valid), 'labels:', len(labels_valid))
print('Length of test texts:', len(texts_test), 'labels:', len(labels_test))
###Output
_____no_output_____
###Markdown
BERTNext we specify the pre-trained BERT model we are going to use. The model `"bert-base-uncased"` is the lowercased "base" model (12-layer, 768-hidden, 12-heads, 110M parameters). TokenizationWe load the used vocabulary from the BERT model, and use the BERT tokenizer to convert the messages into tokens that match the data the BERT model was trained on.
###Code
BERTMODEL='bert-base-uncased'
CACHE_DIR='/media/data/transformers-cache/'
tokenizer = BertTokenizer.from_pretrained(BERTMODEL,
do_lower_case=True,
cache_dir=CACHE_DIR)
###Output
_____no_output_____
###Markdown
Next we tokenize all datasets. We set the maximum sequence lengths for our training and test messages as MAX_LEN_TRAIN and MAX_LEN_TEST. The maximum length supported by the used BERT model is 512 tokens.
###Code
%%time
MAX_LEN_TRAIN, MAX_LEN_TEST = 128, 512
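# train/validation texts are truncated to 128 tokens, while the test set uses the 512-token maximum supported by this BERT model (see the note above)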
data_train = tokenizer(texts_train, padding=True, truncation=True,
return_tensors="tf", max_length=MAX_LEN_TRAIN)
data_valid = tokenizer(texts_valid, padding=True, truncation=True,
return_tensors="tf", max_length=MAX_LEN_TRAIN)
data_test = tokenizer(texts_test, padding=True, truncation=True,
return_tensors="tf", max_length=MAX_LEN_TEST)
###Output
_____no_output_____
###Markdown
Let us look at the truncated tokenized first training message.
###Code
data_train["input_ids"][0]
###Output
_____no_output_____
###Markdown
We can also convert the token ids back to tokens. `[CLS]` and `[SEP]` are special tokens required by BERT.
###Code
tokenizer.decode(data_train["input_ids"][0])
###Output
_____no_output_____
###Markdown
TF DatasetsLet's now define our TF `Dataset`s for training, validation, and test data. A batch size of 16 or 32 is often recommended for fine-tuning BERT on a specific task.
###Code
BATCH_SIZE = 32
dataset_train = tf.data.Dataset.from_tensor_slices((data_train.data, labels_train))
dataset_train = dataset_train.shuffle(len(dataset_train)).batch(BATCH_SIZE)
dataset_valid = tf.data.Dataset.from_tensor_slices((data_valid.data, labels_valid))
dataset_valid = dataset_valid.batch(BATCH_SIZE)
dataset_test = tf.data.Dataset.from_tensor_slices((data_test.data, labels_test))
dataset_test = dataset_test.batch(BATCH_SIZE)
###Output
_____no_output_____
###Markdown
Model initializationWe now load a pretrained BERT model with a single linear classification layer added on top.
###Code
model = TFBertForSequenceClassification.from_pretrained(BERTMODEL,
cache_dir=CACHE_DIR,
num_labels=20)
###Output
_____no_output_____
###Markdown
We use Adam as the optimizer, categorical crossentropy as loss, and then compile the model.`LR` is the learning rate for the Adam optimizer (2e-5 to 5e-5 recommended for BERT finetuning).
###Code
LR = 2e-5
optimizer = tf.keras.optimizers.Adam(learning_rate=LR, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
print(model.summary())
###Output
_____no_output_____
###Markdown
Learning
###Code
logdir = os.path.join(os.getcwd(), "logs",
"20ng-bert-"+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
print('TensorBoard log directory:', logdir)
os.makedirs(logdir)
callbacks = [TensorBoard(log_dir=logdir)]
###Output
_____no_output_____
###Markdown
For fine-tuning BERT on a specific task, 2-4 epochs is often recommended.
###Code
%%time
EPOCHS = 4
history = model.fit(dataset_train, validation_data=dataset_valid,
epochs=EPOCHS, verbose=2, callbacks=callbacks)
###Output
_____no_output_____
###Markdown
Let's take a look at loss and accuracy for train and validation sets:
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,3))
ax1.plot(history.epoch,history.history['loss'], label='training')
ax1.plot(history.epoch,history.history['val_loss'], label='validation')
ax1.set_title('loss')
ax1.set_xlabel('epoch')
ax1.legend(loc='best')
ax2.plot(history.epoch,history.history['accuracy'], label='training')
ax2.plot(history.epoch,history.history['val_accuracy'], label='validation')
ax2.set_title('accuracy')
ax2.set_xlabel('epoch')
ax2.legend(loc='best');
###Output
_____no_output_____
###Markdown
InferenceFor a better measure of the quality of the model, let's see the model accuracy for the test messages.
###Code
%%time
test_scores = model.evaluate(dataset_test, verbose=2)
print("Test set %s: %.2f%%" % (model.metrics_names[1], test_scores[1]*100))
###Output
_____no_output_____
###Markdown
We can also look at classification accuracies separately for each newsgroup, and compute a confusion matrix to see which newsgroups get mixed the most:
###Code
test_predictions = model.predict(dataset_test)
cm=confusion_matrix(labels_test,
np.argmax(test_predictions[0], axis=1),
labels=list(range(20)))
print('Classification accuracy for each newsgroup:'); print()
labels = [l[0] for l in sorted(labels_index.items(), key=lambda x: x[1])]
for i,j in enumerate(cm.diagonal()/cm.sum(axis=1)): print("%s: %.4f" % (labels[i].ljust(26), j))
print()
print('Confusion matrix (rows: true newsgroup; columns: predicted newsgroup):'); print()
np.set_printoptions(linewidth=9999)
print(cm); print()
plt.figure(figsize=(10,10))
plt.imshow(cm, cmap="gray", interpolation="none")
plt.title('Confusion matrix (rows: true newsgroup; columns: predicted newsgroup)')
plt.grid(None)
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=90)
plt.yticks(tick_marks, labels);
###Output
_____no_output_____ |
uncertainty/.ipynb_checkpoints/quantile-regression-with-keras-checkpoint.ipynb | ###Markdown
code, kbd, pre, samp { font-family:'consolas', Lucida Console, SimSun, Fira Code, Monaco !important; font-size: 11pt !important;}
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from tqdm import tqdm
import gc
def reduce_mem_usage(df, verbose=True):
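    # downcast each numeric column to the smallest integer/float dtype that can represent its min/max, to reduce memory use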
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
#
def autocorrelation(ys, t=1):
return np.corrcoef(ys[:-t], ys[t:])
#==========================================================================
def preprocess_sales(sales, start=1400, upper=1970):
'''process sales data
'''
if start is not None:
print("dropping...")
to_drop = [f"d_{i+1}" for i in range(start-1)]
print(sales.shape)
sales.drop(to_drop, axis=1, inplace=True)
print(sales.shape)
#=======
print("adding...")
new_columns = ['d_%i'%i for i in range(1942, upper, 1)] # 1942-1970
for col in new_columns:
sales[col] = np.nan
print("melting...")
sales = sales.melt(id_vars=["id", "item_id", "dept_id", "cat_id", "store_id", "state_id","scale","start"],
var_name='d', value_name='demand')
print("generating order")
if start is not None:
skip = start
else:
skip = 1
sales["nb"] = sales.index // 42840 + skip
return sales
#===============================================================
def preprocess_calendar(calendar):
'''clean and transform calendar data
'''
global maps, mods
calendar["event_name"] = calendar["event_name_1"]
calendar["event_type"] = calendar["event_type_1"]
map1 = {mod:i for i,mod in enumerate(calendar['event_name'].unique())}
calendar['event_name'] = calendar['event_name'].map(map1)
map2 = {mod:i for i,mod in enumerate(calendar['event_type'].unique())}
calendar['event_type'] = calendar['event_type'].map(map2)
calendar['nday'] = calendar['date'].str[-2:].astype(int)
maps["event_name"] = map1
maps["event_type"] = map2
mods["event_name"] = len(map1)
mods["event_type"] = len(map2)
calendar["wday"] -=1
calendar["month"] -=1
calendar["year"] -= 2011
mods["month"] = 12
mods["year"] = 6
mods["wday"] = 7
mods['snap_CA'] = 2
mods['snap_TX'] = 2
mods['snap_WI'] = 2
calendar.drop(["event_name_1", "event_name_2", "event_type_1", "event_type_2", "date", "weekday"],
axis=1, inplace=True)
return calendar
#=========================================================
def make_dataset(categorize=False ,start=1400, upper= 1970):
global maps, mods
print("loading calendar...")
calendar = pd.read_csv("../input/m5-forecasting-uncertainty/calendar.csv")
print("loading sales...")
sales = pd.read_csv("../input/walmartadd/sales.csv")
cols = ["item_id", "dept_id", "cat_id","store_id","state_id"]
if categorize:
for col in cols:
temp_dct = {mod:i for i, mod in enumerate(sales[col].unique())}
mods[col] = len(temp_dct)
maps[col] = temp_dct
for col in cols:
sales[col] = sales[col].map(maps[col])
#
sales =preprocess_sales(sales, start=start, upper= upper)
calendar = preprocess_calendar(calendar)
calendar = reduce_mem_usage(calendar)
print("merge with calendar...")
sales = sales.merge(calendar, on='d', how='left')
del calendar
print("reordering...")
sales.sort_values(by=["id","nb"], inplace=True)
print("re-indexing..")
sales.reset_index(inplace=True, drop=True)
gc.collect()
sales['n_week'] = (sales['nb']-1)//7
sales["nday"] -= 1
mods['nday'] = 31
sales = reduce_mem_usage(sales)
gc.collect()
return sales
#===============================================================================#
%%time
CATEGORIZE = True;
START = 1400; UPPER = 1970;
maps = {}
mods = {}
sales = make_dataset(categorize=CATEGORIZE ,start=START, upper= UPPER)
sales["x"] = sales["demand"] / sales["scale"]
LAGS = [28, 35, 42, 49, 56, 63]
FEATS = []
for lag in tqdm(LAGS):
sales[f"x_{lag}"] = sales.groupby("id")["x"].shift(lag)
FEATS.append(f"x_{lag}")
#
#sales.loc[(sales.start>1844)&(sales.nb>1840)&(sales.nb<1850), ['id','start','nb','demand']]
#sales.start.max() #1845
print(sales.shape)
sales = sales.loc[sales.nb>sales.start]
print(sales.shape)
nb = sales['nb'].values
MAX_LAG = max(LAGS)
#tr_mask = np.logical_and(nb>START + MAX_LAG, nb<=1913)
tr_mask = np.logical_and(nb>START + MAX_LAG, nb<=1941) # NOTE: sorry, this is fake (leaky) validation: training runs through d_1941 and overlaps the validation window below; I didn't expect it to lift the score like that
val_mask = np.logical_and(nb>1913, nb<=1941)
te_mask = np.logical_and(nb>1941, nb<=1969)
scale = sales['scale'].values
ids = sales['id'].values
#y = sales['demand'].values
#ys = y / scale
ys = sales['x'].values
Z = sales[FEATS].values
sv = scale[val_mask]
se = scale[te_mask]
ids = ids[te_mask]
ids = ids.reshape((-1, 28))
ca = sales[['snap_CA']].values
tx = sales[['snap_TX']].values
wi = sales[['snap_WI']].values
wday = sales[['wday']].values
month = sales[['month']].values
year = sales[['year']].values
event = sales[['event_name']].values
nday = sales[['nday']].values
item = sales[['item_id']].values
dept = sales[['dept_id']].values
cat = sales[['cat_id']].values
store = sales[['store_id']].values
state = sales[['state_id']].values
def make_data(mask):
x = {"snap_CA":ca[mask], "snap_TX":tx[mask], "snap_WI":wi[mask], "wday":wday[mask],
"month":month[mask], "year":year[mask], "event":event[mask], "nday":nday[mask],
"item":item[mask], "dept":dept[mask], "cat":cat[mask], "store":store[mask],
"state":state[mask], "num":Z[mask]}
t = ys[mask]
return x, t
xt, yt = make_data(tr_mask) #train
xv, yv = make_data(val_mask) # val
xe, ye = make_data(te_mask) # test
import tensorflow.keras.layers as L
import tensorflow.keras.models as M
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
import tensorflow as tf
###Output
_____no_output_____
###Markdown
This is a baseline model. Feel free to add your own feature-engineering (FE) magic!
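For reference, the qloss objective defined in the next cell is the pinball (quantile) loss averaged over the nine target quantiles: for quantile $q$ and error $e = y - \hat{y}$ it is $\rho_q(e) = \max\big(q\,e,\ (q-1)\,e\big)$, so under-prediction is penalized with weight $q$ and over-prediction with weight $1-q$; minimizing it pushes the corresponding output toward the $q$-th conditional quantile.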
###Code
#=====
def qloss(y_true, y_pred):
# Pinball loss for multiple quantiles
qs = [0.005, 0.025, 0.165, 0.250, 0.500, 0.750, 0.835, 0.975, 0.995]
q = tf.constant(np.array([qs]), dtype=tf.float32)
e = y_true - y_pred
v = tf.maximum(q*e, (q-1)*e)
return K.mean(v)
#============================#
def make_model(n_in):
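    # one embedding per categorical input, concatenated into a "context" vector and combined with the numeric lag features; the final Dense(9) head outputs one value per target quantile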
num = L.Input((n_in,), name="num")
ca = L.Input((1,), name="snap_CA")
tx = L.Input((1,), name="snap_TX")
wi = L.Input((1,), name="snap_WI")
wday = L.Input((1,), name="wday")
month = L.Input((1,), name="month")
year = L.Input((1,), name="year")
event = L.Input((1,), name="event")
nday = L.Input((1,), name="nday")
item = L.Input((1,), name="item")
dept = L.Input((1,), name="dept")
cat = L.Input((1,), name="cat")
store = L.Input((1,), name="store")
state = L.Input((1,), name="state")
inp = {"snap_CA":ca, "snap_TX":tx, "snap_WI":wi, "wday":wday,
"month":month, "year":year, "event":event, "nday":nday,
"item":item, "dept":dept, "cat":cat, "store":store,
"state":state, "num":num}
#
ca_ = L.Embedding(mods["snap_CA"], mods["snap_CA"], name="ca_3d")(ca)
tx_ = L.Embedding(mods["snap_TX"], mods["snap_TX"], name="tx_3d")(tx)
wi_ = L.Embedding(mods["snap_WI"], mods["snap_WI"], name="wi_3d")(wi)
wday_ = L.Embedding(mods["wday"], mods["wday"], name="wday_3d")(wday)
month_ = L.Embedding(mods["month"], mods["month"], name="month_3d")(month)
year_ = L.Embedding(mods["year"], mods["year"], name="year_3d")(year)
event_ = L.Embedding(mods["event_name"], mods["event_name"], name="event_3d")(event)
nday_ = L.Embedding(mods["nday"], mods["nday"], name="nday_3d")(nday)
item_ = L.Embedding(mods["item_id"], 10, name="item_3d")(item)
dept_ = L.Embedding(mods["dept_id"], mods["dept_id"], name="dept_3d")(dept)
cat_ = L.Embedding(mods["cat_id"], mods["cat_id"], name="cat_3d")(cat)
store_ = L.Embedding(mods["store_id"], mods["store_id"], name="store_3d")(store)
state_ = L.Embedding(mods["state_id"], mods["state_id"], name="state_3d")(state)
p = [ca_, tx_, wi_, wday_, month_, year_, event_, nday_, item_, dept_, cat_, store_, state_]
emb = L.Concatenate(name="embds")(p)
context = L.Flatten(name="context")(emb)
x = L.Concatenate(name="x1")([context, num])
x = L.Dense(500, activation="relu", name="d1")(x)
x = L.Dropout(0.3)(x)
x = L.Concatenate(name="m1")([x, context])
x = L.Dense(500, activation="relu", name="d2")(x)
x = L.Dropout(0.3)(x)
x = L.Concatenate(name="m2")([x, context])
x = L.Dense(500, activation="relu", name="d3")(x)
preds = L.Dense(9, activation="linear", name="preds")(x)
model = M.Model(inp, preds, name="M1")
model.compile(loss=qloss, optimizer="adam")
return model
net = make_model(len(FEATS))
ckpt = ModelCheckpoint("w.h5", monitor='val_loss', verbose=1, save_best_only=True,mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
es = EarlyStopping(monitor='val_loss', patience=3)
print(net.summary())
net.fit(xt, yt, batch_size=50_000, epochs=20, validation_data=(xv, yv), callbacks=[ckpt, reduce_lr, es])
nett = make_model(len(FEATS))
nett.load_weights("w.h5")
pv = nett.predict(xv, batch_size=50_000, verbose=1)
pe = nett.predict(xe, batch_size=50_000, verbose=1)
nett.evaluate(xv, yv, batch_size=50_000)
pv = pv.reshape((-1, 28, 9))
pe = pe.reshape((-1, 28, 9))
sv = sv.reshape((-1, 28))
se = se.reshape((-1, 28))
Yv = yv.reshape((-1, 28))
k = np.random.randint(0, 42840)
#k = np.random.randint(0, 200)
print(ids[k, 0])
plt.plot(np.arange(28, 56), Yv[k], label="true")
plt.plot(np.arange(28, 56), pv[k ,:, 3], label="q25")
plt.plot(np.arange(28, 56), pv[k ,:, 4], label="q50")
plt.plot(np.arange(28, 56), pv[k, :, 5], label="q75")
plt.legend(loc="best")
plt.show()
###Output
_____no_output_____
###Markdown
Prediction
###Code
names = [f"F{i+1}" for i in range(28)]
piv = pd.DataFrame(ids[:, 0], columns=["id"])
QUANTILES = ["0.005", "0.025", "0.165", "0.250", "0.500", "0.750", "0.835", "0.975", "0.995"]
VALID = []
EVAL = []
for i, quantile in tqdm(enumerate(QUANTILES)):
t1 = pd.DataFrame(pv[:,:, i]*sv, columns=names)
t1 = piv.join(t1)
t1["id"] = t1["id"] + f"_{quantile}_validation"
t2 = pd.DataFrame(pe[:,:, i]*se, columns=names)
t2 = piv.join(t2)
t2["id"] = t2["id"] + f"_{quantile}_evaluation"
VALID.append(t1)
EVAL.append(t2)
#============#
sub = pd.concat(VALID + EVAL)  # DataFrame.append is deprecated in newer pandas; concat of the per-quantile frames is equivalent
del VALID, EVAL, t1, t2
sub.head()
sub.to_csv('./submission_from_keras.csv', index=False)
###Output
_____no_output_____ |
nbs/99_manuscript/lvs/lv116/lv116-cell_types.ipynb | ###Markdown
Description Generates the figure for top cell types for a specified LV (in Settings section below). Modules loading
###Code
%load_ext autoreload
%autoreload 2
import re
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from data.recount2 import LVAnalysis
from utils import chunker
import conf
###Output
_____no_output_____
###Markdown
Settings
###Code
LV_NAME = "LV116"
LV_AXIS_THRESHOLD = 3.0
N_TOP_SAMPLES = 400
N_TOP_ATTRS = 25
OUTPUT_FIGURES_DIR = Path(
conf.MANUSCRIPT["FIGURES_DIR"], "lvs_analysis", f"{LV_NAME.lower()}"
).resolve()
display(OUTPUT_FIGURES_DIR)
OUTPUT_FIGURES_DIR.mkdir(parents=True, exist_ok=True)
OUTPUT_CELL_TYPE_FILEPATH = OUTPUT_FIGURES_DIR / f"{LV_NAME.lower()}-cell_types.svg"
display(OUTPUT_CELL_TYPE_FILEPATH)
###Output
_____no_output_____
###Markdown
Load MultiPLIER summary
###Code
multiplier_model_summary = pd.read_pickle(conf.MULTIPLIER["MODEL_SUMMARY_FILE"])
multiplier_model_summary.shape
multiplier_model_summary.head()
###Output
_____no_output_____
###Markdown
Load data Original data
###Code
INPUT_SUBSET = "z_score_std"
INPUT_STEM = "projection-smultixcan-efo_partial-mashr-zscores"
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
f"{INPUT_SUBSET}-{INPUT_STEM}.pkl",
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
data = pd.read_pickle(input_filepath)
data.shape
data.head()
###Output
_____no_output_____
###Markdown
LV data
###Code
lv_obj = LVAnalysis(LV_NAME, data)
multiplier_model_summary[
multiplier_model_summary["LV index"].isin((LV_NAME[2:],))
& (
(multiplier_model_summary["FDR"] < 0.05)
| (multiplier_model_summary["AUC"] >= 0.75)
)
]
lv_data = lv_obj.get_experiments_data()
lv_data.shape
lv_data.head()
###Output
_____no_output_____
###Markdown
LV cell types analysis Get top attributes
###Code
lv_attrs = lv_obj.get_attributes_variation_score()
display(lv_attrs.head(20))
# show those with cell type or tissue in their name
_tmp = pd.Series(lv_attrs.index)
lv_attrs[
_tmp.str.match(
"(?:cell.+type$)|(?:tissue$)|(?:tissue.+type$)",
case=False,
flags=re.IGNORECASE,
).values
].sort_values(ascending=False)
_tmp = lv_data.loc[
:,
[
"tissue",
"cell type",
"cell subtype",
"tissue type",
LV_NAME,
],
]
_tmp_seq = list(chunker(_tmp.sort_values(LV_NAME, ascending=False), 25))
_tmp_seq[1]
SELECTED_ATTRIBUTE = "tissue"
# it has to be in the order desired for filling nans in the SELECTED_ATTRIBUTE
SECOND_ATTRIBUTES = ["cell type", "celltype", "agent"]
###Output
_____no_output_____
###Markdown
Get plot data
###Code
plot_data = lv_data.loc[:, [SELECTED_ATTRIBUTE] + SECOND_ATTRIBUTES + [LV_NAME]]
# if blank/nan, fill cell type column with tissue content
_new_column = plot_data[[SELECTED_ATTRIBUTE] + SECOND_ATTRIBUTES].fillna(
method="backfill", axis=1
)[SELECTED_ATTRIBUTE]
plot_data[SELECTED_ATTRIBUTE] = _new_column
plot_data = plot_data.drop(columns=SECOND_ATTRIBUTES)
plot_data = plot_data.fillna({SELECTED_ATTRIBUTE: "NOT CATEGORIZED"})
# plot_data = plot_data.dropna(subset=[SELECTED_ATTRIBUTE])
plot_data = plot_data.sort_values(LV_NAME, ascending=False)
plot_data.head(20)
###Output
_____no_output_____
###Markdown
Customize x-axis values When cell type values are not very clear, customize their names by looking at their specific studies to know exactly what the authors meant.
###Code
final_plot_data = plot_data.replace(
{
SELECTED_ATTRIBUTE: {
"whole blood": "Whole blood",
"PBMCs": "Peripheral blood mononuclear cells",
"monocyte-derived macrophages": "Monocyte-derived macrophages",
"peripheral blood monocytes": "Monocytes",
"dermal fibroblast": "Dermal fibroblasts",
"proximal tubular epithelial cells": "Proximal tubular epithelial cells",
"glioblastoma cell line": "Glioblastoma cell line",
}
}
)
_srp_code = "SRP048804"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: x[SELECTED_ATTRIBUTE]
+ f" ({lv_data.loc[(_srp_code, x.name), 'cell line']})",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
_srp_code = "SRP045352"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: "Monocytes" + f" ({lv_data.loc[(_srp_code, x.name), 'agent']})",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
_srp_code = "SRP056733"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: "Macrophages" + f" ({lv_data.loc[(_srp_code, x.name), 'agent']})",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
_srp_code = "SRP062958"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: x[SELECTED_ATTRIBUTE]
+ f" ({lv_data.loc[(_srp_code, x.name), 'treatment']})",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
# # add also tissue information to these projects
_srp_code = "SRP015670"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: x[SELECTED_ATTRIBUTE]
+ f" ({lv_data.loc[(_srp_code, x.name), 'treatment']})",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
# # add also tissue information to these projects
_srp_code = "SRP045500"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: x[SELECTED_ATTRIBUTE]
+ f" ({lv_data.loc[(_srp_code, x.name), 'diseasestatus']})",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
# # add also tissue information to these projects
_srp_code = "SRP045569"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: x[SELECTED_ATTRIBUTE]
+ f" ({lv_data.loc[(_srp_code, x.name), 'treatment']})",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
# # add also tissue information to these projects
_srp_code = "SRP062966"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: x[SELECTED_ATTRIBUTE]
+ f" ({lv_data.loc[(_srp_code, x.name), 'ism']} SLE)",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
# # add also tissue information to these projects
_srp_code = "SRP059039"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: x[SELECTED_ATTRIBUTE]
+ f" ({lv_data.loc[(_srp_code, x.name), 'group']} cases)",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
# # add also tissue information to these projects
_srp_code = "SRP060370"
_tmp = final_plot_data.loc[(_srp_code,)].apply(
lambda x: x[SELECTED_ATTRIBUTE]
+ f" ({lv_data.loc[(_srp_code, x.name), 'treatment']})",
axis=1,
)
final_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values
final_plot_data = final_plot_data.replace(
{
SELECTED_ATTRIBUTE: {
"\(interferon-alpha\)": "(IFNa)",
"\(HSV-1\)": "(HSV)",
"\(West Nile virus \(WNV\)\)": "(WNV)",
"\(ISM_high SLE\)": "(ISM high SLE)",
}
},
regex=True,
)
###Output
_____no_output_____
###Markdown
Threshold LV values
###Code
final_plot_data.loc[
final_plot_data[LV_NAME] > LV_AXIS_THRESHOLD, LV_NAME
] = LV_AXIS_THRESHOLD
###Output
_____no_output_____
###Markdown
Delete samples with no tissue/cell type information
###Code
final_plot_data = final_plot_data[
final_plot_data[SELECTED_ATTRIBUTE] != "NOT CATEGORIZED"
]
###Output
_____no_output_____
###Markdown
Set x-axis order
###Code
N_TOP_ATTRS = 15
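# overrides the N_TOP_ATTRS = 25 set in the Settings section above; only the 15 highest-scoring attributes are kept for the plot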
attr_order = (
final_plot_data.groupby(SELECTED_ATTRIBUTE)
.max()
.sort_values(LV_NAME, ascending=False)
.index[:N_TOP_ATTRS]
.tolist()
)
len(attr_order)
attr_order[:5]
###Output
_____no_output_____
###Markdown
Plot
###Code
with sns.plotting_context("paper", font_scale=2.0), sns.axes_style("whitegrid"):
sns.catplot(
data=final_plot_data,
y=LV_NAME,
x=SELECTED_ATTRIBUTE,
order=attr_order,
kind="strip",
height=5,
aspect=3,
)
plt.xticks(rotation=45, horizontalalignment="right")
plt.xlabel("")
plt.savefig(
OUTPUT_CELL_TYPE_FILEPATH,
bbox_inches="tight",
facecolor="white",
)
###Output
_____no_output_____
###Markdown
Debug
###Code
# with pd.option_context(
# "display.max_rows", None, "display.max_columns", None, "display.max_colwidth", None
# ):
# _tmp = final_plot_data[final_plot_data[SELECTED_ATTRIBUTE].str.contains("Salm")]
# display(_tmp)
# # what is there in these projects?
# with pd.option_context(
# "display.max_rows", None, "display.max_columns", None, "display.max_colwidth", None
# ):
# _tmp = (
# lv_data.loc[["SRP063059"]]
# .dropna(how="all", axis=1)
# .sort_values(LV_NAME, ascending=False)
# )
# display(_tmp)
###Output
_____no_output_____ |
notebooks/automl/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Automated Machine Learning_**Classification with Deployment using a Bank Marketing Dataset**_ Contents1. [Introduction](Introduction)1. [Setup](Setup)1. [Train](Train)1. [Results](Results)1. [Deploy](Deploy)1. [Test](Test)1. [Acknowledgements](Acknowledgements) IntroductionIn this example we use the UCI Bank Marketing dataset to showcase how you can use AutoML for a classification problem and deploy it to an Azure Container Instance (ACI). The classification goal is to predict if the client will subscribe to a term deposit with the bank.If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. Please find the ONNX related documentations [here](https://github.com/onnx/onnx).In this notebook you will learn how to:1. Create an experiment using an existing workspace.2. Configure AutoML using `AutoMLConfig`.3. Train the model using local compute with ONNX compatible config on.4. Explore the results, featurization transparency options and save the ONNX model5. Inference with the ONNX model.6. Register the model.7. Create a container image.8. Create an Azure Container Instance (ACI) service.9. Test the ACI service.In addition this notebook showcases the following features- **Blacklisting** certain pipelines- Specifying **target metrics** to indicate stopping criteria- Handling **missing data** in the input SetupAs part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
###Code
import logging
from matplotlib import pyplot as plt
import pandas as pd
import os
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.automl.core.featurization import FeaturizationConfig
from azureml.core.dataset import Dataset
from azureml.train.automl import AutoMLConfig
from azureml.explain.model._internal.explanation_client import ExplanationClient
ws = Workspace.from_config()
# choose a name for experiment
experiment_name = 'automl-classification-bmarketing-all'
experiment=Experiment(ws, experiment_name)
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Experiment Name'] = experiment.name
pd.set_option('display.max_colwidth', None)  # -1 is no longer accepted by newer pandas; None means no truncation
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
###Output
_____no_output_____
###Markdown
Create or Attach existing AmlComputeYou will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource. Creation of AmlCompute takes approximately 5 minutes. If the AmlCompute with that name is already in your workspace this code will skip the creation process.As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota.
###Code
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
# Choose a name for your cluster.
amlcompute_cluster_name = "cpu-cluster"
found = False
# Check if this compute target already exists in the workspace.
cts = ws.compute_targets
if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':
found = True
print('Found existing compute target.')
compute_target = cts[amlcompute_cluster_name]
if not found:
print('Creating a new compute target...')
provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_V2", # for GPU, use "STANDARD_NC6"
#vm_priority = 'lowpriority', # optional
max_nodes = 6)
# Create the cluster.
compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)
print('Checking cluster status...')
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min_node_count is provided, it will use the scale settings for the cluster.
compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)
# For a more detailed view of current AmlCompute status, use get_status().
###Output
_____no_output_____
###Markdown
Data Load DataLeverage azure compute to load the bank marketing dataset as a Tabular Dataset into the dataset variable. Training Data
###Code
data = pd.read_csv("https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv")
data.head()
# Add missing values in 75% of the lines.
import numpy as np
missing_rate = 0.75
n_missing_samples = int(np.floor(data.shape[0] * missing_rate))
missing_samples = np.hstack((np.zeros(data.shape[0] - n_missing_samples, dtype=bool), np.ones(n_missing_samples, dtype=bool)))  # np.bool was removed in newer NumPy; use the builtin bool
rng = np.random.RandomState(0)
rng.shuffle(missing_samples)
missing_features = rng.randint(0, data.shape[1], n_missing_samples)
data.values[np.where(missing_samples)[0], missing_features] = np.nan
if not os.path.isdir('data'):
os.mkdir('data')
# Save the train data to a csv to be uploaded to the datastore
pd.DataFrame(data).to_csv("data/train_data.csv", index=False)
ds = ws.get_default_datastore()
ds.upload(src_dir='./data', target_path='bankmarketing', overwrite=True, show_progress=True)
# Upload the training data as a tabular dataset for access during training on remote compute
train_data = Dataset.Tabular.from_delimited_files(path=ds.path('bankmarketing/train_data.csv'))
label = "y"
###Output
_____no_output_____
###Markdown
Validation Data
###Code
validation_data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_validate.csv"
validation_dataset = Dataset.Tabular.from_delimited_files(validation_data)
###Output
_____no_output_____
###Markdown
Test Data
###Code
test_data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_test.csv"
test_dataset = Dataset.Tabular.from_delimited_files(test_data)
###Output
_____no_output_____
###Markdown
TrainInstantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.|Property|Description||-|-||**task**|classification or regression or forecasting||**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: accuracyAUC_weightedaverage_precision_score_weightednorm_macro_recallprecision_score_weighted||**iteration_timeout_minutes**|Time limit in minutes for each iteration.||**blacklist_models** or **whitelist_models** |*List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run. Allowed values for **Classification**LogisticRegressionSGDMultinomialNaiveBayesBernoulliNaiveBayesSVMLinearSVMKNNDecisionTreeRandomForestExtremeRandomTreesLightGBMGradientBoostingTensorFlowDNNTensorFlowLinearClassifierAllowed values for **Regression**ElasticNetGradientBoostingDecisionTreeKNNLassoLarsSGDRandomForestExtremeRandomTreesLightGBMTensorFlowLinearRegressorTensorFlowDNNAllowed values for **Forecasting**ElasticNetGradientBoostingDecisionTreeKNNLassoLarsSGDRandomForestExtremeRandomTreesLightGBMTensorFlowLinearRegressorTensorFlowDNNArimaProphet||**experiment_exit_score**| Value indicating the target for *primary_metric*. Once the target is surpassed the run terminates.||**experiment_timeout_minutes**| Maximum amount of time in minutes that all iterations combined can take before the experiment terminates.||**enable_early_stopping**| Flag to enble early termination if the score is not improving in the short term.||**featurization**| 'auto' / 'off' Indicator for whether featurization step should be done automatically or not. Note: If the input data is sparse, featurization cannot be turned on.||**n_cross_validations**|Number of cross validation splits.||**training_data**|Input dataset, containing both features and label column.||**label_column_name**|The name of the label column.||**model_explainability**|Indicate to explain each trained pipeline or not.|**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-trainprimary-metric)
###Code
automl_settings = {
"experiment_timeout_minutes" : 20,
"enable_early_stopping" : True,
"iteration_timeout_minutes": 5,
"max_concurrent_iterations": 4,
"max_cores_per_iteration": -1,
#"n_cross_validations": 2,
"primary_metric": 'AUC_weighted',
"featurization": 'auto',
"verbosity": logging.INFO,
}
automl_config = AutoMLConfig(task = 'classification',
debug_log = 'automl_errors.log',
compute_target=compute_target,
experiment_exit_score = 0.9984,
blacklist_models = ['KNN','LinearSVM'],
enable_onnx_compatible_models=True,
training_data = train_data,
label_column_name = label,
validation_data = validation_dataset,
model_explainability=True,
**automl_settings
)
###Output
_____no_output_____
###Markdown
Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.
###Code
remote_run = experiment.submit(automl_config, show_output = False)
remote_run
###Output
_____no_output_____
###Markdown
Run the following cell to access previous runs. Uncomment the cell below and update the run_id.
###Code
#from azureml.train.automl.run import AutoMLRun
#experiment_name = 'automl-classification-bmarketing'
#experiment = Experiment(ws, experiment_name)
#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')
#remote_run
# Wait for the remote run to complete
remote_run.wait_for_completion()
best_run_customized, fitted_model_customized = remote_run.get_output()
###Output
_____no_output_____
###Markdown
TransparencyView updated featurization summary
###Code
custom_featurizer = fitted_model_customized.named_steps['datatransformer']
df = custom_featurizer.get_featurization_summary()
pd.DataFrame(data=df)
###Output
_____no_output_____
###Markdown
Set `is_user_friendly=False` to get a more detailed summary for the transforms being applied.
###Code
df = custom_featurizer.get_featurization_summary(is_user_friendly=False)
pd.DataFrame(data=df)
df = custom_featurizer.get_stats_feature_type_summary()
pd.DataFrame(data=df)
###Output
_____no_output_____
###Markdown
Results
###Code
from azureml.widgets import RunDetails
RunDetails(remote_run).show()
###Output
_____no_output_____
###Markdown
Retrieve the Best Model's explanationRetrieve the explanation from the best_run which includes explanations for engineered features and raw features. Make sure that the run for generating explanations for the best model is completed.
###Code
# Wait for the best model explanation run to complete
from azureml.train.automl.run import AutoMLRun
model_explainability_run_id = remote_run.get_properties().get('ModelExplainRunId')
print(model_explainability_run_id)
if model_explainability_run_id is not None:
model_explainability_run = AutoMLRun(experiment=experiment, run_id=model_explainability_run_id)
model_explainability_run.wait_for_completion()
# Get the best run object
best_run, fitted_model = remote_run.get_output()
###Output
_____no_output_____
###Markdown
Download engineered feature importance from artifact storeYou can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run.
###Code
client = ExplanationClient.from_run(best_run)
engineered_explanations = client.download_model_explanation(raw=False)
exp_data = engineered_explanations.get_feature_importance_dict()
exp_data
###Output
_____no_output_____
###Markdown
Download raw feature importance from artifact storeYou can use ExplanationClient to download the raw feature explanations from the artifact store of the best_run.
###Code
client = ExplanationClient.from_run(best_run)
engineered_explanations = client.download_model_explanation(raw=True)
exp_data = engineered_explanations.get_feature_importance_dict()
exp_data
###Output
_____no_output_____
###Markdown
Retrieve the Best ONNX ModelBelow we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.Set the parameter return_onnx_model=True to retrieve the best ONNX model, instead of the Python model.
###Code
best_run, onnx_mdl = remote_run.get_output(return_onnx_model=True)
###Output
_____no_output_____
###Markdown
Save the best ONNX model
###Code
from azureml.automl.core.onnx_convert import OnnxConverter
onnx_fl_path = "./best_model.onnx"
OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)
###Output
_____no_output_____
###Markdown
Predict with the ONNX model, using onnxruntime package Note: The code will install the onnxruntime==0.4.0 if not installed. Newer versions of the onnxruntime have compatibility issues.
###Code
test_df = test_dataset.to_pandas_dataframe()
import sys
import json
from azureml.automl.core.onnx_convert import OnnxConvertConstants
from azureml.train.automl import constants
if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:
python_version_compatible = True
else:
python_version_compatible = False
onnxrt_present = False
try:
import onnxruntime
from azureml.automl.core.onnx_convert import OnnxInferenceHelper
from onnxruntime import __version__ as ORT_VER
if ORT_VER == '0.4.0':
onnxrt_present = True
except ImportError:
onnxrt_present = False
# Install the onnxruntime if the version 0.4.0 is not installed.
if not onnxrt_present:
print("Installing the onnxruntime version 0.4.0.")
!{sys.executable} -m pip install --user --force-reinstall onnxruntime==0.4.0
onnxrt_present = True
def get_onnx_res(run):
res_path = 'onnx_resource.json'
run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX, output_file_path=res_path)
with open(res_path) as f:
onnx_res = json.load(f)
return onnx_res
if onnxrt_present and python_version_compatible:
mdl_bytes = onnx_mdl.SerializeToString()
onnx_res = get_onnx_res(best_run)
onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_res)
pred_onnx, pred_prob_onnx = onnxrt_helper.predict(test_df)
print(pred_onnx)
print(pred_prob_onnx)
else:
if not python_version_compatible:
print('Please use Python version 3.6 or 3.7 to run the inference helper.')
if not onnxrt_present:
print('Please install the onnxruntime package to do the prediction with ONNX model.')
###Output
_____no_output_____
###Markdown
Deploy Retrieve the Best ModelBelow we select the best pipeline from our iterations. The `get_output` method on `automl_classifier` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. Widget for Monitoring RunsThe widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details
###Code
best_run, fitted_model = remote_run.get_output()
import os
import shutil
sript_folder = os.path.join(os.getcwd(), 'inference')
project_folder = '/inference'
os.makedirs(project_folder, exist_ok=True)
model_name = best_run.properties['model_name']
script_file_name = 'inference/score.py'
conda_env_file_name = 'inference/env.yml'
best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'inference/score.py')
best_run.download_file('outputs/conda_env_v_1_0_0.yml', 'inference/env.yml')
###Output
_____no_output_____
###Markdown
Register the Fitted Model for DeploymentIf neither `metric` nor `iteration` are specified in the `register_model` call, the iteration with the best primary metric is registered.
###Code
description = 'AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit'
tags = None
model = remote_run.register_model(model_name = model_name, description = description, tags = tags)
print(remote_run.model_id) # This will be written to the script file later in the notebook.
###Output
_____no_output_____
###Markdown
Deploy the model as a Web Service on Azure Container Instance
###Code
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice
from azureml.core.webservice import Webservice
from azureml.core.model import Model
inference_config = InferenceConfig(runtime = "python",
entry_script = script_file_name,
conda_file = conda_env_file_name)
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "bmData", 'type': "automl_classification"},
description = 'sample service for Automl Classification')
aci_service_name = 'automl-sample-bankmarketing-all'
print(aci_service_name)
aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)
aci_service.wait_for_deployment(True)
print(aci_service.state)
###Output
_____no_output_____
###Markdown
Delete a Web ServiceDeletes the specified web service.
###Code
#aci_service.delete()
###Output
_____no_output_____
###Markdown
Get Logs from a Deployed Web ServiceGets logs from a deployed web service.
###Code
#aci_service.get_logs()
###Output
_____no_output_____
###Markdown
TestNow that the model is trained, run the test data through the trained model to get the predicted values.
###Code
# Load the bank marketing datasets.
from numpy import array
X_test = test_dataset.drop_columns(columns=['y'])
y_test = test_dataset.keep_columns(columns=['y'], validate=True)
test_dataset.take(5).to_pandas_dataframe()
X_test = X_test.to_pandas_dataframe()
y_test = y_test.to_pandas_dataframe()
y_pred = fitted_model.predict(X_test)
actual = array(y_test)
actual = actual[:,0]
print(y_pred.shape, " ", actual.shape)
###Output
_____no_output_____
###Markdown
Calculate metrics for the predictionNow visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values from the trained model that was returned.
###Code
%matplotlib notebook
test_pred = plt.scatter(actual, y_pred, color='b')
test_test = plt.scatter(actual, actual, color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
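# The markdown above says "calculate metrics", but this cell only draws a scatter plot; a minimal
# added sketch (not part of the original notebook) that computes standard classification metrics
# from the same `actual` and `y_pred` arrays:
from sklearn.metrics import accuracy_score, confusion_matrix
print("Accuracy:", accuracy_score(actual, y_pred))
print("Confusion matrix:\n", confusion_matrix(actual, y_pred))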
###Output
_____no_output_____ |
notebooks/parrot.ipynb | ###Markdown
Load test data from "/notebooks/data/output.json" which is a list of json events
###Code
import json
with open("/notebooks/data/output.json") as f:
data = json.loads(f.read())
print(type(data))
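# a small added peek at the structure (assumption: each event is a dict-like JSON object)
print(len(data))
print(data[0])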
###Output
<type 'list'>
|
AAAI/Learnability/CIN/MLP/ds3/synthetic_type3_MLP_size_100_m_20.ipynb | ###Markdown
Generate dataset
###Code
# Assumed setup for this excerpt: the imports below (and the constants m / train_size) are not
# shown earlier in the notebook, so they are added here. m = 20 follows the notebook name
# (synthetic_type3_MLP_size_100_m_20); train_size = 100 is an assumption from the same name.
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

m = 20            # number of instances averaged into each data point (assumed)
train_size = 100  # number of training points (assumed)

np.random.seed(12)
y = np.random.randint(0,10,5000)
idx= []
for i in range(10):
print(i,sum(y==i))
idx.append(y==i)
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [7,4],cov=[[0.1,0],[0,0.1]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [8,6.5],cov=[[0.1,0],[0,0.1]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [5.5,6.5],cov=[[0.1,0],[0,0.1]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [-1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [0,2],cov=[[0.1,0],[0,0.1]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [0,-1],cov=[[0.1,0],[0,0.1]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [0,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [-0.5,-0.5],cov=[[0.1,0],[0,0.1]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [0.4,0.2],cov=[[0.1,0],[0,0.1]],size=sum(idx[9]))
x[idx[0]][0], x[idx[5]][5]
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
bg_idx = [ np.where(idx[3] == True)[0],
np.where(idx[4] == True)[0],
np.where(idx[5] == True)[0],
np.where(idx[6] == True)[0],
np.where(idx[7] == True)[0],
np.where(idx[8] == True)[0],
np.where(idx[9] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
np.unique(bg_idx).shape
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
foreground_classes = {'class_0','class_1', 'class_2'}
background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'}
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
train_data=[]
a = []
fg_instance = np.array([[0.0,0.0]])
bg_instance = np.array([[0.0,0.0]])
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
fg_instance += x[b]
a.append(x[b])
print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
bg_instance += x[b]
a.append(x[b])
print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
a
fg_instance
bg_instance
(fg_instance+bg_instance)/m , m
# mosaic_list_of_images =[]
# mosaic_label = []
train_label=[]
fore_idx=[]
train_data = []
for j in range(train_size):
np.random.seed(j)
fg_instance = torch.zeros([2], dtype=torch.float64) #np.array([[0.0,0.0]])
bg_instance = torch.zeros([2], dtype=torch.float64) #np.array([[0.0,0.0]])
# a=[]
for i in range(m):
if i == fg_idx:
fg_class = np.random.randint(0,3)
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
fg_instance += x[b]
# a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
bg_instance += x[b]
# a.append(x[b])
# print("background "+str(bg_class)+" present at " + str(i))
train_data.append((fg_instance+bg_instance)/m)
# a = np.concatenate(a,axis=0)
# mosaic_list_of_images.append(np.reshape(a,(2*m,1)))
train_label.append(fg_class)
fore_idx.append(fg_idx)
train_data[0], train_label[0]
train_data = torch.stack(train_data, axis=0)
train_data.shape, len(train_label)
test_label=[]
# fore_idx=[]
test_data = []
for j in range(1000):
np.random.seed(j)
fg_instance = torch.zeros([2], dtype=torch.float64) #np.array([[0.0,0.0]])
fg_class = np.random.randint(0,3)
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
fg_instance += x[b]
# a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
test_data.append((fg_instance)/m)
# a = np.concatenate(a,axis=0)
# mosaic_list_of_images.append(np.reshape(a,(2*m,1)))
test_label.append(fg_class)
# fore_idx.append(fg_idx)
test_data[0], test_label[0]
test_data = torch.stack(test_data, axis=0)
test_data.shape, len(test_label)
x1 = (train_data).numpy()
y1 = np.array(train_label)
x1[y1==0,0]
x1[y1==0,0][:,0]
x1[y1==0,0][:,1]
x1 = (train_data).numpy()
y1 = np.array(train_label)
plt.scatter(x1[y1==0,0][:,0], x1[y1==0,0][:,1], label='class 0')
plt.scatter(x1[y1==1,0][:,0], x1[y1==1,0][:,1], label='class 1')
plt.scatter(x1[y1==2,0][:,0], x1[y1==2,0][:,1], label='class 2')
plt.legend()
plt.title("dataset4 CIN with alpha = 1/"+str(m))
x1 = (test_data).numpy()
y1 = np.array(test_label)
plt.scatter(x1[y1==0,0][:,0], x1[y1==0,0][:,1], label='class 0')
plt.scatter(x1[y1==1,0][:,0], x1[y1==1,0][:,1], label='class 1')
plt.scatter(x1[y1==2,0][:,0], x1[y1==2,0][:,1], label='class 2')
plt.legend()
plt.title("test dataset4")
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list_of_images, mosaic_label):
"""
        Args:
            mosaic_list_of_images: iterable of mosaic inputs, one per sample.
            mosaic_label: foreground-class label for each mosaic.
"""
self.mosaic = mosaic_list_of_images
self.label = mosaic_label
#self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx]
train_data[0].shape, train_data[0]
batch = 200
traindata_1 = MosaicDataset(train_data, train_label )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
testdata_1 = MosaicDataset(test_data, test_label )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
# testdata_11 = MosaicDataset(test_dataset, labels )
# testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False)
class Whatnet(nn.Module):
def __init__(self):
super(Whatnet,self).__init__()
self.linear1 = nn.Linear(2,50)
self.linear2 = nn.Linear(50,3)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.zeros_(self.linear1.bias)
torch.nn.init.xavier_normal_(self.linear2.weight)
torch.nn.init.zeros_(self.linear2.bias)
def forward(self,x):
x = F.relu(self.linear1(x))
x = (self.linear2(x))
    return x  # logits for all 3 classes; CrossEntropyLoss expects shape [batch, num_classes]
def calculate_loss(dataloader,model,criter):
model.eval()
r_loss = 0
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
outputs = model(inputs)
# print(outputs.shape)
loss = criter(outputs, labels)
r_loss += loss.item()
return r_loss/(i+1)
def test_all(number, testloader,net):
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
pred = np.concatenate(pred, axis = 0)
out = np.concatenate(out, axis = 0)
print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) )
print("correct: ", correct, "total ", total)
print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list, lr_list):
final_loss = []
for LR in lr_list:
print("--"*20, "Learning Rate used is", LR)
torch.manual_seed(12)
net = Whatnet().double()
net = net.to("cuda")
criterion_net = nn.CrossEntropyLoss()
    optimizer_net = optim.Adam(net.parameters(), lr=LR)  # use the learning rate chosen from lr_list
acti = []
loss_curi = []
epochs = 1000
running_loss = calculate_loss(trainloader,net,criterion_net)
loss_curi.append(running_loss)
print('epoch: [%d ] loss: %.3f' %(0,running_loss))
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_net.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
# print(outputs.shape)
loss = criterion_net(outputs, labels)
# print statistics
running_loss += loss.item()
loss.backward()
optimizer_net.step()
running_loss = calculate_loss(trainloader,net,criterion_net)
if(epoch%200 == 0):
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
loss_curi.append(running_loss) #loss per epoch
if running_loss<=0.05:
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
break
print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
for i, j in enumerate(testloader_list):
test_all(i+1, j,net)
print("--"*40)
final_loss.append(loss_curi)
return final_loss
train_loss_all=[]
testloader_list= [ testloader_1]
lr_list = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5 ]
fin_loss = train_all(trainloader_1, 1, testloader_list, lr_list)
train_loss_all.append(fin_loss)
%matplotlib inline
len(fin_loss)
for i,j in enumerate(fin_loss):
plt.plot(j,label ="LR = "+str(lr_list[i]))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
###Output
_____no_output_____ |
python-scripts/data_analytics_learn/link_pandas/Ex_Files_Pandas_Data/Exercise Files/01_01/Final/.ipynb_checkpoints/Intro to Jupyter-checkpoint.ipynb | ###Markdown
What is a Jupyter notebook?
Application for creating and sharing documents that contain:
- live code
- equations
- visualizations
- explanatory text
Home page: http://jupyter.org/
Notebook tutorials
- [Quick Start Guide](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/)
- [User Documentation](http://jupyter-notebook.readthedocs.io/en/latest/)
- [Examples Documentation](http://jupyter-notebook.readthedocs.io/en/latest/examples/Notebook/examples_index.html)
- [Cal Tech](http://bebi103.caltech.edu/2015/tutorials/t0b_intro_to_jupyter_notebooks.html)
Notebook Users
- students, readers, viewers, learners - read a digital book - interact with a "live" book
- notebook developers - create notebooks for students, readers, ...
Notebooks contain cells
- Code cells - execute code (Python, or many other languages)
- Markdown cells - documentation, "narrative" cells - guide a reader through a notebook
Following cells are "live" cells
###Code
print ("Hello Jupyter World!; You are helping me learn")
(5+7)/4
import numpy as np
my_first_array = np.arange(11)
print (my_first_array)
###Output
[ 0 1 2 3 4 5 6 7 8 9 10]
|
notebooks/elasticsearch/tmdb/raw-es-commands.ipynb | ###Markdown
Init Default Feature Store
The feature store can be removed by sending a DELETE request to the `_ltr` endpoint.
###Code
url = 'http://{}:9200/_ltr/'.format(host)
print(url)
requests.delete(url)
###Output
_____no_output_____
###Markdown
To initialize the LTR plugin, issue a PUT request to the `_ltr` endpoint.
###Code
url = 'http://{}:9200/_ltr/'.format(host)
print(url)
requests.put(url)
###Output
_____no_output_____
###Markdown
Create Feature Set
A feature set can be created by issuing a PUT to `_ltr/_featureset/[feature_name]`
###Code
feature_set = {
"featureset": {
"features": [
{
"name": "title_bm25",
"params": [
"keywords"
],
"template": {
"match": {
"title": "{{keywords}}"
}
}
},
{
"name": "overview_bm25",
"params": [
"keywords"
],
"template": {
"match": {
"overview": "{{keywords}}"
}
}
}
]
},
"validation": {
"index": "tmdb",
"params": {
"keywords": "rambo"
}
}
}
url = 'http://{}:9200/_ltr/_featureset/my_feature_set'.format(host)
print(url)
requests.put(url, json=feature_set)
###Output
_____no_output_____
###Markdown
Log Some Judged Queries To Build Training Set
If we have 4 judged documents: 7555, 1370, 1369, and 1368 for the keywords rambo:
```
doc_id, relevant?, keywords
1368, 1, rambo
1369, 1, rambo
1370, 1, rambo
7555, 0, rambo
```
We need to get the feature values for each row. To do this, we utilize the logging extension to populate the judgment list with features for training.
###Code
search_with_log = {
"query": {
"bool": {
"filter": [
{
"sltr": {
"_name": "logged_features",
"featureset": "my_feature_set",
"params": {
"keywords": "rambo"
}
}
},
{
"terms": {
"_id": [
"7555","1370", "1369", "1368"
]
}
}
]
}
},
"ext": {
"ltr_log": {
"log_specs": {
"name": "ltr_features",
"named_query": "logged_features"
}
}
}
}
url = 'http://{}:9200/tmdb/_search'.format(host)
print(url)
resp = requests.get(url, json=search_with_log).json()
print(json.dumps(resp['hits']['hits'][0], indent=2))
###Output
_____no_output_____
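###Markdown
The logging response above is what the training set is built from. Below is a minimal sketch of turning it into RankLib's judgment-file format; the response path used by `extract_features` is an assumption about the plugin's logging output (it varies between plugin versions), so treat that helper as illustrative rather than as the plugin API. Only the output format - `<grade> qid:<n> <feature_id>:<value> # <doc_id>` - matters for the training command shown next.
```python
# relevance grades for the single query "rambo", taken from the judgment list above
judgments = {"1368": 1, "1369": 1, "1370": 1, "7555": 0}

def extract_features(hit):
    # Assumption: logged values live under fields._ltrlog[0].<log name>;
    # adjust this path for your plugin version.
    return [entry.get("value", 0.0) for entry in hit["fields"]["_ltrlog"][0]["ltr_features"]]

def to_ranklib_line(grade, qid, feature_values, doc_id):
    feats = " ".join("%d:%f" % (i + 1, v) for i, v in enumerate(feature_values))
    return "%d qid:%d %s # %s" % (grade, qid, feats, doc_id)

with open("data/title_judgments.txt", "w") as f:
    for hit in resp["hits"]["hits"]:
        doc_id = hit["_id"]
        f.write(to_ranklib_line(judgments[doc_id], 1, extract_features(hit), doc_id) + "\n")
```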
###Markdown
Training Set
Now...
```
doc_id, relevant?, keywords, title_bm25, overview_bm25
1368, 1, rambo, 0, 11.113943
1369, 1, rambo, 11.657, 10.08
1370, 1, rambo, 9.456, 13.265
7555, 0, rambo, 6.037, 11.114
```
Train a model
We won't do this here, but if you like you can try out training a model using Ranklib
```
cd notebooks/elasticsearch/tmdb
java -jar data/RankyMcRankFace.jar -train data/title_judgments.txt -save data/model.txt
```
Uploading a Model
Once features have been logged and training data has been generated, a model can be pushed into Elasticsearch. The following shows what a request to PUT a new model looks like.
###Code
model = """## LambdaMART
## No. of trees = 10
## No. of leaves = 10
## No. of threshold candidates = 256
## Learning rate = 0.1
## Stop early = 100
<ensemble>
<tree id="1" weight="0.1">
<split>
<feature> 2 </feature>
<threshold> 10.664251 </threshold>
<split pos="left">
<feature> 1 </feature>
<threshold> 0.0 </threshold>
<split pos="left">
<output> -1.8305741548538208 </output>
</split>
<split pos="right">
<feature> 2 </feature>
<threshold> 9.502127 </threshold>
<split pos="left">
<feature> 1 </feature>
<threshold> 7.0849166 </threshold>
<split pos="left">
<output> 0.23645669221878052 </output>
</split>
<split pos="right">
<output> 1.7593677043914795 </output>
</split>
</split>
<split pos="right">
<output> 1.9719607830047607 </output>
</split>
</split>
</split>
<split pos="right">
<feature> 2 </feature>
<threshold> 0.0 </threshold>
<split pos="left">
<output> 1.3728954792022705 </output>
</split>
<split pos="right">
<feature> 2 </feature>
<threshold> 8.602512 </threshold>
<split pos="left">
<feature> 1 </feature>
<threshold> 0.0 </threshold>
<split pos="left">
<feature> 2 </feature>
<threshold> 13.815164 </threshold>
<split pos="left">
<output> 1.9401178359985352 </output>
</split>
<split pos="right">
<output> 1.99532949924469 </output>
</split>
</split>
<split pos="right">
<feature> 1 </feature>
<threshold> 11.085816 </threshold>
<split pos="left">
<output> 2.0 </output>
</split>
<split pos="right">
<output> 1.99308180809021 </output>
</split>
</split>
</split>
<split pos="right">
<output> 1.9870178699493408 </output>
</split>
</split>
</split>
</split>
</tree>
</ensemble>
"""
create_model = {
"model": {
"name": "my_model",
"model": {
"type": "model/ranklib",
"definition": model
}
}
}
url = 'http://{}:9200/_ltr/_featureset/my_feature_set/_createmodel'.format(host)
print(url)
requests.post(url, json=create_model).json()
###Output
_____no_output_____
###Markdown
Searching with a Model
Now that a model has been uploaded to Elasticsearch we can use it to re-rank the results of a query.
###Code
search = {
"query": {
"sltr": {
"params": {
"keywords": "rambo"
},
"model": "my_model"
}
}
}
url = 'http://{}:9200/tmdb/_search'.format(host)
resp = requests.get(url, json=search).json()
print(url)
for hit in resp['hits']['hits']:
print(hit['_source']['title'])
###Output
_____no_output_____
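###Markdown
The `sltr` query above scores every matching document with the model, which is fine for a demo but expensive on a real index. In practice the model is usually applied as a rescorer on top of a cheaper first-pass query. A minimal sketch follows; the first-pass fields and the window size are illustrative assumptions, not values taken from this notebook.
```python
search_with_rescore = {
    "query": {
        # cheap first-pass retrieval
        "multi_match": {"query": "rambo", "fields": ["title", "overview"]}
    },
    "rescore": {
        "window_size": 100,  # only re-rank the top 100 first-pass hits
        "query": {
            "rescore_query": {
                "sltr": {
                    "params": {"keywords": "rambo"},
                    "model": "my_model"
                }
            }
        }
    }
}

url = 'http://{}:9200/tmdb/_search'.format(host)
resp = requests.get(url, json=search_with_rescore).json()
for hit in resp['hits']['hits']:
    print(hit['_source']['title'])
```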
|
Moloch DAO Agent-Based Model.ipynb | ###Markdown
Size of community effect
###Code
df_list = []
for n in range(5, 50, 5):
row_list = []
params = {
"num_nodes": n, # number of DAO members
"avg_node_degree": 3, # how many other DAO members is each connected to?
"proposal_dimension": 2, # number of categories considered in evaluating the value of the proposal
"evaluation_period": 3, # num. time steps for agents to evaluate the proposal
"num_proposals": 10
}
model = MolochDAO(**params)
for i in range(50):
model.run()
# print(model.votes)
percent_passed = sum(model.votes) / len(model.votes)
# print(n, i, sum(model.votes) / len(model.votes)) # count how many proposals passed in this simulation run
row_list = [n, i, percent_passed]
df_list.append(row_list)
model.votes = [] # reset and get ready for the next run
df_list = pd.DataFrame(df_list, columns = ["num_members", "trial", "pct_proposals_passed"])
df_list.head()
plt.rcParams["figure.figsize"] = [22, 12]
plt.rcParams["figure.autolayout"] = True
# sns.set_style("dark")
sns.set(font_scale = 2)
plt.style.use("dark_background")
# tips = sns.load_dataset("tips")
ax = sns.boxplot(x="num_members", y="pct_proposals_passed", palette="pastel", data=df_list)
# plt.ylim(5, 30)
ax.grid(False)
ax.set_xlabel("Number of DAO Members", fontsize = 20)
ax.set_ylabel("% Proposals Passed by Community", fontsize = 20)
plt.show()
for n in range(0, 30, 5):
print(n)
model_df
###Output
_____no_output_____ |
LSTM_WithRCode.ipynb | ###Markdown
###Code
import tensorflow as tf
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import TimeseriesGenerator
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import tensorflow as tf
from tensorflow.keras import backend
from math import ceil
import keras
import numpy as np
import io
from google.colab import files
class TimeseriesGenerator(keras.utils.Sequence):
"""Utility class for generating batches of temporal data.
This class takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
stride, length of history, etc., to produce batches for
training/validation.
# Arguments
data: Indexable generator (such as list or Numpy array)
containing consecutive data points (timesteps).
        The data should be at least 2D, and axis 0 is expected
to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
It should have same length as `data`.
length: Length of the output sequences (in number of timesteps).
sampling_rate: Period between successive individual timesteps
`data[i]`, `data[i-r]`, ... `data[i - length]`
are used for create a sample sequence.
stride: Period between successive output sequences.
For stride `s`, consecutive output samples would
be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
start_index: Data points earlier than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Data points later than `end_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
reverse: Boolean: if `true`, timesteps in each output sample will be
in reverse chronological order.
batch_size: Number of timeseries samples in each batch
(except maybe the last one).
# Returns
A [Sequence](/utils/#sequence) instance.
# Examples
```python
from keras.preprocessing.sequence import TimeseriesGenerator
import numpy as np
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = TimeseriesGenerator(data, targets,
length=10, sampling_rate=2,
batch_size=2)
assert len(data_gen) == 20
batch_0 = data_gen[0]
x, y = batch_0
assert np.array_equal(x,
np.array([[[0], [2], [4], [6], [8]],
[[1], [3], [5], [7], [9]]]))
assert np.array_equal(y,
np.array([[10], [11]]))
```
"""
def __init__(self, data, targets, length,
sampling_rate=1,
stride=1,
start_index=0,
end_index=None,
shuffle=False,
reverse=False,
batch_size=128):
self.data = data
self.targets = targets
self.length = length
self.sampling_rate = sampling_rate
self.stride = stride
self.start_index = start_index + length
if end_index is None:
end_index = len(data) - 1
self.end_index = end_index
self.shuffle = shuffle
self.reverse = reverse
self.batch_size = batch_size
if self.start_index > self.end_index:
raise ValueError('`start_index+length=%i > end_index=%i` '
'is disallowed, as no part of the sequence '
'would be left to be used as current step.'
% (self.start_index, self.end_index))
def __len__(self):
return (self.end_index - self.start_index +
self.batch_size * self.stride) // (self.batch_size * self.stride)
def _empty_batch(self, num_rows):
samples_shape = [num_rows, self.length // self.sampling_rate]
samples_shape.extend(self.data.shape[1:])
targets_shape = [num_rows]
targets_shape.extend(self.targets.shape[1:])
return np.empty(samples_shape), np.empty(targets_shape)
def __getitem__(self, index):
if self.shuffle:
rows = np.random.randint(
self.start_index, self.end_index + 1, size=self.batch_size)
else:
i = self.start_index + self.batch_size * self.stride * index
rows = np.arange(i, min(i + self.batch_size *
self.stride, self.end_index + 1), self.stride)
samples, targets = self._empty_batch(len(rows))
for j, row in enumerate(rows):
indices = range(rows[j] - self.length, rows[j], self.sampling_rate)
samples[j] = self.data[indices]
targets[j] = self.targets[rows[j]]
if self.reverse:
return samples[:, ::-1, ...], targets
return samples, targets
print(tf.__version__)
df = pd.read_csv('/content/drive/MyDrive/dfresultWithLocalrateMrbdrateCovidValues2020.csv')
df.columns
df = df.drop(['LocalRate', 'morbidtyValues'], axis = 1)
df.head(15)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
df[['EP_POV', 'EP_UNEMP', 'EP_PCI', 'EP_NOHSDP', 'EP_AGE65',
'EP_AGE17', 'EP_DISABL', 'EP_SNGPNT', 'EP_MINRTY', 'EP_LIMENG',
'EP_MUNIT', 'EP_MOBILE', 'EP_CROWD', 'EP_NOVEH', 'EP_GROUPQ',
'EP_UNINSUR', 'LST_Day', 'LST_Night', 'NL_Temp', 'NL_Humid ',
'NL_Pres ', 'NL_windu ', 'NL_Lr ', 'NL_Pe ']] = scaler.fit_transform(df[['EP_POV', 'EP_UNEMP', 'EP_PCI', 'EP_NOHSDP', 'EP_AGE65',
'EP_AGE17', 'EP_DISABL', 'EP_SNGPNT', 'EP_MINRTY', 'EP_LIMENG',
'EP_MUNIT', 'EP_MOBILE', 'EP_CROWD', 'EP_NOVEH', 'EP_GROUPQ',
'EP_UNINSUR', 'LST_Day', 'LST_Night', 'NL_Temp', 'NL_Humid ',
'NL_Pres ', 'NL_windu ', 'NL_Lr ', 'NL_Pe ']])
df.head(3150)
#df['Dates'] = pd.to_datetime(df['Dates'], infer_datetime_format=True)
df = df.drop(['FIPS_Final'], axis=1)
#df = df.drop(['Unnamed: 0'], axis=1)
print("Done")
print(df.shape)
#some_values = [18097, 6037]
#df_countySeparated = df.loc[df['Counties'].isin(some_values)]
df_countySeparated = df
df_countySeparated = df_countySeparated[['covidValues', 'EP_POV', 'EP_UNEMP', 'EP_PCI',
'EP_NOHSDP', 'EP_AGE65', 'EP_AGE17', 'EP_DISABL', 'EP_SNGPNT',
'EP_MINRTY', 'EP_LIMENG', 'EP_MUNIT', 'EP_MOBILE', 'EP_CROWD',
'EP_NOVEH', 'EP_GROUPQ', 'EP_UNINSUR', 'LST_Day', 'LST_Night',
'NL_Temp', 'NL_Humid ', 'NL_Pres ', 'NL_windu ', 'NL_Lr ', 'NL_Pe ']]
print(df_countySeparated.columns)
df_countySeparated.columns
print(df_countySeparated.shape)
#DMA
#df_average7Days = df_countySeparated.groupby(np.arange(len(df_countySeparated))//2).mean()
#print(df_average7Days.shape)
#df_countySeparated = df_average7Days
#print(df_countySeparated.shape)
df_countySeparated['covidConfirmedCases_EWM'] = df['covidValues'].ewm(span=48, adjust=False).mean()
#df_countySeparated['covidConfirmedCases_EWM'] = df['CovidValues']
df_countySeparated = df_countySeparated[['covidConfirmedCases_EWM', 'EP_POV', 'EP_UNEMP', 'EP_PCI', 'EP_NOHSDP', 'EP_AGE65',
'EP_AGE17', 'EP_DISABL', 'EP_SNGPNT', 'EP_MINRTY', 'EP_LIMENG',
'EP_MUNIT', 'EP_MOBILE', 'EP_CROWD', 'EP_NOVEH', 'EP_GROUPQ',
'EP_UNINSUR', 'LST_Day', 'LST_Night', 'NL_Temp', 'NL_Humid ',
'NL_Pres ', 'NL_windu ', 'NL_Lr ', 'NL_Pe ']]
df_countySeparated.columns
data_scaled = scaler.fit_transform(df_countySeparated)
data_scaled
features = data_scaled
target = data_scaled[:, 0]
x_train, x_test, y_train, y_test = train_test_split(features, target, test_size = 0.3, random_state = 1, shuffle = False)
x_train.shape
x_test.shape
batch_size = 128
win_length = 7
num_features = x_train.shape[1]  # infer the feature count from the data so it matches the generator output
train_generator = TimeseriesGenerator(x_train, y_train, length = win_length, sampling_rate=1, batch_size=batch_size)
test_generator = TimeseriesGenerator(x_test, y_test, length = win_length, sampling_rate=1, batch_size=batch_size)
print(train_generator[0][0].shape)
print("DoneTillHere")
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(128, input_shape=(win_length, num_features),
return_sequences=True))
model.add(tf.keras.layers.LeakyReLU(alpha = 0.5))
model.add(tf.keras.layers.LSTM(64, return_sequences = True))
model.add(tf.keras.layers.LeakyReLU(alpha = 0.5))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.LSTM(32, return_sequences = False))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Dense(1))
"""
model.add(tf.keras.layers.LSTM(256, input_shape=(win_length, num_features),
return_sequences=True))
model.add(tf.keras.layers.LeakyReLU(alpha = 0.5))
model.add(tf.keras.layers.LSTM(128, return_sequences = True))
model.add(tf.keras.layers.LeakyReLU(alpha = 0.5))
model.add(tf.keras.layers.LSTM(64, return_sequences = True))
model.add(tf.keras.layers.LeakyReLU(alpha = 0.5))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.LSTM(32, return_sequences = False))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Dense(1))
"""
!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
early_stopping = tf.keras.callbacks.EarlyStopping(monitor = "val_loss", patience = 15, mode='min')
model.compile(loss='mean_squared_error', optimizer = 'Adamax', metrics = ['mean_absolute_error'])
history = model.fit(train_generator, epochs=3,
validation_data = test_generator,
shuffle=False)
#print(history.history.keys())
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
#plt.show()
model.evaluate_generator(test_generator, verbose=0 )
predictions = model.predict_generator(test_generator)
predictions.shape[0]
df_pred = pd.concat([pd.DataFrame(predictions), pd.DataFrame(x_test[:, 1:][win_length:])], axis = 1)
rev_trans = scaler.inverse_transform(df_pred)
df_final = df_countySeparated[predictions.shape[0]*-1:]
print(df_final.count)
df_final['Covid_Pred'] = rev_trans[:, 0]
df_final
df_final.reset_index(drop=True, inplace=True)
df_final[['covidConfirmedCases_EWM','Covid_Pred']].plot()
y_test = df_final.loc[:,'covidConfirmedCases_EWM']
y_pred = df_final.loc[:,'Covid_Pred']
from sklearn.metrics import r2_score
# Goodness of fit: coefficient of determination (R^2) between actual and predicted values
print("R^2 score:", r2_score(y_test, y_pred))
"""
from google.colab import files
files.download('rporgramLSTMResult.csv')
df_final.to_csv("rporgramLSTMResult.csv")
"""
!pip install shap
import shap
e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output),train_generator)
print(shap.__version__)
###Output
_____no_output_____ |
notebooks/00.1-data-exploration/frogs/0.0-Frog-vocalizations.ipynb | ###Markdown
Frog vocalizations
Source:
- https://data.mendeley.com/datasets/5j852hzfjs/1folder-763acb9b-e08c-4b56-8426-4d06abdb5d14
- https://arxiv.org/abs/1901.02495
- https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4942061/ < a second dataset that is also probably too small
- https://data.mendeley.com/datasets/5j852hzfjs/1
###Code
This dataset might not work...
###Output
_____no_output_____ |
notebooks/nbk_04_embeddings_hdbscan.ipynb | ###Markdown
Preprocessing:
###Code
X_array = data.loc[:, data.columns].values
X_array.shape
# Feature seletion:
sel = VarianceThreshold(threshold=0)
X_array_sel = sel.fit_transform(X_array)
X_array_sel.shape
# Normalizing data:
x_array_norm = MinMaxScaler().fit_transform(X_array_sel)
# pd.DataFrame(x_array_norm)
# Standardizing data:
x_array_std = StandardScaler().fit_transform(X_array_sel)
# pd.DataFrame(x_array_std)
# x_array_prep = X_array_sel
# x_array_prep = x_array_norm
x_array_prep = x_array_std
n_components = 30
###Output
_____no_output_____
###Markdown
Embeddings:
###Code
methods = OrderedDict()
methods['PCA'] = PCA(n_components=n_components)
results = pd.DataFrame()
metrics_summary = []
for i, (label, method) in enumerate(methods.items()):
print(i, label, method)
# Performing the embedding algorithm:
t0 = time()
x_embedded = method.fit_transform(x_array_prep)
t1 = time()
model = HDBSCAN(min_cluster_size=2, min_samples=1)
model.fit(x_embedded)
cluster_labels = model.labels_
results[label] = cluster_labels
sample_silhouette_values = silhouette_samples(x_embedded, cluster_labels)
silhouette_avg = sample_silhouette_values[np.where(cluster_labels >= 0)[0]].mean()
n_clusters = len(np.unique(cluster_labels))-1
method_metrics = {'silhouette_avg': silhouette_avg, 'n_clusters': n_clusters,
'n_outliers': sum(cluster_labels == -1)}
metrics_summary.append(method_metrics)
fig = plt.figure(figsize=(20, 6))
# Plot 1:
ax = fig.add_subplot(1, 3, 1)
ax.set_title("%s: %d clusters - silhouete_avg: %.2g (%.2g sec)" % (label, n_clusters, silhouette_avg, t1 - t0))
for k in np.unique(cluster_labels):
indexes = np.where(cluster_labels == k)[0]
if k == -1:
plt.scatter(x_embedded[indexes, 0], x_embedded[indexes, 1], alpha=0.5, s=80, label=k, c='k')
else:
plt.scatter(x_embedded[indexes, 0], x_embedded[indexes, 1], alpha=0.5, s=80, label=k)
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.legend(ncol=2)
# Plot 2:
ax = fig.add_subplot(1, 3, 2)
plt.title('Condensed Tree')
model.condensed_tree_.plot(select_clusters=True)
# Plot 3:
ax = fig.add_subplot(1, 3, 3)
plt.title('Single Linkage Tree')
model.single_linkage_tree_.plot()
plt.tight_layout()
plt.savefig(f'../imgs/imgs_v2/img_0{i+1}_{label}_plots.png', dpi=100)
plt.show()
metrics_summary = pd.DataFrame(metrics_summary, index=results.columns)
metrics_summary
# Presenting all elements of all groups by all methods:
all_groups = {}
for method in results.columns:
print('\n'+method+':')
all_groups[method] = {}
method_results = results[method].values
for val in np.unique(method_results):
print(val, results.index[np.where(method_results == val)[0]].values)
all_groups[method][val] = list(results.index[np.where(method_results == val)[0]])
all_clusters = []
all_relations = {}
# Concatenating the results of all methods:
for subject in results.index:
related_subjects = []
g_indexes = results.loc[subject, :].values
n_out = len(np.where(g_indexes == -1)[0])
if n_out < len(g_indexes)/2:
all_partners = []
for method, idx in zip(results.columns, g_indexes):
if idx != -1:
all_partners += all_groups[method][idx]
all_partners = np.array(all_partners)
all_partners = all_partners[~(all_partners == subject)]
unique, counts = np.unique(all_partners, return_counts=True)
for val, c in zip(unique, counts):
if c > len(g_indexes)/2:
related_subjects.append(val)
new_cluster = set(related_subjects + [subject])
if related_subjects and new_cluster not in all_clusters:
all_clusters.append(new_cluster)
all_relations[subject] = related_subjects
print('Subject:', subject, 'related subjects:', related_subjects)
related_subjects = {'subject': [], 'related subjects': []}
for key, val in all_relations.items():
related_subjects['subject'].append(key)
related_subjects['related subjects'].append(val)
related_subjects = pd.DataFrame(related_subjects)
related_subjects.to_csv('related_subjects.csv')
related_subjects
for i, cluster in enumerate(all_clusters):
print(i+1, cluster)
cluster_tags = []
for subject in results.index:
tag = -1
for k, cluster in enumerate(all_clusters):
if subject in cluster:
tag = k
break
cluster_tags.append(tag)
data['cluster_tag'] = cluster_tags
data
x_pca = PCA(n_components=n_components).fit_transform(x_array_prep)
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(121, projection='3d')
plt.title('Final Clustering 3D')
for k in np.unique(cluster_tags):
indexes = np.where(cluster_tags == k)[0]
if k == -1:
ax.scatter(x_pca[indexes, 0], x_pca[indexes, 1], x_pca[indexes, 2], alpha=0.5, s=80, label=k, c='k')
else:
ax.scatter(x_pca[indexes, 0], x_pca[indexes, 1], x_pca[indexes, 2], alpha=0.5, s=80, label=k)
ax.set_xlabel('Component 1')
ax.set_ylabel('Component 2')
ax.set_zlabel('Component 3')
plt.legend(ncol=2)
plt.tight_layout()
ax = fig.add_subplot(122)
plt.title('Final Clustering 2D')
for k in np.unique(cluster_tags):
indexes = np.where(cluster_tags == k)[0]
if k == -1:
plt.scatter(x_pca[indexes, 0], x_pca[indexes, 1], alpha=0.5, s=80, label=k, c='k')
else:
plt.scatter(x_pca[indexes, 0], x_pca[indexes, 1], alpha=0.5, s=80, label=k)
ax.set_xlabel('Component 1')
ax.set_ylabel('Component 2')
plt.legend(ncol=2)
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
Model backlog/Models/[7] - Deep Learning - Batch 256.ipynb | ###Markdown
Dependencies
###Code
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from keras import optimizers
from keras.models import Sequential
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.layers import Dense, Dropout, BatchNormalization, Activation
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import confusion_matrix, roc_auc_score, recall_score, precision_score
# Set seeds to make the experiment more reproducible.
from tensorflow import set_random_seed
from numpy.random import seed
set_random_seed(0)
seed(0)
%matplotlib inline
sns.set_style("whitegrid")
warnings.filterwarnings("ignore")
###Output
Using TensorFlow backend.
###Markdown
Load data
###Code
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
submission = pd.read_csv('../input/sample_submission.csv')
print('Train set shape:', train.shape)
print('Test set shape:', test.shape)
print('Train set overview:')
display(train.head())
###Output
Train set shape: (262144, 258)
Test set shape: (131073, 257)
Train set overview:
###Markdown
Preprocess
###Code
train['set'] = 0
test['set'] = 1
data = pd.concat([train, test])
# data['count_magic'] = data.groupby(['wheezy-copper-turtle-magic'])['id'].transform('count')
data = pd.concat([data, pd.get_dummies(data['wheezy-copper-turtle-magic'], prefix='magic', drop_first=True)], axis=1).drop(['wheezy-copper-turtle-magic'], axis=1)
data.drop('id', axis=1, inplace=True)
train = data[data['set'] == 0]
test = data[data['set'] == 1]
labels = train['target']
train.drop(['target', 'set'], axis=1, inplace=True)
test.drop(['target', 'set'], axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Normalize data using MinMaxScaler
###Code
non_cat_features = list(train.filter(regex='^(?!magic_)'))
scaler = MinMaxScaler()
train[non_cat_features] = scaler.fit_transform(train[non_cat_features])
test[non_cat_features] = scaler.transform(test[non_cat_features])
###Output
_____no_output_____
###Markdown
Model Model parameters
###Code
N_FOLDS = 5
BATCH_SIZE = 256
EPOCHS = 50
LEARNING_RATE = 0.001
ES_PATIENCE = 5
RLROP_PATIENCE = 3
RLROP_FACTOR = 0.5
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
rlrop = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=RLROP_FACTOR, min_lr=1e-6, verbose=1)
callback_list = [es, rlrop]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
def model_fn():
model = Sequential()
model.add(Dense(1024, input_dim=X_train.shape[1]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=['binary_accuracy'])
return model
X = train.values
train_cols = train.columns
submission['target'] = 0
train['preds'] = 0
skf = StratifiedKFold(n_splits=N_FOLDS, random_state=0)
counter = 0
for train_index, val_index in skf.split(X, labels):
counter += 1
print('Fold {}\n'.format(counter))
X_train, X_val = X[train_index], X[val_index]
Y_train, Y_val = labels[train_index], labels[val_index]
model = model_fn()
history = model.fit(X_train, Y_train, validation_data=(X_val, Y_val),
callbacks=callback_list,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
verbose=0)
train_predictions = model.predict_classes(X_train)
val_predictions = model.predict_classes(X_val)
train_auc = roc_auc_score(Y_train, train_predictions) * 100
val_auc = roc_auc_score(Y_val, val_predictions) * 100
train_precision = precision_score(Y_train, train_predictions) * 100
val_precision = precision_score(Y_val, val_predictions) * 100
train_recall = recall_score(Y_train, train_predictions) * 100
val_recall = recall_score(Y_val, val_predictions) * 100
print('-----Train----------')
print('AUC: %.2f Precision: %.2f Recall: %.2f \n' % (train_auc, train_precision, train_recall))
print('-----Validation-----')
print('AUC: %.2f Precision: %.2f Recall: %.2f \n' % (val_auc, val_precision, val_recall))
# Make predictions
predictions = model.predict(test)
submission['target'] += [x[0] for x in predictions]
train['preds'] += [x[0] for x in model.predict_classes(X)]
submission['target'] /= N_FOLDS
train['preds'] /= N_FOLDS
###Output
Fold 1
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Epoch 00004: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 00010: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 00014: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Restoring model weights from the end of the best epoch
Epoch 00016: early stopping
-----Train----------
AUC: 89.19 Precision: 88.96 Recall: 89.49
-----Validation-----
AUC: 73.33 Precision: 73.12 Recall: 73.82
Fold 2
Epoch 00011: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 00019: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 00034: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 00042: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
-----Train----------
AUC: 88.20 Precision: 90.74 Recall: 85.10
-----Validation-----
AUC: 73.16 Precision: 75.47 Recall: 68.66
Fold 3
-----Train----------
AUC: 66.59 Precision: 66.69 Recall: 66.35
-----Validation-----
AUC: 60.10 Precision: 60.20 Recall: 59.69
Fold 4
-----Train----------
AUC: 66.46 Precision: 65.96 Recall: 68.10
-----Validation-----
AUC: 59.93 Precision: 59.63 Recall: 61.57
Fold 5
-----Train----------
AUC: 66.24 Precision: 66.65 Recall: 65.08
-----Validation-----
AUC: 59.93 Precision: 60.20 Recall: 58.71
###Markdown
Model graph loss
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(20, 7))
ax1.plot(history.history['loss'], label='Train loss')
ax1.plot(history.history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history.history['binary_accuracy'], label='Train Accuracy')
ax2.plot(history.history['val_binary_accuracy'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()
###Output
_____no_output_____
###Markdown
Model evaluation Confusion matrix
###Code
f = plt.subplots(1, 1, figsize=(16, 5), sharex=True)
train_cnf_matrix = confusion_matrix(labels, [np.round(x) for x in train['preds']])
train_cnf_matrix_norm = train_cnf_matrix / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=[0, 1], columns=[0, 1])
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues")
plt.show()
###Output
_____no_output_____
###Markdown
Metrics ROC AUC
###Code
print('AUC %.2f' % roc_auc_score(labels, train['preds']))
###Output
AUC 0.86
###Markdown
Test predictions
###Code
submission.to_csv('submission.csv', index=False)
submission.head(10)
###Output
_____no_output_____ |
resources/aux_notebooks/tf_tutorial_advanced.ipynb | ###Markdown
Advanced Tensorflow Tutorial
A highly subjective list of cool stuff about tensorflow that didn't fit into the basic tutorial.
Part I: Debugging tensorflow
Tensorflow error messages are hideous monstrosities with a heart of gold :)
If your code breaks, TF will throw a wall of text your way. But you shouldn't be afraid of it. The key skill here is finding the part of the error that actually matters: your code. Let's look at an example:
###Code
import numpy as np
import tensorflow as tf
keras, L = tf.keras, tf.keras.layers
tf.reset_default_graph()
sess = tf.Session()
embeddings = tf.Variable(np.random.randn(16, 10).astype('float32'))
sequence_ids = tf.placeholder('int32')
sequence_emb = tf.gather(embeddings, sequence_ids)
mean_emb = tf.reduce_mean(sequence_emb, axis=2)
sess.run(tf.global_variables_initializer())
sess.run(mean_emb, {sequence_ids: np.random.randint(32, size=[3, 20])})
sess.run(mean_emb, {sequence_ids: np.random.randint(32, size=20)})
###Output
_____no_output_____
###Markdown
Okay, here's what you should see:
* First and most important, this is just a traceback. No need to freak out. Keep calm.
* Second, it tells us which sess.run caused an error - the second one. Here's the relevant part
```
 1 sess.run(tf.global_variables_initializer())
 2 sess.run(mean_emb, {sequence_ids: np.random.randint(32, size=[3, 20])})
----> 3 sess.run(mean_emb, {sequence_ids: np.random.randint(32, size=20)})
```
* Then it tells us which line broke down:
```
 File "", line 11, in
 mean_emb = tf.reduce_mean(sequence_emb, axis=2)
```
* And the error
```
Invalid reduction dimension (2 for input with 2 dimension(s)
```
This information should already be sufficient to find out what happened: we took 1d indices, mapped them to 2d embeddings and now want to average over axis 2, but embeddings only got axes [0, 1]. A quick fix is sketched right below; after that, let's try a few more:
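Assuming the intent is to average embeddings over the time axis whatever the rank of `sequence_ids`, one way to do it (the random indices are also kept below 16 here so they stay inside the embedding table):
```python
# time is the second-to-last axis of sequence_emb in both cases:
# [time, emb] for 1-D ids and [batch, time, emb] for 2-D ids,
# so reduce over axis=-2 instead of a hard-coded axis=2
mean_emb_fixed = tf.reduce_mean(sequence_emb, axis=-2)

sess.run(mean_emb_fixed, {sequence_ids: np.random.randint(16, size=[3, 20])})
sess.run(mean_emb_fixed, {sequence_ids: np.random.randint(16, size=20)})
```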
###Code
%%writefile my_rnn_library.py
import numpy as np
import tensorflow as tf
def my_rnn(x_emb, emb_size, hid_size):
""" takes x_emb[time, batch, emb_size] and predicts"""
W = tf.Variable(np.random.randn(emb_size + hid_size, hid_size).astype('float32'),)
h0 = tf.zeros([tf.shape(x_emb)[1], hid_size])
def scan_step(h_t, x_t):
rnn_inp = tf.concat([h_t, x_t], axis=1)
h_next = tf.tanh(tf.matmul(x_t, W))
return h_next
return tf.scan(scan_step, elems=x_emb, initializer=h0)
%load_ext autoreload
%autoreload 2
# ^-- an extension that reloads .py modules if you change their code
import my_rnn_library
x = tf.placeholder('float32', [None, None, None])
h = my_rnn_library.my_rnn(x, emb_size=32, hid_size=128)
sess.run(tf.global_variables_initializer())
sess.run(h, {x: np.random.randn(10, 3, 32)})
# spoiler: it's gonna fail. Your task is to understand what operation failed and how to fix that.
###Output
_____no_output_____
###Markdown
Debugging tensorflow: invalid values
If your code fails with an error, it's easy to find out what's wrong. However, sometimes there's no error, but your network doesn't train and your loss is equal to NaN or -inf. Or mean squared error is negative. Or ... well, you just know it's wrong.
The question is: where is it wrong? There are two strategies: using tf.asserts and good old tinkering. We'll try the old way here (a minimal sketch of the assert-based approach is included right after the exercise).
The next example contains two errors:
* an error with shapes that causes tensorflow to fail at run time
* an error that causes tensorflow to return NaN
###Code
x = tf.placeholder_with_default(np.random.randn(3, 15, 100).astype('float32'),
[None, None, 100])
x_len = tf.placeholder_with_default(np.array([3, 14, 8], dtype='int32'), [None])
logits = L.Dense(256)(x)
mask = tf.sequence_mask(x_len, dtype=tf.float32)
logits = logits - (1 - mask)[:, :, None] * 1e9
probs = tf.nn.softmax(logits, axis=1)
mean_logp = tf.log(tf.reduce_mean(probs, axis=-1))
mean_prob = tf.exp(mean_logp)
grads = tf.gradients(mean_prob, [x])[0]
grad_norms = tf.reduce_sum(grads ** 2, axis=(1, 2)) ** 0.5
sess.run(tf.global_variables_initializer())
sess.run(grad_norms)
# Your quest is as usual: find where's Waldo (NaN). And eliminate it :)
###Output
_____no_output_____
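###Markdown
The assert-based strategy mentioned above, in a minimal TF 1.x sketch: `tf.add_check_numerics_ops()` builds one op that validates every floating-point tensor in the current graph, and running it alongside the tensor you care about raises an `InvalidArgumentError` naming the first op that produced an invalid value (the exercise itself is left untouched).
```python
check_op = tf.add_check_numerics_ops()  # checks every float tensor in the graph

try:
    sess.run([grad_norms, check_op])
except tf.errors.InvalidArgumentError as e:
    # the first line of the message points at the offending graph node
    print(e.message.splitlines()[0])
```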
###Markdown
Part II: Cool tensorflow features
###Code
# for the next section we'll need to reload tensorflow without eager
# PLEASE RESTART THE NOTEBOOK! (kernel-restart in jupyter, runtime -> restart in colab)
# also if you're in colab, please request GPU-enabled runtime (settings -> notebook settings)
###Output
_____no_output_____
###Markdown
1. Tensorflow Eager
When you first saw tensorflow in action, there was a lot of complicated stuff happening: defining operations on placeholders, sessions, variable initializers, etc.
Luckily, TF also allows you to write code on the fly much the same way as you would in numpy. It's called __Tensorflow Eager__.
###Code
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
# use tensorflow operations like you would use numpy
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
y = tf.matmul(x, tf.random_normal([2, 4]))
z = tf.nn.softmax(y, axis=1)
# every tensor has a value (like numpy arrays)
z
# ... and can be converted to numpy
z.numpy()
# you can even mix numpy arrays in tf computations
z + np.linspace(0, 4, 4)
###Output
_____no_output_____
###Markdown
Training with tf.eager
Eager execution has its own API for automatic gradients. It's called GradientTape.
###Code
x = tf.Variable([3.0, 5.0])
with tf.GradientTape() as tape:
y = x * x
dy_dx = tape.gradient(y, x)
print('gradients:', dy_dx)
###Output
_____no_output_____
###Markdown
Now let's train some networks. As usual, we'll use the Keras Sequential API to keep the model definition short.
###Code
from keras.datasets.mnist import load_data
(X_train, y_train), (X_test, y_test) = load_data()
X_train, X_test = X_train.astype('float32') / 255., X_test.astype('float32') / 255.
y_train, y_test = y_train.astype('int32'), y_test.astype('int32')
keras, L = tf.keras, tf.keras.layers # use these and not just import keras
model = keras.models.Sequential([
L.InputLayer(X_train.shape[1:]), L.Flatten(), L.Dense(100), L.Activation('relu'), L.Dense(10)
])
opt = tf.train.AdamOptimizer(learning_rate=1e-3)
for i in range(1000):
batch = np.random.randint(0, len(X_train), size=100)
with tf.GradientTape() as tape:
logits = model(X_train[batch])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=y_train[batch], logits=logits)
loss = tf.reduce_mean(loss)
grads = tape.gradient(loss, model.trainable_variables)
opt.apply_gradients(zip(grads, model.trainable_variables))
if i % 100 == 0:
print('step %i, loss=%.3f' % (i, loss.numpy()))
# we can now evaluate our model using any external metrics we want
from sklearn.metrics import accuracy_score
y_test_pred = model(X_test).numpy().argmax(-1)
print("Test acc:", accuracy_score(y_test, y_test_pred))
###Output
_____no_output_____
###Markdown
RTFM:* [tf.eager basics](https://www.tensorflow.org/tutorials/eager/eager_basics)* [tape-based gradients](https://www.tensorflow.org/tutorials/eager/automatic_differentiation)* [training walkthrough](https://www.tensorflow.org/tutorials/eager/custom_training_walkthrough)* You can also embed eager code into normal tensorflow graph with [tf.contrib.eager.py_func](https://www.tensorflow.org/guide/eager)
###Code
# Please restart the notebook again
###Output
_____no_output_____
###Markdown
2. Tensorboard
If you run more than one experiment, you will eventually have to compare your results. We've already mentioned that this can be done with tensorboard. Ideally, you wanna obtain something like this:
_except the training is not finished_
If you're not into tensorflow, there are alternatives like [visdom](https://github.com/facebookresearch/visdom) and [tensorboardX](https://github.com/lanpa/tensorboardX). A minimal logging sketch is included at the end of this cell, right before the TF Hub example.
3. Tensorflow Hub
Most deep learning applications nowadays depend on some kind of pre-trained network to start from. Be it Keras [applications](https://keras.io/applications/) for computer vision, [gensim](https://github.com/RaRe-Technologies/gensim) for embeddings, and many smaller model zoos dedicated to every particular topic.
One such model zoo is Tensorflow Hub, featuring several hot NLP models:
* [Universal Sentence Encoder](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/semantic_similarity_with_tf_hub_universal_encoder.ipynbscrollTo=MSeY-MUQo2Ha)
* [ELMO](https://tfhub.dev/google/elmo/2)
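A minimal logging sketch for the TensorBoard section above (the log directory, tag name, and the fake loss values are made up for illustration; the calls themselves are the standard TF 1.x summary API):
```python
import numpy as np
import tensorflow as tf

tf.reset_default_graph()
loss = tf.placeholder(tf.float32, [], name='loss')   # stand-in for a real loss tensor
loss_summary = tf.summary.scalar('train/loss', loss)

writer = tf.summary.FileWriter('./logs/run_1')        # one directory per experiment/run
with tf.Session() as sess:
    for step in range(100):
        fake_loss = 1.0 / (step + 1)
        summ = sess.run(loss_summary, {loss: fake_loss})
        writer.add_summary(summ, global_step=step)
writer.close()
# then compare runs with:  tensorboard --logdir ./logs
```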
###Code
import numpy as np
import tensorflow as tf
tf.reset_default_graph()
sess = tf.Session()
!pip3 install --quiet tensorflow-hub
import tensorflow_hub as hub
model = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2")
sess.run([tf.global_variables_initializer(), tf.tables_initializer()]);
sentence_embs = model(["A cat sat on a mat.", "I am the monument to all your sins"])
print(sess.run(sentence_embs))
###Output
_____no_output_____
###Markdown
Part III. Worst practices
There are a number of things about TF that kind of... ~~suck~~ are in need of improvement. Don't get me wrong, they are all great for their job. Except they can easily be misused with dramatic consequences.
__1. TF.contrib is a mess__
Tensorflow [contrib](https://www.tensorflow.org/api_docs/python/tf/contrib) is a place where tensorflow holds dozens of sub-libraries dedicated to everything. You name it:
* Helper functions for sequence-to-sequence models - [check!](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq)
* Wrapper modules for CUDNN RNN operations - [check!](https://github.com/tensorflow/probability)
* A full-blown deep learning library? - [check](https://www.tensorflow.org/api_docs/python/tf/contrib/keras)-[check](https://www.tensorflow.org/api_docs/python/tf/contrib/slim)-[check](https://www.tensorflow.org/api_docs/python/tf/contrib/learn)!
The catch is that most of the code in tf.contrib was built by independent authors. Sometimes it's poorly supported. Sometimes it's outdated. And it is definitely not designed for full compatibility with one another. For instance, LSTM cells from tf.contrib.rnn are not guaranteed to work with tf.keras abstractions. And neither do tf.slim layers fit into keras models.
There's a rule of thumb: if the functionality you need is in both tf core and tf.contrib, pick tf core. If it's only in tf.contrib - read through it and maybe play with it on a toy task before integrating it into your larger projects.
__2. Pythonic and symbolic loops__
Sometimes you want your tensorflow graph to contain loops. The most obvious example is an RNN. Tensorflow allows you to define such loops with primitives like [tf.while_loop](https://www.tensorflow.org/api_docs/python/tf/while_loop) and [tf.scan](https://www.tensorflow.org/api_docs/python/tf/scan). If you read the docs, you'll also see other primitives like __tf.map_fn__ and __tf.cond__. It is tempting to use those operations to write python-style code. __But you shouldn't__. Or rather, try hard to have as few of them as possible. Each iteration of a symbolic loop introduces a gigantic overhead in computation time.
###Code
import numpy as np
import tensorflow as tf
tf.reset_default_graph()
sess = tf.Session()
x_ph = tf.placeholder_with_default(np.linspace(-10, 10, 10**4).astype('float32'), [None])
my_square = tf.map_fn(lambda x_i: x_i ** 2, x_ph)
my_sum_squares = tf.scan(lambda ctr, x_i: ctr + x_i, elems=my_square, initializer=0.0)[-1]
tf_square = x_ph ** 2
tf_sum_squares = tf.reduce_sum(tf_square)
print("Symbolic loops:")
%time print(sess.run(my_sum_squares))
print("Vector operations:")
%time print(sess.run(tf_sum_squares))
###Output
_____no_output_____
###Markdown
__TL;DR:__ use control flow ops sparingly. Few large iterations are okay, many small iterations are not.
__3. Control Dependencies__
By default, if your tensorflow graph has two parallel branches of code, there's no way of telling which branch will be executed first. This can cause inconveniences. You may want to explicitly tell tensorflow "Run this op before that one" to save memory or make debug logs prettier. However, you can also use control dependencies to mutate graph state in the middle of execution. DON'T DO THAT unless you absolutely have to. And even then __DON'T DO THAT__. Here's a demotivational example:
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.Session()
x = tf.Variable(1.0)
y1 = x ** 2
add_first = tf.assign_add(x, 1)
with tf.control_dependencies([add_first, y1]):
y2 = x ** 2
add_second = tf.assign_add(x, 1)
with tf.control_dependencies([y2, add_second]):
y3 = x ** 2
sess.run(tf.global_variables_initializer())
print('First run:', sess.run([y1, y2, y3]))
print('Second run:', sess.run([y1, y2, y3]))
# Bonus quest: change as few lines as possible to make it print [1, 4, 9], [9, 16, 25]
###Output
_____no_output_____
###Markdown
Part IV: cool stuff that didn't make it into the tutorial
* [tf.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) - an advanced tool for loading and managing data.
* Creating new tf ops - [in c++](https://www.tensorflow.org/extend/adding_an_op)
* Managing gradient computation with [tf.stop_gradient](https://github.com/tensorflow/fold) and [gradient override map](https://stackoverflow.com/questions/41391718/tensorflows-gradient-override-map-function)
* [Gradient checkpointing](https://github.com/openai/gradient-checkpointing/) to backprop through large models in low memory
* Tensorflow is available in many other languages. For instance, here's [tf for javascript](https://js.tensorflow.org/) or [tutorial on exporting keras model for an android app](https://medium.com/@thepulkitagarwal/deploying-a-keras-model-on-android-3a8bb83d75ca)
* Efficient gpu parallelism with [horovod](https://github.com/uber/horovod)
[if we have time] XLA: Tensorflow, compiled
While tf.eager gives you the freedom to experiment, eventually you'll figure out exactly what you want and you'll need your code to run... faster. Preferably much faster. And on half as much gpu memory so you can increase batch size.
Your typical neural network has a lot of operations that are fast to compute but require allocating large amounts of memory. Consider adding a bias element-wise to a large tensor and then applying a nonlinearity. These operations can be _fused_ together: you don't allocate new memory but perform everything in-place as a single operation.
__Warning:__ XLA became included by default starting from tf 1.12; earlier versions will require compiling tensorflow manually **with** XLA support.
###Code
# Please restart notebook and make sure you use tensorflow with GPU.
# If you don't, the code will work but it XLA will give no performance boost
# and actually run slower.
import numpy as np
import tensorflow as tf
keras = tf.contrib.keras
L = tf.contrib.keras.layers
assert tf.test.is_gpu_available()
tf.reset_default_graph()
config = tf.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
config.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf.Session(config=config)
with tf.device('/gpu:0'):
model = keras.models.Sequential()
model.add(L.InputLayer([None, 256]))
model.add(L.SimpleRNN(256, return_sequences=True))
model.add(L.SimpleRNN(256))
model.add(L.Dense(100))
x = tf.placeholder_with_default(np.random.randn(1, 1000, 256).astype('float32'), [None, None, 256])
pred = model(x)
sess.run(tf.global_variables_initializer())
sess.run(pred); # "warmup run"
%timeit sess.run(pred)
###Output
_____no_output_____ |
salinization/dev/07. data-preparation/ben-tre-preparation.ipynb | ###Markdown
Bottom value: used for missing measurements to avoid zeros in the 'multiplicative' model of seasonal decomposition
###Code
bottom_value = 0.01
###Output
_____no_output_____
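###Markdown
For context on the value chosen above: statsmodels' multiplicative seasonal decomposition divides the series by its trend/seasonal components and rejects zero or negative observations, which is why zero measurements are floored at a small positive value later in this notebook. A minimal sketch, assuming a daily salinity series named `series` (a stand-in name):
```python
from statsmodels.tsa.seasonal import seasonal_decompose

clean = series.replace(0.0, bottom_value)   # keep the series strictly positive
result = seasonal_decompose(clean, model='multiplicative')  # pass period=... explicitly if the index has no freq
result.plot();
```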
###Markdown
Read in cleaned dataset
###Code
df = pd.read_csv('../../dataset/final/bentre-cleaned.csv', parse_dates=['date'])
# set index to time-series based 'date'
df.set_index('date', inplace=True)
df.index
df.info()
# sort by date index
df.sort_index(inplace=True)
df.head(20)
# replace zeros with bottom_value
df[['min', 'max']] = df[['min', 'max']].replace(0.0, bottom_value)
df.tail(20)
###Output
_____no_output_____
###Markdown
Experiment: Filling missing dates and interpolating measurements (JUMP TO IMPLEMENTATION)
One year's worth of samples at a specific station
###Code
station_code = 'BINHDAI'
station_year = 2012
# only need 'code', 'min' and 'max' columns since we are analyzing by one station at a time
sdf = df[(df['code'] == station_code) & (df.index.year == station_year)][['code', 'min', 'max']]
sdf.info()
sdf.head(10)
min_date = sdf.index.min()
min_date
max_date = sdf.index.max()
max_date
###Output
_____no_output_____
###Markdown
Fill missing dates from the beginning of the year to the last entry of the dataset
###Code
start_date = f'{station_year}-01-01'
end_date = max_date + pd.DateOffset(1) # add one extra day as the upper limit for forward fill in interpolate
date_range = pd.date_range(start=start_date, end=end_date, freq='D')
date_range
#sdf.set_index('date', inplace=True) # no need since 'date' is already index
tdf = sdf.reindex(date_range).fillna(np.nan).rename_axis('date').reset_index()
# assign station code to new rows
tdf['code'] = station_code
# set lower limit if it does not have value for forward fill in interpolate
if np.isnan(tdf.at[0, 'min']):
tdf.at[0, 'min'] = bottom_value
if np.isnan(tdf.at[0, 'max']):
tdf.at[0, 'max'] = bottom_value
# use the extra day as the upper limit for forward fill in interpolate
tdf.at[tdf.index[-1], ['min', 'max']] = bottom_value
tdf.head(40)
tdf.tail(20)
# make 'date' as DateTimeIndex again
tdf = tdf.set_index('date')
tdf.info()
###Output
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 182 entries, 2012-01-01 to 2012-06-30
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 code 182 non-null object
1 min 65 non-null float64
2 max 65 non-null float64
dtypes: float64(2), object(1)
memory usage: 5.7+ KB
###Markdown
Interpolating missing measurements
###Code
# https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html
# method: linear, time, quadratic, pchip, akima
tdf.interpolate(method ='time', limit_direction ='forward', inplace=True)
sdf.plot(title='Original Samples', xlim=[tdf.index.date.min(), tdf.index.date.max()], rot=90, figsize=(18, 5))
tdf.plot(title='Interpolated Samples', rot=90, figsize=(18, 5));
###Output
_____no_output_____
###Markdown
Fill missing dates from the last entry of the dataset to the end of the year
###Code
start_date = end_date + pd.DateOffset(1)
end_date = f'{station_year}-12-31'
date_range = pd.date_range(start=start_date, end=end_date, freq='D')
date_range
###Output
_____no_output_____
###Markdown
The bottom value is used as the filler
###Code
edf = pd.DataFrame({ 'code': station_code, 'min': bottom_value, 'max': bottom_value }, index=date_range)
edf.head()
edf.tail()
# combine 2 halves of the year back to original station dataframe
sdf = pd.concat([tdf, edf])
sdf.info()
# make sure frequent is on daily basis
sdf.index.freq = 'D'
sdf.index
sdf.head(40)
sdf.tail()
sdf.plot(title=f'{station_code} Samples of {station_year}', rot=90, figsize=(18, 5));
###Output
_____no_output_____
###Markdown
Implementation: Filling missing dates and interpolating measurements All years' worth of samples at each station
###Code
# get all station codes
station_codes = df['code'].unique()
station_codes
def fill_interpolate(data, code, year):
min_date = data.index.min()
max_date = data.index.max()
# no annual data
if pd.isnull(min_date) and pd.isnull(max_date):
return pd.DataFrame({ 'code': code, 'min': bottom_value, 'max': bottom_value }, index=pd.date_range(f'{year}-01-01', f'{year}-12-31', freq='D'))
# fill missing dates from the beginning of the year to last entry of the dataset
start_date = f'{year}-01-01'
end_date = max_date + pd.DateOffset(1) # add one extra day as the upper limit for forward fill in interpolate
date_range = pd.date_range(start=start_date, end=end_date, freq='D')
data = data.reindex(date_range).fillna(np.nan).rename_axis('date').reset_index()
data['code'] = code
if np.isnan(data.at[0, 'min']): # set lower limit
data.at[0, 'min'] = bottom_value
if np.isnan(data.at[0, 'max']):
data.at[0, 'max'] = bottom_value
data.at[data.index[-1], ['min', 'max']] = bottom_value # use the extra day as the upper limit for forward fill in interpolate
# make 'date' as DateTimeIndex again
data = data.set_index('date')
# fill missing measurements for those newly inserted dates
data = data.interpolate(method ='time', limit_direction ='forward')
# fill missing dates from last entry of the dataset till the end of the year
start_date = end_date + pd.DateOffset(1)
end_date = f'{year}-12-31'
date_range = pd.date_range(start=start_date, end=end_date, freq='D')
return pd.concat([data, pd.DataFrame({ 'code': code, 'min': bottom_value, 'max': bottom_value }, index=date_range)])
start_year = 2002
end_year = 2018
for station_code in station_codes:
# filter samples for this station
sdf = df[df['code'] == station_code][['code', 'min', 'max']]
station_years = sdf.index.year.unique().to_numpy()
print(f'Station {station_code} has samples on these years: {station_years}')
new_sdf = pd.DataFrame()
for station_year in range(start_year, end_year + 1):
print(f'Processing station {station_code} on year {station_year}')
if new_sdf.empty:
new_sdf = fill_interpolate(sdf[sdf.index.year == station_year], station_code, station_year)
else:
new_sdf = pd.concat([new_sdf, fill_interpolate(sdf[sdf.index.year == station_year], station_code, station_year)])
# save to csv file
new_sdf.index.freq = 'D' # make sure the frequency is daily
new_sdf.to_csv(f'../../dataset/final/stations/{station_code}.csv', index=True, index_label='date')
###Output
Station ANTHUAN has samples on these years: [2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015
2016]
Processing station ANTHUAN on year 2002
Processing station ANTHUAN on year 2003
Processing station ANTHUAN on year 2004
Processing station ANTHUAN on year 2005
Processing station ANTHUAN on year 2006
Processing station ANTHUAN on year 2007
Processing station ANTHUAN on year 2008
Processing station ANTHUAN on year 2009
Processing station ANTHUAN on year 2010
Processing station ANTHUAN on year 2011
Processing station ANTHUAN on year 2012
Processing station ANTHUAN on year 2013
Processing station ANTHUAN on year 2014
Processing station ANTHUAN on year 2015
Processing station ANTHUAN on year 2016
Processing station ANTHUAN on year 2017
Processing station ANTHUAN on year 2018
Station LOCTHUAN has samples on these years: [2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2015 2016
2018]
Processing station LOCTHUAN on year 2002
Processing station LOCTHUAN on year 2003
Processing station LOCTHUAN on year 2004
Processing station LOCTHUAN on year 2005
Processing station LOCTHUAN on year 2006
Processing station LOCTHUAN on year 2007
Processing station LOCTHUAN on year 2008
Processing station LOCTHUAN on year 2009
Processing station LOCTHUAN on year 2010
Processing station LOCTHUAN on year 2011
Processing station LOCTHUAN on year 2012
Processing station LOCTHUAN on year 2013
Processing station LOCTHUAN on year 2014
Processing station LOCTHUAN on year 2015
Processing station LOCTHUAN on year 2016
Processing station LOCTHUAN on year 2017
Processing station LOCTHUAN on year 2018
Station SONDOC has samples on these years: [2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015
2016 2018]
Processing station SONDOC on year 2002
Processing station SONDOC on year 2003
Processing station SONDOC on year 2004
Processing station SONDOC on year 2005
Processing station SONDOC on year 2006
Processing station SONDOC on year 2007
Processing station SONDOC on year 2008
Processing station SONDOC on year 2009
Processing station SONDOC on year 2010
Processing station SONDOC on year 2011
Processing station SONDOC on year 2012
Processing station SONDOC on year 2013
Processing station SONDOC on year 2014
Processing station SONDOC on year 2015
Processing station SONDOC on year 2016
Processing station SONDOC on year 2017
Processing station SONDOC on year 2018
Station BENTRAI has samples on these years: [2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015
2016]
Processing station BENTRAI on year 2002
Processing station BENTRAI on year 2003
Processing station BENTRAI on year 2004
Processing station BENTRAI on year 2005
Processing station BENTRAI on year 2006
Processing station BENTRAI on year 2007
Processing station BENTRAI on year 2008
Processing station BENTRAI on year 2009
Processing station BENTRAI on year 2010
Processing station BENTRAI on year 2011
Processing station BENTRAI on year 2012
Processing station BENTRAI on year 2013
Processing station BENTRAI on year 2014
Processing station BENTRAI on year 2015
Processing station BENTRAI on year 2016
Processing station BENTRAI on year 2017
Processing station BENTRAI on year 2018
Station BINHDAI has samples on these years: [2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015
2016]
Processing station BINHDAI on year 2002
Processing station BINHDAI on year 2003
Processing station BINHDAI on year 2004
Processing station BINHDAI on year 2005
Processing station BINHDAI on year 2006
Processing station BINHDAI on year 2007
Processing station BINHDAI on year 2008
Processing station BINHDAI on year 2009
Processing station BINHDAI on year 2010
Processing station BINHDAI on year 2011
Processing station BINHDAI on year 2012
Processing station BINHDAI on year 2013
Processing station BINHDAI on year 2014
Processing station BINHDAI on year 2015
Processing station BINHDAI on year 2016
Processing station BINHDAI on year 2017
Processing station BINHDAI on year 2018
Station GIAOHOA has samples on these years: [2016]
Processing station GIAOHOA on year 2002
Processing station GIAOHOA on year 2003
Processing station GIAOHOA on year 2004
Processing station GIAOHOA on year 2005
Processing station GIAOHOA on year 2006
Processing station GIAOHOA on year 2007
Processing station GIAOHOA on year 2008
Processing station GIAOHOA on year 2009
Processing station GIAOHOA on year 2010
Processing station GIAOHOA on year 2011
Processing station GIAOHOA on year 2012
Processing station GIAOHOA on year 2013
Processing station GIAOHOA on year 2014
Processing station GIAOHOA on year 2015
Processing station GIAOHOA on year 2016
Processing station GIAOHOA on year 2017
Processing station GIAOHOA on year 2018
Station MYHOA has samples on these years: [2016 2017]
Processing station MYHOA on year 2002
Processing station MYHOA on year 2003
Processing station MYHOA on year 2004
Processing station MYHOA on year 2005
Processing station MYHOA on year 2006
Processing station MYHOA on year 2007
Processing station MYHOA on year 2008
Processing station MYHOA on year 2009
Processing station MYHOA on year 2010
Processing station MYHOA on year 2011
Processing station MYHOA on year 2012
Processing station MYHOA on year 2013
Processing station MYHOA on year 2014
Processing station MYHOA on year 2015
Processing station MYHOA on year 2016
Processing station MYHOA on year 2017
Processing station MYHOA on year 2018
Station HUONGMY has samples on these years: [2016]
Processing station HUONGMY on year 2002
Processing station HUONGMY on year 2003
Processing station HUONGMY on year 2004
Processing station HUONGMY on year 2005
Processing station HUONGMY on year 2006
Processing station HUONGMY on year 2007
Processing station HUONGMY on year 2008
Processing station HUONGMY on year 2009
Processing station HUONGMY on year 2010
Processing station HUONGMY on year 2011
Processing station HUONGMY on year 2012
Processing station HUONGMY on year 2013
Processing station HUONGMY on year 2014
Processing station HUONGMY on year 2015
Processing station HUONGMY on year 2016
Processing station HUONGMY on year 2017
Processing station HUONGMY on year 2018
Station PHUOCLONG has samples on these years: [2017 2018]
Processing station PHUOCLONG on year 2002
Processing station PHUOCLONG on year 2003
Processing station PHUOCLONG on year 2004
Processing station PHUOCLONG on year 2005
Processing station PHUOCLONG on year 2006
Processing station PHUOCLONG on year 2007
Processing station PHUOCLONG on year 2008
Processing station PHUOCLONG on year 2009
Processing station PHUOCLONG on year 2010
Processing station PHUOCLONG on year 2011
Processing station PHUOCLONG on year 2012
Processing station PHUOCLONG on year 2013
Processing station PHUOCLONG on year 2014
Processing station PHUOCLONG on year 2015
Processing station PHUOCLONG on year 2016
Processing station PHUOCLONG on year 2017
Processing station PHUOCLONG on year 2018
Station VANGQUOITAY has samples on these years: [2017]
Processing station VANGQUOITAY on year 2002
Processing station VANGQUOITAY on year 2003
Processing station VANGQUOITAY on year 2004
Processing station VANGQUOITAY on year 2005
Processing station VANGQUOITAY on year 2006
Processing station VANGQUOITAY on year 2007
Processing station VANGQUOITAY on year 2008
Processing station VANGQUOITAY on year 2009
Processing station VANGQUOITAY on year 2010
Processing station VANGQUOITAY on year 2011
Processing station VANGQUOITAY on year 2012
Processing station VANGQUOITAY on year 2013
Processing station VANGQUOITAY on year 2014
Processing station VANGQUOITAY on year 2015
Processing station VANGQUOITAY on year 2016
Processing station VANGQUOITAY on year 2017
Processing station VANGQUOITAY on year 2018
Station CAIHOP has samples on these years: [2017]
Processing station CAIHOP on year 2002
Processing station CAIHOP on year 2003
Processing station CAIHOP on year 2004
Processing station CAIHOP on year 2005
Processing station CAIHOP on year 2006
Processing station CAIHOP on year 2007
Processing station CAIHOP on year 2008
Processing station CAIHOP on year 2009
Processing station CAIHOP on year 2010
Processing station CAIHOP on year 2011
Processing station CAIHOP on year 2012
Processing station CAIHOP on year 2013
Processing station CAIHOP on year 2014
Processing station CAIHOP on year 2015
Processing station CAIHOP on year 2016
|
Traffic_Sign_Classifier_YL.ipynb | ###Markdown
Self-Driving Car Engineer Nanodegree Deep Learning Project: Build a Traffic Sign Recognition ClassifierIn this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/!/rubrics/481/view) for this project.The [rubric](https://review.udacity.com/!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. --- Step 0: Load The Data
###Code
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = './traffic-signs-data/train.p'
validation_file= './traffic-signs-data/valid.p'
testing_file = './traffic-signs-data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
###Output
_____no_output_____
###Markdown
--- Step 1: Dataset Summary & ExplorationThe pickled data is a dictionary with 4 key/value pairs:- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
###Code
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
import numpy as np
# TODO: Number of training examples
n_train = y_train.shape[0]
# TODO: Number of validation examples
n_validation = y_valid.shape[0]
# TODO: Number of testing examples.
n_test = y_test.shape[0]
# TODO: What's the shape of an traffic sign image?
image_shape = X_train.shape[1:3]
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
print("Number of training examples =", n_train)
print("Number of validation examples =", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
###Output
Number of training examples = 34799
Number of validation examples = 4410
Number of testing examples = 12630
Image data shape = (32, 32)
Number of classes = 43
###Markdown
Include an exploratory visualization of the dataset Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
###Code
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
num_bins = n_classes
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(16, 4))
ax0.hist(y_train, num_bins)
ax1.hist(y_valid, num_bins)
ax2.hist(y_test, num_bins)
ax0.set_xlabel('classes')
ax0.set_ylabel('probability density')
ax0.set_title('train data histogram')
ax1.set_xlabel('classes')
ax1.set_ylabel('probability density')
ax1.set_title('validation data histogram')
ax2.set_xlabel('classes')
ax2.set_ylabel('probability density')
ax2.set_title('test data histogram')
fig.tight_layout()
plt.show()
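# A small numeric complement to the histograms above: per-class sample counts, which make
# the class imbalance explicit (np.bincount counts how many examples each of the 43 ids has).
train_counts = np.bincount(y_train, minlength=n_classes)
valid_counts = np.bincount(y_valid, minlength=n_classes)
test_counts = np.bincount(y_test, minlength=n_classes)
# e.g. train_counts.max() / train_counts.min() gives the imbalance ratio of the training set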
###Output
_____no_output_____
###Markdown
---- Step 2: Design and Test a Model ArchitectureDesign and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. There are various aspects to consider when thinking about this problem:- Neural network architecture (is the network over or underfitting?)- Play around preprocessing techniques (normalization, rgb to grayscale, etc)- Number of examples per label (some have more than others).- Generate fake data.Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these. Pre-process the Data Set (normalization, grayscale, etc.) Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project. Other pre-processing steps are optional. You can try different techniques to see if it improves performance. Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
###Code
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
### visualize a sample from the dataset
import random
index = random.randint(0, X_train.shape[0])
img = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(img)
print (y_train[index])
### convert RGB images to gray scale
import cv2
from numpy import newaxis
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
plt.figure(figsize=(1,1))
plt.imshow(img_gray, cmap = 'gray')
### normalize the grayscale image
#img_gray= np.divide(np.subtract(img_gray, 128), 128)
img_gray = img_gray[:, :, newaxis]
print ('grayscale image shape is : ', img_gray.shape)
### convert all dataset to gray and normalized images
def preprocess(img):
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
#img_gray= np.divide(np.subtract(img_gray, 128), 128)
img_gray = img_gray[:, :, newaxis]
return img_gray
print ('original dataset has dimensions of ',X_train.shape, X_valid.shape, X_test.shape)
X_train_tmp = []
for idx in range (X_train.shape[0]):
img = X_train[idx]
X_train_tmp.append(preprocess(img))
X_train_new = np.asarray(X_train_tmp)
X_valid_tmp = []
for idx in range (X_valid.shape[0]):
img = X_valid[idx]
X_valid_tmp.append(preprocess(img))
X_valid_new = np.asarray(X_valid_tmp)
X_test_tmp = []
for idx in range (X_test.shape[0]):
img = X_test[idx]
X_test_tmp.append(preprocess(img))
X_test_new = np.asarray(X_test_tmp)
print ('new dataset has dimensions of ', X_train_new.shape, X_valid_new.shape, X_test_new.shape)
### shuffle the training data
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
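# Optional sketch of the quick normalization suggested above, (pixel - 128) / 128.
# It is left commented out because the pipeline below trains on the raw RGB images.
# X_train_norm = (X_train.astype(np.float32) - 128.0) / 128.0
# X_valid_norm = (X_valid.astype(np.float32) - 128.0) / 128.0
# X_test_norm  = (X_test.astype(np.float32) - 128.0) / 128.0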
###Output
_____no_output_____
###Markdown
Model Architecture
###Code
### Define your architecture here.
### Feel free to use as many code cells as needed.
### setup tensor flow
import tensorflow as tf
EPOCHS = 50
BATCH_SIZE = 256
from tensorflow.contrib.layers import flatten
def LeNet(x):
# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
mu = 0
sigma = 0.1
# Layer 1: Convolutional. Input = 32x32x3. Output = 30x30x8.
conv1_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 3, 8), mean = mu, stddev = sigma))
conv1_b = tf.Variable(tf.zeros(8))
conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
# Activation
conv1 = tf.nn.relu(conv1)
# Layer 2: Convolutional. Input = 30x30x8. Output = 28x28x16.
conv2_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 8, 16), mean = mu, stddev = sigma))
conv2_b = tf.Variable(tf.zeros(16))
conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
# Activation
conv2 = tf.nn.relu(conv2)
# Max Pooling. Input = 28x28x16. Output = 14x14x16.
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# Layer 3: Convolutional. Input = 14x14x16. Output = 12x12x32.
conv3_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 16, 32), mean = mu, stddev = sigma))
conv3_b = tf.Variable(tf.zeros(32))
conv3 = tf.nn.conv2d(conv2, conv3_W, strides=[1, 1, 1, 1], padding='VALID') + conv3_b
# Activation
conv3 = tf.nn.relu(conv3)
# Layer 4: Convolutional. Input = 12x12x32. Output = 10x10x32.
conv4_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 32, 32), mean = mu, stddev = sigma))
conv4_b = tf.Variable(tf.zeros(32))
conv4 = tf.nn.conv2d(conv3, conv4_W, strides=[1, 1, 1, 1], padding='VALID') + conv4_b
# Activation.
conv4 = tf.nn.relu(conv4)
# Max Pooling. Input = 10x10x32. Output = 5x5x32.
conv4 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# Layer 5: Convolutional. Input = 5x5x32. Output = 3x3x32.
conv5_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 32, 32), mean = mu, stddev = sigma))
conv5_b = tf.Variable(tf.zeros(32))
conv5 = tf.nn.conv2d(conv4, conv5_W, strides=[1, 1, 1, 1], padding='VALID') + conv5_b
# Activation.
conv5 = tf.nn.relu(conv5)
# Flatten. Input = 3x3x32. Output = 288.
fc0 = flatten(conv5)
# Fully Connected. Input = 288. Output = 120.
fc1_W = tf.Variable(tf.truncated_normal(shape=(288, 120), mean = mu, stddev = sigma))
fc1_b = tf.Variable(tf.zeros(120))
fc1 = tf.matmul(fc0, fc1_W) + fc1_b
# Activation.
fc1 = tf.nn.relu(fc1)
# Fully Connected. Input = 120. Output = 84.
fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
fc2_b = tf.Variable(tf.zeros(84))
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
# Activation.
fc2 = tf.nn.relu(fc2)
# Fully Connected. Input = 84. Output = 43.
fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
fc3_b = tf.Variable(tf.zeros(43))
logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
### setup placeholder for features and labels
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
### setup training pipeline
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
### setup model evaluation
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
###Output
_____no_output_____
###Markdown
Train, Validate and Test the Model A validation set can be used to assess how well the model is performing. A low accuracy on the training and validationsets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
###Code
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
validation_accuracy = evaluate(X_valid, y_valid)
print("EPOCH {} ...".format(i+1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
saver.save(sess, './lenet')
print("Model saved")
### test the model
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
test_accuracy = evaluate(X_test, y_test)
print("Test Accuracy = {:.3f}".format(test_accuracy))
###Output
INFO:tensorflow:Restoring parameters from ./lenet
Test Accuracy = 0.941
###Markdown
--- Step 3: Test a Model on New ImagesTo give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name. Load and Output the Images
###Code
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import glob
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os
new_img = []
images = glob.glob('test_images/*.jpg')
for fname in images:
img = mpimg.imread(fname)
res = cv2.resize(img, dsize=(32, 32), interpolation=cv2.INTER_CUBIC)
new_img.append(res)
print (os.path.split(fname)[1])
plt.imshow(res)
plt.show()
X_test_img = np.asarray(new_img)
###Output
11_Rightofway.jpg
###Markdown
Predict the Sign Type for Each Image
###Code
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
predict = sess.run(logits, feed_dict = {x: X_test_img})
probs=sess.run(tf.nn.softmax(predict))
print ('class ID predictions are: ', np.argmax(probs, 1))
###Output
INFO:tensorflow:Restoring parameters from ./lenet
class ID predictions are: [11 17 25 33 12 14]
###Markdown
Analyze Performance
###Code
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
y_test_img = np.asarray([11, 17, 25, 33, 12, 14])
print ('class ID truth are: ', y_test_img)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
test_accuracy = evaluate(X_test_img, y_test_img)
print("Test Accuracy = {:.3f}".format(test_accuracy))
###Output
class ID truth are: [11 17 25 33 12 14]
INFO:tensorflow:Restoring parameters from ./lenet
Test Accuracy = 1.000
###Markdown
Output Top 5 Softmax Probabilities For Each Image Found on the Web For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.htmltop_k) could prove helpful here. The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.`tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids.Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:``` (5, 6) arraya = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, 0.12789202], [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, 0.15899337], [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , 0.23892179], [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , 0.16505091], [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, 0.09155967]])```Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:```TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], [ 0.28086119, 0.27569815, 0.18063401], [ 0.26076848, 0.23892179, 0.23664738], [ 0.29198961, 0.26234032, 0.16505091], [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], [0, 1, 4], [0, 5, 1], [1, 3, 5], [1, 4, 3]], dtype=int32))```Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
###Code
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
with tf.Session() as sess:
top_5 = sess.run(tf.nn.top_k(tf.constant(probs), k=5))
print (top_5)
###Output
TopKV2(values=array([[9.9999940e-01, 3.2776660e-07, 2.9114602e-07, 1.8454999e-08,
4.2180286e-09],
[1.0000000e+00, 1.7869048e-14, 9.4393788e-19, 4.0660042e-22,
3.7322572e-22],
[9.9862385e-01, 7.8410358e-04, 5.2689260e-04, 2.2095543e-05,
1.4150333e-05],
[1.0000000e+00, 8.9690539e-09, 7.5393569e-17, 2.5752147e-18,
1.2876239e-18],
[1.0000000e+00, 3.0966429e-21, 2.8721633e-22, 4.7103988e-26,
9.1462706e-27],
[9.9999905e-01, 9.4512990e-07, 3.9446906e-08, 1.5406231e-08,
8.9201119e-10]], dtype=float32), indices=array([[11, 33, 27, 25, 12],
[17, 22, 14, 34, 29],
[25, 1, 23, 38, 31],
[33, 39, 29, 18, 26],
[12, 9, 26, 13, 36],
[14, 4, 1, 0, 25]], dtype=int32))
###Markdown
Project WriteupOnce you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file. > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. --- Step 4 (Optional): Visualize the Neural Network's State with Test Images This Section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol. Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for its second convolutional layer you could enter conv2 as the tf_activation variable.For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image. Your output should look something like this (above)
###Code
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# Here make sure to preprocess your image_input in a way your network expects
# with size, normalization, ect if needed
# image_input =
# Note: x should be the same name as your network's tensorflow data placeholder variable
# If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
featuremaps = activation.shape[3]
plt.figure(plt_num, figsize=(15,15))
for featuremap in range(featuremaps):
plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
        if activation_min != -1 and activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min !=-1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
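# Hypothetical usage (commented out): the conv tensors above are local to LeNet(), so to
# visualize a layer it would first have to be returned from LeNet() or fetched from the
# graph by name. The tensor name below is an assumption for illustration only.
# with tf.Session() as sess:
#     saver.restore(sess, tf.train.latest_checkpoint('.'))
#     conv1_activation = tf.get_default_graph().get_tensor_by_name('Relu:0')  # assumed name
#     outputFeatureMap(X_test_img[0:1], conv1_activation)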
###Output
_____no_output_____ |
Lesson_2/GradientDescent.ipynb | ###Markdown
Implementing the Gradient Descent AlgorithmIn this lab, we'll implement the basic functions of the Gradient Descent algorithm to find the boundary in a small dataset. First, we'll start with some functions that will help us plot and visualize the data.
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#Some helper functions for plotting and drawing lines
def plot_points(X, y):
admitted = X[np.argwhere(y==1)]
rejected = X[np.argwhere(y==0)]
plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k')
plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k')
def display(m, b, color='g--'):
plt.xlim(-0.05,1.05)
plt.ylim(-0.05,1.05)
x = np.arange(-10, 10, 0.1)
plt.plot(x, m*x+b, color)
###Output
_____no_output_____
###Markdown
Reading and plotting the data
###Code
data = pd.read_csv('data.csv', header=None)
X = np.array(data[[0,1]])
y = np.array(data[2])
plot_points(X,y)
plt.show()
###Output
_____no_output_____
###Markdown
TODO: Implementing the basic functionsHere is your turn to shine. Implement the following formulas, as explained in the text.- Sigmoid activation function$$\sigma(x) = \frac{1}{1+e^{-x}}$$- Output (prediction) formula$$\hat{y} = \sigma(w_1 x_1 + w_2 x_2 + b)$$- Error function$$Error(y, \hat{y}) = - y \log(\hat{y}) - (1-y) \log(1-\hat{y})$$- The function that updates the weights$$ w_i \longrightarrow w_i + \alpha (y - \hat{y}) x_i$$$$ b \longrightarrow b + \alpha (y - \hat{y})$$
###Code
# Implement the following functions
# Activation (sigmoid) function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def output_formula(features, weights, bias):
return sigmoid(np.dot(features, weights) + bias)
def error_formula(y, output):
return - y*np.log(output) - (1 - y) * np.log(1-output)
def update_weights(x, y, weights, bias, learnrate):
output = output_formula(x, weights, bias)
error = (y - output)
weights = weights + learnrate * error * x
bias = bias + learnrate * error
return weights, bias
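# Quick sanity checks for the formulas above (small additions):
# sigmoid(0) must be exactly 0.5, and the error of a 0.5 prediction on a positive
# example (y = 1) is log(2).
assert np.isclose(sigmoid(0), 0.5)
assert np.isclose(error_formula(1, sigmoid(0)), np.log(2))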
###Output
_____no_output_____
###Markdown
Training functionThis function will help us iterate the gradient descent algorithm through all the data, for a number of epochs. It will also plot the data, and some of the boundary lines obtained as we run the algorithm.
###Code
np.random.seed(44)
epochs = 100
learnrate = 0.01
def train(features, targets, epochs, learnrate, graph_lines=False):
errors = []
n_records, n_features = features.shape
last_loss = None
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
bias = 0
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features, targets):
output = output_formula(x, weights, bias)
error = error_formula(y, output)
weights, bias = update_weights(x, y, weights, bias, learnrate)
# Printing out the log-loss error on the training set
out = output_formula(features, weights, bias)
loss = np.mean(error_formula(targets, out))
errors.append(loss)
if e % (epochs / 10) == 0:
print("\n========== Epoch", e,"==========")
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
predictions = out > 0.5
accuracy = np.mean(predictions == targets)
print("Accuracy: ", accuracy)
if graph_lines and e % (epochs / 100) == 0:
display(-weights[0]/weights[1], -bias/weights[1])
# Plotting the solution boundary
plt.title("Solution boundary")
display(-weights[0]/weights[1], -bias/weights[1], 'black')
# Plotting the data
plot_points(features, targets)
plt.show()
# Plotting the error
plt.title("Error Plot")
plt.xlabel('Number of epochs')
plt.ylabel('Error')
plt.plot(errors)
plt.show()
###Output
_____no_output_____
###Markdown
Time to train the algorithm!When we run the function, we'll obtain the following:- 10 updates with the current training loss and accuracy- A plot of the data and some of the boundary lines obtained. The final one is in black. Notice how the lines get closer and closer to the best fit, as we go through more epochs.- A plot of the error function. Notice how it decreases as we go through more epochs.
###Code
train(X, y, epochs, learnrate, True)
###Output
========== Epoch 0 ==========
Train loss: 0.7135845195381634
Accuracy: 0.4
========== Epoch 10 ==========
Train loss: 0.6225835210454962
Accuracy: 0.59
========== Epoch 20 ==========
Train loss: 0.5548744083669508
Accuracy: 0.74
========== Epoch 30 ==========
Train loss: 0.501606141872473
Accuracy: 0.84
========== Epoch 40 ==========
Train loss: 0.4593334641861401
Accuracy: 0.86
========== Epoch 50 ==========
Train loss: 0.42525543433469976
Accuracy: 0.93
========== Epoch 60 ==========
Train loss: 0.3973461571671399
Accuracy: 0.93
========== Epoch 70 ==========
Train loss: 0.3741469765239074
Accuracy: 0.93
========== Epoch 80 ==========
Train loss: 0.35459973368161973
Accuracy: 0.94
========== Epoch 90 ==========
Train loss: 0.3379273658879921
Accuracy: 0.94
|
Use-case_Cancer_detection/.ipynb_checkpoints/Cancer_encoder_e2-Benign-checkpoint.ipynb | ###Markdown
Imports
###Code
import random
import pandas as pd
import torch
from torchvision import datasets, transforms
#quanutm lib
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import AdamOptimizer
import torch
from torchvision import datasets, transforms
import sys
sys.path.append("..") # Adds higher directory to python modules path
from qencode.initialize import setAB_amplitude, setAux, setEnt
from qencode.encoders import e2_classic
from qencode.training_circuits import swap_t
from qencode.qubits_arrangement import QubitsArrangement
from qencode.utils.mnist import get_dataset
###Output
_____no_output_____
###Markdown
Data
###Code
df=pd.read_csv("cancer.csv", nrows=500)
df.head()
df.info()
df.describe()
# Data seems pretty clean without any NaN values
## engineering two new features to have 32 features that can be encoded on 5 qubits.
over_average = []
under_average = []
mean = {}
std = {}
for col in df:
if col not in ["id","diagnosis" ]:
mean[col]=df[col].mean()
std[col]=df[col].std()
for index,row in df.iterrows():
o_average=0
u_average=0
for col in df:
if col not in ["id","diagnosis" ]:
if row[col]> mean[col]+2* std[col]:
o_average = o_average + 1
if row[col]< mean[col]+2* std[col]:
u_average= u_average + 1
over_average.append(o_average)
under_average.append(u_average)
df["over_average"] = over_average
df["under_average"] = under_average
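# Note: the two engineered columns bring the usable numeric features to 32
# (30 original measurements + 2), i.e. 2**5 values, so each sample can later be
# amplitude-encoded on the 5 data qubits (4 latent + 1 trash) used below.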
df.head()
df.describe()
for col in df:
if col not in ["id","diagnosis" ]:
df[col]=df[col]/df[col].max()
df.describe()
malign=df[df["diagnosis"]=="M"]
malign.head()
benign=df[df["diagnosis"]!="M"]
benign.head()
malign.drop(["id","diagnosis","Unnamed: 32"],axis="columns", inplace=True)
benign.drop(["id","diagnosis","Unnamed: 32"],axis="columns", inplace=True)
malign.head()
input_data=benign.to_numpy()
input_data
###Output
_____no_output_____
###Markdown
Training node
###Code
shots = 2500
nr_trash=1
nr_latent=4
nr_ent=0
spec = QubitsArrangement(nr_trash, nr_latent, nr_swap=1, nr_ent=nr_ent)
print("Qubits:", spec.qubits)
#set up the device
dev = qml.device("default.qubit", wires=spec.num_qubits)
@qml.qnode(dev)
def training_circuit_example(init_params, encoder_params, reinit_state):
    # initialization
setAB_amplitude(spec, init_params)
setAux(spec, reinit_state)
setEnt(spec, inputs=[1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)])
#encoder
for params in encoder_params:
e2_classic(params, [*spec.latent_qubits, *spec.trash_qubits])
#swap test
swap_t(spec)
return [qml.probs(i) for i in spec.swap_qubits]
###Output
_____no_output_____
###Markdown
Training parameters
###Code
epochs = 500
learning_rate = 0.0003
batch_size = 2
num_samples = 0.8 # proportion of the data used for training
beta1 = 0.9
beta2 = 0.999
opt = AdamOptimizer(learning_rate, beta1=beta1, beta2=beta2)
def fid_func(output):
# Implemented as the Fidelity Loss
# output[0] because we take the probability that the state after the
# SWAP test is ket(0), like the reference state
fidelity_loss = 1 / output[0]
return fidelity_loss
def cost(encoder_params, X):
reinit_state = [0 for i in range(2 ** len(spec.aux_qubits))]
reinit_state[0] = 1.0
loss = 0.0
for x in X:
output = training_circuit_example(init_params=x[0], encoder_params=encoder_params, reinit_state=reinit_state)[0]
f = fid_func(output)
loss = loss + f
return loss / len(X)
def fidelity(encoder_params, X):
reinit_state = [0 for i in range(2 ** len(spec.aux_qubits))]
reinit_state[0] = 1.0
loss = 0.0
for x in X:
output = training_circuit_example(init_params=x[0], encoder_params=encoder_params, reinit_state=reinit_state)[0]
f = output[0]
loss = loss + f
return loss / len(X)
def iterate_batches(X, batch_size):
random.shuffle(X)
batch_list = []
batch = []
for x in X:
if len(batch) < batch_size:
batch.append(x)
else:
batch_list.append(batch)
batch = []
if len(batch) != 0:
batch_list.append(batch)
return batch_list
training_data = [ torch.tensor([input_data[i]]) for i in range(int(len(input_data)*num_samples))]
test_data = [torch.tensor([input_data[i]]) for i in range(int(len(input_data)*num_samples),len(input_data))]
training_data[0]
X_training = training_data
X_tes = test_data
# initialize random encoder parameters
nr_encod_qubits = len(spec.trash_qubits) + len(spec.latent_qubits)
nr_par_encoder = 15 * int(nr_encod_qubits*(nr_encod_qubits-1)/2)
encoder_params = np.random.uniform(size=(1, nr_par_encoder), requires_grad=True)
###Output
_____no_output_____
###Markdown
training
###Code
np_malign = malign.to_numpy()
malign_data = [ torch.tensor([np_malign[i]]) for i in range(len(malign.to_numpy()))]
loss_hist=[]
fid_hist=[]
loss_hist_test=[]
fid_hist_test=[]
benign_fid=[]
for epoch in range(epochs):
batches = iterate_batches(X=training_data, batch_size=batch_size)
for xbatch in batches:
encoder_params = opt.step(cost, encoder_params, X=xbatch)
if epoch%5 == 0:
loss_training = cost(encoder_params, X_training )
fidel = fidelity(encoder_params, X_training )
loss_hist.append(loss_training)
fid_hist.append(fidel)
print("Epoch:{} | Loss:{} | Fidelity:{}".format(epoch, loss_training, fidel))
loss_test = cost(encoder_params, X_tes )
fidel = fidelity(encoder_params, X_tes )
loss_hist_test.append(loss_test)
fid_hist_test.append(fidel)
print("Test-Epoch:{} | Loss:{} | Fidelity:{}".format(epoch, loss_test, fidel))
b_fidel = fidelity(encoder_params, malign_data )
benign_fid.append(b_fidel)
print("malign fid:{}".format(b_fidel))
###Output
C:\Users\tomut\anaconda3\envs\qhack2022\lib\site-packages\pennylane\math\multi_dispatch.py:63: UserWarning: Contains tensors of types {'torch', 'autograd'}; dispatch will prioritize TensorFlow and PyTorch over autograd. Consider replacing Autograd with vanilla NumPy.
warnings.warn(
###Markdown
Results
###Code
import matplotlib.pyplot as plt
maligig = plt.figure()
plt.plot([x for x in range(0,len(loss_hist)*5,5)],np.array(fid_hist),label="train fid")
plt.plot([x for x in range(0,len(loss_hist)*5,5)],np.array(fid_hist_test),label="test fid")
plt.plot([x for x in range(0,len(loss_hist)*5,5)],np.array(benign_fid),label="malign fid")
plt.legend()
plt.title("Malign 5-1-5->compression fidelity e2",)
plt.xlabel("epoch")
plt.ylabel("fid")
print("fidelity:",fid_hist[-1])
fig = plt.figure()
plt.plot([x for x in range(0,len(loss_hist)*5,5)],np.array(loss_hist),label="train loss")
plt.plot([x for x in range(0,len(loss_hist)*5,5)],np.array(loss_hist_test),label="test loss")
plt.legend()
plt.title("Malign 5-1-5->compression loss e2",)
plt.xlabel("epoch")
plt.ylabel("loss")
print("loss:",loss_hist[-1])
name = "Cancer_encoder_e2"
Circuit_prop={ "shots":shots, "nr_trash":nr_trash, "nr_latent":nr_latent ,"nr_ent":nr_ent }
Training_param = { "num_samples" : num_samples,
"batch_size" :batch_size,
"epochs" :epochs,
"learning_rate" : learning_rate ,
"beta1" : beta1,
"beta2 ":beta2,
"optimizer":"Adam"}
performance={"loss_hist":loss_hist, "fid_hist":fid_hist,
"loss_hist_test":loss_hist_test, "fid_hist_test":fid_hist_test,
"encoder_params":encoder_params}
experiment_data={"Circuit_prop":Circuit_prop,
"Training_param":Training_param,
"performance:":performance,
"Name":name}
# open file for writing
f = open(name+".txt","w")
f.write( str(experiment_data) )
###Output
_____no_output_____
###Markdown
Benign performance
###Code
np_malign = malign.to_numpy()
malign_data = [ torch.tensor([np_malign[i]]) for i in range(len(malign.to_numpy()))]
loss = cost(encoder_params, malign_data )
fidel = fidelity(encoder_params, malign_data )
print("Benign results:")
print("fidelity=",fidel)
print("loss=",loss)
###Output
Benign results:
fidelity= 0.9891166361994982
loss= 1.011144532784921
###Markdown
Classifier
###Code
malign_flist=[]
for b in malign_data:
f=fidelity(encoder_params, [b])
malign_flist.append(f.item())
print(min(malign_flist))
print(max(malign_flist))
np_benign= benign.to_numpy()
benign_data = [ torch.tensor([np_benign[i]]) for i in range(len(benign.to_numpy()))]
benign_flist=[]
for b in benign_data:
f=fidelity(encoder_params, [b])
benign_flist.append(f.item())
print(min(benign_flist))
print(max(benign_flist))
plt.hist(benign_flist, bins = 100 ,label="benign", color = "skyblue",alpha=0.4)
plt.hist(malign_flist, bins =100 ,label="malign",color = "red",alpha=0.4)
plt.title("Compression fidelity",)
plt.legend()
plt.show()
split=0.99
print("split:",split)
b_e=[]
for i in benign_flist:
if i<split:
b_e.append(1)
else:
b_e.append(0)
ab_ac=sum(b_e)/len(b_e)
print("malign classification accuracy:",ab_ac)
m_e=[]
for i in malign_flist:
if i>split:
m_e.append(1)
else:
m_e.append(0)
am_ac=sum(m_e)/len(m_e)
print("benign classification accuracy:",am_ac)
t_ac=(sum(b_e)+sum(m_e))/(len(b_e)+len(m_e))
print("total accuracy:",t_ac)
###Output
_____no_output_____ |
notebooks/_old/HappelPolyFit.ipynb | ###Markdown
Find a fitting polynomial for Happel's function
###Code
%reset -f
import numpy as np
import matplotlib.pyplot as plt
Porosity = np.linspace(0.01,0.9,200)
Solids = 1.0 - Porosity
## Calculate Happel's equation
As_Down = 2 - (3*Solids**(1./3.)) + (3*Solids**(5./3.)) - (2*Solids**2.)
As_Up = 2 * (1-Solids**(5./3.))
As = As_Up/As_Down
## Fit a polynomial to Happel's equation
Fit = np.polyfit(1/Porosity,As,2)
reconstructed_As = Fit[0] * (1/Porosity)**2 + Fit[1] * (1/Porosity) + Fit[2]
## Compare the results
fig,axs = plt.subplots(1,2,sharey=True,figsize=(10,6),facecolor="white",\
gridspec_kw = {'wspace':0});
ax = axs[0]
ax.set(ylabel=r"Happel parameter $A_s$",xlabel=r"$\theta$")
ax.plot(Porosity,As,lw=9,c='skyblue',label="Orig. Function")
ax.plot(Porosity,reconstructed_As,lw=1,c='k',label="Polyfitted")
ax = axs[1]
ax.plot(1/Porosity,As,lw=9,c='skyblue',label="Orig. Function")
ax.plot(1/Porosity,reconstructed_As,lw=1,c='k',label="Polyfitted")
ax.set(xlabel=r"$\dfrac{1}{\theta}$",yscale='log')
plt.show()
## Return the coefficients (hardcoded to C++)
print("As_Coeffs = ",end='')
print(Fit)
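# Equivalent, more compact evaluation of the fitted polynomial with the same coefficients:
# np.polyval computes Fit[0]*t**2 + Fit[1]*t + Fit[2] for t = 1/Porosity.
reconstructed_As_polyval = np.polyval(Fit, 1 / Porosity)
assert np.allclose(reconstructed_As_polyval, reconstructed_As)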
###Output
As_Coeffs = [ 8.99992117 -7.49203318 0.4119361 ]
###Markdown
End of notebook :)
###Code
import matplotlib.gridspec as gs
from ipywidgets import interact_manual
import ipywidgets as widgets
ClayDiameter = 5.0E-6 #5.0 um
ClayRadio = ClayDiameter/2
alpha = 1.0
d = 0.42/1000 #mm
PorositySand = 0.48
SandDiameter = np.array([1.0E-2,1.0E-3,d,1.0E-4])
SandLabels = ["Gravel [10mm]","Sand [1mm]","Jon's [0.42mm]","Fine Sand [0.1mm]"]
LineWidths = [1.5,1.5,4,1.5]
Ratios = ClayRadio/(SandDiameter/2)
fig = plt.figure(figsize=(6,10),facecolor="white");
ax1 = plt.subplot(2,1,1)
ax2 = plt.subplot(2,1,2)
for i in range(len(Ratios)):
    Eta = 1.5 * (As_Up/As_Down) * Ratios[i]**2  # Happel's As = As_Up/As_Down (from the first cell)
Lambda = 0.75 * alpha * Eta * Solids / SandDiameter[i]
ax1.plot(Porosity,Eta,lw=LineWidths[i])
ax2.plot(Porosity,Lambda,lw=LineWidths[i])
ax1.set_yscale('log')
ax2.set_yscale('log')
ax1.set_xlim(0.1,0.9)
ax2.set_xlim(0.1,0.9)
ax1.legend(SandLabels)
ax2.legend(SandLabels)
ax1.axvline(x=PorositySand,ls="dotted",lw=2,c="gray")
ax2.axvline(x=PorositySand,ls="dotted",lw=2,c="gray")
ax1.set_xlabel("Porosity $\\theta$ ")
ax1.set_ylabel("Filtration Efficiency \n$\\eta_{\\rm di}$ ")
ax2.set_xlabel("Porosity $\\theta$ ")
ax2.set_ylabel("Filtration Coefficient \n$\\Lambda_{(\\theta)}$ [1/cm] ")
plt.show()
fig = plt.figure(figsize=(6,6),facecolor="white");
ax2 = plt.subplot(1,1,1)
ax2.plot(1/Porosity,As,lw=9,c='skyblue',label="Orig. Function")
Fit = np.polyfit(1/Porosity,As,2)
X1 = Fit[0] * (1/Porosity)**2
X2 = Fit[1] * (1/Porosity)
X3 = Fit[2]
ax2.plot(1/Porosity,X1+X2+X3,lw=2,c='red',\
label="Polyfit")
ax2.set_xlabel("$\\dfrac{1}{\\theta} = \\dfrac{1}{1-s}$ ",size="large")
ax2.set_xlim(0,10)
ax2.set_ylabel("$A_s$",size="large")
#ax2.annotate("$%.1f \\dfrac{1}{\\theta^2} %.1f \\dfrac{1}{\\theta}$ + %.1f" \
# %(Fit[0],Fit[1],Fit[2]),\
# xy=(7, 30),\
# ha='center',fontsize="large",bbox=dict(facecolor='red', alpha=0.2))
#ax2.annotate("$\\dfrac{3}{2} \\left( \\dfrac{2(1-s^{5/3})}{2-3s^{1/3}+3s^{5/3}-2s^2} \\right)$",\
# xy=(7, 10),\
# ha='center',fontsize="large",bbox=dict(facecolor='blue', alpha=0.1))
ax2.set_yscale('log')
ax2.axvline(x=1/PorositySand,ls="dotted",lw=2,c="gray",label="JDS Porosity")
ax2.legend(ncol=3)
plt.show()
print(Fit)
fig = plt.figure(figsize=(8,5),facecolor="white");
ax2 = plt.subplot(1,1,1)
As = Up/Down
ax2.plot(Porosity,As,lw=9,c='skyblue',label="Happel's function")
Fit = np.polyfit(1/Porosity,As,2)
X1 = Fit[0] * (1/Porosity)**2
X2 = Fit[1] * (1/Porosity)
X3 = Fit[2]
ax2.plot(Porosity,X1+X2+X3,lw=2,c='red',\
label="Polynomial fit")
ax2.set_xlabel("$\\theta = (1-s)$ ",size="large")
ax2.set_ylabel("$A_s$",size="large")
#ax2.annotate("$%.1f \\dfrac{1}{\\theta^2} %.1f \\dfrac{1}{\\theta}$ + %.1f" \
# %(Fit[0],Fit[1],Fit[2]),\
# xy=(7, 30),\
# ha='center',fontsize="large",bbox=dict(facecolor='red', alpha=0.2))
#ax2.annotate("$\\dfrac{3}{2} \\left( \\dfrac{2(1-s^{5/3})}{2-3s^{1/3}+3s^{5/3}-2s^2} \\right)$",\
# xy=(7, 10),\
# ha='center',fontsize="large",bbox=dict(facecolor='blue', alpha=0.1))
ax2.set_yscale('log')
ax2.axvline(x=PorositySand,ls="dotted",lw=2,c="gray",label="JDS Porosity")
ax2.legend(ncol=3)
ax2.set_xlim(0,0.9)
plt.show()
EtaLong_1 = np.log(2.4)+np.log(0.55)+np.log(0.475)
U = 4.0E-5 #m/s
A = 1.0E-20 #J
kBoltz = 1.38E-23#1.380649E-23 #J/K
Temp = 273.0+25.0 #K
rho_clay = 1050 #kg/m3
rho = 997 #kg/m3
g = 9.81 #m/s2
mu = 0.89E-3 # N s/m^2 (Pa s)
D = kBoltz*Temp/(6*np.pi*mu*ClayRadio)
print("%.2E"%(D*100*100))
NPe = U*d/D
print(NPe)
Nvdw = A/(kBoltz*Temp)
print(Nvdw)
Ngr = 4.0*np.pi/3.0 * (ClayRadio**4) * (rho_clay - rho) * g / (kBoltz*Temp)
print(Ngr)
fig = plt.figure(figsize=(6,10),facecolor="white");
ax1 = plt.subplot(2,1,1)
ax2 = plt.subplot(2,1,2)
for i in [2]:
NR = Ratios[i]
EtaDiff = np.exp( \
np.log(2.4)\
+ (1./3.)*np.log(As)
- 0.081*np.log(NR)\
- 0.715*np.log(NPe)\
+ 0.052*np.log(Nvdw))
EtaInte = np.exp( \
np.log(0.55)\
+ np.log(As)
+ 1.550*np.log(NR)\
- 0.125*np.log(NPe)\
+ 0.125*np.log(Nvdw))
EtaGrav = np.exp( \
np.log(0.475)\
- 1.350*np.log(NR)\
- 1.110*np.log(NPe)\
+ 0.053*np.log(Nvdw)\
+ 1.110*np.log(Ngr))
Eta = EtaDiff + EtaInte + EtaGrav
Lambda = 0.75 * alpha * Eta * Solids / (SandDiameter[i]*100)
ax1.plot(Porosity,Eta,lw=3,label="$\\eta_{\\rm 0} = \\eta_{\\rm D} + \\eta_{\\rm I} + \\eta_{\\rm G}$",c="black")
ax1.plot(Porosity,EtaDiff,lw=3,label="$\\eta_{\\rm D}$")
ax1.plot(Porosity,EtaInte,lw=3,label="$\\eta_{\\rm I}$")
ax1.axhline(y=EtaGrav,lw=2,label="$\\eta_{\\rm G}$",c="gray")
ax2.plot(Porosity,Lambda,lw=LineWidths[i],c="darksalmon")
ax1.set_yscale('log')
ax2.set_yscale('log')
ax1.set_xlim(0,0.9)
ax2.set_xlim(0,0.9)
ax1.legend(fontsize="large",ncol=1)
ax1.axvline(x=PorositySand,ls="dotted",lw=2,c="gray")
ax2.axvline(x=PorositySand,ls="dotted",lw=2,c="gray")
ax1.set_xlabel("Porosity $\\theta$ ")
ax1.set_ylabel("Filtration Efficiency $\\eta$ ")
ax2.set_xlabel("Porosity $\\theta$ ")
ax2.set_ylabel("Filtration Coefficient $\\Lambda_{(\\theta)}$ [1/cm] ")
plt.show()
###Output
_____no_output_____ |
Data_Science/Chapter1_Basic_Python_4_DS.ipynb | ###Markdown
---_You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._--- The Python Programming Language: Functions `add_numbers` is a function that takes two numbers and adds them together.
###Code
def add_numbers(x, y):
return x + y
add_numbers(1, 2)
###Output
_____no_output_____
###Markdown
`add_numbers` updated to take an optional 3rd parameter. Using `print` allows printing of multiple expressions within a single cell.
###Code
def add_numbers(x,y,z=None):
if (z==None):
return x+y
else:
return x+y+z
print(add_numbers(1, 2))
print(add_numbers(1, 2, 3))
###Output
3
6
###Markdown
`add_numbers` updated to take an optional flag parameter.
###Code
def add_numbers(x, y, z=None, flag=False):
if (flag):
print('Flag is true!')
if (z==None):
return x + y
else:
return x + y + z
print(add_numbers(1, 2, flag=True))
###Output
Flag is true!
3
###Markdown
Assign function `add_numbers` to variable `a`.
###Code
def add_numbers(x,y):
return x+y
a = add_numbers
a(1,2)
###Output
_____no_output_____
###Markdown
The Python Programming Language: Types and Sequences Use `type` to return the object's type.
###Code
type('This is a string')
type(None)
type(1)
type(1.0)
type(add_numbers)
###Output
_____no_output_____
###Markdown
Tuples are an immutable data structure (cannot be altered).
###Code
x = (1, 'a', 2, 'b')
type(x)
###Output
_____no_output_____
###Markdown
Lists are a mutable data structure.
###Code
x = [1, 'a', 2, 'b']
type(x)
###Output
_____no_output_____
###Markdown
Use `append` to append an object to a list.
###Code
x.append(3.3)
print(x)
###Output
[1, 'a', 2, 'b', 3.3]
###Markdown
This is an example of how to loop through each item in the list.
###Code
for item in x:
print(item)
###Output
1
a
2
b
3.3
###Markdown
Or using the indexing operator:
###Code
i=0
while( i != len(x) ):
print(x[i])
i = i + 1
###Output
1
a
2
b
3.3
###Markdown
Use `+` to concatenate lists.
###Code
[1,2] + [3,4]
###Output
_____no_output_____
###Markdown
Use `*` to repeat lists.
###Code
[1]*3
###Output
_____no_output_____
###Markdown
Use the `in` operator to check if something is inside a list.
###Code
1 in [1, 2, 3]
###Output
_____no_output_____
###Markdown
Now let's look at strings. Use bracket notation to slice a string.
###Code
x = 'This is a string'
print(x[0]) #first character
print(x[0:1]) #first character, but we have explicitly set the end character
print(x[0:2]) #first two characters
###Output
T
T
Th
###Markdown
This will return the last element of the string.
###Code
x[-1]
###Output
_____no_output_____
###Markdown
This will return the slice starting from the 4th element from the end and stopping before the 2nd element from the end.
###Code
x[-4:-2]
###Output
_____no_output_____
###Markdown
This is a slice from the beginning of the string and stopping before the 3rd element.
###Code
x[:3]
###Output
_____no_output_____
###Markdown
And this is a slice starting from the 4th element of the string and going all the way to the end.
###Code
x[3:]
firstname = 'Christopher'
lastname = 'Brooks'
print(firstname + ' ' + lastname)
print(firstname*3)
print('Chris' in firstname)
###Output
Christopher Brooks
ChristopherChristopherChristopher
True
###Markdown
`split` returns a list of all the words in a string, or a list split on a specific character.
###Code
firstname = 'Christopher Arthur Hansen Brooks'.split(' ')[0] # [0] selects the first element of the list
lastname = 'Christopher Arthur Hansen Brooks'.split(' ')[-1] # [-1] selects the last element of the list
print(firstname)
print(lastname)
###Output
Christopher
Brooks
###Markdown
Make sure you convert objects to strings before concatenating.
###Code
'Chris' + 2
'Chris' + str(2)
###Output
_____no_output_____
###Markdown
Dictionaries associate keys with values.
###Code
x = {'Christopher Brooks': '[email protected]', 'Bill Gates': '[email protected]'}
x['Christopher Brooks'] # Retrieve a value by using the indexing operator
x['Kevyn Collins-Thompson'] = None
x['Kevyn Collins-Thompson']
###Output
_____no_output_____
###Markdown
Iterate over all of the keys:
###Code
for name in x:
print(x[name])
###Output
[email protected]
[email protected]
None
###Markdown
Iterate over all of the values:
###Code
for email in x.values():
print(email)
###Output
[email protected]
[email protected]
None
###Markdown
Iterate over all of the items in the list:
###Code
for name, email in x.items():
print(name)
print(email)
###Output
Christopher Brooks
[email protected]
Bill Gates
[email protected]
Kevyn Collins-Thompson
None
###Markdown
You can unpack a sequence into different variables:
###Code
x = ('Christopher', 'Brooks', '[email protected]')
fname, lname, email = x
fname
lname
###Output
_____no_output_____
###Markdown
Make sure the number of values you are unpacking matches the number of variables being assigned.
###Code
x = ('Christopher', 'Brooks', '[email protected]', 'Ann Arbor')
fname, lname, email = x
###Output
_____no_output_____
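###Markdown
One possible way around this error (a small addition, not part of the original lesson) is Python's extended unpacking, where a starred name absorbs any leftover values.
###Code
fname, lname, email, *extra = x # extra collects the remaining values as a list
print(extra)
###Output
_____no_output_____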
###Markdown
The Python Programming Language: More on Strings
###Code
print('Chris' + 2)
print('Chris' + str(2))
###Output
Chris2
###Markdown
Python has a built in method for convenient string formatting.
###Code
sales_record = {
'price': 3.24,
'num_items': 4,
'person': 'Chris'}
sales_statement = '{} bought {} item(s) at a price of {} each for a total of {}'
print(sales_statement.format(sales_record['person'],
sales_record['num_items'],
sales_record['price'],
sales_record['num_items']*sales_record['price']))
###Output
Chris bought 4 item(s) at a price of 3.24 each for a total of 12.96
###Markdown
Reading and Writing CSV files Let's import our datafile mpg.csv, which contains fuel economy data for 234 cars.* mpg : miles per gallon* class : car classification* cty : city mpg* cyl : number of cylinders* displ : engine displacement in liters* drv : f = front-wheel drive, r = rear wheel drive, 4 = 4wd* fl : fuel (e = ethanol E85, d = diesel, r = regular, p = premium, c = CNG)* hwy : highway mpg* manufacturer : automobile manufacturer* model : model of car* trans : type of transmission* year : model year
###Code
import csv
%precision 2
with open('mpg.csv') as csvfile:
mpg = list(csv.DictReader(csvfile))
mpg[:3] # The first three dictionaries in our list.
###Output
_____no_output_____
###Markdown
`csv.DictReader` has read in each row of our csv file as a dictionary. `len` shows that our list is composed of 234 dictionaries.
###Code
len(mpg)
###Output
_____no_output_____
###Markdown
`keys` gives us the column names of our csv.
###Code
mpg[0].keys()
###Output
_____no_output_____
###Markdown
This is how to find the average cty fuel economy across all cars. All values in the dictionaries are strings, so we need to convert to float.
###Code
sum(float(d['cty']) for d in mpg) / len(mpg)
###Output
_____no_output_____
###Markdown
Similarly this is how to find the average hwy fuel economy across all cars.
###Code
sum(float(d['hwy']) for d in mpg) / len(mpg)
###Output
_____no_output_____
###Markdown
Use `set` to return the unique values for the number of cylinders the cars in our dataset have.
###Code
cylinders = set(d['cyl'] for d in mpg)
cylinders
###Output
_____no_output_____
###Markdown
Here's a more complex example where we are grouping the cars by number of cylinder, and finding the average cty mpg for each group.
###Code
CtyMpgByCyl = []
for c in cylinders: # iterate over all the cylinder levels
summpg = 0
cyltypecount = 0
for d in mpg: # iterate over all dictionaries
if d['cyl'] == c: # if the cylinder level type matches,
summpg += float(d['cty']) # add the cty mpg
cyltypecount += 1 # increment the count
CtyMpgByCyl.append((c, summpg / cyltypecount)) # append the tuple ('cylinder', 'avg mpg')
CtyMpgByCyl.sort(key=lambda x: x[0])
CtyMpgByCyl
###Output
_____no_output_____
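###Markdown
As an aside (an addition to the original notebook), the same grouping can be written more compactly with `collections.defaultdict`, accumulating a running sum and count per cylinder level in a single pass.
###Code
from collections import defaultdict
totals = defaultdict(lambda: [0.0, 0]) # cyl -> [sum of cty mpg, count]
for d in mpg:
    totals[d['cyl']][0] += float(d['cty'])
    totals[d['cyl']][1] += 1
sorted((c, s/n) for c, (s, n) in totals.items())
###Output
_____no_output_____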
###Markdown
Use `set` to return the unique values for the class types in our dataset.
###Code
vehicleclass = set(d['class'] for d in mpg) # what are the class types
vehicleclass
###Output
_____no_output_____
###Markdown
And here's an example of how to find the average hwy mpg for each class of vehicle in our dataset.
###Code
HwyMpgByClass = []
for t in vehicleclass: # iterate over all the vehicle classes
summpg = 0
vclasscount = 0
for d in mpg: # iterate over all dictionaries
        if d['class'] == t: # if the vehicle class matches,
summpg += float(d['hwy']) # add the hwy mpg
vclasscount += 1 # increment the count
HwyMpgByClass.append((t, summpg / vclasscount)) # append the tuple ('class', 'avg mpg')
HwyMpgByClass.sort(key=lambda x: x[1])
HwyMpgByClass
###Output
_____no_output_____
###Markdown
The Python Programming Language: Dates and Times
###Code
import datetime as dt
import time as tm
###Output
_____no_output_____
###Markdown
`time` returns the current time in seconds since the Epoch. (January 1st, 1970)
###Code
tm.time()
###Output
_____no_output_____
###Markdown
Convert the timestamp to datetime. Handy datetime attributes:
###Code
dtnow = dt.datetime.fromtimestamp(tm.time())
dtnow
dtnow.year, dtnow.month, dtnow.day, dtnow.hour, dtnow.minute, dtnow.second # get year, month, day, etc.from a datetime
###Output
_____no_output_____
###Markdown
`timedelta` is a duration expressing the difference between two dates.
###Code
delta = dt.timedelta(days = 100) # create a timedelta of 100 days
delta
###Output
_____no_output_____
###Markdown
`date.today` returns the current local date.
###Code
today = dt.date.today()
today - delta # the date 100 days ago
today > today-delta # compare dates
###Output
_____no_output_____
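###Markdown
A related convenience (not covered in the original lesson) is `strftime`, which formats a date or datetime as a string according to a format specification.
###Code
today.strftime('%Y-%m-%d') # e.g. '2024-01-31', depending on the current date
###Output
_____no_output_____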
###Markdown
The Python Programming Language: Objects and map() An example of a class in python:
###Code
class Person:
department = 'School of Information' #a class variable
def set_name(self, new_name): #a method
self.name = new_name
def set_location(self, new_location):
self.location = new_location
person = Person()
person.set_name('Christopher Brooks')
person.set_location('Ann Arbor, MI, USA')
print('{} lives in {} and works in the department {}'.format(person.name, person.location, person.department))
###Output
Christopher Brooks lives in Ann Arbor, MI, USA and works in the department School of Information
###Markdown
Here's an example of mapping the `min` function between two lists.
###Code
store1 = [10.00, 11.00, 12.34, 2.34]
store2 = [9.00, 11.10, 12.34, 2.01]
cheapest = map(min, store1, store2)
cheapest
###Output
_____no_output_____
###Markdown
Now let's iterate through the map object to see the values.
###Code
for item in cheapest:
print(item)
###Output
9.0
11.0
12.34
2.01
###Markdown
The Python Programming Language: Lambda and List Comprehensions Here's an example of lambda that takes in three parameters and adds the first two.
###Code
my_function = lambda a, b, c : a + b
my_function(1, 2, 3)
###Output
_____no_output_____
###Markdown
Let's iterate from 0 to 999 and return the even numbers.
###Code
my_list = []
for number in range(0, 1000):
if number % 2 == 0:
my_list.append(number)
my_list
###Output
_____no_output_____
###Markdown
Now the same thing but with list comprehension.
###Code
my_list = [number for number in range(0,1000) if number % 2 == 0]
my_list
###Output
_____no_output_____
###Markdown
The Python Programming Language: Numerical Python (NumPy)
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Creating Arrays Create a list and convert it to a numpy array
###Code
mylist = [1, 2, 3]
x = np.array(mylist)
x
###Output
_____no_output_____
###Markdown
Or just pass in a list directly
###Code
y = np.array([4, 5, 6])
y
###Output
_____no_output_____
###Markdown
Pass in a list of lists to create a multidimensional array.
###Code
m = np.array([[7, 8, 9], [10, 11, 12]])
m
###Output
_____no_output_____
###Markdown
Use the `shape` attribute to find the dimensions of the array (rows, columns).
###Code
m.shape
###Output
_____no_output_____
###Markdown
`arange` returns evenly spaced values within a given interval.
###Code
n = np.arange(0, 30, 2) # start at 0 count up by 2, stop before 30
n
###Output
_____no_output_____
###Markdown
`reshape` returns an array with the same data with a new shape.
###Code
n = n.reshape(3, 5) # reshape array to be 3x5
n
###Output
_____no_output_____
###Markdown
`linspace` returns evenly spaced numbers over a specified interval.
###Code
o = np.linspace(0, 4, 9) # return 9 evenly spaced values from 0 to 4
o
###Output
_____no_output_____
###Markdown
`resize` changes the shape and size of array in-place.
###Code
o.resize(3, 3)
o
###Output
_____no_output_____
###Markdown
`ones` returns a new array of given shape and type, filled with ones.
###Code
np.ones((3, 2))
###Output
_____no_output_____
###Markdown
`zeros` returns a new array of given shape and type, filled with zeros.
###Code
np.zeros((2, 3))
###Output
_____no_output_____
###Markdown
`eye` returns a 2-D array with ones on the diagonal and zeros elsewhere.
###Code
np.eye(3)
###Output
_____no_output_____
###Markdown
`diag` extracts a diagonal or constructs a diagonal array.
###Code
np.diag(y)
###Output
_____no_output_____
###Markdown
Create an array using repeating list (or see `np.tile`)
###Code
np.array([1, 2, 3] * 3)
###Output
_____no_output_____
###Markdown
Repeat elements of an array using `repeat`.
###Code
np.repeat([1, 2, 3], 3)
###Output
_____no_output_____
###Markdown
Combining Arrays
###Code
p = np.ones([2, 3], int)
p
###Output
_____no_output_____
###Markdown
Use `vstack` to stack arrays in sequence vertically (row wise).
###Code
np.vstack([p, 2*p])
###Output
_____no_output_____
###Markdown
Use `hstack` to stack arrays in sequence horizontally (column wise).
###Code
np.hstack([p, 2*p])
###Output
_____no_output_____
###Markdown
Operations Use `+`, `-`, `*`, `/` and `**` to perform element wise addition, subtraction, multiplication, division and power.
###Code
print(x + y) # elementwise addition [1 2 3] + [4 5 6] = [5 7 9]
print(x - y) # elementwise subtraction [1 2 3] - [4 5 6] = [-3 -3 -3]
print(x * y) # elementwise multiplication [1 2 3] * [4 5 6] = [4 10 18]
print(x / y) # elementwise division [1 2 3] / [4 5 6] = [0.25 0.4 0.5]
print(x**2) # elementwise power [1 2 3] ^2 = [1 4 9]
###Output
[1 4 9]
###Markdown
**Dot Product:** $ \begin{bmatrix}x_1 & x_2 & x_3\end{bmatrix}\cdot\begin{bmatrix}y_1 \\ y_2 \\ y_3\end{bmatrix}= x_1 y_1 + x_2 y_2 + x_3 y_3$
###Code
x.dot(y) # dot product 1*4 + 2*5 + 3*6
z = np.array([y, y**2])
print(len(z)) # number of rows of array
###Output
2
###Markdown
Let's look at transposing arrays. Transposing permutes the dimensions of the array.
###Code
z = np.array([y, y**2])
z
###Output
_____no_output_____
###Markdown
The shape of array `z` is `(2,3)` before transposing.
###Code
z.shape
###Output
_____no_output_____
###Markdown
Use `.T` to get the transpose.
###Code
z.T
###Output
_____no_output_____
###Markdown
The number of rows has swapped with the number of columns.
###Code
z.T.shape
###Output
_____no_output_____
###Markdown
Use `.dtype` to see the data type of the elements in the array.
###Code
z.dtype
###Output
_____no_output_____
###Markdown
Use `.astype` to cast to a specific type.
###Code
z = z.astype('f')
z.dtype
###Output
_____no_output_____
###Markdown
Math Functions Numpy has many built in math functions that can be performed on arrays.
###Code
a = np.array([-4, -2, 1, 3, 5])
a.sum()
a.max()
a.min()
a.mean()
a.std()
###Output
_____no_output_____
###Markdown
`argmax` and `argmin` return the index of the maximum and minimum values in the array.
###Code
a.argmax()
a.argmin()
###Output
_____no_output_____
###Markdown
Indexing / Slicing
###Code
s = np.arange(13)**2
s
###Output
_____no_output_____
###Markdown
Use bracket notation to get the value at a specific index. Remember that indexing starts at 0.
###Code
s[0], s[4], s[-1]
###Output
_____no_output_____
###Markdown
Use `:` to indicate a range. `array[start:stop]`Leaving `start` or `stop` empty will default to the beginning/end of the array.
###Code
s[1:5]
###Output
_____no_output_____
###Markdown
Use negatives to count from the back.
###Code
s[-4:]
###Output
_____no_output_____
###Markdown
A second `:` can be used to indicate step-size. `array[start:stop:stepsize]`Here we are starting 5th element from the end, and counting backwards by 2 until the beginning of the array is reached.
###Code
s[-5::-2]
###Output
_____no_output_____
###Markdown
Let's look at a multidimensional array.
###Code
r = np.arange(36)
r.resize((6, 6))
r
###Output
_____no_output_____
###Markdown
Use bracket notation to slice: `array[row, column]`
###Code
r[2, 2]
###Output
_____no_output_____
###Markdown
And use : to select a range of rows or columns
###Code
r[3, 3:6]
###Output
_____no_output_____
###Markdown
Here we are selecting all the rows up to (and not including) row 2, and all the columns up to (and not including) the last column.
###Code
r[:2, :-1]
###Output
_____no_output_____
###Markdown
This is a slice of the last row, and only every other element.
###Code
r[-1, ::2]
###Output
_____no_output_____
###Markdown
We can also perform conditional indexing. Here we are selecting values from the array that are greater than 30. (Also see `np.where`)
###Code
r[r > 30]
###Output
_____no_output_____
###Markdown
Here we are assigning all values in the array that are greater than 30 to the value of 30.
###Code
r[r > 30] = 30
r
###Output
_____no_output_____
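###Markdown
The same capping can also be expressed with `np.where` (an extra example, not in the original notebook), which returns a new array instead of modifying `r` in place; by this point `r` is already capped, so the call below is purely illustrative.
###Code
np.where(r > 30, 30, r) # cap values at 30, returning a new array
###Output
_____no_output_____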
###Markdown
Copying Data Be careful with copying and modifying arrays in NumPy!`r2` is a slice of `r`
###Code
r2 = r[:3,:3]
r2
r
###Output
_____no_output_____
###Markdown
Set this slice's values to zero ([:] selects the entire array)
###Code
r2[:] = 0
r2
###Output
_____no_output_____
###Markdown
`r` has also been changed!
###Code
r
###Output
_____no_output_____
###Markdown
To avoid this, use `r.copy` to create a copy that will not affect the original array
###Code
r_copy = r.copy()
r_copy
###Output
_____no_output_____
###Markdown
Now when r_copy is modified, r will not be changed.
###Code
r_copy[:] = 10
print(r_copy, '\n')
print(r)
###Output
[[10 10 10 10 10 10]
[10 10 10 10 10 10]
[10 10 10 10 10 10]
[10 10 10 10 10 10]
[10 10 10 10 10 10]
[10 10 10 10 10 10]]
[[ 0 0 0 3 4 5]
[ 0 0 0 9 10 11]
[ 0 0 0 15 16 17]
[18 19 20 21 22 23]
[24 25 26 27 28 29]
[30 30 30 30 30 30]]
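###Markdown
A quick way to check for this behaviour (an extra example, not part of the original notebook) is `np.shares_memory`, which reports whether two arrays are backed by the same underlying data.
###Code
print(np.shares_memory(r, r2)) # True: r2 is a view into r
print(np.shares_memory(r, r_copy)) # False: r_copy owns its own data
###Output
_____no_output_____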
###Markdown
Iterating Over Arrays Let's create a new 4 by 3 array of random numbers 0-9.
###Code
test = np.random.randint(0, 10, (4,3))
test
###Output
_____no_output_____
###Markdown
Iterate by row:
###Code
for row in test:
print(row)
###Output
[9 0 6]
[2 0 9]
[7 5 8]
[3 0 3]
###Markdown
Iterate by index:
###Code
for i in range(len(test)):
print(test[i])
###Output
[9 0 6]
[2 0 9]
[7 5 8]
[3 0 3]
###Markdown
Iterate by row and index:
###Code
for i, row in enumerate(test):
print('row', i, 'is', row)
###Output
row 0 is [9 0 6]
row 1 is [2 0 9]
row 2 is [7 5 8]
row 3 is [3 0 3]
###Markdown
Use `zip` to iterate over multiple iterables.
###Code
test2 = test**2
test2
for i, j in zip(test, test2):
print(i,'+',j,'=',i+j)
###Output
[9 0 6] + [81 0 36] = [90 0 42]
[2 0 9] + [ 4 0 81] = [ 6 0 90]
[7 5 8] + [49 25 64] = [56 30 72]
[3 0 3] + [9 0 9] = [12 0 12]
|
2020/passport_processing.ipynb | ###Markdown
Day 4: Passport Processing https://adventofcode.com/2020/day/4 Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of `key:value` pairs separated by spaces or newlines. Passports are separated by blank lines. The expected fields are as follows:- `byr` (Birth Year)- `iyr` (Issue Year)- `eyr` (Expiration Year)- `hgt` (Height)- `hcl` (Hair Color)- `ecl` (Eye Color)- `pid` (Passport ID)- `cid` (Country ID) Here is an example batch file containing four passports:
###Code
test_batch_file = """
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
"""
###Output
_____no_output_____
###Markdown
Count the number of **valid** passports - those that have all required fields. Treat `cid` as optional. **In your batch file, how many passports are valid?** According to the above rules, your improved system would report `2` valid passports.
###Code
import doctest
import re
def passports(batch_file):
passports = re.split(r'\n{2,}', batch_file)
for passport in passports:
pairs = re.split(r'\s+', passport)
fields = dict(f.split(':') for f in pairs if f)
yield fields
def has_required_fields(passport):
required_fields = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
return set(passport.keys()) >= required_fields
def count_valid_passports(batch_file):
"""
>>> count_valid_passports(test_batch_file)
2
"""
return sum(has_required_fields(passport) for passport
in passports(batch_file))
doctest.testmod()
###Output
_____no_output_____
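###Markdown
As an extra illustration (not part of the puzzle itself), the `passports` generator yields one dictionary of `key:value` fields per passport, which is what `has_required_fields` then checks.
###Code
first_passport = next(passports(test_batch_file))
print(first_passport)
print(has_required_fields(first_passport)) # expected True: all required fields are present
###Output
_____no_output_____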
###Markdown
Part Two You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:- `byr` (Birth Year) - four digits; at least `1920` and at most `2002`.- `iyr` (Issue Year) - four digits; at least `2010` and at most `2020`.- `eyr` (Expiration Year) - four digits; at least `2020` and at most `2030`.- `hgt` (Height) - a number followed by either `cm` or `in`: - If `cm`, the number must be at least `150` and at most `193`. - If `in`, the number must be at least `59` and at most `76`.- `hcl` (Hair Color) - a `#` followed by exactly six characters `0`-`9` or `a`-`f`.- `ecl` (Eye Color) - exactly one of: `amb` `blu` `brn` `gry` `grn` `hzl` `oth`.- `pid` (Passport ID) - a nine-digit number, including leading zeroes.- `cid` (Country ID) - ignored, missing or not. Count the number of **valid** passports - those that have all required fields **and valid values**. Continue to treat `cid` as optional. **In your batch file, how many passports are valid?**
###Code
def valid_as_year(text, at_least, at_most):
if re.match(r'\d{4}$', text):
return at_least <= int(text) <= at_most
return False
def valid_as_height(text):
match = re.match(r'(\d+)(cm|in)$', text)
if match:
height, unit = int(match.group(1)), match.group(2)
if unit == 'cm':
return 150 <= height <= 193
elif unit == 'in':
return 59 <= height <= 76
return False
def valid_hair_color(text):
return re.match(r'#[0-9a-f]{6}$', text)
def valid_eye_color(text):
eye_colors = {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'}
return text in eye_colors
def valid_passport_id(text):
return re.match(r'\d{9}$', text)
def part_two(batch_file):
"""
>>> part_two(test_batch_file)
2
"""
valid_passports = 0
for passport in passports(batch_file):
if not has_required_fields(passport): continue
if not valid_as_year(passport['byr'], 1920, 2002): continue
if not valid_as_year(passport['iyr'], 2010, 2020): continue
if not valid_as_year(passport['eyr'], 2020, 2030): continue
if not valid_as_height(passport['hgt']): continue
if not valid_hair_color(passport['hcl']): continue
if not valid_eye_color(passport['ecl']): continue
if not valid_passport_id(passport['pid']): continue
valid_passports += 1
return valid_passports
doctest.testmod()
###Output
_____no_output_____
###Markdown
Running on real input 1. Use the file uploader to upload a file 2. Re-run the last cell to use the input
###Code
from IPython.display import display
import ipywidgets as widgets
uploader = widgets.FileUpload(accept='.txt', multiple=False)
display(uploader)
batch_file = list(uploader.value.values())[0]['content'].decode('utf-8')
print('[Part 1] valid passports:', count_valid_passports(batch_file))
print('[Part 2] valid passports:', part_two(batch_file))
###Output
_____no_output_____ |
Praticas-em-Python1/4.3.numpy.ipynb | ###Markdown
Programming Logic: numpy
###Code
import numpy as np
# create a one-dimensional array
mt = np.array([12,34,26,18,10])
print(mt)
print(type(mt))
# create the array with a specific type
# create the array as 64-bit float
mtfloat = np.array([1, 2, 3], dtype = np.float64)
print(mtfloat)
print(type(mtfloat))
mtint = np.array([1, 2, 3], dtype = np.int32)
print(mtint)
print(type(mtint))
# change the array's type
# We can convert array data types
mtnew = np.array([1.4, 3.6, -5.1, 9.42, 4.999999])
print(mtnew)
# when converting from float to int the values are truncated
mtnewint = mtnew.astype(np.int32)
print(mtnewint)
# we can do the reverse as well.
mt5 = np.array([1, 2, 3, 4])
print(mt5)
mt6 = mt5.astype(float)
print(mt6)
# more than one dimension
# create a two-dimensional array
mt7 = np.array([[7,2,23],[12,27,4],[5,34,23]])
print(mt7)
# create empty typed arrays
# empty means they are not initialized, not that they are empty
vazio = np.empty([3,2], dtype = int)
print(vazio)
print("-------")
# create a 4x3 array filled with zeros
zeros = np.zeros([4,3])
print(zeros)
print("-------")
# with values equal to one
um = np.ones([5,7])
print(um)
print("-------")
# create a square matrix with ones on the main diagonal and zeros elsewhere
diagonal = np.eye(5)
print(diagonal)
# random values between zero and one
ale = np.random.random((5))
print(ale)
print("-------")
# random values from a normal distribution, including negatives
ale2= np.random.randn((5))
print(ale2)
print("-------")
# random 3 x 4 values
ale3 = (10*np.random.random((3,4)))
print(ale3)
# another way to generate random numbers
# using a seed
gnr = np.random.default_rng(1)
ale5 = gnr.random(3)
print (ale5)
# generate integers
ale6 = gnr.integers(10, size=(3, 4))
print(ale6)
# unique removes repeated values
j = np.array([11, 12, 13, 14, 15, 16, 17, 12, 13, 11, 18, 19, 20])
j = np.unique(j)
print(j)
# specific functions
# create the two-dimensional array k
k = np.array([[17,22,43],[27,25,14],[15,24,32]])
# Show the array k
print(k)
# Show a specific element of the array k
print(k[0][1])
# Show the size of the dimensions of the array k
print(k.shape)
# Math functions
# Show the largest value of the array k
print(k.max())
# Show the smallest value of the array k
print(k.min())
# Show the sum of the values of the array k
print((k.sum()))
# Show the mean of the values of the array k
print(k.mean())
# Show the standard deviation of the values of the array k
print(k.std())
# universal functions, applied to every element
# Show the square root of every element
k1 = np.array([1, 4, 9, 16, 25, 36])
print(np.sqrt(k1))
# Show the exponential of every element
print(np.exp(k1))
# extracting elements
m = np.array([1, 2, 3, 4, 5, 6])
# Show the element in position 2
print(m[1])
print("-------")
# Show the array created from position 0, two elements
print(m[0:2])
print("-------")
# Show the array created from the 2nd position
# through the rest of the array
print(m[1:])
print("-------")
# Show the array created from the third-to-last
# position to the end
print(m[-3:])
# extracting rows and columns
l = np.array([[4, 5], [6, 1], [7, 4]])
print(l)
print("-------")
# first row, all columns
l_linha_1 = l[0, :]
print(l_linha_1)
print("-------")
# second row
l_linha_2 = l[1, :]
print(l_linha_2)
print("-------")
# third row
l_linha_3 = l[2, :]
print(l_linha_3)
print("-------")
# all rows, first column
l_coluna_1 = l[:, 0]
print(l_coluna_1)
print("-------")
# all rows, second column
l_coluna_2 = l[:, 1]
print(l_coluna_2)
# matrix addition and multiplication
n = np.array([[1, 2], [3, 4]])
o = np.array([[1, 1], [1, 1]])
res1 = n+o
print(res1)
print("-------")
res2 = n*o
print(res2)
print("-------")
p = np.array([[1, 2], [3, 4], [5, 6]])
q = np.array([[2, 1]])
print(p+q)
# transposition: rearrange a set of 15 elements, from 0 to 14,
# into 3 rows and 5 columns.
f = np.arange(15).reshape((3, 5))
# show the matrix transposed between rows and columns
print(f)
print("-------")
s = f.T
print(s)
# another way to do it, same result
r = np.arange(15).reshape((3, 5))
print(r)
print("-------")
# rearranges a set of 15 elements
# show the matrix transposed between rows and columns
s = r.transpose((1,0))
print(s)
# logical expressions
# using where
# creating an array with random positive and negative values
v = np.random.randn(4, 4)
print(v)
# creating an array of boolean values based on the array v
x = (v > 0)
print(x)
# creating an array with values -1 and 1 based on the values of the array x
z = np.where(x > 0, 1, -1)
print(z)
###Output
[[-4.96563487e-01 -1.51837506e-01 -7.08996776e-01 9.57646480e-01]
[-7.96420579e-01 1.24283349e+00 -2.47639930e-01 6.10613056e-01]
[-1.00554832e+00 -5.54180920e-01 -1.19975797e-04 3.66623806e-01]
[-7.46948885e-01 -2.95980339e+00 -1.09055013e+00 3.64864672e-02]]
[[False False False True]
[False True False True]
[False False False True]
[False False False True]]
[[-1 -1 -1 1]
[-1 1 -1 1]
[-1 -1 -1 1]
[-1 -1 -1 1]]
|
1-Introduction/01-defining-data-science/solution/Assignment_1_related_concepts_of_Machine_learning.ipynb | ###Markdown
Challenge: Analyzing Text about Machine LearningIn this example, let's do a simple exercise that covers all steps of a traditional data science process. You do not have to write any code, you can just click on the cells below to execute them and observe the result. As a challenge, you are encouraged to try this code out with different data. GoalIn this lesson, we have been discussing different concepts related to Machine Learning. Let's try to discover more related concepts by doing some **text mining**. We will start with a text about Machine Learning, extract keywords from it, and then try to visualize the result.As a text, I will use the page on Machine learning from Wikipedia:
###Code
url = 'https://en.wikipedia.org/wiki/Machine_learning'
###Output
_____no_output_____
###Markdown
Step 1: Getting the DataFirst step in every data science process is getting the data. We will use `requests` library to do that:
###Code
import requests
text = requests.get(url).content.decode('utf-8')
print(text[:1000])
###Output
<!DOCTYPE html>
<html class="client-nojs" lang="en" dir="ltr">
<head>
<meta charset="UTF-8"/>
<title>Machine learning - Wikipedia</title>
<script>document.documentElement.className="client-js";RLCONF={"wgBreakFrames":false,"wgSeparatorTransformTable":["",""],"wgDigitTransformTable":["",""],"wgDefaultDateFormat":"dmy","wgMonthNames":["","January","February","March","April","May","June","July","August","September","October","November","December"],"wgRequestId":"7847ff6f-bf40-495a-be9c-eda08a74d936","wgCSPNonce":false,"wgCanonicalNamespace":"","wgCanonicalSpecialPageName":false,"wgNamespaceNumber":0,"wgPageName":"Machine_learning","wgTitle":"Machine learning","wgCurRevisionId":1070537003,"wgRevisionId":1070537003,"wgArticleId":233488,"wgIsArticle":true,"wgIsRedirect":false,"wgAction":"view","wgUserName":null,"wgUserGroups":["*"],"wgCategories":["CS1 errors: missing periodical","CS1 maint: uses authors parameter","CS1 maint: url-status","Articles with short description","Short description
###Markdown
Step 2: Transforming the DataThe next step is to convert the data into the form suitable for processing. In our case, we have downloaded HTML source code from the page, and we need to convert it into plain text.There are many ways this can be done. We will use the simplest built-in [HTMLParser](https://docs.python.org/3/library/html.parser.html) object from Python. We need to subclass the `HTMLParser` class and define the code that will collect all text inside HTML tags, except `` and `` tags.
###Code
from html.parser import HTMLParser
import re
import string
class MyHTMLParser(HTMLParser):
script = False
res = ""
def handle_starttag(self, tag, attrs):
if tag.lower() in ["script","style"]:
self.script = True
def handle_endtag(self, tag):
if tag.lower() in ["script","style"]:
self.script = False
def handle_data(self, data):
if str.strip(data)=="" or self.script:
return
self.res += ' '+data.replace('[ edit ]','')
parser = MyHTMLParser()
parser.feed(text)
text = parser.res
text = re.sub(r'\d+', '', text)
text = "".join([char for char in text if char not in string.punctuation])
print(text[:1000])
###Output
Machine learning Wikipedia Machine learning From Wikipedia the free encyclopedia Jump to navigation Jump to search Study of algorithms that improve automatically through experience For the journal see Machine Learning journal Statistical learning redirects here For statistical learning in linguistics see statistical learning in language acquisition Part of a series on Machine learning and data mining Problems Classification Clustering Regression Anomaly detection Data Cleaning AutoML Association rules Reinforcement learning Structured prediction Feature engineering Feature learning Online learning Semisupervised learning Unsupervised learning Learning to rank Grammar induction Supervised learning classification • regression Decision trees Ensembles Bagging Boosting Random forest k NN Linear regression Naive Bayes Artificial neural networks Logistic regression Perceptron Relevance vector machine RVM Support vector machine SVM Clustering BIRCH CURE Hierarchical k means Expecta
###Markdown
Step 3: Getting InsightsThe most important step is to turn our data into some form from which we can draw insights. In our case, we want to extract keywords from the text, and see which keywords are more meaningful.We will use Python library called [RAKE](https://github.com/aneesha/RAKE) for keyword extraction. First, let's install this library in case it is not present:
###Code
import sys
!{sys.executable} -m pip install nlp_rake
###Output
Requirement already satisfied: nlp_rake in /usr/local/lib/python3.7/dist-packages (0.0.2)
Requirement already satisfied: regex>=2018.6.6 in /usr/local/lib/python3.7/dist-packages (from nlp_rake) (2019.12.20)
Requirement already satisfied: pyrsistent>=0.14.2 in /usr/local/lib/python3.7/dist-packages (from nlp_rake) (0.18.1)
Requirement already satisfied: langdetect>=1.0.8 in /usr/local/lib/python3.7/dist-packages (from nlp_rake) (1.0.9)
Requirement already satisfied: numpy>=1.14.4 in /usr/local/lib/python3.7/dist-packages (from nlp_rake) (1.19.5)
Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from langdetect>=1.0.8->nlp_rake) (1.15.0)
###Markdown
The main functionality is available from `Rake` object, which we can customize using some parameters. In our case, we will set the minimum length of a keyword to 5 characters, minimum frequency of a keyword in the document to 3, and maximum number of words in a keyword - to 2. Feel free to play around with other values and observe the result.
###Code
import nlp_rake
extractor = nlp_rake.Rake(max_words=2,min_freq=3,min_chars=5)
res = extractor.apply(text)
res
###Output
_____no_output_____
###Markdown
We obtained a list of terms together with their associated degree of importance. As you can see, the most relevant disciplines, such as machine learning and big data, are present in the list at top positions. Step 4: Visualizing the Result People can interpret data best in visual form, so it often makes sense to visualize the data in order to draw some insights. We can use the `matplotlib` library in Python to plot a simple distribution of the keywords with their relevance:
###Code
import matplotlib.pyplot as plt
def plot(pair_list):
k,v = zip(*pair_list)
plt.bar(range(len(k)),v)
plt.xticks(range(len(k)),k,rotation='vertical')
plt.show()
plot(res)
###Output
_____no_output_____
###Markdown
There is, however, even better way to visualize word frequencies - using **Word Cloud**. We will need to install another library to plot the word cloud from our keyword list.
###Code
!{sys.executable} -m pip install wordcloud
###Output
Requirement already satisfied: wordcloud in /usr/local/lib/python3.7/dist-packages (1.5.0)
Requirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from wordcloud) (7.1.2)
Requirement already satisfied: numpy>=1.6.1 in /usr/local/lib/python3.7/dist-packages (from wordcloud) (1.19.5)
###Markdown
The `WordCloud` object takes in either the original text or a pre-computed list of words with their frequencies, and returns an image, which can then be displayed using `matplotlib`:
###Code
from wordcloud import WordCloud
import matplotlib.pyplot as plt
wc = WordCloud(background_color='white',width=800,height=600)
plt.figure(figsize=(15,7))
plt.imshow(wc.generate_from_frequencies({ k:v for k,v in res }))
###Output
_____no_output_____
###Markdown
We can also pass in the original text to `WordCloud` - let's see if we are able to get similar result:
###Code
plt.figure(figsize=(15,7))
plt.imshow(wc.generate(text))
wc.generate(text).to_file('bd_wordcloud.png')
###Output
_____no_output_____ |
Plagiarism Detection Project/2_Plagiarism_Feature_Engineering.ipynb | ###Markdown
Plagiarism Detection, Feature EngineeringIn this project, you will be tasked with building a plagiarism detector that examines an answer text file and performs binary classification; labeling that file as either plagiarized or not, depending on how similar that text file is to a provided, source text. Your first task will be to create some features that can then be used to train a classification model. This task will be broken down into a few discrete steps:* Clean and pre-process the data.* Define features for comparing the similarity of an answer text and a source text, and extract similarity features.* Select "good" features, by analyzing the correlations between different features.* Create train/test `.csv` files that hold the relevant features and class labels for train/test data points.In the _next_ notebook, Notebook 3, you'll use the features and `.csv` files you create in _this_ notebook to train a binary classification model in a SageMaker notebook instance.You'll be defining a few different similarity features, as outlined in [this paper](https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c412841_developing-a-corpus-of-plagiarised-short-answers/developing-a-corpus-of-plagiarised-short-answers.pdf), which should help you build a robust plagiarism detector!To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook.> All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**.It will be up to you to decide on the features to include in your final training and test data.--- Read in the DataThe cell below will download the necessary, project data and extract the files into the folder `data/`.This data is a slightly modified version of a dataset created by Paul Clough (Information Studies) and Mark Stevenson (Computer Science), at the University of Sheffield. You can read all about the data collection and corpus, at [their university webpage](https://ir.shef.ac.uk/cloughie/resources/plagiarism_corpus.html). > **Citation for data**: Clough, P. and Stevenson, M. Developing A Corpus of Plagiarised Short Answers, Language Resources and Evaluation: Special Issue on Plagiarism and Authorship Analysis, In Press. [Download]
###Code
# NOTE:
# you only need to run this cell if you have not yet downloaded the data
# otherwise you may skip this cell or comment it out
# !wget https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c4147f9_data/data.zip
# !unzip data
# import libraries
import pandas as pd
import numpy as np
import os
###Output
_____no_output_____
###Markdown
This plagiarism dataset is made of multiple text files; each of these files has characteristics that are is summarized in a `.csv` file named `file_information.csv`, which we can read in using `pandas`.
###Code
csv_file = 'data/file_information.csv'
plagiarism_df = pd.read_csv(csv_file)
# print out the first few rows of data info
plagiarism_df.head()
###Output
_____no_output_____
###Markdown
Types of PlagiarismEach text file is associated with one **Task** (task A-E) and one **Category** of plagiarism, which you can see in the above DataFrame. Tasks, A-EEach text file contains an answer to one short question; these questions are labeled as tasks A-E. For example, Task A asks the question: "What is inheritance in object oriented programming?" Categories of plagiarism Each text file has an associated plagiarism label/category:**1. Plagiarized categories: `cut`, `light`, and `heavy`.*** These categories represent different levels of plagiarized answer texts. `cut` answers copy directly from a source text, `light` answers are based on the source text but include some light rephrasing, and `heavy` answers are based on the source text, but *heavily* rephrased (and will likely be the most challenging kind of plagiarism to detect). **2. Non-plagiarized category: `non`.** * `non` indicates that an answer is not plagiarized; the Wikipedia source text is not used to create this answer. **3. Special, source text category: `orig`.*** This is a specific category for the original, Wikipedia source text. We will use these files only for comparison purposes. --- Pre-Process the DataIn the next few cells, you'll be tasked with creating a new DataFrame of desired information about all of the files in the `data/` directory. This will prepare the data for feature extraction and for training a binary, plagiarism classifier. EXERCISE: Convert categorical to numerical dataYou'll notice that the `Category` column in the data, contains string or categorical values, and to prepare these for feature extraction, we'll want to convert these into numerical values. Additionally, our goal is to create a binary classifier and so we'll need a binary class label that indicates whether an answer text is plagiarized (1) or not (0). Complete the below function `numerical_dataframe` that reads in a `file_information.csv` file by name, and returns a *new* DataFrame with a numerical `Category` column and a new `Class` column that labels each answer as plagiarized or not. Your function should return a new DataFrame with the following properties:* 4 columns: `File`, `Task`, `Category`, `Class`. The `File` and `Task` columns can remain unchanged from the original `.csv` file.* Convert all `Category` labels to numerical labels according to the following rules (a higher value indicates a higher degree of plagiarism): * 0 = `non` * 1 = `heavy` * 2 = `light` * 3 = `cut` * -1 = `orig`, this is a special value that indicates an original file.* For the new `Class` column * Any answer text that is not plagiarized (`non`) should have the class label `0`. * Any plagiarized answer texts should have the class label `1`. * And any `orig` texts will have a special label `-1`. Expected outputAfter running your function, you should get a DataFrame with rows that looks like the following: ``` File Task Category Class0 g0pA_taska.txt a 0 01 g0pA_taskb.txt b 3 12 g0pA_taskc.txt c 2 13 g0pA_taskd.txt d 1 14 g0pA_taske.txt e 0 0......99 orig_taske.txt e -1 -1```
###Code
# Read in a csv file and return a transformed dataframe
def numerical_dataframe(csv_file='data/file_information.csv'):
'''Reads in a csv file which is assumed to have `File`, `Category` and `Task` columns.
This function does two things:
1) converts `Category` column values to numerical values
2) Adds a new, numerical `Class` label column.
The `Class` column will label plagiarized answers as 1 and non-plagiarized as 0.
Source texts have a special label, -1.
:param csv_file: The directory for the file_information.csv file
:return: A dataframe with numerical categories and a new `Class` label column'''
# read input csv and create 'Class' column
plagiarism_df = pd.read_csv(csv_file)
plagiarism_df['Class'] = plagiarism_df['Category']
# create mappings for category labels
category_map = {'orig':-1, 'non':0, 'heavy':1, 'light':2, 'cut':3}
class_map = {'orig':-1, 'non':0, 'heavy':1, 'light':1, 'cut':1}
# convert column values to numerical mappings
plagiarism_df = plagiarism_df.replace({'Category':category_map})
plagiarism_df = plagiarism_df.replace({'Class':class_map})
return plagiarism_df
###Output
_____no_output_____
###Markdown
Test cellsBelow are a couple of test cells. The first is an informal test where you can check that your code is working as expected by calling your function and printing out the returned result.The **second** cell below is a more rigorous test cell. The goal of a cell like this is to ensure that your code is working as expected, and to form any variables that might be used in _later_ tests/code, in this case, the data frame, `transformed_df`.> The cells in this notebook should be run in chronological order (the order they appear in the notebook). This is especially important for test cells.Often, later cells rely on the functions, imports, or variables defined in earlier cells. For example, some tests rely on previous tests to work.These tests do not test all cases, but they are a great way to check that you are on the right track!
###Code
# informal testing, print out the results of a called function
# create new `transformed_df`
transformed_df = numerical_dataframe(csv_file ='data/file_information.csv')
# check work
# check that all categories of plagiarism have a class label = 1
transformed_df.head(10)
# test cell that creates `transformed_df`, if tests are passed
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# importing tests
import problem_unittests as tests
# test numerical_dataframe function
tests.test_numerical_df(numerical_dataframe)
# if above test is passed, create NEW `transformed_df`
transformed_df = numerical_dataframe(csv_file ='data/file_information.csv')
# check work
print('\nExample data: ')
transformed_df.head()
###Output
Tests Passed!
Example data:
###Markdown
Text Processing & Splitting DataRecall that the goal of this project is to build a plagiarism classifier. At it's heart, this task is a comparison text; one that looks at a given answer and a source text, compares them and predicts whether an answer has plagiarized from the source. To effectively do this comparison, and train a classifier we'll need to do a few more things: pre-process all of our text data and prepare the text files (in this case, the 95 answer files and 5 original source files) to be easily compared, and split our data into a `train` and `test` set that can be used to train a classifier and evaluate it, respectively. To this end, you've been provided code that adds additional information to your `transformed_df` from above. The next two cells need not be changed; they add two additional columns to the `transformed_df`:1. A `Text` column; this holds all the lowercase text for a `File`, with extraneous punctuation removed.2. A `Datatype` column; this is a string value `train`, `test`, or `orig` that labels a data point as part of our train or test setThe details of how these additional columns are created can be found in the `helpers.py` file in the project directory. You're encouraged to read through that file to see exactly how text is processed and how data is split.Run the cells below to get a `complete_df` that has all the information you need to proceed with plagiarism detection and feature engineering.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import helpers
# create a text column
text_df = helpers.create_text_column(transformed_df)
text_df.head()
# after running the cell above
# check out the processed text for a single file, by row index
row_idx = 0 # feel free to change this index
sample_text = text_df.iloc[0]['Text']
print('Sample processed text:\n\n', sample_text)
###Output
Sample processed text:
inheritance is a basic concept of object oriented programming where the basic idea is to create new classes that add extra detail to existing classes this is done by allowing the new classes to reuse the methods and variables of the existing classes and new methods and classes are added to specialise the new class inheritance models the is kind of relationship between entities or objects for example postgraduates and undergraduates are both kinds of student this kind of relationship can be visualised as a tree structure where student would be the more general root node and both postgraduate and undergraduate would be more specialised extensions of the student node or the child nodes in this relationship student would be known as the superclass or parent class whereas postgraduate would be known as the subclass or child class because the postgraduate class extends the student class inheritance can occur on several layers where if visualised would display a larger tree structure for example we could further extend the postgraduate node by adding two extra extended classes to it called msc student and phd student as both these types of student are kinds of postgraduate student this would mean that both the msc student and phd student classes would inherit methods and variables from both the postgraduate and student classes
###Markdown
Split data into training and test setsThe next cell will add a `Datatype` column to a given DataFrame to indicate if the record is: * `train` - Training data, for model training.* `test` - Testing data, for model evaluation.* `orig` - The task's original answer from wikipedia. Stratified samplingThe given code uses a helper function which you can view in the `helpers.py` file in the main project directory. This implements [stratified random sampling](https://en.wikipedia.org/wiki/Stratified_sampling) to randomly split data by task & plagiarism amount. Stratified sampling ensures that we get training and test data that is fairly evenly distributed across task & plagiarism combinations. Approximately 26% of the data is held out for testing and 74% of the data is used for training.The function **train_test_dataframe** takes in a DataFrame that it assumes has `Task` and `Category` columns, and, returns a modified frame that indicates which `Datatype` (train, test, or orig) a file falls into. This sampling will change slightly based on a passed in *random_seed*. Due to a small sample size, this stratified random sampling will provide more stable results for a binary plagiarism classifier. Stability here is smaller *variance* in the accuracy of classifier, given a random seed.
###Code
random_seed = 1 # can change; set for reproducibility
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import helpers
# create new df with Datatype (train, test, orig) column
# pass in `text_df` from above to create a complete dataframe, with all the information you need
complete_df = helpers.train_test_dataframe(text_df, random_seed=random_seed)
# check results
complete_df.head(10)
###Output
_____no_output_____
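###Markdown
As an optional sanity check (an addition, not part of the project instructions), counting the `Datatype` values should show roughly the 74% / 26% train/test split described above, plus the 5 original source files.
###Code
# how many files ended up in each datatype, and per class
print(complete_df['Datatype'].value_counts())
print(complete_df.groupby(['Datatype', 'Class']).size())
###Output
_____no_output_____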
###Markdown
Determining PlagiarismNow that you've prepared this data and created a `complete_df` of information, including the text and class associated with each file, you can move on to the task of extracting similarity features that will be useful for plagiarism classification. > Note: The following code exercises, assume that the `complete_df` as it exists now, will **not** have its existing columns modified. The `complete_df` should always include the columns: `['File', 'Task', 'Category', 'Class', 'Text', 'Datatype']`. You can add additional columns, and you can create any new DataFrames you need by copying the parts of the `complete_df` as long as you do not modify the existing values, directly.--- Similarity Features One of the ways we might go about detecting plagiarism, is by computing **similarity features** that measure how similar a given answer text is as compared to the original wikipedia source text (for a specific task, a-e). The similarity features you will use are informed by [this paper on plagiarism detection](https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c412841_developing-a-corpus-of-plagiarised-short-answers/developing-a-corpus-of-plagiarised-short-answers.pdf). > In this paper, researchers created features called **containment** and **longest common subsequence**. Using these features as input, you will train a model to distinguish between plagiarized and not-plagiarized text files. Feature EngineeringLet's talk a bit more about the features we want to include in a plagiarism detection model and how to calculate such features. In the following explanations, I'll refer to a submitted text file as a **Student Answer Text (A)** and the original, wikipedia source file (that we want to compare that answer to) as the **Wikipedia Source Text (S)**. ContainmentYour first task will be to create **containment features**. To understand containment, let's first revisit a definition of [n-grams](https://en.wikipedia.org/wiki/N-gram). An *n-gram* is a sequential word grouping. For example, in a line like "bayes rule gives us a way to combine prior knowledge with new information," a 1-gram is just one word, like "bayes." A 2-gram might be "bayes rule" and a 3-gram might be "combine prior knowledge."> Containment is defined as the **intersection** of the n-gram word count of the Wikipedia Source Text (S) with the n-gram word count of the Student Answer Text (S) *divided* by the n-gram word count of the Student Answer Text.$$ \frac{\sum{count(\text{ngram}_{A}) \cap count(\text{ngram}_{S})}}{\sum{count(\text{ngram}_{A})}} $$If the two texts have no n-grams in common, the containment will be 0, but if _all_ their n-grams intersect then the containment will be 1. Intuitively, you can see how having longer n-gram's in common, might be an indication of cut-and-paste plagiarism. In this project, it will be up to you to decide on the appropriate `n` or several `n`'s to use in your final model. EXERCISE: Create containment featuresGiven the `complete_df` that you've created, you should have all the information you need to compare any Student Answer Text (A) with its appropriate Wikipedia Source Text (S). 
An answer for task A should be compared to the source text for task A, just as answers to tasks B, C, D, and E should be compared to the corresponding original source text.In this exercise, you'll complete the function, `calculate_containment` which calculates containment based upon the following parameters:* A given DataFrame, `df` (which is assumed to be the `complete_df` from above)* An `answer_filename`, such as 'g0pB_taskd.txt' * An n-gram length, `n` Containment calculationThe general steps to complete this function are as follows:1. From *all* of the text files in a given `df`, create an array of n-gram counts; it is suggested that you use a [CountVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) for this purpose.2. Get the processed answer and source texts for the given `answer_filename`.3. Calculate the containment between an answer and source text according to the following equation. >$$ \frac{\sum{count(\text{ngram}_{A}) \cap count(\text{ngram}_{S})}}{\sum{count(\text{ngram}_{A})}} $$ 4. Return that containment value.You are encouraged to write any helper functions that you need to complete the function below.
###Code
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
# Calculate the ngram containment for one answer file/source file pair in a df
def calculate_containment(df, n, answer_filename):
'''Calculates the containment between a given answer text and its associated source text.
This function creates a count of ngrams (of a size, n) for each text file in our data.
Then calculates the containment by finding the ngram count for a given answer text,
and its associated source text, and calculating the normalized intersection of those counts.
:param df: A dataframe with columns,
'File', 'Task', 'Category', 'Class', 'Text', and 'Datatype'
:param n: An integer that defines the ngram size
:param answer_filename: A filename for an answer text in the df, ex. 'g0pB_taskd.txt'
:return: A single containment value that represents the similarity
between an answer text and its source text.
'''
# Get task number of answer_file_name
a_task = df[df['File'] == answer_filename]['Task'].values[0]
# Get answer text
a_text = df[df['File'] == answer_filename]['Text'].values[0]
# Lookup source text
s_text = df[(df.Task == a_task) & (df.Class == -1)]['Text'].values[0]
# instantiate an ngram counter
counts = CountVectorizer(analyzer='word', ngram_range=(n,n))
# create array of n-gram counts for the answer and source text
ngrams = counts.fit_transform([a_text, s_text])
# Convert to array
ngram_array = ngrams.toarray()
# Calculate containment
intersect_list = np.amin(ngram_array, axis=0)
# Add number of intersections
intersection = np.sum(intersect_list)
# Add number of n-grams in answer text
count_answer = np.sum(ngram_array[0])
containment_val = intersection/count_answer
return containment_val
###Output
_____no_output_____
###Markdown
Test cellsAfter you've implemented the containment function, you can test out its behavior. The cell below iterates through the first few files, and calculates the original category _and_ containment values for a specified n and file.>If you've implemented this correctly, you should see that the non-plagiarized examples have containment values that are low or close to 0, and that plagiarized examples have higher containment values, closer to 1.Note what happens when you change the value of n. I recommend applying your code to multiple files and comparing the resultant containment values. You should see that the highest containment values correspond to files with the highest category (`cut`) of plagiarism level.
###Code
# select a value for n
n = 3
# indices for first few files
test_indices = range(5)
# iterate through files and calculate containment
category_vals = []
containment_vals = []
for i in test_indices:
# get level of plagiarism for a given file index
category_vals.append(complete_df.loc[i, 'Category'])
# calculate containment for given file and n
filename = complete_df.loc[i, 'File']
c = calculate_containment(complete_df, n, filename)
containment_vals.append(c)
# print out result, does it make sense?
print('Original category values: \n', category_vals)
print()
print(str(n)+'-gram containment values: \n', containment_vals)
# run this test cell
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# test containment calculation
# params: complete_df from before, and containment function
tests.test_containment(complete_df, calculate_containment)
###Output
Tests Passed!
###Markdown
QUESTION 1: Why can we calculate containment features across *all* data (training & test), prior to splitting the DataFrame for modeling? That is, what about the containment calculation means that the test and training data do not influence each other? **Answer:**Each containment value is calculated from an answer text and its associated source text only. The source text belongs to neither the training set nor the test set, so computing this feature before the split does not let the training and test data influence each other. --- Longest Common SubsequenceContainment is a good way to find overlap in word usage between two documents; it may help identify cases of cut-and-paste as well as paraphrased levels of plagiarism. Since plagiarism is a fairly complex task with varying levels, it's often useful to include other measures of similarity. The paper also discusses a feature called **longest common subsequence**.> The longest common subsequence is the longest string of words (or letters) that are *the same* between the Wikipedia Source Text (S) and the Student Answer Text (A). This value is also normalized by dividing by the total number of words (or letters) in the Student Answer Text. In this exercise, we'll ask you to calculate the longest common subsequence of words between two texts. EXERCISE: Calculate the longest common subsequenceComplete the function `lcs_norm_word`; this should calculate the *longest common subsequence* of words between a Student Answer Text and corresponding Wikipedia Source Text. It may be helpful to think of this in a concrete example. A Longest Common Subsequence (LCS) problem may look as follows:* Given two texts: text A (answer text) of length n, and string S (original source text) of length m. Our goal is to produce their longest common subsequence of words: the longest sequence of words that appear left-to-right in both texts (though the words don't have to be in continuous order).* Consider: * A = "i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents" * S = "pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents"* In this case, we can see that the start of each sentence is fairly similar, having overlap in the sequence of words, "pagerank is a link analysis algorithm used by" before diverging slightly. Then we **continue moving left-to-right along both texts** until we see the next common sequence; in this case it is only one word, "google". Next we find "that" and "a" and finally the same ending "to each element of a hyperlinked set of documents".* Below is a clear visual of how these sequences were found, sequentially, in each text.* Now, those words appear in left-to-right order in each document, sequentially, and even though there are some words in between, we count this as the longest common subsequence between the two texts. * If I count up each word that I found in common I get the value 20. **So, LCS has length 20**. * Next, to normalize this value, divide by the total length of the student answer; in this example that length is only 27. **So, the function `lcs_norm_word` should return the value `20/27` or about `0.7408`.**In this way, LCS is a great indicator of cut-and-paste plagiarism or if someone has referenced the same source text multiple times in an answer. LCS, dynamic programmingIf you read through the scenario above, you can see that this algorithm depends on looking at two texts and comparing them word by word. 
You can solve this problem in multiple ways. First, it may be useful to `.split()` each text into lists of words to compare. Then, you can iterate through each word in the texts and compare them, adding to your value for LCS as you go. The method I recommend for implementing an efficient LCS algorithm is: using a matrix and dynamic programming. **Dynamic programming** is all about breaking a larger problem into a smaller set of subproblems, and building up a complete result without having to repeat any subproblems. This approach assumes that you can split up a large LCS task into a combination of smaller LCS tasks. Let's look at a simple example that compares letters:* A = "ABCD"* S = "BD"We can see right away that the longest subsequence of _letters_ here is 2 (B and D are in sequence in both strings). And we can calculate this by looking at relationships between each letter in the two strings, A and S.Here, I have a matrix with the letters of A on top and the letters of S on the left side:This starts out as a matrix that has as many columns and rows as letters in the strings A and S **+1** additional row and column, filled with zeros on the top and left sides. So, in this case, instead of a 2x4 matrix it is a 3x5.Now, we can fill this matrix up by breaking it into smaller LCS problems. For example, let's first look at the shortest substrings: the starting letter of A and S. We'll first ask, what is the Longest Common Subsequence between these two letters "A" and "B"? **Here, the answer is zero and we fill in the corresponding grid cell with that value.**Then, we ask the next question, what is the LCS between "AB" and "B"?**Here, we have a match, and can fill in the appropriate value 1**.If we continue, we get to a final matrix that looks as follows, with a **2** in the bottom right corner.The final LCS will be that value **2** *normalized* by the number of words (or letters) in A. So, our normalized value is 2/4 = **0.5**. The matrix rulesOne thing to notice here is that you can efficiently fill up this matrix one cell at a time. Each grid cell only depends on the values in the grid cells that are directly on top and to the left of it, or on the diagonal/top-left. The rules are as follows:* Start with a matrix that has one extra row and column of zeros.* As you traverse your strings: * If there is a match, fill that grid cell with the value to the top-left of that cell *plus* one. So, in our case, when we found a matching B-B, we added +1 to the value in the top-left of the matching cell, 0. * If there is not a match, take the *maximum* value from either directly to the left or the top cell, and carry that value over to the non-match cell.After completely filling the matrix, **the bottom-right cell will hold the non-normalized LCS value**.This matrix treatment can be applied to a set of words instead of letters. Your function should apply this to the words in two texts and return the normalized LCS value.
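As a quick check of these rules, the small sketch below (letter-level, matching the "ABCD" / "BD" example) fills the table and prints the normalized value:
###Code
# Hypothetical letter-level illustration of the LCS matrix described above
import numpy as np

A = "ABCD"   # letters of A label the columns
S = "BD"     # letters of S label the rows

lcs_matrix = np.zeros((len(S) + 1, len(A) + 1), dtype=int)
for i, s_char in enumerate(S, start=1):
    for j, a_char in enumerate(A, start=1):
        if s_char == a_char:
            # match: take the value diagonally up-left and add one
            lcs_matrix[i][j] = lcs_matrix[i-1][j-1] + 1
        else:
            # no match: carry over the max of the cell above and the cell to the left
            lcs_matrix[i][j] = max(lcs_matrix[i-1][j], lcs_matrix[i][j-1])

print(lcs_matrix)
print(lcs_matrix[-1][-1] / len(A))   # 2 / 4 = 0.5
###Output
    _____no_output_____
###Markdown
The word-level implementation below applies exactly the same recurrence to the split answer and source texts.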
###Code
# Compute the normalized LCS given an answer text and a source text
def lcs_norm_word(answer_text, source_text):
'''Computes the longest common subsequence of words in two texts; returns a normalized value.
:param answer_text: The pre-processed text for an answer text
:param source_text: The pre-processed text for an answer's associated source text
:return: A normalized LCS value'''
# Split text
a_text = answer_text.split()
s_text = source_text.split()
# Get length of matrix
n = len(a_text)
m = len(s_text)
# create an empty matrix with n x m size
matrix_lcs = np.zeros((m+1,n+1), dtype=int)
# iterate through each word to find a match
for i, s_word in enumerate(s_text, start=1):
for j, a_word in enumerate(a_text, start=1):
# match
if a_word == s_word:
matrix_lcs[i][j] = matrix_lcs[i-1][j-1] + 1
else:
# no match
matrix_lcs[i][j] = max(matrix_lcs[i-1][j], matrix_lcs[i][j-1])
# normalize lcs
normalized_lcs = matrix_lcs[m][n] / n
return normalized_lcs
###Output
_____no_output_____
###Markdown
Test cellsLet's start by testing out your code on the example given in the initial description.In the below cell, we have specified strings A (answer text) and S (original source text). We know that these texts have 20 words in common and the submitted answer is 27 words long, so the normalized, longest common subsequence should be 20/27.
###Code
# Run the test scenario from above
# does your function return the expected value?
A = "i think pagerank is a link analysis algorithm used by google that uses a system of weights attached to each element of a hyperlinked set of documents"
S = "pagerank is a link analysis algorithm used by the google internet search engine that assigns a numerical weighting to each element of a hyperlinked set of documents"
# calculate LCS
lcs = lcs_norm_word(A, S)
print('LCS = ', lcs)
# expected value test
assert lcs==20/27., "Incorrect LCS value, expected about 0.7408, got "+str(lcs)
print('Test passed!')
###Output
LCS = 0.7407407407407407
Test passed!
###Markdown
This next cell runs a more rigorous test.
###Code
# run test cell
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# test lcs implementation
# params: complete_df from before, and lcs_norm_word function
tests.test_lcs(complete_df, lcs_norm_word)
###Output
Tests Passed!
###Markdown
Finally, take a look at a few resultant values for `lcs_norm_word`. Just like before, you should see that higher values correspond to higher levels of plagiarism.
###Code
# test on your own
test_indices = range(5) # look at first few files
category_vals = []
lcs_norm_vals = []
# iterate through first few docs and calculate LCS
for i in test_indices:
category_vals.append(complete_df.loc[i, 'Category'])
# get texts to compare
answer_text = complete_df.loc[i, 'Text']
task = complete_df.loc[i, 'Task']
# we know that source texts have Class = -1
orig_rows = complete_df[(complete_df['Class'] == -1)]
orig_row = orig_rows[(orig_rows['Task'] == task)]
source_text = orig_row['Text'].values[0]
# calculate lcs
lcs_val = lcs_norm_word(answer_text, source_text)
lcs_norm_vals.append(lcs_val)
# print out result, does it make sense?
print('Original category values: \n', category_vals)
print()
print('Normalized LCS values: \n', lcs_norm_vals)
###Output
Original category values:
[0, 3, 2, 1, 0]
Normalized LCS values:
[0.1917808219178082, 0.8207547169811321, 0.8464912280701754, 0.3160621761658031, 0.24257425742574257]
###Markdown
--- Create All FeaturesNow that you've completed the feature calculation functions, it's time to actually create multiple features and decide on which ones to use in your final model! In the below cells, you're provided two helper functions to help you create multiple features and store those in a DataFrame, `features_df`. Creating multiple containment featuresYour completed `calculate_containment` function will be called in the next cell, which defines the helper function `create_containment_features`. > This function returns a list of containment features, calculated for a given `n` and for *all* files in a df (assumed to be the `complete_df`).For our original files, the containment value is set to a special value, -1.This function gives you the ability to easily create several containment features, of different n-gram lengths, for each of our text files.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Function returns a list of containment features, calculated for a given n
# Should return a list of length 100 for all files in a complete_df
def create_containment_features(df, n, column_name=None):
containment_values = []
if(column_name==None):
column_name = 'c_'+str(n) # c_1, c_2, .. c_n
# iterates through dataframe rows
for i in df.index:
file = df.loc[i, 'File']
# Computes features using calculate_containment function
if df.loc[i,'Category'] > -1:
c = calculate_containment(df, n, file)
containment_values.append(c)
# Sets value to -1 for original tasks
else:
containment_values.append(-1)
print(str(n)+'-gram containment features created!')
return containment_values
###Output
_____no_output_____
###Markdown
Creating LCS featuresBelow, your completed `lcs_norm_word` function is used to create a list of LCS features for all the answer files in a given DataFrame (again, this assumes you are passing in the `complete_df`). It assigns a special value, -1, to our original source files.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Function creates lcs feature and add it to the dataframe
def create_lcs_features(df, column_name='lcs_word'):
lcs_values = []
# iterate through files in dataframe
for i in df.index:
# Computes LCS_norm words feature using function above for answer tasks
if df.loc[i,'Category'] > -1:
# get texts to compare
answer_text = df.loc[i, 'Text']
task = df.loc[i, 'Task']
# we know that source texts have Class = -1
orig_rows = df[(df['Class'] == -1)]
orig_row = orig_rows[(orig_rows['Task'] == task)]
source_text = orig_row['Text'].values[0]
# calculate lcs
lcs = lcs_norm_word(answer_text, source_text)
lcs_values.append(lcs)
# Sets to -1 for original tasks
else:
lcs_values.append(-1)
print('LCS features created!')
return lcs_values
###Output
_____no_output_____
###Markdown
EXERCISE: Create a features DataFrame by selecting an `ngram_range`The paper suggests calculating the following features: containment *1-gram to 5-gram* and *longest common subsequence*. > In this exercise, you can choose to create even more features, for example from *1-gram to 7-gram* containment features and *longest common subsequence*. You'll want to create at least 6 features to choose from as you think about which to give to your final, classification model. Defining and comparing at least 6 different features allows you to discard any features that seem redundant, and choose to use the best features for your final model!In the below cell **define an n-gram range**; these will be the n's you use to create n-gram containment features. The rest of the feature creation code is provided.
###Code
# Define an ngram range
ngram_range = range(1,30)
# The following code may take a minute to run, depending on your ngram_range
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
features_list = []
# Create features in a features_df
all_features = np.zeros((len(ngram_range)+1, len(complete_df)))
# Calculate features for containment for ngrams in range
i=0
for n in ngram_range:
column_name = 'c_'+str(n)
features_list.append(column_name)
# create containment features
all_features[i]=np.squeeze(create_containment_features(complete_df, n))
i+=1
# Calculate features for LCS_Norm Words
features_list.append('lcs_word')
all_features[i]= np.squeeze(create_lcs_features(complete_df))
# create a features dataframe
features_df = pd.DataFrame(np.transpose(all_features), columns=features_list)
# Print all features/columns
print()
print('Features: ', features_list)
print()
# print some results
features_df.head(10)
###Output
_____no_output_____
###Markdown
Correlated FeaturesYou should use feature correlation across the *entire* dataset to determine which features are ***too*** **highly-correlated** with each other to include both features in a single model. For this analysis, you can use the *entire* dataset due to the small sample size we have. All of our features try to measure the similarity between two texts. Since our features are designed to measure similarity, it is expected that these features will be highly-correlated. Many classification models, for example a Naive Bayes classifier, rely on the assumption that features are *not* highly correlated; highly-correlated features may over-inflate the importance of a single feature. So, you'll want to choose your features based on which pairings have the lowest correlation. These correlation values range between 0 and 1; from low to high correlation, and are displayed in a [correlation matrix](https://www.displayr.com/what-is-a-correlation-matrix/), below.
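If you prefer a programmatic check over reading the matrix by eye, a small sketch like the following (a suggestion, not a required part of the project) lists the feature pairs whose absolute correlation stays below a chosen cutoff:
###Code
# Hypothetical helper: list feature pairs with correlation below an example cutoff
import itertools

cutoff = 0.9   # example threshold; adjust to taste
corr = features_df.corr().abs()

low_corr_pairs = [(a, b, round(corr.loc[a, b], 2))
                  for a, b in itertools.combinations(features_df.columns, 2)
                  if corr.loc[a, b] < cutoff]
print(low_corr_pairs)
###Output
    _____no_output_____
###Markdown
The provided cell below simply displays the full matrix so you can make the same judgement by inspection.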
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Create correlation matrix for just Features to determine different models to test
corr_matrix = features_df.corr().abs().round(2)
# display shows all of a dataframe
pd.set_option('display.max_columns', 999)
display(corr_matrix)
###Output
_____no_output_____
###Markdown
EXERCISE: Create selected train/test dataComplete the `train_test_data` function below. This function should take in the following parameters:* `complete_df`: A DataFrame that contains all of our processed text data, file info, datatypes, and class labels* `features_df`: A DataFrame of all calculated features, such as containment for ngrams, n= 1-5, and lcs values for each text file listed in the `complete_df` (this was created in the above cells)* `selected_features`: A list of feature column names, ex. `['c_1', 'lcs_word']`, which will be used to select the final features in creating train/test sets of data.It should return two tuples:* `(train_x, train_y)`, selected training features and their corresponding class labels (0/1)* `(test_x, test_y)`, selected test features and their corresponding class labels (0/1)** Note: x and y should be arrays of feature values and numerical class labels, respectively; not DataFrames.**Looking at the above correlation matrix, you should decide on a **cutoff** correlation value, less than 1.0, to determine which sets of features are *too* highly-correlated to be included in the final training and test data. If you cannot find features that are less correlated than some cutoff value, it is suggested that you increase the number of features (longer n-grams) to choose from or use *only one or two* features in your final model to avoid introducing highly-correlated features.Recall that the `complete_df` has a `Datatype` column that indicates whether data should be `train` or `test` data; this should help you split the data appropriately.
###Code
# Takes in dataframes and a list of selected features (column names)
# and returns (train_x, train_y), (test_x, test_y)
def train_test_data(complete_df, features_df, selected_features):
'''Gets selected training and test features from given dataframes, and
returns tuples for training and test features and their corresponding class labels.
:param complete_df: A dataframe with all of our processed text data, datatypes, and labels
:param features_df: A dataframe of all computed, similarity features
:param selected_features: An array of selected features that correspond to certain columns in `features_df`
:return: training and test features and labels: (train_x, train_y), (test_x, test_y)'''
# get the training features
train_x = features_df[complete_df.Datatype == 'train'][selected_features].values
# And training class labels (0 or 1)
train_y = complete_df[complete_df.Datatype == 'train']['Class'].to_numpy()
# get the test features and labels
test_x = features_df[complete_df.Datatype == 'test'][selected_features].values
test_y = complete_df[complete_df.Datatype == 'test']['Class'].to_numpy()
return (train_x, train_y), (test_x, test_y)
###Output
_____no_output_____
###Markdown
Test cellsBelow, test out your implementation and create the final train/test data.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
test_selection = list(features_df)[:2] # first couple columns as a test
# test that the correct train/test data is created
(train_x, train_y), (test_x, test_y) = train_test_data(complete_df, features_df, test_selection)
# params: generated train/test data
tests.test_data_split(train_x, train_y, test_x, test_y)
###Output
Tests Passed!
###Markdown
EXERCISE: Select "good" featuresIf you passed the test above, you can create your own train/test data, below. Define a list of features you'd like to include in your final model, `selected_features`; this is a list of the feature names you want to include.
###Code
# Select your list of features, this should be column names from features_df
# ex. ['c_1', 'lcs_word']
selected_features = ['c_15', 'lcs_word']
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
(train_x, train_y), (test_x, test_y) = train_test_data(complete_df, features_df, selected_features)
# check that division of samples seems correct
# these should add up to 95 (100 - 5 original files)
print('Training size: ', len(train_x))
print('Test size: ', len(test_x))
print()
print('Training df sample: \n', train_x[:10])
###Output
Training size: 70
Test size: 25
Training df sample:
[[0. 0.19178082]
[0.09615385 0.84649123]
[0. 0.31606218]
[0. 0.24257426]
[0. 0.16117216]
[0. 0.30165289]
[0. 0.48430493]
[0. 0.27083333]
[0. 0.22395833]
[0.39393939 0.9 ]]
###Markdown
Question 2: How did you decide on which features to include in your final model? **Answer:**I chose to include `lcs_word` since it is a more complex way to assess text similarities, and included `c_15` because its correlation with `lcs_word` was under 90%. --- Creating Final Data FilesNow, you are almost ready to move on to training a model in SageMaker!You'll want to access your train and test data in SageMaker and upload it to S3. In this project, SageMaker will expect the following format for your train/test data:* Training and test data should be saved in one `.csv` file each, ex `train.csv` and `test.csv`* These files should have class labels in the first column and features in the rest of the columnsThis format follows the practice outlined in the [SageMaker documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-training.html), which reads: "Amazon SageMaker requires that a CSV file doesn't have a header record and that the target variable [class label] is in the first column." EXERCISE: Create csv filesDefine a function that takes in x (features) and y (labels) and saves them to one `.csv` file at the path `data_dir/filename`.It may be useful to use pandas to merge your features and labels into one DataFrame and then convert that into a csv file. You can make sure to get rid of any incomplete rows, in a DataFrame, by using `dropna`.
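For a concrete picture of the expected layout, the tiny sketch below (with made-up numbers) writes two rows with the label first, no header, and no index:
###Code
# Hypothetical illustration of the required row layout: label first, no header, no index
import pandas as pd

example = pd.concat([pd.DataFrame([1, 0]), pd.DataFrame([[0.39, 0.19], [0.02, 0.84]])], axis=1)
print(example.to_csv(header=False, index=False))
# 1,0.39,0.19
# 0,0.02,0.84
###Output
    _____no_output_____
###Markdown
Your `make_csv` function below should produce exactly this layout from the x and y arrays.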
###Code
def make_csv(x, y, filename, data_dir):
'''Merges features and labels and converts them into one csv file with labels in the first column.
:param x: Data features
:param y: Data labels
:param file_name: Name of csv file, ex. 'train.csv'
:param data_dir: The directory where files will be saved
'''
# make data dir, if it does not exist
if not os.path.exists(data_dir):
os.makedirs(data_dir)
data = pd.concat([pd.DataFrame(y), pd.DataFrame(x)], axis=1).dropna(axis=0)
save_path = os.path.join(data_dir, filename)
data.to_csv(save_path, header=False, index=False)
# nothing is returned, but a print statement indicates that the function has run
print('Path created: '+str(data_dir)+'/'+str(filename))
###Output
_____no_output_____
###Markdown
Test cellsTest that your code produces the correct format for a `.csv` file, given some text features and labels.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
fake_x = [ [0.39814815, 0.0001, 0.19178082],
[0.86936937, 0.44954128, 0.84649123],
[0.44086022, 0., 0.22395833] ]
fake_y = [0, 1, 1]
make_csv(fake_x, fake_y, filename='to_delete.csv', data_dir='test_csv')
# read in and test dimensions
fake_df = pd.read_csv('test_csv/to_delete.csv', header=None)
# check shape
assert fake_df.shape==(3, 4), \
    'The file should have as many rows as data_points and as many columns as features+1 (the first column holds the labels).'
# check that first column = labels
assert np.all(fake_df.iloc[:,0].values==fake_y), 'First column is not equal to the labels, fake_y.'
print('Tests passed!')
# delete the test csv file, generated above
! rm -rf test_csv
###Output
_____no_output_____
###Markdown
If you've passed the tests above, run the following cell to create `train.csv` and `test.csv` files in a directory that you specify! This will save the data in a local directory. Remember the name of this directory because you will reference it again when uploading this data to S3.
###Code
# can change directory, if you want
data_dir = 'plagiarism_data'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
make_csv(train_x, train_y, filename='train.csv', data_dir=data_dir)
make_csv(test_x, test_y, filename='test.csv', data_dir=data_dir)
###Output
Path created: plagiarism_data/train.csv
Path created: plagiarism_data/test.csv
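###Markdown
For reference, the later S3 upload step could look roughly like the sketch below (assuming a SageMaker notebook environment; the bucket and prefix here are examples, and the actual upload cell used later in the project may differ):
###Code
# Hypothetical upload sketch using the default SageMaker session and bucket
import sagemaker

session = sagemaker.Session()
prefix = 'plagiarism-detection'   # example S3 key prefix
input_data = session.upload_data(path=data_dir, bucket=session.default_bucket(), key_prefix=prefix)
print(input_data)
###Output
    _____no_output_____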
|
notebooks/1_2_basic_regression_tensorflow.ipynb | ###Markdown
Linear Regression in TensorFlow 1. Import libraries
###Code
import tensorflow as tf
from tensorflow import keras as K
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
2. Hyperparameters
###Code
EPOCHS = 500
NUM_HIDDEN_UNITS = 64
OUTPUT_DIMENSION = 1
LEARNING_RATE = 0.001
BATCH_SIZE = 32
DISP_FREQ = 100
###Output
_____no_output_____
###Markdown
3. Load the data
###Code
# Load the Boston Housing Prices dataset
boston_housing = K.datasets.boston_housing
(X_train, y_train), (X_test, y_test) = boston_housing.load_data()
num_features = X_train.shape[1]
def randomize(x, y):
""" Randomizes the order of data samples and their corresponding labels"""
permutation = np.random.permutation(y.shape[0])
shuffled_x = x[permutation, :]
shuffled_y = y[permutation]
return shuffled_x, shuffled_y
# Shuffle the training set
X_train, y_train = randomize(X_train, y_train)
print("Train data size -> input: {}, output: {}".format(X_train.shape, y_train.shape))
print("Test data size: -> input: {}, output: {}".format(X_test.shape, y_test.shape))
###Output
Train data size -> input: (404, 13), output: (404,)
Test data size: -> input: (102, 13), output: (102,)
###Markdown
4. Normalize the data
###Code
# Test data is *not* used when calculating the mean and std
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
# Create validation data
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.2)
###Output
_____no_output_____
###Markdown
5. Create the model (i.e. Graph)
###Code
# Placeholders for inputs (x) and outputs(y)
x = tf.placeholder(tf.float32, shape=[None, num_features], name='X')
y = tf.placeholder(tf.float32, shape=[None], name='Y')
def DenseLayer(inputs, num_units, layer_name, activation=None):
input_dim = inputs.get_shape().as_list()[-1]
with tf.variable_scope(layer_name):
W = tf.get_variable('W',
dtype=tf.float32,
shape=[input_dim, num_units],
initializer=tf.truncated_normal_initializer(stddev=0.01))
b = tf.get_variable('b',
dtype=tf.float32,
initializer=tf.constant(0., shape=[num_units], dtype=tf.float32))
logits = tf.matmul(inputs, W) + b
if activation:
return activation(logits)
return logits
# Hidden Layer
fc1 = DenseLayer(x, NUM_HIDDEN_UNITS, 'FC1', activation=tf.nn.relu)
# Output Layer
predictions = DenseLayer(fc1, OUTPUT_DIMENSION, 'FC2')
# Define the loss function and optimizer
loss = tf.reduce_mean(tf.losses.mean_squared_error(labels=y, predictions=tf.squeeze(predictions)))
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(loss)
# Creating the op for initializing all variables
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
6. Train
###Code
def get_next_batch(x, y, start, end):
x_batch = x[start:end]
y_batch = y[start:end]
return x_batch, y_batch
sess = tf.InteractiveSession()
# Initialize all variables
sess.run(init)
# Number of training iterations in each epoch
NUM_TR_ITERS = int(len(y_train) / BATCH_SIZE)
print('------------------------------------')
for epoch in range(EPOCHS):
# Randomly shuffle the training data at the beginning of each epoch
    X_train, y_train = randomize(X_train, y_train)
for iteration in range(NUM_TR_ITERS):
start = iteration * BATCH_SIZE
end = (iteration + 1) * BATCH_SIZE
x_batch, y_batch = get_next_batch(X_train, y_train, start, end)
# Run optimization op (backprop)
feed_dict_batch = {x: x_batch, y: y_batch}
sess.run(optimizer, feed_dict=feed_dict_batch)
if not epoch % DISP_FREQ:
# Run validation after every epoch
feed_dict_valid = {x: X_valid, y: y_valid}
loss_valid = sess.run(loss, feed_dict=feed_dict_valid)
print("Epoch: {0}, validation loss: {1:.2f}".format(epoch, loss_valid))
print('------------------------------------')
###Output
------------------------------------
Epoch: 0, validation loss: 644.91
------------------------------------
Epoch: 100, validation loss: 106.93
------------------------------------
Epoch: 200, validation loss: 106.98
------------------------------------
Epoch: 300, validation loss: 108.70
------------------------------------
Epoch: 400, validation loss: 106.54
------------------------------------
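###Markdown
 7. Evaluate on the test setA minimal sketch of how the held-out test split (already normalized above) could be scored with the same graph, assuming the session is still open:
###Code
# Hypothetical evaluation cell: reuse the placeholders and loss op defined above
feed_dict_test = {x: X_test, y: y_test}
loss_test = sess.run(loss, feed_dict=feed_dict_test)
print("Test loss (MSE): {:.2f}".format(loss_test))
###Output
    _____no_output_____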
|
clasificacion/supervisado/ejemplos/FourClass.ipynb | ###Markdown
Case study: Binary ClassificationThis document presents a test dataset known as _FourClass_, introduced in reference (1) of this document. The purpose is to compare the results and efficiency of the following methods:- **Euclidean classifier**- **Gaussian Naive Bayes**- **k-Nearest Neighbors** _FourClass_This dataset has two main features and poses a **binary classification** problem, that is, only two different classes exist in the dataset. Some of its main characteristics are:- It is **not** linearly separable- It has an _irregular_ spatial distribution, that is, it does not follow any specific pattern.- There are disconnected regions and sections, that is, although a subset of points belongs to a particular class, it does not lie within the same cluster of points.As can be seen, this is a set of points that is difficult to classify, and it is an important case study both for analyzing and for understanding the true reach of the usual classification methods. Precision of the classifiersIn general, the efficiency of the classifiers for a given dataset is not known in advance; but tests are usually run with datasets that have been used in some previous trial or experiment. It is always useful to review the literature for this kind of problem. In particular, many experiments have been carried out on this dataset, but here we will state some hypotheses about the true expected **efficiency** of the classifiers.- **Euclidean classifier:** Since this classifier expects the dataset to have _linearly separable_ classes, it is expected to have the worst precision of all the methods presented in this document; alternative techniques exist to map this kind of dataset into spaces where it _is_ linearly separable, but they will be studied in another document.- **Gaussian Naive Bayes:** This classifier expects a high-dimensional dataset whose classes come from Gaussian normal distributions, among other things. This dataset meets none of these conditions, so this classifier is likely to have low precision.- **k-Nearest Neighbors:** This classifier has no requirement except that the dataset not be so high-dimensional that the _k-d tree_ it uses to search for the nearest neighbors becomes too inefficient to implement. It is therefore expected to be the classifier with the best precision of all, since it does not depend on the spatial distribution of the dataset.
###Code
import matplotlib as mpl
mpl.rcParams["figure.figsize"] = (21, 10)
import os
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
# Load the data file stored in the repository
datos = np.loadtxt(
os.path.relpath("datasets/fourclass.csv", start="intelicompu"),
delimiter=",",
skiprows=1
)
# Separate the data and the labels
X = datos[:, :-1]
# The labels correspond to the last column of the data
y = datos[:, -1]
# Plot the data
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap="summer")
###Output
_____no_output_____
###Markdown
As can be seen in the figure, the classes are separated by color; there are 2 of them, and just as described in the introduction of this document, there are regions where the data points are not in the same cluster according to their class. It is clearly not _linearly separable_, there are not that many points, and there are only two features.
###Code
# Replace the class label to avoid certain errors
y[y == -1] = 0
# Split the data into training and test sets
x_entre, x_prueba, y_entre, y_prueba = train_test_split(X, y, test_size=0.3, random_state=7, shuffle=False)
###Output
_____no_output_____
###Markdown
**NOTE:** The class -1 is replaced by 0 to avoid numerical problems inside the implementations of the classification methods. It is not always necessary to make this kind of modification; it depends on the dataset, the person in charge of analyzing the data, among other things. Euclidean classifier
###Code
# Load the code that lives in this directory
%run DiscriminanteLineal.py
# Instantiate, train, and run the classification
euclidiano = DiscriminanteLineal()
euclidiano.entrenamiento(x_entre, y_entre)
resultado = euclidiano.prediccion(x_prueba)
# Create the classification report
print(classification_report(y_prueba, resultado, labels=[0, 1]))
###Output
precision recall f1-score support
0 0.73 0.92 0.81 156
1 0.80 0.50 0.61 103
micro avg 0.75 0.75 0.75 259
macro avg 0.77 0.71 0.71 259
weighted avg 0.76 0.75 0.73 259
###Markdown
Precision and resultsThe _classification report_ for this classifier produces interesting values. This classifier can classify _class 0_ correctly when it appears on its own, but when it has to discriminate between _class 1_ and _class 0_ it performs poorly. This was to be expected, given that there is no linear separation between the classes; only some spatial regions within the dataset can be considered such that a straight line passes between them, but in general this is not true. Nevertheless, for a fully linear classifier, its performance is adequate, though deficient. k-Nearest Neighbors
###Code
# Load the code that lives in this directory
%run kNearestNeighbors.py
# Instantiate, train, and run the classification
knn_clf = kNearestNeighbors(num_vecinos=5)
knn_clf.entrenamiento(x_entre, y_entre)
resultado = knn_clf.predecir(x_prueba)
###Output
_____no_output_____
###Markdown
**NOTE:** Regarding the number of neighbors `num_vecinos`, the value 5 was chosen for convenience; however, the correct approach is to perform some kind of _validation_ or tuning technique so that this value is the one that yields the best classification results.
###Code
print(classification_report(y_prueba, resultado, labels=[0, 1]))
###Output
precision recall f1-score support
0 1.00 1.00 1.00 156
1 1.00 1.00 1.00 103
micro avg 1.00 1.00 1.00 259
macro avg 1.00 1.00 1.00 259
weighted avg 1.00 1.00 1.00 259
###Markdown
Precision and resultsAs expected, this classifier classifies the data perfectly. Not only can it classify the classes correctly, it can also distinguish between one and the other. This is clearly due to the nature of the classifier, since it requires no hypothesis about the data; all that matters is which neighbors are nearby and which class they belong to. Gaussian Naive Bayes
###Code
# Load the code that lives in this directory
%run GNaiveBayes.py
# Instantiate, train, and run the classification
gnb_clf = GNaiveBayes()
gnb_clf.entrenamiento(x_entre, y_entre)
resultado = gnb_clf.predecir(x_prueba)
# Create the classification report
print(classification_report(y_prueba, resultado, labels=[0, 1]))
###Output
precision recall f1-score support
0 0.76 0.96 0.85 156
1 0.89 0.54 0.67 103
micro avg 0.79 0.79 0.79 259
macro avg 0.82 0.75 0.76 259
weighted avg 0.81 0.79 0.78 259
|
tutorials/Certification_Trainings/Healthcare/15.German_Legal_Model.ipynb | ###Markdown
[Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/15.German_Legal_Model.ipynb) Colab Setup
###Code
import json
with open('workshop_license_keys_365.json') as f_in:
license_keys = json.load(f_in)
license_keys.keys()
import os
# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
secret = license_keys['SECRET']
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
version = license_keys['PUBLIC_VERSION']
jsl_version = license_keys['JSL_VERSION']
! pip install --ignore-installed -q pyspark==2.4.4
! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret
! pip install --ignore-installed -q spark-nlp==$version
import sparknlp
print (sparknlp.version())
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
from pyspark.sql.types import StructType, StructField, StringType
import itertools
spark = sparknlp_jsl.start(secret)
###Output
_____no_output_____
###Markdown
Legal NER The dataset used to train this model is taken from Leitner, et.al (2019)*Leitner, E., Rehm, G., and Moreno-Schneider, J. (2019). Fine-grained Named Entity Recognition in Legal Documents. In Maribel Acosta, et al., editors, Semantic Systems. The Power of AI and Knowledge Graphs. Proceedings of the 15th International Conference (SEMANTiCS2019), number 11702 in Lecture Notes in Computer Science, pages 272–287, Karlsruhe, Germany, 9. Springer. 10/11 September 2019.***Source of the annotated text:**Court decisions from 2017 and 2018 were selected for the dataset, published online by the Federal Ministry of Justice and Consumer Protection. The documents originate from seven federal courts: Federal Labour Court (BAG), Federal Fiscal Court (BFH), Federal Court of Justice (BGH), Federal Patent Court (BPatG), Federal Social Court (BSG), Federal Constitutional Court (BVerfG) and Federal Administrative Court (BVerwG).  **Macro-average prec: 0.9210195, rec: 0.91861916, f1: 0.91981775****Micro-average prec: 0.9833763, rec: 0.9837547, f1: 0.9835655**
###Code
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
# Sentence Detector annotator, processes various sentences per line
sentenceDetector = SentenceDetector()\
.setInputCols(["document"])\
.setOutputCol("sentence")\
# Tokenizer splits words in a relevant format for NLP
tokenizer = Tokenizer()\
.setInputCols(["sentence"])\
.setOutputCol("token")\
word_embeddings = WordEmbeddingsModel.pretrained("w2v_cc_300d",'de','clinical/models')\
.setInputCols(["sentence", 'token'])\
.setOutputCol("embeddings")\
.setCaseSensitive(False)
legal_ner = NerDLModel.pretrained("ner_legal",'de','clinical/models') \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("ner")
legal_ner_converter = NerConverterInternal() \
.setInputCols(["sentence", "token", "ner"]) \
.setOutputCol("ner_chunk")\
legal_pred_pipeline = Pipeline(
stages = [
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings,
legal_ner,
legal_ner_converter
])
empty_df = spark.createDataFrame([['']]).toDF("text")
legal_pred_model = legal_pred_pipeline.fit(empty_df)
legal_light_model = LightPipeline(legal_pred_model)
import pandas as pd
def get_ner_df (light_model, text):
light_result = light_model.fullAnnotate(text)
chunks = []
entities = []
for n in light_result[0]['ner_chunk']:
chunks.append(n.result)
entities.append(n.metadata['entity'])
df = pd.DataFrame({'chunks':chunks, 'entities':entities})
return df
text = '''
Jedoch wird der Verkehr darin naheliegend den Namen eines der bekanntesten Flüsse Deutschlands erkennen, welcher als Seitenfluss des Rheins durch Oberfranken, Unterfranken und Südhessen fließt und bei Mainz in den Rhein mündet.
Klein , in : Maunz / Schmidt-Bleibtreu / Klein / Bethge , BVerfGG , § 19 Rn. 9
Richtlinien zur Bewertung des Grundvermögens – BewRGr – vom19. I September 1966 (BStBl I, S.890)
'''
df = get_ner_df (legal_light_model, text)
df
###Output
_____no_output_____
###Markdown
German Public NER
###Code
from sparknlp.pretrained import PretrainedPipeline
public_pipeline = PretrainedPipeline('entity_recognizer_lg','de')
text = """William Henry Gates III (* 28. Oktober 1955 in London) ist ein US-amerikanischer Geschäftsmann, Softwareentwickler, Investor und Philanthrop. Er ist bekannt als Mitbegründer der Microsoft Corporation. Während seiner Karriere bei Microsoft war Gates Vorsitzender, Chief Executive Officer (CEO), Präsident und Chief Software Architect und bis Mai 2014 der größte Einzelaktionär. Er ist einer der bekanntesten Unternehmer und Pioniere der Mikrocomputer-Revolution der 1970er und 1980er Jahre. Gates wurde in Seattle, Washington, geboren und wuchs dort auf. 1975 gründete er Microsoft zusammen mit seinem Freund aus Kindertagen, Paul Allen, in Albuquerque, New Mexico. Es entwickelte sich zum weltweit größten Unternehmen für Personal-Computer-Software. Gates leitete das Unternehmen als Chairman und CEO, bis er im Januar 2000 als CEO zurücktrat. Er blieb jedoch Chairman und wurde Chief Software Architect. In den späten neunziger Jahren wurde Gates für seine Geschäftstaktiken kritisiert, die als wettbewerbswidrig angesehen wurden. Diese Meinung wurde durch zahlreiche Gerichtsurteile bestätigt. Im Juni 2006 gab Gates bekannt, dass er eine Teilzeitstelle bei Microsoft und eine Vollzeitstelle bei der Bill & Melinda Gates Foundation, der privaten gemeinnützigen Stiftung, die er und seine Frau Melinda Gates im Jahr 2000 gegründet haben, übernehmen wird. [ 9] Er übertrug seine Aufgaben nach und nach auf Ray Ozzie und Craig Mundie. Im Februar 2014 trat er als Vorsitzender von Microsoft zurück und übernahm eine neue Position als Technologieberater, um den neu ernannten CEO Satya Nadella zu unterstützen.
Die Mona Lisa ist ein Ölgemälde aus dem 16. Jahrhundert, das von Leonardo geschaffen wurde. Es findet im Louvre in Paris statt."""
result = public_pipeline.fullAnnotate(text)[0]
chunks=[]
entities=[]
status=[]
for n in result['entities']:
chunks.append(n.result)
entities.append(n.metadata['entity'])
df = pd.DataFrame({'chunks':chunks, 'entities':entities})
df
###Output
_____no_output_____
###Markdown
Highlight Entities
###Code
import random
from IPython.core.display import display, HTML
def get_color():
r = lambda: random.randint(100,255)
return '#%02X%02X%02X' % (r(),r(),r())
from spacy import displacy
def display_entities(annotated_text, filter_labels=True):
label_list = []
sent_dict_list = []
for n in annotated_text['ner_chunk']:
ent = {'start': n.begin, 'end':n.end+1, 'label':n.metadata['entity'].upper()}
label_list.append(n.metadata['entity'].upper())
sent_dict_list.append(ent)
document_text = [{'text':annotated_text['document'][0].result, 'ents':sent_dict_list,'title':None}]
label_list = list(set(label_list))
label_color={}
for l in label_list:
label_color[l]=get_color()
colors = {k:label_color[k] for k in label_list}
html_text = displacy.render(document_text, style='ent', jupyter=True, manual=True, options= {"ents": label_list, 'colors': colors})
return html_text
text = '''
Jedoch wird der Verkehr darin naheliegend den Namen eines der bekanntesten Flüsse Deutschlands erkennen, welcher als Seitenfluss des Rheins durch Oberfranken, Unterfranken und Südhessen fließt und bei Mainz in den Rhein mündet.
Klein , in : Maunz / Schmidt-Bleibtreu / Klein / Bethge , BVerfGG , § 19 Rn. 9
Richtlinien zur Bewertung des Grundvermögens – BewRGr – vom19. I September 1966 (BStBl I, S.890)
'''
ann_text = legal_light_model.fullAnnotate(text)
display_entities (ann_text[0])
###Output
_____no_output_____ |
notebooks/week6.ipynb | ###Markdown
plot statistics from the dataset
###Code
# Load data
X_train, X_test = dataset.get_X(format=pd.DataFrame)
y_train, y_test = dataset.get_y(format=pd.Series)
A_train, A_test = dataset.get_sensitive_features(name='race', format=pd.Series)
# Combine all training data into a single data frame and glance at a few rows
all_train_raw = pd.concat([X_train, A_train, y_train], axis=1)
all_test_raw = pd.concat([X_test, A_test, y_test], axis=1)
all_data = pd.concat([all_train_raw, all_test_raw], axis=0)
X = all_data[['lsat', 'ugpa', 'race']]
y = all_data[['pass_bar']]
le = preprocessing.LabelEncoder()
X.loc[:,'race'] = le.fit_transform(X['race'])
scaler = preprocessing.StandardScaler()
X.loc[:,['lsat', 'ugpa']] = scaler.fit_transform(X[['lsat', 'ugpa']])
A = X['race']
A_idx = 'race'
all_train_grouped = all_data.groupby('race')
counts_by_race = all_train_grouped[['lsat']].count().rename(
columns={'lsat': 'count'})
quartiles_by_race = all_train_grouped[['lsat', 'ugpa']].quantile([.25, .50, .75]).rename(
index={0.25: "25%", 0.5: "50%", 0.75: "75%"}, level=1).unstack()
rates_by_race = all_train_grouped[['pass_bar']].mean().rename(
columns={'pass_bar': 'pass_bar_rate'})
summary_by_race = pd.concat([counts_by_race, quartiles_by_race, rates_by_race], axis=1)
display(summary_by_race)
A_p = np.sum(A)/A.shape[0]
print(f"{A_p*100}% of the labels for 'race' is 'white'")
display(X)
X, X_t, y, y_t = train_test_split(
X, y, test_size=0.15, random_state=0
)
# pass bar rate should be equal for race='white' and race='black'
def create_tpr_ghat(A_idx, A_val):
def tpr_ab(X, y_true, y_pred, delta, n=None):
tp_a = tpr_rate(A_idx, 1)(X, y_true, y_pred)
tp = tpr_rate(A_idx, 0)(X, y_true, y_pred)
if method == 'ttest':
bound = abs(ttest_bounds(tp, delta, n) - ttest_bounds(tp_a, delta, n))
else:
bound = abs(hoeffdings_bounds(tp, delta, n) - hoeffdings_bounds(tp_a, delta, n))
return bound.upper - 0.3
return tpr_ab
A_idx= 2
tpr_ab = ghat_tpr_diff(A_idx, threshold=0.2)
#Construct the ghat
ghats = []
ghats.append({
'fn': tpr_ab,
'delta': 0.05
})
method='ttest'
op_method = 'Powell'
###Output
_____no_output_____
###Markdown
Experiment setup
###Code
exp = {
'num_trials': 10
}
def get_estimator(name):
if name == 'Const Powell':
return LogisticRegressionSeldonianModel(X.to_numpy(), y.to_numpy().flatten(), g_hats=ghats,
hard_barrier = False)
elif name == 'Const CMA-ES':
return SeldonianAlgorithmLogRegCMAES(X.to_numpy(), y.to_numpy().flatten(), g_hats=ghats, verbose=True,
hard_barrier = False)
elif name=='Unconst Powell':
return LogisticRegressionSeldonianModel(X.to_numpy(), y.to_numpy().flatten(), g_hats=[])
    elif name == 'Unconst CMA-ES':
return LogisticRegressionCMAES(X.to_numpy(), y.to_numpy().flatten(), verbose=True)
else:
return LogisticRegression(penalty='none')
res = {
'Const Powell': {
},
'Const CMA-ES': {
},
'Unconst Powell': {
},
'Unconst CMA-ES': {
},
'Unconst Scikit': {
}
}
for r in res:
fr = []
ac = []
ghat = []
print(f"Running for {r}")
for t in range(exp['num_trials']):
estimator = get_estimator(r)
try:
estimator.fit()
except:
estimator.fit(X, y.to_numpy().flatten())
acc = accuracy_score(y_t, estimator.predict(X_t))
ac.append(acc)
g = tpr_ab(X_t.to_numpy(), y_t.to_numpy().flatten(), estimator.predict(X_t), delta=0.05, ub=False)
fr.append((g > 0.0).astype(int))
ghat.append(g)
res[r]['failure_rate'] = np.mean(fr)
res[r]['failure_rate_std'] = np.std(fr)
res[r]['accuracy'] = np.mean(ac)
res[r]['ghat'] = np.mean(ghat)
###Output
Running for Const Powell
Optimization terminated successfully.
Current function value: 0.919456
Iterations: 7
Function evaluations: 560
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 1509.053747
Iterations: 3
Function evaluations: 264
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.951527
Iterations: 8
Function evaluations: 683
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.740105
Iterations: 6
Function evaluations: 648
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.956433
Iterations: 11
Function evaluations: 904
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 20.940549
Iterations: 3
Function evaluations: 401
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 20.626545
Iterations: 3
Function evaluations: 335
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 20.449983
Iterations: 3
Function evaluations: 326
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 21.022932
Iterations: 3
Function evaluations: 399
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.952122
Iterations: 7
Function evaluations: 573
Optimization result: Optimization terminated successfully.
Running for Const CMA-ES
Max number of iters: 1000
max iterations: 800
Max number of iters: 1000average loss:0.7170843016123776
max iterations: 800
Max number of iters: 1000average loss:0.7050905095318827
max iterations: 800
Max number of iters: 1000average loss:0.6872392779122091
max iterations: 800
Max number of iters: 1000average loss:0.6893379261571025
max iterations: 800
Max number of iters: 1000average loss:0.7119209909663106
max iterations: 800
Max number of iters: 1000average loss:0.6840384167348388
max iterations: 800
Max number of iters: 1000average loss:0.7082727558420707
max iterations: 800
Max number of iters: 1000average loss:0.6946008882119009
max iterations: 800
Max number of iters: 1000average loss:0.7014161277814377
max iterations: 800
Running for Unconst Powellverage loss:0.7126912843142283
Optimization terminated successfully.
Current function value: 0.153541
Iterations: 7
Function evaluations: 329
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153540
Iterations: 7
Function evaluations: 333
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153542
Iterations: 7
Function evaluations: 328
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153541
Iterations: 7
Function evaluations: 342
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153543
Iterations: 7
Function evaluations: 338
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153541
Iterations: 7
Function evaluations: 331
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153540
Iterations: 7
Function evaluations: 325
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153541
Iterations: 6
Function evaluations: 279
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153542
Iterations: 6
Function evaluations: 276
Optimization result: Optimization terminated successfully.
Optimization terminated successfully.
Current function value: 0.153540
Iterations: 7
Function evaluations: 344
Optimization result: Optimization terminated successfully.
Running for Unconst CMA-ES
Running for Unconst Scikit
###Markdown
Plot results
###Code
res
res_n = {
'Const Powell': '[C]Powell',
'Const CMA-ES': '[C]CMAES',
'Unconst Powell': '[UC]Powell',
'Unconst CMA-ES': '[UC]CMAES',
'Unconst Scikit': '[UC]Sklearn'
}
x = list(res.keys())
for k in res['Const Powell']:
plt.bar(list(map(lambda x: res_n[x], x)), list(map(lambda x: res[x][k], res)), width=0.6)
plt.title(f"{k} for various estimators [C]- Constrained ; [UC]- Unconstrained")
plt.show()
###Output
_____no_output_____
###Markdown
$\hat{g}$ result for the constrained optimizer
###Code
print(f"mean ghat value: {tpr_ab(X.to_numpy(), y.to_numpy().flatten(), estimator.predict(X), delta=0.05)}")
###Output
mean ghat value: 0.6623616497622877
###Markdown
$\hat{g}$ for constrained estimator using CMAES
###Code
estimator_cmaes = SeldonianAlgorithmLogRegCMAES(X.to_numpy(), y.to_numpy().flatten(), g_hats=ghats, verbose=True)
estimator_cmaes.fit()
print(f"\nAccuracy: {balanced_accuracy_score(y, estimator_cmaes.predict(X))}\n")
print(f"mean ghat value: {tpr_ab(X.to_numpy(), y.to_numpy().flatten(), estimator_cmaes.predict(X), delta=0.05)}")
###Output
Max number of iters: 1000
max iterations: 800
Current evaluation: 800 average loss:0.6927924554145636
Accuracy: 0.32674882812268513
mean ghat value: -0.10744880141361346
###Markdown
$\hat{g}$ for unconstrained optimizer using `scipy.optimize` package
###Code
uc_estimator = LogisticRegressionSeldonianModel(X.to_numpy(), y.to_numpy().flatten(), g_hats=[]).fit(
opt=op_method)
print(f"Accuracy: {balanced_accuracy_score(y, uc_estimator.predict(X))}\n")
print(f"mean ghat value: {tpr_ab(X.to_numpy(), y.to_numpy().flatten(), uc_estimator.predict(X), delta=0.05)}")
###Output
Optimization terminated successfully.
Current function value: 0.153541
Iterations: 10
Function evaluations: 477
Optimization result: Optimization terminated successfully.
Accuracy: 0.5305618796351435
mean ghat value: 0.6625296178578743
###Markdown
$\hat{g}$ for unconstrained CMA-ES optimizer
###Code
uc_estimator_cmaes = LogisticRegressionCMAES(X.to_numpy(), y.to_numpy().flatten(), verbose=True)
uc_estimator_cmaes.fit()
print(f"Accuracy: {balanced_accuracy_score(y, uc_estimator_cmaes.predict(X))}\n")
print(f"mean ghat value: {tpr_ab(X.to_numpy(), y.to_numpy().flatten(), uc_estimator_cmaes.predict(X), delta=0.05)}")
###Output
Max number of iters: 1000
max iterations: 800
Accuracy: 0.528130183873321erage loss:0.15437804313816825
mean ghat value: 0.6623616497622877
###Markdown
Sklearn estimator
###Code
logreg_sk = LogisticRegression().fit(X.to_numpy(),y.to_numpy().flatten())
print(f"Accuracy: {balanced_accuracy_score(y, uc_estimator.predict(X))}\n")
print(f"mean ghat value: {tpr_ab(X.to_numpy(), y.to_numpy().flatten(), logreg_sk.predict(X), delta=0.05)}")
###Output
Accuracy: 0.5305618796351435
mean ghat value: 0.6623616497622877
|
PRACTICE_Function_v3_QUES.ipynb | ###Markdown
PRACTICE PROBLEMS ON FUNCTION**[MUST MAINTAIN VARIABLE NAMING CONVENTIONS FOR ALL THE TASKS]****[Solve all the tasks sequentially]** Task 1Write a function called **check_awesomeness** that takes a number as an argument and Checks whether the number is Awesome or not. If the number is Awesome, it returns True. Otherwise False. **Awesome number:** a number where every digit is less than its immediate left digit is called an Awesome number. A single digit number cannot be a awesome number(e.g. 5421 is an Awesome number).==========================================================**Function Call1:**\check_awesomeness(976321)\**Output1:**\True==========================================================**Function Call2:**\check_awesomeness(9766321)\**Output2:**\False==========================================================**Function Call3:**\check_awesomeness(9)\**Output3:**\False==========================================================**Function Call4:**\check_awesomeness(78)\**Output4:**\False==========================================================**Function Call5:**\check_awesomeness(87)\**Output4:**\True
###Code
#todo
###Output
_____no_output_____
###Markdown
Task 2Write a function called **check_awesome** that takes a list of numbers(integer) as an argument and Prints whether the number is Awesome or not.**Must reuse the check_awesomeness() function.**==========================================================**Function Call1:**\check_awesome([976321, 321, 9763221, 9742, 876, 3211])\**Output1:**\976321 is an awesome number.\321 is a awesome number.\9763221 is a not-so-awesome number.\9742 is an awesome number.\876 is an awesome number.\3211 is a not-so-awesome number.==========================================================**Function Call2:**\check_awesome([97821, 97210, 979210])\**Output2:**\97821 is a not-so-awesome number.\97210 is an awesome number.\979210 is a not-so-awesome number.
###Code
#todo
###Output
_____no_output_____
###Markdown
Task 3Write a function called **find_max_min** that takes a list of numbers(integers) as a function parameter and finds the numbers with maximum and minimum value. Then returns these two numbers as a **tuple** and prints the results using tuple unpacking in the function call accorrding to the given format. Both valid and invalid numbers should be considered for finding maximum and minimum.[Must use tuple packing & unpacking] ==========================================================**Function Call1:**\find_max_min([976321, 321, 9763221, 9742, 876, 3211, 976321, 9742])\**Output1:**\Returned value from find_max_min() is: (9763221, 321)\Number with maximum value is 9763221\Number with minimum value is 321==========================================================**Function Call2:**\find_max_min([97821, 1, 97210, 963, 979210, 979210])\**Output2:**\Returned value from find_max_min() is: (979210, 1)\Number with maximum value is 979210\Number with minimum value is 1
###Code
#todo
###Output
_____no_output_____
###Markdown
Task 4Write a function called **data_cleaning** that takes a string as an argument. Then gets the numbers from the given string and removes all extra spaces. Stores the clean numbers (integers) in a list and **returns** it to the function call.==========================================================**Function Call1:**\data_cleaning("97821, 1 , 97210, 963, 979210 , 979210 ")\**Output1:**\Data after cleaning: [97821, 1, 97210, 963, 979210, 979210]==========================================================**Function Call2:**\data_cleaning("976321, 321 , 9763221, 9742, 876, 3211")\**Output2:**\Data after cleaning: [976321, 321, 9763221, 9742, 876, 3211]==========================================================**Function Call3:**\data_cleaning("976321, 321 , 9763221, 9742, 876, 3211, 976321 , 9742")\**Output3:**\Data after cleaning: [976321, 321, 9763221, 9742, 876, 3211, 976321, 9742]
###Code
#todo
###Output
_____no_output_____
###Markdown
Task 5Write a function called **grouping_data** that takes a list of numbers(integers) as a function parameter and creates a dictionary where "valid", "invalid", and "duplicate" are the keys. The numbers that falls in those categories are the values.**Valid & Invalid numbers:** A number with a minimum length of 4 and a maximum length of 6 is considered to be valid for this assignment. All the other numbers are considered to be Invalid numbers.\**Duplicate numbers:** If a number appears more than once in the string, then that is a dupliacte number. Both valid and invalid numbers can be duplicates.Lastly, print the dictionary **inside the function** and **return** the dictionary to the function call.==========================================================**Function Call1:**\grouping_data([97821, 1, 97210, 963, 979210, 979210])\**Output1:**\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [97821, 97210, 979210], 'invalid': [1, 963], 'duplicate': [979210]}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [97821, 97210, 979210], 'invalid': [1, 963], 'duplicate': [979210]}\Valid number list obtained from the dictionary: [97821, 97210, 979210]==========================================================**Function Call2:**\grouping_data([976321, 321, 9763221, 9742, 876, 3211])\**Output2:**\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': []}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': []}\Valid number list obtained from the dictionary: [976321, 9742, 3211]==========================================================**Function Call3:**\grouping_data([976321, 321, 9763221, 9742, 876, 3211, 976321, 9742])\**Output3:**\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': [976321, 9742]}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': [976321, 9742]}\Valid number list obtained from the dictionary: [976321, 9742, 3211]
###Code
#todo
###Output
_____no_output_____
###Markdown
Task6Write a function called **individual_line_data** that takes a string as an argument. This function should do the following. * clean and prints the data.* group the numbers in a dictionary obtained from the given string.* check whether the **valid numbers** are awesome or not.* find and print the maximum and minimum number among the **valid numbers** obtained from the given string.*Hints:*\You MUST reuse data_cleaning(), grouping_data(), check_awesome() and find_max_min() functions. ==========================================================**Function Call1:**\individual_line_data("97821, 1 , 97210, 963, 979210 , 979210 ")\**Output1:**\Data after cleaning: [97821, 1, 97210, 963, 979210, 979210]\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [97821, 97210, 979210], 'invalid': [1, 963], 'duplicate': [979210]}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [97821, 97210, 979210], 'invalid': [1, 963], 'duplicate': [979210]}\Valid number list obtained from the dictionary: [97821, 97210, 979210]\=================================================\^_^ Awesomeness checking of the Valid numbers ^_^\97821 is a not-so-awesome number.\97210 is an awesome number.\979210 is a not-so-awesome number.\=================================================\Returned value from find_max_min() is: (979210, 97210)\Valid number with maximum value is 979210\Valid number with minimum value is 97210**Function Call2:**\individual_line_data("976321, 321 , 9763221, 9742, 876, 3211")\**Output2:**\Data after cleaning: [976321, 321, 9763221, 9742, 876, 3211]\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': []}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': []}\Valid number list obtained from the dictionary: [976321, 9742, 3211]\=================================================\^_^ Awesomeness checking of the Valid numbers ^_^\976321 is an awesome number.\9742 is an awesome number.\3211 is a not-so-awesome number.\=================================================\Returned value from find_max_min() is: (976321, 3211)\Valid number with maximum value is 976321\Valid number with minimum value is 3211**Function Call3:**\individual_line_data("976321, 321 , 9763221, 9742, 876, 3211, 976321 , 9742")\**Output3:**\Data after cleaning: [976321, 321, 9763221, 9742, 876, 3211, 976321, 9742]\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': [976321, 9742]}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': [976321, 9742]}\Valid number list obtained from the dictionary: [976321, 9742, 3211]\=================================================\^_^ Awesomeness checking of the Valid numbers ^_^\976321 is an awesome number.\9742 is an awesome number.\3211 is a not-so-awesome number.\=================================================\Returned value from find_max_min() is: (976321, 3211)\Valid number with maximum value is 976321\Valid number with minimum value is 3211
###Code
#todo
###Output
_____no_output_____
###Markdown
Task 7Write a function called **number_analysis** that takes a list of strings as an argument and analyse those strings. \**Must reuse the individual_line_data() function.**
###Code
#todo
###Output
_____no_output_____
###Markdown
Task 8Run the following block of code for seeing the final output of this Problem.**Output:** Printing data for line no: 01 \Data after cleaning: [321, 976322]\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [976322], 'invalid': [321], 'duplicate': []}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [976322], 'invalid': [321], 'duplicate': []}\Valid number list obtained from the dictionary: [976322]\=================================================\^_^ Awesomeness checking of the Valid numbers ^_^\976322 is a not-so-awesome number.\=================================================\Returned value from find_max_min() is: (976322, 976322)\Valid number with maximum value is 976322\Valid number with minimum value is 976322  Printing data for line no: 02 \Data after cleaning: [97821, 1, 97210, 963, 979210, 979210]\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [97821, 97210, 979210], 'invalid': [1, 963], 'duplicate': [979210]}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [97821, 97210, 979210], 'invalid': [1, 963], 'duplicate': [979210]}\Valid number list obtained from the dictionary: [97821, 97210, 979210]\=================================================\^_^ Awesomeness checking of the Valid numbers ^_^\97821 is a not-so-awesome number.\97210 is an awesome number.\979210 is a not-so-awesome number.\=================================================\Returned value from find_max_min() is: (979210, 97210)\Valid number with maximum value is 979210\Valid number with minimum value is 97210  Printing data for line no: 03 \Data after cleaning: [976321, 321, 9763221, 9742, 876, 3211]\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': []}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': []}\Valid number list obtained from the dictionary: [976321, 9742, 3211]\=================================================\^_^ Awesomeness checking of the Valid numbers ^_^\976321 is an awesome number.\9742 is an awesome number.\3211 is a not-so-awesome number.\=================================================\Returned value from find_max_min() is: (976321, 3211)\Valid number with maximum value is 976321\Valid number with minimum value is 3211  Printing data for line no: 04 \Data after cleaning: [976321, 321, 9763221, 9742, 876, 3211, 976321, 9742]\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Data dictionary printing inside the function:\{'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': [976321, 9742]}\XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\Returned dictionary: {'valid': [976321, 9742, 3211], 'invalid': [321, 9763221, 876], 'duplicate': [976321, 9742]}\Valid number list obtained from the dictionary: [976321, 9742, 3211]\=================================================\^_^ Awesomeness checking of the Valid numbers ^_^\976321 is an awesome number.\9742 is an awesome number.\3211 is a not-so-awesome number.\=================================================\Returned value from find_max_min() is: (976321, 3211)\Valid number with maximum 
value is 976321\Valid number with minimum value is 3211
###Code
lines_of_data = [
"321 , 976322",
"97821, 1 , 97210, 963, 979210 , 979210",
"976321, 321 , 9763221, 9742, 876, 3211",
"976321, 321 , 9763221, 9742, 876, 3211, 976321, 9742"
]
number_analysis(lines_of_data)
###Output
_____no_output_____ |
exemples/HP_exemple.ipynb | ###Markdown
Test LP Filter Setup
###Code
import numpy as np
try:
from SecondOrderElec import HP
from SecondOrderElec.plot import plot_time
except ImportError:
import sys
sys.path.append('../.')
from SecondOrderElec import HP
from SecondOrderElec.plot import plot_time
###Output
_____no_output_____
###Markdown
Let's create some filters
###Code
T1 = HP(2, 0.8, 100)
T2 = HP(2, 1.5, 100)
###Output
_____no_output_____
###Markdown
Let's create a logspace for later
###Code
w = np.logspace(1,3,1000)
###Output
_____no_output_____
###Markdown
Poles / Zeros
###Code
for T in [T1,T2]:
T.pzmap()
###Output
_____no_output_____
###Markdown
Time Response
###Code
t = np.arange(0,0.5,0.001)
for T in [T1,T2]:
T.step(T=t)
for index, T in enumerate([T1,T2]):
print("system {}".format(index))
print("wp = {} rad/s".format(T.wp))
print("Tp = {} rad/s".format(T.Tp))
print("R = {}".format(T.R))
###Output
system 0
wp = 59.999999999999986 rad/s
Tp = 0.10471975511965981 rad/s
R = 4348.4746593769105
system 1
wp = None rad/s
Tp = None rad/s
R = 0
###Markdown
Frequency Response
###Code
for T in [T1,T2]:
T.freqresp(w=w)
###Output
_____no_output_____
###Markdown
Output
###Code
x = np.linspace(0, 1, 1000)
y = np.cos(x*0.02)+np.sin(x*100)*0.2
plot_time(x,y)
t,s,x = T1.output(U=y, T=x)
###Output
_____no_output_____ |
Week 15 - Numerical Integration/NuMeth_6_Numerical_Integration.ipynb | ###Markdown
Numerical Integration$_{\text{©D.J. Lopez | 2021 | Computational Methods for Computer Engineers}}$Reviving your integral calculus classes, we will be applying the fundamental concepts of integration to computational and numerical methods. Numerical integration, or quadrature, greatly helps, again, with the field of optimization and estimation. This module will cover:* The Trapezoidal Rule* Simpson's 1/3 Integration Rule* Simpson's 3/8 Integration Rule* Monte Carlo Simulations/Integration
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
6.1 Trapezoidal ruleThe concept behind the Trapezoidal rule is a good review of what integration is and how it can be converted into a numerical and computational implementation.Integration is usually defined as the area under a curve or the area of the function. As in the image below, integration is usually seen as the sum of the areas of the boxes (in this case trapezoids) that make up the area under the curve.The Trapezoidal rule takes advantage of this concept by summing the areas of those trapezoids. If you recall, the area of a trapezoid is given as:$$A_{trapz}=\frac{h(a+b)}{2} \\ _{\text{(Eq. 6.1)}}$$where $A_{trapz}$ is the area of a trapezoid, $a$ is the shorter base, $b$ is the longer base, and $h$ is the height of the trapezoid. Use the image below as a visual reference.In the trapezoidal rule, we can see that the trapezoids are right trapezoids. We can formally construct the representative equation modelling the concept of the trapezoidal rule as:$$\int^b_af(x)dx \approx h\left[ \frac{f(x_0)+f(x_n)}{2} +\sum^{n-1}_{i=1}f(x_i) \right]\\ _{\text{(Eq. 6.2)}}$$For our example, we will model the equations:$$\int^{\frac{\pi}{2}}_0x\sin(x)dx = 1$$ and $$\int^{10}_0x^2dx = \frac{1000}{3}$$
###Code
f = lambda x : x*np.sin(x)
a, b = 0, np.pi/2
n = 5
h = (b-a)/n
A= (f(a)+f(b))/2
for i in range(1,n):
A += f(a+i*h)
S = h*A
S
h*(0.5*(f(a)+f(b))+np.sum(f(a+h*np.arange(1,n))))
def trapz_rule(func,lb,ub,size):
h = (ub-lb)/size
return h*(0.5*(func(lb)+func(ub))+np.sum(func(lb+h*np.arange(1,size))))
f = lambda x: x**2
sum = trapz_rule(f, 0,10,1e4)
sum
###Output
_____no_output_____
###Markdown
Simpson's 1/3 RuleSimpson's 1/3 Rule, unlike the Trapezoidal Rule, computes more than 2 strips of trapezoids at a time. And rather than trapezoids, Simpson's 1/3 rule uses parabolas ($P(x)$) to approximate areas under the curve.The Simpson's 1/3 Rule can be formulated as:$$\int^b_af(x)dx \approx \frac{(b-a)}{6}\left(f(a)+4f\left(\frac{a+b}{2}\right)+f(b)\right)\\ _{\text{(Eq. 6.3)}}$$It can be discretized as:$$\int^b_af(x)dx \approx \frac{h}{3}\left[f(x_0)+4\sum^{n-1}_{i\in \text{odd}}f(x_i)+2\sum^{n-2}_{i\in \text{even}}f(x_i)+f(x_n)\right]\\ _{\text{(Eq. 6.4)}}$$
###Code
f = lambda x : x*np.sin(x)
a, b = 0, np.pi/2
n = 6
h = (b-a)/n
A= (f(a)+f(b))
for i in range(1,n,2):
A += 4*f(a+i*h)
for i in range(2,n,2):
A += 2*f(a+i*h)
S = h/3*(A)
S
def simp_13(func,lb,ub,divs):
h = (ub-lb)/divs
A = (func(lb)+func(ub))+ \
np.sum(4*func(lb+h*np.arange(1,divs,2)))+ \
np.sum(2*func(lb+h*np.arange(2,divs,2)))
S = (h/3)*A
return S
h = lambda x: x**2
sum = simp_13(h, 0,10,1e4)
sum
###Output
_____no_output_____
###Markdown
Simpson's 3/8 RuleSimpson's 3/8 rule, or Simpson's second rule, is similar to the 1/3 rule, but instead of a parabolic or quadratic approximation it uses a cubic approximation.$$\int^b_af(x)dx \approx \frac{(b-a)}{8}\left(f(a)+3f\left(\frac{2a+b}{3}\right)+3f\left(\frac{a+2b}{3}\right)+f(b)\right)\\ _{\text{(Eq. 6.5)}}$$It can be discretized as:$$\int^b_af(x)dx \approx \frac{3h}{8}\left[f(x_0)+3\sum^{n-1}_{i=1,4,7,\dots}f(x_i)+3\sum^{n-2}_{i=2,5,8,\dots}f(x_i)+2\sum^{n-3}_{i=3,6,9,\dots}f(x_i)+f(x_n)\right]\\ _{\text{(Eq. 6.6)}}$$
###Code
def simp_38(func,lb,ub,divs):
h = (ub-lb)/divs
A = (func(lb)+func(ub))+ \
np.sum(3*(func(lb+h*np.arange(1,divs,3))))+ \
np.sum(3*(func(lb+h*np.arange(2,divs,3))))+ \
np.sum(2*func(lb+h*np.arange(3,divs,3)))
S = (3*h/8)*A
return S
f = lambda x: x*np.sin(x)
sum = simp_38(f, 0,np.pi/2,1e4)
sum
h = lambda x: x**2
sum = simp_38(h, 0,10,1e4)
sum
###Output
_____no_output_____
###Markdown
Monte Carlo IntegrationThe Monte Carlo Simulation or integration uses a different approach to approximating the area under a curve or function. It differs from the Trapezoidal and Simpson's Rules since it does not use a polynomial for interpolating the curve. Monte Carlo integration uses the idea of uniform random sampling in a given space and computes the samples that fall under the curve of the equation. In this implementation, we will use the most vanilla version of Monte Carlo integration. We will use the definition of the mean of a function given as:$$\langle f \rangle = \frac{1}{(b-a)}\int^b_af(x)dx \\ _{\text{(Eq. 6.7)}}$$We can then perform algebraic manipulation to isolate the integral of the function:$$(b-a)\langle f \rangle = \int^b_af(x)dx \\ _{\text{(Eq. 6.8)}}$$Then, substituting the discretized mean formula for $\langle f \rangle$, we get:$$(b-a)\frac{1}{N}\sum^N_{i=1}f(x_i) \approx \int^b_af(x)dx \\ _{\text{(Eq. 6.9)}}$$
###Code
a, b = 0, np.pi/2
n = 1e3
samples = np.random.uniform(a,b,int(n))
f = lambda x: x*np.sin(x)
A = np.sum(f(samples))
S = (b-a)/n * A  # Eq. 6.9: (b-a) times the mean of f over the samples
S
###Output
_____no_output_____
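###Markdown
As a quick sanity check, the cell below compares the three quadrature rules and a simple Monte Carlo estimate against the known analytic values ($1$ and $\frac{1000}{3}$). This is only an illustrative sketch and assumes the functions defined above (`trapz_rule`, `simp_13`, `simp_38`) have already been run.
###Code
# Illustrative sketch: absolute error of each method on the two test integrals above
exact_xsinx = 1.0      # integral of x*sin(x) on [0, pi/2]
exact_x2 = 1000/3      # integral of x**2 on [0, 10]
f = lambda x: x*np.sin(x)
g = lambda x: x**2
for name, rule in [("Trapezoidal", trapz_rule), ("Simpson 1/3", simp_13), ("Simpson 3/8", simp_38)]:
    print(name, abs(rule(f, 0, np.pi/2, 1e4) - exact_xsinx), abs(rule(g, 0, 10, 1e4) - exact_x2))
# Monte Carlo estimate of the first integral using 10000 uniform samples
samples = np.random.uniform(0, np.pi/2, 10000)
mc = (np.pi/2) / 10000 * np.sum(f(samples))
print("Monte Carlo", abs(mc - exact_xsinx))
###Output
_____no_output_____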
###Markdown
End of Module Activity$\text{Use another notebook to answer the following problems and create a report for the activities in this notebook.}$ 1.) Research on the different numerical integration functions implemented in `scipy`. Explain in your report the function/s with three (3) different functions as examples.2.) Create numerical integration of two sample cases for each of the following functions: higher-order polynomials (degrees greater than 4), trigonometric functions, and logarithmic functions.> a.) Implement the numerical integration techniques used in this notebook including the `scipy` function/s.> b.) Measure and compare the errors of each integration technique to the functions you have created.3.) Research on the "Law of Big Numbers" and explain the law through:> a.) Testing Simpson's 3/8 Rule by initializing the bin sizes to be arbitrarily large. Run this for 100 iterations while decreasing the bin sizes by a factor of 100. Graph the errors using `matplotlib`.> b.) Testing the Monte Carlo Simulation with initializing the sample size from an arbitrarily small size. Run this for 100 iterations while increasing the sample sizes by a factor of 100. Graph the errors using `matplotlib`.
###Code
###Output
_____no_output_____ |
opc_python/hulab/collaboration/feature_preparation.ipynb | ###Markdown
creates the features matrix with the descriptor data plus morgan adds the squared values as well
###Code
from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import RandomizedLasso
import sys
import os
# load the descriptors and fill nan values with 0
descriptors = pd.read_csv(os.path.abspath('__file__' + "/../../../../data/molecular_descriptors_data.txt"),sep='\t')
descriptors.set_index('CID', inplace=True)
descriptors.sort_index(inplace=True)
descriptors.fillna(value=0,inplace=True)
min_max_scaler = preprocessing.MinMaxScaler()
descriptors.ix[:,:]= min_max_scaler.fit_transform(descriptors)
# add squared values to the feature vector
descriptors_squares = descriptors**2
descriptors_squares.columns = [name + '_2' for name in descriptors.columns]
descriptors = pd.concat((descriptors,descriptors_squares),axis=1)
#descriptors.reset_index(inplace=1)
descriptors.head()
# load morgan similarity features
morgan = pd.read_csv(os.path.abspath('__file__' + "/../../../../data/morgan_sim.csv"), index_col=0)
# convert the column names (CIDs) to strings
morgan.columns = [str(name) for name in morgan.columns]
# add squared values to the feature vector
morgan_squares = morgan **2
# rename features to CID + '_2'
morgan_squares.columns = [name + '_2' for name in morgan.columns]
# concat
morgan = pd.concat((morgan,morgan_squares),axis=1)
features = pd.concat((descriptors, morgan),axis=1)
features.shape
features.head()
features.to_csv('features.csv')
###Output
_____no_output_____ |
My notebooks/T8 - 4 - SVM - Face Recognition.ipynb | ###Markdown
Facial Recognition
###Code
from sklearn.datasets import fetch_lfw_people
import matplotlib.pyplot as plt
import numpy as np
faces = fetch_lfw_people(min_faces_per_person=60)
print(faces.target_names)
print(faces.images.shape)
fig, ax = plt.subplots(5,5, figsize=(16,9))
for i, ax_i in enumerate(ax.flat):
ax_i.imshow(faces.images[i], cmap="bone")
ax_i.set(xticks=[], yticks=[], xlabel=faces.target_names[faces.target[i]])
ax
from sklearn.svm import SVC
from sklearn.decomposition import RandomizedPCA
from sklearn.pipeline import make_pipeline
pca = RandomizedPCA(n_components=150, whiten=True, random_state=42)
svc = SVC(kernel="rbf", class_weight="balanced")
model = make_pipeline(pca, svc)
from sklearn.cross_validation import train_test_split
Xtrain, Xtest, Ytrain, Ytest = train_test_split(faces.data, faces.target, random_state=42)
from sklearn.grid_search import GridSearchCV
param_grid = {
"svc__C": [0.1, 1, 5, 10, 50],
"svc__gamma": [0.0001, 0.0005, 0.001, 0.005, 0.01]
}
grid = GridSearchCV(model, param_grid)
%time grid.fit(Xtrain, Ytrain)
print(grid.best_params_)
classifier = grid.best_estimator_
yfit = classifier.predict(Xtest)
fig, ax = plt.subplots(8,6, figsize=(16,9))
for i, ax_i in enumerate(ax.flat):
ax_i.imshow(Xtest[i].reshape(62,47), cmap="bone")
ax_i.set(xticks=[], yticks=[])
    # Check whether the system has misidentified any of the people
ax_i.set_ylabel(faces.target_names[yfit[i]].split()[-1], color = "black" if yfit[i]==Ytest[i] else "red")
fig.suptitle("Predicciones de las imágenes (incorrectas en rojo)", size=15)
from sklearn.metrics import classification_report
print(classification_report(Ytest, yfit, target_names=faces.target_names))
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(Ytest, yfit)
import seaborn as sns; sns.set()
sns.heatmap(mat.T, square=True, annot=True, fmt="d", cbar=True,
xticklabels=faces.target_names, yticklabels=faces.target_names)
###Output
_____no_output_____ |
week_02/Spiced academy Project-Week-2- machine learning DRAFT.ipynb | ###Markdown
Step 1Read the file train.csv into Python and print a few rows.
###Code
import pandas as pd
df = pd.read_csv('data/train.csv',index_col = 0)
###Output
_____no_output_____
###Markdown
Step 2Calculate the number of surviving/non-surviving passengers and display it as a bar plot.
###Code
import matplotlib.pyplot as plt
import seaborn as sns
# df.groupby('Survived').count()
# df_01count = df.groupby('Survived')['Survived'].count()
# pd.Series(df_01count)
# bar chart
# ax = pd.Series(df_01count).plot.bar(subplots=True, label='', figsize = (8, 6))
###Output
_____no_output_____
###Markdown
Step 3Calculate the proportion of surviving 1st class passengers with regards to the total number of 1st class passengers.
###Code
# total number of 1st passenger
# m = df['Pclass'].count()
# %
# n = df[(df['Pclass'] == 1) & (df['Survived'] == 1)]['Pclass'].count()
# per_1_sur = n/m
# print('the % of surviving 1st class passengers with regards to the total number of 1st class passengers is', per_1_sur)
###Output
the % of surviving 1st class passengers with regards to the total number of 1st class passengers is 0.1526374859708193
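###Markdown
Side note (illustrative): `df['Pclass'].count()` above counts every passenger, so the printed value is survivors in 1st class divided by all passengers. To get the share of survivors among 1st class passengers only, the denominator can be restricted to `Pclass == 1` as well, as sketched below.
###Code
# Sketch: proportion of 1st class passengers who survived (denominator restricted to 1st class)
first_class = df[df['Pclass'] == 1]
prop_1st_survived = (first_class['Survived'] == 1).mean()
print('proportion of surviving 1st class passengers among 1st class passengers:', prop_1st_survived)
###Output
_____no_output_____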
###Markdown
Step 4Create a bar plot with separate bars for male/female passengers and 1st/2nd/3rd class passengers.
###Code
# df_02count = df.groupby(['Sex','Pclass'])['Pclass'].count()
# pd.Series(df_02count)
# bar chart
# ax = pd.Series(df_02count).plot.bar(subplots=True, label='', figsize = (8, 6))
#Step 5
#Create a histogram showing the age distribution of passengers. Compare surviving/non-surviving passengers.
import seaborn as sns
# Plot the histogram thanks to the distplot function
# sns.distplot(a=df[df['Survived'] == 1]['Age'], hist=True, kde=True,label='Survived', rug=False)
# sns.distplot(a=df[df['Survived'] == 0]['Age'], hist=True, kde=True,label='Vanished',rug=False)
# plt.legend()
# plt.show()
###Output
/home/guo/anaconda3/lib/python3.8/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
/home/guo/anaconda3/lib/python3.8/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
Step 6Calculate the average age for survived and drowned passengers separately.
###Code
# df_02count = df.groupby(['Survived'])['Age'].mean()
# pd.Series(df_02count)
###Output
_____no_output_____
###Markdown
Step 7Replace missing age values by the mean age.
###Code
plt.figure(figsize=(12, 8))
sns.heatmap(df.isna(), cbar=False)
# mean_Age = df.groupby('Survived')['Age'].transform('mean')
# print(mean_Age)
# compare with origin data
# df['Age']
# REPLACE NA values with age of non- and survived passengers
# mean_Age = df.groupby('Survived')['Age'].transform('mean')
# df['Age'].fillna(mean_Age, inplace=True)
# REPLACE NA values with age of non- and survived passengers and ...
# mean_Age = df.groupby(['Survived','Sex','Pclass'])['Age'].transform('mean')
# df['Age'].fillna(mean_Age, inplace=True)
# REPLACE NA values with age of all passengers
# df['Age'].fillna(df['Age'].mean(),inplace=True)
plt.figure(figsize=(12, 8))
sns.heatmap(df.isna(), cbar=False)
###Output
_____no_output_____
###Markdown
Step 8Create a table counting the number of surviving/dead passengers separately for 1st/2nd/3rd class and male/female.
###Code
# the order is important for display
# df_03count = df.groupby(['Pclass','Sex','Survived'])['Pclass'].count()
# pd.Series(df_03count)
# pd.DataFrame(df_03count)
from sklearn.model_selection import train_test_split as tts
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import math
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.pipeline import make_pipeline
# REPLACE NA values with age of non- and survived passengers and ...
mean_Age = df.groupby(['Survived','Sex','Pclass'])['Age'].transform('mean')
df['Age'].fillna(mean_Age, inplace=True)
import pandas as pd
from sklearn.preprocessing import KBinsDiscretizer
# transform a numerical column: Age
kbins = KBinsDiscretizer(n_bins=10, encode='onehot-dense', strategy='kmeans')
columns = df[['Age']]
kbins.fit(columns)
t = kbins.transform(columns)
# fill NA of Age, then scaling
# pipeline1 = make_pipeline(
# SimpleImputer(strategy='mean_Age'),
# KBinsDiscretizer(n_bins=10, encode='onehot-dense', strategy='kmeans')
# )
# Cabin data
#isolating the rooms and letters
df['Cabin_nr'] = df['Cabin'].fillna('Z',inplace=False)
df["Deck"] = df["Cabin_nr"].str.slice(0,1)
def one_hot_column(df, label, drop_col=False):
one_hot = pd.get_dummies(df[label], prefix=label)
if drop_col:
df = df.drop(label, axis=1)
df = df.join(one_hot)
return df
def one_hot(df, labels, drop_col=False):
for label in labels:
df = one_hot_column(df, label, drop_col)
return df
df = one_hot(df, ["Deck"],drop_col=True)
df.head(2)
# fill NA of Embarked with most frequent values, then binning
pipeline2 = make_pipeline(
SimpleImputer(strategy='most_frequent'),
OneHotEncoder(sparse=False, handle_unknown='ignore')
)
#Train-Test Split
from sklearn.model_selection import train_test_split
X = df.iloc[:, 1:]
y = df['Survived']
Xtrain, Xtest,ytrain,ytest = tts(X,y,train_size=0.75,test_size=0.25, random_state=40)
Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape
trans = ColumnTransformer([
('onehot', OneHotEncoder(sparse=False, handle_unknown='ignore'), ['Sex','Pclass']),
('scale', MinMaxScaler(), ['Fare']),
('impute_then_scale', KBinsDiscretizer(n_bins=10, encode='onehot-dense', strategy='kmeans'), ['Age']),
('impute_then_onehot',pipeline2, ['Embarked']),
('do_nothing', 'passthrough', ['SibSp','Parch','Deck_A','Deck_B','Deck_C','Deck_D','Deck_E','Deck_F','Deck_G','Deck_T','Deck_Z']),
])
# fit and transform training data
trans.fit(Xtrain)
Xtrain_transformed = trans.transform(Xtrain)
Xtrain_transformed.shape
# fit a log-reg model
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(max_iter=1000)
model.fit(Xtrain_transformed, ytrain)
#transform test data set
Xtest_transform = trans.transform(Xtest)
Xtest_transform.shape
#Evaluating metrics
from sklearn.metrics import accuracy_score, classification_report
# predict
ypred = model.predict(Xtrain_transformed)
acc = accuracy_score(ytrain,ypred)
print('Train accuracy is:', round(acc,3))
print(classification_report(ytrain,ypred))
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(model,Xtrain_transformed,ytrain)
#transform test data
Xtest_transform = trans.transform(Xtest)
Xtest_transform.shape
ypred = model.predict(Xtest_transform)
acc = accuracy_score(ytest, ypred)
print('Test accuracy is:', round(acc,3))
# ROC Curve
# from sklearn.metrics import roc_curve
# Advanced: ROC Curve
# probs = model.predict_proba(Xtrain_transformed)
# roc_curve(ytrain, probs, pos_label=2)
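# A working sketch (added illustration, assuming model/Xtrain_transformed/ytrain from the cells above):
# predict_proba returns one probability column per class, so keep only the positive class (Survived == 1).
# from sklearn.metrics import roc_curve
# probs_pos = model.predict_proba(Xtrain_transformed)[:, 1]
# fpr, tpr, thresholds = roc_curve(ytrain, probs_pos)
# plt.plot(fpr, tpr)
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.show()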
df.Cabin
#Embaked dummy codding
pd.get_dummies(df['Embarked'])
df = df.join(pd.get_dummies(df.Embarked))
df
# dummy codding SEX
pd.get_dummies(df['Sex'])
df = df.join(pd.get_dummies(df.Sex))
df
#isolating the rooms
df['Cabin_nr'] = df['Cabin'].fillna('Z',inplace=False)
df["Deck"] = df["Cabin_nr"].str.slice(0,1)
df["Room"] = df["Cabin_nr"].str.slice(1,5).str.extract("([0-9]+)", expand=False).astype("float")
df["Deck"],df['Room']
df['Room'] = df['Room'].fillna('0',inplace=False)
# dummy codding Deck
pd.get_dummies(df['Deck'])
df = df.join(pd.get_dummies(df.Deck))
df
def one_hot_column(df, label, drop_col=False):
'''
This function will one hot encode the chosen column.
Args:
df: Pandas dataframe
label: Label of the column to encode
drop_col: boolean to decide if the chosen column should be dropped
Returns:
pandas dataframe with the given encoding
'''
one_hot = pd.get_dummies(df[label], prefix=label)
if drop_col:
df = df.drop(label, axis=1)
df = df.join(one_hot)
return df
def one_hot(df, labels, drop_col=False):
'''
This function will one hot encode a list of columns.
Args:
df: Pandas dataframe
labels: list of the columns to encode
drop_col: boolean to decide if the chosen column should be dropped
Returns:
pandas dataframe with the given encoding
'''
for label in labels:
df = one_hot_column(df, label, drop_col)
return df
df = one_hot(df, ["Deck"],drop_col=True)
# carbin data -> boll value
df["Cabin_Data"] = df["Cabin"].isnull().apply(lambda x: not x)
df = one_hot(df, ["Cabin_Data"],drop_col=True) # not valid for this test, as it represents the availability of data
df
df['Cabin_nr'] = df['Cabin'].fillna(0) # zero,
df
# df['Cabin_nr1'] = df['Cabin'].fillna(df.Cabin.isna()) # zero,
# df
#Cabin dummy codding
# convert Cabin to a Boolean
# df['cabin'] = df['Cabin_nr'].str[0]
# pd.get_dummies(df['cabin'])
# Logistic Regression
X = df[['Pclass', 'Age', 'SibSp', 'Fare','C','Q','S','female','male',
'Deck_A','Deck_B','Deck_C','Deck_D','Deck_E','Deck_F','Deck_G','Deck_T','Deck_Z']] # input data, independent vars
y = df['Survived'] # target data, dependent var
X.shape, y.shape
Xtrain, Xtest,ytrain,ytest = tts(X,y,train_size=0.75,test_size=0.25, random_state=40)
Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape
model = LogisticRegression()
model.fit(Xtrain,ytrain) # trains the model
model.coef_
model.intercept_
ypred = model.predict(Xtrain)
accuracy_score(ytrain,ypred) # --> proportion of correct predictions
# evaluate on the test set
ypred_test = model.predict(Xtest)
accuracy_score(ytest,ypred_test)
###Output
_____no_output_____ |
4. FeedForward Neural Networks/4.3.2mist1layer.ipynb | ###Markdown
Test Sigmoid, Tanh, and Relu Activation Functions on the MNIST Dataset Table of ContentsIn this lab, you will test sigmoid, tanh, and relu activation functions on the MNIST dataset. Neural Network Module and Training Function Make Some Data Define Several Neural Networks, Criterion Function, and Optimizer Test Sigmoid, Tanh, and Relu Analyze ResultsEstimated Time Needed: 25 min Preparation We'll need the following libraries
###Code
# Import the libraries we need for this lab
# Using the following line code to install the torchvision library
# !conda install -y torchvision
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import torch.nn.functional as F
import matplotlib.pylab as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Neural Network Module and Training Function Define the neural network module or class using the sigmoid activation function:
###Code
# Build the model with sigmoid function
class Net(nn.Module):
# Constructor
def __init__(self, D_in, H, D_out):
super(Net, self).__init__()
self.linear1 = nn.Linear(D_in, H)
self.linear2 = nn.Linear(H, D_out)
# Prediction
def forward(self, x):
x = torch.sigmoid(self.linear1(x))
x = self.linear2(x)
return x
###Output
_____no_output_____
###Markdown
Define the neural network module or class using the Tanh activation function:
###Code
# Build the model with Tanh function
class NetTanh(nn.Module):
# Constructor
def __init__(self, D_in, H, D_out):
super(NetTanh, self).__init__()
self.linear1 = nn.Linear(D_in, H)
self.linear2 = nn.Linear(H, D_out)
# Prediction
def forward(self, x):
x = torch.tanh(self.linear1(x))
x = self.linear2(x)
return x
###Output
_____no_output_____
###Markdown
Define the neural network module or class using the Relu activation function:
###Code
# Build the model with Relu function
class NetRelu(nn.Module):
# Constructor
def __init__(self, D_in, H, D_out):
super(NetRelu, self).__init__()
self.linear1 = nn.Linear(D_in, H)
self.linear2 = nn.Linear(H, D_out)
# Prediction
def forward(self, x):
x = F.relu(self.linear1(x))
x = self.linear2(x)
return x
###Output
_____no_output_____
###Markdown
Define a function to train the model. In this case, the function returns a Python dictionary to store the training loss for each iteration and accuracy on the validation data.
###Code
# Define the function for training the model
def train(model, criterion, train_loader, validation_loader, optimizer, epochs = 100):
i = 0
useful_stuff = {'training_loss':[], 'validation_accuracy':[]}
for epoch in range(epochs):
for i, (x, y) in enumerate(train_loader):
optimizer.zero_grad()
z = model(x.view(-1, 28 * 28))
loss = criterion(z, y)
loss.backward()
optimizer.step()
useful_stuff['training_loss'].append(loss.data.item())
correct = 0
for x, y in validation_loader:
yhat = model(x.view(-1, 28 * 28))
_, label=torch.max(yhat, 1)
correct += (label == y).sum().item()
accuracy = 100 * (correct / len(validation_dataset))
useful_stuff['validation_accuracy'].append(accuracy)
return useful_stuff
###Output
_____no_output_____
###Markdown
Make Some Data Load the training dataset by setting the parameters train to True and convert it to a tensor by placing a transform object in the argument transform.
###Code
# Create the training dataset
train_dataset = dsets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())
###Output
_____no_output_____
###Markdown
Load the testing dataset by setting the parameter train to False and convert it to a tensor by placing a transform object in the argument transform.
###Code
# Create the validation dataset
validation_dataset = dsets.MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor())
###Output
_____no_output_____
###Markdown
Create the criterion function:
###Code
# Create the criterion function
criterion = nn.CrossEntropyLoss()
###Output
_____no_output_____
###Markdown
Create the training-data loader and the validation-data loader object:
###Code
# Create the training data loader and validation data loader object
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=2000, shuffle=True)
validation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=5000, shuffle=False)
###Output
_____no_output_____
###Markdown
Define the Neural Network, Criterion Function, Optimizer, and Train the Model Create the criterion function:
###Code
# Create the criterion function
criterion = nn.CrossEntropyLoss()
###Output
_____no_output_____
###Markdown
Create the model with 100 hidden neurons:
###Code
# Create the model object
input_dim = 28 * 28
hidden_dim = 100
output_dim = 10
model = Net(input_dim, hidden_dim, output_dim)
###Output
_____no_output_____
###Markdown
Test Sigmoid, Tanh, and Relu Train the network by using the sigmoid activations function:
###Code
# Define a training function to train the model
def train(model, criterion, train_loader, validation_loader, optimizer, epochs=100):
i = 0
useful_stuff = {'training_loss': [],'validation_accuracy': []}
for epoch in range(epochs):
for i, (x, y) in enumerate(train_loader):
optimizer.zero_grad()
z = model(x.view(-1, 28 * 28))
loss = criterion(z, y)
loss.backward()
optimizer.step()
#loss for every iteration
useful_stuff['training_loss'].append(loss.data.item())
correct = 0
for x, y in validation_loader:
#validation
yhat = model(x.view(-1, 28 * 28))
_, label = torch.max(yhat, 1)
correct += (label == y).sum().item()
accuracy = 100 * (correct / len(validation_dataset))
useful_stuff['validation_accuracy'].append(accuracy)
return useful_stuff
# Train a model with sigmoid function
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
training_results = train(model, criterion, train_loader, validation_loader, optimizer, epochs=30)
###Output
_____no_output_____
###Markdown
Train the network by using the Tanh activations function:
###Code
# Train a model with Tanh function
model_Tanh = NetTanh(input_dim, hidden_dim, output_dim)
optimizer = torch.optim.SGD(model_Tanh.parameters(), lr=learning_rate)
training_results_tanch = train(model_Tanh, criterion, train_loader, validation_loader, optimizer, epochs=30)
###Output
_____no_output_____
###Markdown
Train the network by using the Relu activations function:
###Code
# Train a model with Relu function
modelRelu = NetRelu(input_dim, hidden_dim, output_dim)
optimizer = torch.optim.SGD(modelRelu.parameters(), lr=learning_rate)
training_results_relu = train(modelRelu,criterion, train_loader, validation_loader, optimizer, epochs=30)
###Output
_____no_output_____
###Markdown
Analyze Results Compare the training loss for each activation:
###Code
# Compare the training loss
plt.plot(training_results_tanch['training_loss'], label='tanh')
plt.plot(training_results['training_loss'], label='sigmoid')
plt.plot(training_results_relu['training_loss'], label='relu')
plt.ylabel('loss')
plt.title('training loss iterations')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Compare the validation loss for each model:
###Code
# Compare the validation loss
plt.plot(training_results_tanch['validation_accuracy'], label='tanh')
plt.plot(training_results['validation_accuracy'], label='sigmoid')
plt.plot(training_results_relu['validation_accuracy'], label='relu')
plt.ylabel('validation accuracy')
plt.xlabel('epochs ')
plt.legend()
plt.show()
###Output
_____no_output_____ |
notebooks/benchmarking/performance_profiling.ipynb | ###Markdown
Load data
###Code
# @time df_spatial, gene_names = Baysor.load_df("../run_results/spacejam2/allen_sm_fish/no_dapi/segmentation.csv");
# df_spatial[!, :x] = round.(Int, 10 .* (df_spatial.x .- minimum(df_spatial.x)));
# df_spatial[!, :y] = round.(Int, 10 .* (df_spatial.y .- minimum(df_spatial.y)));
# length(gene_names)
@time df_spatial, gene_names = Baysor.load_df("../run_results/merfish_moffit/segmentation.csv");
length(gene_names)
###Output
_____no_output_____
###Markdown
Molecule clustering Baysor
###Code
bench_df = @where(df_spatial, :x .< -3300, :y .< -3300) |> deepcopy;
gn_bench = gene_names;
# confidence_nn_id = Baysor.default_param_value(:confidence_nn_id, 10);
confidence_nn_id = Baysor.default_param_value(:confidence_nn_id, 50);
@show confidence_nn_id
size(bench_df, 1)
bench_clust = BenchmarkGroup();
B.append_confidence!(bench_df, nn_id=confidence_nn_id);
bench_clust["confidence"] = @benchmarkable B.append_confidence!($bench_df, nn_id=$confidence_nn_id);
adjacent_points, adjacent_weights = B.build_molecule_graph(bench_df, filter=false);
bench_clust["mol_graph"] = @benchmarkable B.build_molecule_graph($bench_df, filter=false);
for cl in [2, 4, 6, 8, 10]
bench_clust["clust_$cl"] = @benchmarkable B.cluster_molecules_on_mrf($bench_df.gene, $adjacent_points, $adjacent_weights, $bench_df.confidence;
n_clusters=$cl, max_iters=5000, n_iters_without_update=100, verbose=false);
end
bench_clust_res = run(bench_clust)
bench_res_df = vcat([DataFrame("Key" => k, "Mean time, sec" => mean(v.times) ./ 1e9, "Std time, sec" => std(v.times) ./ 1e9,
"Num. samples" => length(v.times)) for (k,v) in bench_clust_res]...)
###Output
_____no_output_____
###Markdown
Leiden
###Code
using RCall
nm_bench = B.neighborhood_count_matrix(bench_df, 50, normalize=false);
size(nm_bench)
R"""
library(pagoda2)
library(conos)
library(microbenchmark)
cm <- as($nm_bench, "dgCMatrix")
rownames(cm) <- $gn_bench
colnames(cm) <- paste0("c", 1:ncol(cm))
getClusters <- function(cm, verbose=FALSE) {
p2 <- Pagoda2$new(cm, trim=5, n.cores=1, verbose=FALSE, log.scale=verbose)
p2$calculatePcaReduction(nPcs=50, odgenes=rownames(cm), maxit=1000, verbose=verbose, var.scale=FALSE)
p2$makeKnnGraph(k=30, type="PCA", center=T, distance="cosine", weight.type="none", verbose=verbose)
p2$getKnnClusters(method=conos::leiden.community, type="PCA", name="leiden", resolution=1.0)
return(p2$clusters$PCA$leiden)
}
b <- microbenchmark(
"clustering" = {getClusters(cm)},
times=5,
control=list(warmup=1)
)
"""
###Output
_____no_output_____
###Markdown
Aggregate
###Code
leiden_times = rcopy(R"b").time;
bench_res_df
df1 = hcat(DataFrame("Method" => "MRF", "Num. clusters" => 2:2:10), bench_res_df[[3, 1, 5, 4, 2],2:end]);
df2 = vcat(df1, DataFrame("Method" => "Leiden", "Num. clusters" => "Any", "Mean time, sec" => mean(leiden_times) / 1e9,
"Std time, sec" => std(leiden_times) / 1e9, "Num. samples" => 5));
df2[:, 3:4] .= round.(df2[:, 3:4], digits=2);
df2
CSV.write("plots/clustering_profiling.csv", df2)
###Output
_____no_output_____
###Markdown
Color embedding
###Code
@time neighb_cm = B.neighborhood_count_matrix(df_spatial, 40);
@time color_transformation = B.gene_composition_transformation(neighb_cm, df_spatial.confidence; sample_size=20000, spread=2.0, min_dist=0.1);
@time color_emb = B.transform(color_transformation, neighb_cm);
bench_emb = BenchmarkGroup();
bench_emb["neighborhood_count_matrix_40"] = @benchmarkable B.neighborhood_count_matrix($df_spatial, 40)
bench_emb["gene_composition_transformation_20k"] = @benchmarkable B.gene_composition_transformation(neighb_cm, df_spatial.confidence;
sample_size=20000, spread=2.0, min_dist=0.1)
bench_emb["transform"] = @benchmarkable B.transform(color_transformation, neighb_cm)
bench_emb_res = run(bench_emb)
bench_df = vcat([DataFrame("Key" => k, "Mean time, sec" => mean(v.times) ./ 1e9, "Std time, sec" => std(v.times) ./ 1e9,
"Num. samples" => length(v.times)) for (k,v) in bench_emb_res]...)
###Output
_____no_output_____
###Markdown
Segmentation
###Code
bench_segmentation = BenchmarkGroup();
@time df_spatial, gene_names = B.load_df("../run_results/iss_hippo/ca1_no_prior/segmentation.csv");
df_spatial[!, :cell_dapi] = df_spatial.parent_id;
dapi_arr = Float16.(Images.load("/home/vpetukhov/data/spatal/iss/hippocampus/CA1/Viktor/CA1DapiBoundaries_4-3_right.tif"));
iss = Dict(:df => df_spatial, :gene_names => gene_names, :name => "ISS", :dapi_arr => dapi_arr);
B.append_confidence!(df_spatial, (args["prior_segmentation"]===nothing ? nothing : df_spatial.prior_segmentation), nn_id=confidence_nn_id, prior_confidence=args["prior-segmentation-confidence"])
adjacent_points, adjacent_weights = build_molecule_graph(df_spatial, filter=false)[1:2];
mol_clusts = cluster_molecules_on_mrf(df_spatial.gene, adjacent_points, adjacent_weights, df_spatial.confidence;
n_clusters=args["n-clusters"], weights_pre_adjusted=true)
df_spatial[!, :cluster] = mol_clusts.assignment;
bm_data_arr = initial_distribution_arr(df_spatial; n_frames=args["n-frames"], scale=args["scale"], scale_std=args["scale-std"],
n_cells_init=args["num-cells-init"], prior_seg_confidence=args["prior-segmentation-confidence"],
min_molecules_per_cell=args["min-molecules-per-cell"], confidence_nn_id=0);
bm_data = run_bmm_parallel!(bm_data_arr, args["iters"], new_component_frac=args["new-component-fraction"], new_component_weight=args["new-component-weight"],
min_molecules_per_cell=args["min-molecules-per-cell"], assignment_history_depth=history_depth);
cur_df = deepcopy(iss[:df]);
bm_data = B.initial_distribution_arr(cur_df; n_frames=1, scale=14, scale_std="25%", min_molecules_per_cell=3)[1];
@time B.bmm!(bm_data, n_iters=350, new_component_frac=0.3, min_molecules_per_cell=3, assignment_history_depth=30, log_step=100);
cur_df[!, :cell] = B.estimate_assignment_by_history(bm_data)[1];
B.plot_comparison_for_cell(cur_df, B.val_range(cur_df.x), B.val_range(cur_df.y), nothing, iss[:dapi_arr];
ms=2.0, bandwidth=5.0, size_mult=0.25, plot_raw_dapi=false)
###Output
_____no_output_____
###Markdown
Full run Run
###Code
using ProgressMeter
dataset_paths = "/home/vpetukhov/spatial/Benchmarking/run_results/" .*
["iss_hippo/ca1_no_prior", "merfish_moffit", "osm_fish", "star_map/vis_1020_cl0", "spacejam2/allen_sm_fish/no_dapi"];
param_dumps = dataset_paths .* "/segmentation_params.dump";
dataset_names = ["iss", "merfish", "osm_fish", "starmap_1020", "allen_smfish"];
param_strings = [open(p) do f readlines(f)[1][16:end-1] end for p in param_dumps];
baysor_path = "/home/vpetukhov/local/bin/baysor";
for i in 2:length(param_strings)
# for i in 2:2
dataset = dataset_names[i]
params = split(param_strings[i], ' ')
out_path = expanduser("/home/vpetukhov/spatial/Benchmarking/run_results/profiling/$dataset/")
mkpath(out_path)
cmd = `/usr/bin/time -f '%e %U %P %M %t %K' -o ./profiling_output/$dataset.prof -a $baysor_path run --debug -o $out_path $params`;
# cmd = `/usr/bin/time -f '%e %U %P %M %t %K' -o ./profiling_output/$dataset.prof -a $baysor_path run --debug --n-clusters=0 -o $out_path $params`;
@show cmd
println(dataset)
@showprogress for ri in 1:5
run(pipeline(cmd, stdout="./profiling_output/$dataset.log", stderr="./profiling_output/$dataset.err", append=true))
run(pipeline(`echo -e \\n\\n\\n ----- RUN $ri ----- \\n\\n\\n`, stdout="./profiling_output/$dataset.log", append=true))
end
end
###Output
_____no_output_____
###Markdown
Summarize
###Code
using DataFrames
using Statistics
printed_names = ["ISS", "MERFISH", "osmFISH", "STARmap 1020", "Allen smFISH"];
seg_results = dataset_paths .* "/segmentation.csv";
dataset_parameters = hcat([[size(df, 1), length(unique(df.gene))] for df in DataFrame!.(CSV.File.(seg_results))]...);
bench_vals = [hcat(split.(readlines("./profiling_output/$ds.prof"), ' ')...) for ds in dataset_names];
mem_vals = hcat([parse.(Float64, x[4,:]) / 1e6 for x in bench_vals]...);
cpu_vals = hcat([parse.(Float64, x[1,:]) / 60 for x in bench_vals]...);
bench_mat = round.(vcat(mean(cpu_vals, dims=1), std(cpu_vals, dims=1), mean(mem_vals, dims=1), std(mem_vals, dims=1))', digits=2);
bench_strs = [["$(r[i[1]]) ± $(r[i[2]])" for r in eachrow(bench_mat)] for i in ((1, 2), (3, 4))];
bench_df = DataFrame("Dataset" => printed_names, "Num. molecules" => dataset_parameters[1,:], "Num. genes" => dataset_parameters[2,:],
"CPU time, min" => bench_strs[1], "Max RSS, GB" => bench_strs[2], "Num. samples" => 5)
CSV.write("./plots/segmentation_profiling.csv", bench_df)
###Output
_____no_output_____
###Markdown
Parameter table
###Code
import Pkg: TOML
using DataFrames
import CSV
data_paths = ["Allen smFISH" => "allen_smfish", "ISS" => "iss_hippo", "osmFISH" => "osmfish", "STARmap 1020" => "starmap_vis1020", "MERFISH Hypothalamus" => "merfish_moffit", "MERFISH Gut" => "merfish_membrane"];
prior_subfolders = ["No" => "baysor", "Paper" => "baysor_prior", "DAPI" => "baysor_dapi_prior", "Membrane" => "baysor_membrane_prior"];
p_keys = ["gene-composition-neigborhood", "scale", "prior-segmentation-confidence", "min-molecules-per-gene", "min-molecules-per-cell", "n-clusters",
"iters", "force-2d", "x-column", "y-column", "z-column", "gene-column", "prior_segmentation", "nuclei-genes", "cyto-genes"];
path_df = DataFrame([Dict(:Dataset => d, :Prior => pr, :Path => datadir("exp_pro", md, sd, "segmentation_params.dump")) for (d, md) in data_paths for (pr, sd) in prior_subfolders]);
path_df = path_df[isfile.(path_df.Path),:];
param_dicts = [OrderedDict(k => get(d, k, "NA") for k in p_keys) for d in TOML.parsefile.(path_df.Path)];
param_df = hcat(path_df[:,[:Dataset, :Prior]], vcat(DataFrame.(param_dicts)...))
CSV.write(plotsdir("parameters.csv"), param_df)
###Output
_____no_output_____ |
codeSheets/SEAS6401/PGAProject/API_Pull_and_Data_Wrangling.ipynb | ###Markdown
Importing the Players and Tournament Datasets
###Code
instance_id = "https://api.sportsdata.io/golf/v2/json/Players?key="
key = "c76c6101adbf4b0abb54a7a6eb5ddbb4"
url = f"{instance_id}{key}"
response = requests.get(
url = url)
Players = pd.DataFrame((json.loads(response.text)))
#Players.to_csv('/dbfs/FileStore/karbide/Players.txt')
instance_id = "https://api.sportsdata.io/golf/v2/json/Tournaments?key="
key = "c76c6101adbf4b0abb54a7a6eb5ddbb4"
url = f"{instance_id}{key}"
response = requests.get(
url = url)
Tournaments = pd.DataFrame((json.loads(response.text)))
Tournaments["New_Date"] = pd.to_datetime(Tournaments["StartDate"])
Last_Season = Tournaments.loc[Tournaments["New_Date"]>"2020-09-08"]
Last_Season = Last_Season.loc[Last_Season["New_Date"]<"2021-09-10"]
Last_Season = Last_Season.loc[Last_Season["Name"] != 'QBE Shootout']
#Last_Season.to_csv('/dbfs/FileStore/karbide/Last_Season.txt')
###Output
_____no_output_____
###Markdown
Lets pull just the top 150 players
###Code
top150 = spark.read.csv("/FileStore/karbide/top150players.csv")
top150 = top150.toPandas()
top150.columns = ["Id","Name","Rating"]
Players['Full Name'] = Players[['FirstName','LastName']].agg(' '.join, axis=1)
top150Players = Players.merge(top150, how = "inner", left_on= "Full Name", right_on = "Name")
top150Players.shape
#we lost some players, lets try to see why
top150_list = top150["Name"].tolist()
mergedPlayers_list = top150Players["Name"].tolist()
dropped = [x for x in top150_list if x not in mergedPlayers_list]
droppednames = pd.DataFrame(dropped)
droppednames.columns = ["Name"]
droppednames[["Name1","Name2","Name3"]] = droppednames["Name"].str.split(" ",2,expand=True)
#We lose less players if we use the draft kings names
top150Players2 = Players.merge(top150, how = "inner", left_on= "DraftKingsName", right_on = "Name")
top150Players2.shape
mergedPlayers_list = top150Players2["Name"].tolist()
dropped = [x for x in top150_list if x not in mergedPlayers_list]
droppednames = pd.DataFrame(dropped)
droppednames.columns = ["Name"]
droppednames[["Name1","Name2","Name3"]] = droppednames["Name"].str.split(" ",2,expand=True)
droppednames
#this is not small enough that we can search 1 by 1
print(Players.loc[Players["FirstName"] == 'Erik'])
Players.at[4443,'DraftKingsName'] = "Erik van Rooyen"
print(Players["DraftKingsName"].loc[Players["LastName"] == 'Lee'])
print(top150.loc[top150["Name"]== 'K.H. Lee'])
top150.at[58,"Name"] = "Kyoung-Hoon Lee"
print(Players["DraftKingsName"].loc[Players["FirstName"] == 'Robert'])
Players.at[2688,"DraftKingsName"] = "Robert MacIntyre"
print(Players["DraftKingsName"].loc[Players["LastName"] == 'Munoz'])
print(top150.loc[top150["Name"]== 'Sebasti�n Mu�oz'])
top150.at[66,"Name"] = "Sebastian Munoz"
print(Players["DraftKingsName"].loc[Players["LastName"] == 'Davis'])
print(top150.loc[top150["Name"]== 'Cam Davis'])
top150.at[69,"Name"] = "Cameron Davis"
Players.loc[Players["LastName"] == 'Van Tonder']
Players.at[4446,"DraftKingsName"] = "Daniel van Tonder"
top150Players2 = Players.merge(top150, how = "inner", left_on= "DraftKingsName", right_on = "Name")
top150Players2.shape
#now we have all the players
#the sports data API requires us to input the player and tournament ID for the hole-by-hole scores, let's see if we can loop and download automatically
top150PlayerIDs = top150Players2["PlayerID"].tolist()
Tourney_IDs = Last_Season["TournamentID"].tolist()
#top150Players2.to_csv('/dbfs/FileStore/karbide/top150playersexpanded.txt')
#tournament ID
a = 453
#player ID
b = 40000047
instance_id = "https://api.sportsdata.io/golf/v2/json/PlayerTournamentStatsByPlayer/"
key = "?key=c76c6101adbf4b0abb54a7a6eb5ddbb4"
url = f"{instance_id}{a}/{b}{key}"
response = requests.get(
url = url)
test = json.loads(response.text)
test2 = test['Rounds']
RoundData = pd.DataFrame(test2)
R1Scores = test2[0]["Holes"]
R2Scores = test2[1]["Holes"]
R3Scores = test2[2]["Holes"]
R4Scores = test2[3]["Holes"]
R1Scoresdf = pd.DataFrame(R1Scores)
def holeScore(data):
ScoresSet = data.drop(["PlayerRoundID","Par","Score","HoleInOne","ToPar"], axis = 1)
ScoresSet.set_index("Number", inplace = True)
HoleScores = ScoresSet[ScoresSet == 1].stack()
HoleScoresdf = pd.DataFrame(HoleScores).reset_index()
HoleScoresdf.columns = ["Number","Hole_Score","1"]
HoleScoresdf.drop(["1"],axis = 1, inplace = True)
Scoresdf = data.merge(HoleScoresdf, on = "Number")
return(Scoresdf)
def numScore(data):
data["Hole_ScoreNum"] = [-3 if x == "DoubleEagle" else -2 if x == "Eagle" else -1 if x == "Birdie" else 0 if x == "IsPar" else 1 if x == "Bogey" else 2 if x == "DoubleBogey" else 3 for x in data["Hole_Score"]]
R1Scoresdf = holeScore(R1Scoresdf)
numScore(R1Scoresdf)
roundScore = sum(R1Scoresdf['Hole_ScoreNum'])
roundShots = sum(R1Scoresdf['Par'])+roundScore # total strokes = course par plus the score-to-par total, matching roundSummary below
birdies = sum(R1Scoresdf["Birdie"])
def roundSummary(data,roundNum, playerid, tournamentid):
roundScore = sum(data['Hole_ScoreNum'])
roundShots = sum(data['Par'])+roundScore
doubleeagles = sum(data["DoubleEagle"])
eagles = sum(data["Eagle"])
birdies = sum(data["Birdie"])
pars = sum(data["IsPar"])
bogeys = sum(data["Bogey"])
doublebogeys = sum(data['DoubleBogey'])
worsethandoublebogeys = sum(data['WorseThanDoubleBogey'])
roundID = data["PlayerRoundID"][0]
roundStats = pd.DataFrame(np.array([[roundScore,roundShots,doubleeagles,eagles,birdies,pars,bogeys,doublebogeys,worsethandoublebogeys,roundID,roundNum,playerid,tournamentid]]), columns=['RoundScore','RoundShots','DoubleEagles','Eagles','Birdies','Pars','Bogeys','DoubleBogeys','WorseThanDoubleBogeys','PlayerRoundID','RoundNum','PlayerID','TournamentID'])
return(roundStats)
roundSummary(R1Scoresdf,1,b,a)
def dictToDf(scoresDict,playerid,tournamentid):
roundDict = scoresDict['Rounds']
rounddf = pd.DataFrame(roundDict)
rounddf = rounddf.loc[rounddf["Par"] > 0]
rounds = rounddf["Number"].tolist()
dfTournamentRounds = pd.DataFrame(columns = ['RoundScore','RoundShots','DoubleEagles','Eagles','Birdies','Pars','Bogeys','DoubleBogeys','WorseThanDoubleBogeys','PlayerRoundID','RoundNum','PlayerID','TournamentID'])
dfTournamentHoles = pd.DataFrame(columns = ['PlayerRoundID', 'Number', 'Par', 'Score', 'ToPar', 'HoleInOne','DoubleEagle', 'Eagle', 'Birdie', 'IsPar', 'Bogey', 'DoubleBogey','WorseThanDoubleBogey', 'Round','Hole_Score', 'Hole_ScoreNum', "Player_ID", "Tournament_ID"])
for x in rounds:
roundHoles = roundDict[x-1]["Holes"]
roundHoles = pd.DataFrame(roundHoles)
roundHoles = holeScore(roundHoles)
numScore(roundHoles)
roundHoles["Round"] = x
roundHoles["Player_ID"] = playerid
roundHoles["Tournament_ID"] = tournamentid
roundstat = roundSummary(roundHoles,x,playerid,tournamentid)
dfTournamentRounds = pd.concat([dfTournamentRounds,roundstat])
dfTournamentHoles = pd.concat([dfTournamentHoles,roundHoles])
return(dfTournamentRounds,dfTournamentHoles)
# testRounds,testHoles = dictToDf(test,b,a)
#print(testHoles.head(20))
#print(testRounds)
###Output
_____no_output_____
###Markdown
Testing a condition for when the player didn't play in one of the tournaments / the dict is empty
###Code
# Example of an empty tournament (the response body is empty)
url = f"{instance_id}450/40000047{key}"
response = requests.get(url = url)
bool(response.text)
# Example of a valid tournament
url = f"{instance_id}451/40000047{key}"
response = requests.get(url = url)
bool(response.text)
def allTournaments(playerID,tournamentList,key):
allTournamentRounds = pd.DataFrame(columns = ['RoundScore','RoundShots','DoubleEagles','Eagles','Birdies','Pars','Bogeys','DoubleBogeys','WorseThanDoubleBogeys','PlayerRoundID','RoundNum','PlayerID','TournamentID'])
allTournamentHoles = pd.DataFrame(columns = ['PlayerRoundID', 'Number', 'Par', 'Score', 'ToPar', 'HoleInOne','DoubleEagle', 'Eagle', 'Birdie', 'IsPar', 'Bogey', 'DoubleBogey','WorseThanDoubleBogey', 'Round','Hole_Score', 'Hole_ScoreNum'])
key = f"?key={key}"
instance_id = "https://api.sportsdata.io/golf/v2/json/PlayerTournamentStatsByPlayer/"
for x in tournamentList:
url = f"{instance_id}{x}/{playerID}{key}"
response = requests.get(url = url)
if bool(response.text):
importdata = json.loads(response.text)
testDict = importdata['Rounds']
if bool(testDict):
xroundDf,xholesDf = dictToDf(importdata,playerID,x)
allTournamentRounds = pd.concat([allTournamentRounds,xroundDf])
allTournamentHoles = pd.concat([allTournamentHoles,xholesDf])
return(allTournamentRounds,allTournamentHoles)
#testRounds,testHoles = allTournaments(b,Tourney_IDs,"c76c6101adbf4b0abb54a7a6eb5ddbb4")
def allPlayers(playerList,tournamentList,key):
allPlayerRounds = pd.DataFrame(columns = ['RoundScore','RoundShots','DoubleEagles','Eagles','Birdies','Pars','Bogeys','DoubleBogeys','WorseThanDoubleBogeys','PlayerRoundID','RoundNum','PlayerID','TournamentID'])
allPlayerHoles = pd.DataFrame(columns = ['PlayerRoundID', 'Number', 'Par', 'Score', 'ToPar', 'HoleInOne','DoubleEagle', 'Eagle', 'Birdie', 'IsPar', 'Bogey', 'DoubleBogey','WorseThanDoubleBogey', 'Round','Hole_Score', 'Hole_ScoreNum'])
for i in playerList:
yrounds,yholes = allTournaments(i,tournamentList,key)
allPlayerRounds = pd.concat([allPlayerRounds,yrounds])
allPlayerHoles = pd.concat([allPlayerHoles,yholes])
return(allPlayerRounds,allPlayerHoles)
#RoundsDf, HolesDf = allPlayers(top150PlayerIDs,Tourney_IDs,"c76c6101adbf4b0abb54a7a6eb5ddbb4")
#RoundsDf.to_csv('/dbfs/FileStore/karbide/Rounds.txt')
#HolesDf.to_csv('/dbfs/FileStore/karbide/Holes.txt')
#so I dont have to run the API pull again
RoundsDf = pd.read_csv('/dbfs/FileStore/karbide/Rounds.txt')
HolesDf = pd.read_csv("/dbfs/FileStore/karbide/Holes.txt")
# if a tournament doesn't have at least 100 rounds, drop it
def dropSmallTournaments(data,threshold):
tournamentCounts = data.groupby("TournamentID").size().reset_index(name="counts")
bigTournaments = tournamentCounts.loc[tournamentCounts["counts"] > threshold]
result = data.merge(bigTournaments, how = "inner", on = "TournamentID")
result = result.drop(["counts"], axis=1) # drop() is not in-place, so reassign the result
dropTournaments = tournamentCounts.loc[tournamentCounts["counts"] <= threshold]
dropped = dropTournaments["TournamentID"].tolist()
print("Dropped Tournaments")
for x in dropped:
print(x)
return(result)
RoundsDf = dropSmallTournaments(RoundsDf,100)
RoundsDf.groupby("TournamentID").size().reset_index(name="counts")
#RoundsDf.to_csv('/dbfs/FileStore/karbide/Rounds.txt')
testRoundDf = pd.read_csv('/dbfs/FileStore/karbide/Rounds.txt')
pStats = pd.read_csv('/dbfs/FileStore/karbide/pga_tour_stats_2020.csv')
pStats.columns
pStats.describe()
# I'm going to select ~30 of these stats, then later check for independence
pStatsKeep = pStats[['PLAYER NAME',"GIR_PCT_FAIRWAY_BUNKER", "GIR_PCT_FAIRWAY", "GIR_PCT_OVERALL", 'GIR_PCT_OVER_100', 'GIR_PCT_OVER_200', 'GIR_PCT_UNDER_100', 'GREEN_PCT_SCRAMBLE_SAND', 'GREEN_PCT_SCRAMBLE_ROUGH', 'FINISHES_TOP10', 'TEE_AVG_BALL_SPEED', 'TEE_AVG_DRIVING_DISTANCE', 'TEE_DRIVING_ACCURACY_PCT','TEE_AVG_LAUNCH_ANGLE', 'TEE_AVG_LEFT_ROUGH_TENDENCY_PCT', 'TEE_AVG_RIGHT_ROUGH_TENDENCY_PCT', 'TEE_AVG_SPIN_RATE', 'PUTTING_AVG_ONE_PUTTS',
'PUTTING_AVG_TWO_PUTTS', 'PUTTING_AVG_DIST_BIRDIE', "PUTTING_AVG_PUTTS"]]
# Average Birdie Putt Distance is currently in feet and inches, can we change this to just inches
def split_Dist(item):
if item != "nan":
spDist = item.split("' ")
ft_ = float(spDist[0])
in_ = float(spDist[1].replace("\"",""))
return (12*ft_) + in_
pStatsKeep = pStatsKeep.astype({"PUTTING_AVG_DIST_BIRDIE":"str"})
pStatsKeep["PUTTING_AVG_DIST_BIRDIE_INCH"] = pStatsKeep["PUTTING_AVG_DIST_BIRDIE"].apply(lambda x:split_Dist(x))
pStatsKeep.describe()
pStatsKeep["FINISHES_TOP10"].fillna(0,inplace = True)
pStatsKeep.dropna(inplace = True)
pStatsKeep.drop_duplicates(inplace = True)
pStatsKeep.groupby("PLAYER NAME").agg({"GIR_PCT_FAIRWAY_BUNKER": "count"}).sort_values("GIR_PCT_FAIRWAY_BUNKER", ascending = False).head(3)
# we still have 8 Zach Johnsons so let's drop him
pStatsKeep = pStatsKeep.loc[pStatsKeep["PLAYER NAME"] != "Zach Johnson"]
#Now we have to add Player IDs
PlayerNames = pd.read_csv("/dbfs/FileStore/karbide/Players.txt")
PlayerNames = PlayerNames[["DraftKingsName","PlayerID"]]
pStatsKeepIDs = pStatsKeep.merge(PlayerNames, how = "left", left_on = "PLAYER NAME", right_on = "DraftKingsName")
pStatsKeepIDsDropped = pStatsKeepIDs.loc[pStatsKeepIDs["PlayerID"].isna()]
pStatsKeepIDsDropped["PLAYER NAME"]
#again, we have to manually adjust these names
PlayerNames2 = pd.read_csv("/dbfs/FileStore/karbide/Players.txt")
PlayerNames2 = PlayerNames2[["DraftKingsName","PlayerID","FirstName","LastName"]]
print(PlayerNames2.loc[PlayerNames2["FirstName"] == "Ted"])
pStatsKeepIDs.at[5,"PlayerID"] = 40001173
print(PlayerNames2.loc[PlayerNames2["FirstName"] == "Fabian"])
pStatsKeepIDs.at[25,"PlayerID"] = 40000514
print(PlayerNames2.loc[PlayerNames2["LastName"] == "Gordon"])
pStatsKeepIDs.at[36,"PlayerID"] = 40003663
print(PlayerNames2.loc[PlayerNames2["LastName"] == "Ventura"])
pStatsKeepIDs.at[74,"PlayerID"] = 40003179
print(PlayerNames2.loc[PlayerNames2["LastName"] == "Fitzpatrick"])
pStatsKeepIDs.at[136,"PlayerID"] = 40000430
print(PlayerNames2.loc[PlayerNames2["LastName"] == "Pan"])
pStatsKeepIDs.at[140,"PlayerID"] = 40001109
print(PlayerNames2.loc[PlayerNames2["FirstName"] == "Sebastian"])
pStatsKeepIDs.at[161,"PlayerID"] = 40001682
sum(pStatsKeepIDs["PlayerID"].isna())
#now every player has a name and ID
#I might consider running the API pull again for this list of players
pStatsKeepIDs.drop(["DraftKingsName"], axis = 1, inplace = True)
pStatsKeepIDs = pStatsKeepIDs.astype({"PlayerID":"int"})
#pStatsKeepIDs.to_csv('/dbfs/FileStore/karbide/PlayerStats.txt')
StatPlayers = pStatsKeepIDs["PlayerID"].tolist()
#RoundsDf, HolesDf = allPlayers(StatPlayers,Tourney_IDs,"c76c6101adbf4b0abb54a7a6eb5ddbb4")
RoundsDf = dropSmallTournaments(RoundsDf,100)
#RoundsDf.to_csv('/dbfs/FileStore/karbide/Rounds.txt')
#HolesDf.to_csv('/dbfs/FileStore/karbide/Holes.txt')
StrokesGained = pd.read_csv("/dbfs/FileStore/karbide/StrokesGained.csv", encoding = 'latin-1')
StrokesGained.head()
print(StrokesGained.shape)  # shape is an attribute, not a method
pIDs = pStatsKeepIDs[["PLAYER NAME", "PlayerID"]]
StrokesGainedIDs = StrokesGained.merge(pIDs, how = "inner", on = "PLAYER NAME")
#StrokesGainedIDs.to_csv("/dbfs/FileStore/karbide/StrokesGainedIDs.txt")
###Output
_____no_output_____ |
task_06/HW06.ipynb | ###Markdown
Homework No. 6 - autoencoders for anomaly identification. In this homework you will apply a convolutional autoencoder model to identify anomalies in data. To do so, you will need to build a convolutional autoencoder, train it, and apply it to the test data. The core idea of anomaly filtering is that the samples that are anomalies differ strongly from all other objects, and, in addition, there are few of them compared to the size of the whole dataset. Together these factors mean that an autoencoder trained on the training data will reconstruct anomalous examples rather poorly; that is, the loss values on such examples are expected to be unusually high.
###Code
# Run this cell only in an environment where the required libraries are not yet installed. In a prepared environment it can be skipped.
!pip3 install torch torchvision numpy matplotlib
!pip3 install -U albumentations
import numpy as np
from scipy import stats
%matplotlib inline
import matplotlib.pylab as plt
import seaborn as sns
plt.style.use('ggplot')
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from PIL import Image
from skimage.io import imshow
from sklearn.model_selection import train_test_split
# trying new augmentation library
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
from typing import Tuple, List, Type, Dict, Any
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'Using {device} device')
###Output
Using cuda device
###Markdown
Constants
###Code
RESIZE_SHAPE = (28, 28)
BATCH_SIZE = 256
###Output
_____no_output_____
###Markdown
Convolutional autoencoder (CAE). The data in this task is again the MNIST set of handwritten digits; however, some examples in the test set are corrupted. Your goal is to find those examples under the assumption that they are anomalies. The corrupted MNIST data should be downloaded as a file from the link. First of all, you should build and train a convolutional autoencoder. >The encoding part of the autoencoder (the encoder) may consist of convolutional layers and pooling layers, but it can also be more complex. Here you are invited to apply what you know about possible structures of convolutional networks. Once trained, the encoder extracts a hidden representation (embeddings) of the input examples that contains enough information for the decoder to reconstruct those examples. > The decoder may consist of **transpose convolution** layers and upsampling operations but, like the encoder, it can also be more complex. The decoder should reconstruct the examples guided by their hidden-representation vectors. Hidden representation (compressed representation). The hidden representation can carry semantically rich information about the input examples. Using it, you can filter noise out of examples, reconstruct the examples themselves, and sometimes even perform certain operations in the semantic space.
###Code
!wget https://www.dropbox.com/s/r7mgjn83y9ygpzq/mnist_corrupted.npz
###Output
--2021-05-08 14:10:33-- https://www.dropbox.com/s/r7mgjn83y9ygpzq/mnist_corrupted.npz
Resolving www.dropbox.com (www.dropbox.com)... 162.125.3.18, 2620:100:6018:18::a27d:312
Connecting to www.dropbox.com (www.dropbox.com)|162.125.3.18|:443... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: /s/raw/r7mgjn83y9ygpzq/mnist_corrupted.npz [following]
--2021-05-08 14:10:33-- https://www.dropbox.com/s/raw/r7mgjn83y9ygpzq/mnist_corrupted.npz
Reusing existing connection to www.dropbox.com:443.
HTTP request sent, awaiting response... 302 Found
Location: https://uc715f48f47c6ce906f40a5b2b82.dl.dropboxusercontent.com/cd/0/inline/BOEhW2vb4yQEX5LghYRUaeT8l21kBoQ6qxtRc2XZRxNRBsKhA0u-rLhkkXw8bz2ixBypKexWRvgvx_Atyif0bMi8uT8FOX2J9LXBHU344SgSYEEZpsqJobdRZAYFjSb55r6-n3pma-WQWj-Cf2mcA9O3/file# [following]
--2021-05-08 14:10:33-- https://uc715f48f47c6ce906f40a5b2b82.dl.dropboxusercontent.com/cd/0/inline/BOEhW2vb4yQEX5LghYRUaeT8l21kBoQ6qxtRc2XZRxNRBsKhA0u-rLhkkXw8bz2ixBypKexWRvgvx_Atyif0bMi8uT8FOX2J9LXBHU344SgSYEEZpsqJobdRZAYFjSb55r6-n3pma-WQWj-Cf2mcA9O3/file
Resolving uc715f48f47c6ce906f40a5b2b82.dl.dropboxusercontent.com (uc715f48f47c6ce906f40a5b2b82.dl.dropboxusercontent.com)... 162.125.3.15, 2620:100:6018:15::a27d:30f
Connecting to uc715f48f47c6ce906f40a5b2b82.dl.dropboxusercontent.com (uc715f48f47c6ce906f40a5b2b82.dl.dropboxusercontent.com)|162.125.3.15|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: /cd/0/inline2/BOE9hUpD4qmU4rCuIzMVvpcdiCmQ2V_ZqiTXuJ-Uhw2UT60T16h-c4Jlb_uq89hGnYtps6ygYLRv9HbhvECK8AkoD7Nks86OT38w_hDbIF_nWwCB48_ZJWkEZsrusjIp3MOgQc5B5R0dQGAQ0UTt771ldkZfHZOUfAvxfDyMD4l00B5aa6plxHZluzWXlocqNl4aVxsa6myJPyWGuHwgiBdmgJOwvO-nHD2SvA3LHb5IZr5TzoefZMVwGxhep7roYw7DiKdscRt3PfdccYXPkxCAFBEafurs6C7O1z4dMSwVGVXpcH0yCwi5ViX-pMDwg7Ncp9o_bAdV7AvBH3GOP6COsbew_4ofE6eTcejzTrmaTYu0_E830GFkv-BgY_enrpU/file [following]
--2021-05-08 14:10:34-- https://uc715f48f47c6ce906f40a5b2b82.dl.dropboxusercontent.com/cd/0/inline2/BOE9hUpD4qmU4rCuIzMVvpcdiCmQ2V_ZqiTXuJ-Uhw2UT60T16h-c4Jlb_uq89hGnYtps6ygYLRv9HbhvECK8AkoD7Nks86OT38w_hDbIF_nWwCB48_ZJWkEZsrusjIp3MOgQc5B5R0dQGAQ0UTt771ldkZfHZOUfAvxfDyMD4l00B5aa6plxHZluzWXlocqNl4aVxsa6myJPyWGuHwgiBdmgJOwvO-nHD2SvA3LHb5IZr5TzoefZMVwGxhep7roYw7DiKdscRt3PfdccYXPkxCAFBEafurs6C7O1z4dMSwVGVXpcH0yCwi5ViX-pMDwg7Ncp9o_bAdV7AvBH3GOP6COsbew_4ofE6eTcejzTrmaTYu0_E830GFkv-BgY_enrpU/file
Reusing existing connection to uc715f48f47c6ce906f40a5b2b82.dl.dropboxusercontent.com:443.
HTTP request sent, awaiting response... 200 OK
Length: 54880512 (52M) [application/octet-stream]
Saving to: ‘mnist_corrupted.npz’
mnist_corrupted.npz 100%[===================>] 52.34M 71.1MB/s in 0.7s
2021-05-08 14:10:35 (71.1 MB/s) - ‘mnist_corrupted.npz’ saved [54880512/54880512]
###Markdown
Assuming the data file `mnist_corrupted.npz` has been downloaded and sits in the same directory as this notebook, the data generators can be defined as follows:
###Code
class DS(Dataset):
def __init__(self, data, transform=None):
self.data = data
self.transform = transform
def __getitem__(self, index):
x = self.data[index]
if self.transform:
x = self.transform(image=x)['image']
return x
def __len__(self):
return len(self.data)
train_val_transforms = A.Compose(
[
A.ToFloat(max_value=255),
A.Resize(height=RESIZE_SHAPE[0], width=RESIZE_SHAPE[1]),
A.Rotate(limit=20),
A.RandomBrightness(limit=0.1),
ToTensorV2(),
]
)
test_transforms = A.Compose(
[
A.ToFloat(max_value=255),
A.Resize(height=RESIZE_SHAPE[0], width=RESIZE_SHAPE[1]),
ToTensorV2(),
]
)
mnist = np.load('./mnist_corrupted.npz')
train_val_samples = mnist['x_train']
test_samples = mnist['x_test']
train_val_dataset = DS(train_val_samples, train_val_transforms)
test_dataset = DS(test_samples, test_transforms)
train_dataset, val_dataset = train_test_split(train_val_dataset, test_size=0.10)
print(f'size of data for training: {len(train_dataset)}, size of data for validation: {len(val_dataset)}')
###Output
size of data for training: 54000, size of data for validation: 6000
###Markdown
Initializing the DataLoaders
###Code
train_dataloader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_dataloader = DataLoader(dataset=val_dataset, batch_size=BATCH_SIZE, shuffle=True)
dataloaders = {
'train': train_dataloader,
'val': val_dataloader,
}
###Output
_____no_output_____
###Markdown
Visualizing the raw data. As in any task, it makes sense to visualize the raw data to understand what we are dealing with.
###Code
indices = np.random.randint(0, len(train_val_dataset), size=8)
fig, axes = plt.subplots(nrows=1, ncols=8, figsize=(8, 2), dpi=300)
for i, ax in enumerate(axes):
sample_index = indices[i]
sample = train_val_dataset[sample_index]
ax.imshow(np.squeeze(sample.cpu().numpy()), cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
fig.patch.set_facecolor('white')
###Output
_____no_output_____
###Markdown
--- Convolutional autoencoder Encoder. The encoder can be implemented in the spirit of AlexNet or VGG: convolutional layers alternate with pooling layers, the latter being used to reduce the spatial dimensions of the intermediate representations of the input examples. It is common to add extra fully connected layers after the convolutional part, which lets you shrink the dimensionality of the hidden representation extracted by the encoder even further. The suggested encoder structure is not the only valid one; others can be implemented as well. Decoder. The decoder must turn the hidden-representation vector (a rank-1 tensor) into an image, a reconstruction of the input example. To do this, reshape the hidden-representation vector to rank 2 (for example, with `.view()`), and then apply Transpose Convolution operations (`torch.nn.ConvTranspose2d`) and upsampling (namely `torch.nn.functional.interpolate`) in sequence. In some cases `torch.nn.ConvTranspose2d` is used with `stride=2` or larger, but this can lead to the so-called ["checkerboard effect"](https://distill.pub/2016/deconv-checkerboard/); the currently recommended option is bilinear or bicubic upsampling. The decoder's output must be an image of the same size as the input example, i.e. 28x28. Do not forget that one of the goals of autoencoders is to reduce the dimensionality of the examples while preserving the key information. Experiment with the number of layers and the size of the hidden representation! Try reducing it to 2 or even to 1. Will the samples still be reproduced well? Transpose Convolutions. In this homework the decoder is expected to use **transposed convolutional** layers. They work almost the same way as convolutional layers, but "in reverse": a 3x3 kernel in a convolution produces a single value, whereas a **transposed convolution** turns a single value of the input representation into a patch the size of its kernel (3x3). PyTorch already provides these layers as [`nn.ConvTranspose2d`](https://pytorch.org/docs/stable/nn.htmlconvtranspose2d). To repeat, an alternative to a **transposed convolutional layer** with `stride=2` or larger is to resize with "nearest neighbor", "bilinear" or "bicubic" interpolation and apply a convolution to the result. Task 1: describe the class of the autoencoder network discussed in this task.
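As a rough, self-contained illustration of the upsample-vs-transposed-convolution point above (not part of Task 1; the channel counts and kernel sizes here are arbitrary choices for the example), both options produce the same output shape:
```
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 8, 7, 7)  # dummy 8-channel 7x7 feature map

# Option 1: strided transposed convolution (can produce checkerboard artifacts)
deconv = nn.ConvTranspose2d(8, 4, kernel_size=2, stride=2)
print(deconv(x).shape)  # torch.Size([1, 4, 14, 14])

# Option 2: bilinear upsampling followed by an ordinary convolution
up = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
print(nn.Conv2d(8, 4, kernel_size=3, padding=1)(up).shape)  # torch.Size([1, 4, 14, 14])
```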
###Code
# define the NN architecture
class ConvAutoencoder(nn.Module):
def __init__(self, hidden_dim=32):
super(ConvAutoencoder, self).__init__()
self.embedding = None
        ## encoder layers ##
self.enc_conv_1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3, stride=1, padding=1) # 4 x 28 x 28
self.enc_pool_1 = nn.MaxPool2d(kernel_size=2, stride=2) # 4 x 14 x 14
self.enc_batch_norm_1 = nn.BatchNorm2d(num_features=4)
self.enc_conv_2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3, stride=1, padding=1) # 8 x 14 x 14
self.enc_pool_2 = nn.MaxPool2d(kernel_size=2, stride=2) # 8 x 7 x 7
self.enc_batch_norm_2 = nn.BatchNorm2d(num_features=8)
self.flatten = nn.Flatten() # 1 x 392
self.enc_linear_1 = nn.Linear(in_features=392, out_features=hidden_dim)
        ## decoder layers ##
self.de_linear_1 = nn.Linear(in_features=hidden_dim, out_features=392)
self.de_conv_1 = nn.ConvTranspose2d(in_channels=8, out_channels=4, kernel_size=3, stride=1, padding=1)
self.de_pool_1 = nn.Upsample(scale_factor=2, mode='bilinear')
self.de_batch_norm_1 = nn.BatchNorm2d(num_features=4)
self.de_conv_2 = nn.ConvTranspose2d(in_channels=4, out_channels=1, kernel_size=3, stride=1, padding=1)
self.de_pool_2 = nn.Upsample(scale_factor=2, mode='bilinear')
self.de_batch_norm_2 = nn.BatchNorm2d(num_features=1)
def forward(self, x):
        ## encoder operations ##
original_shape = x.shape
x = F.relu(self.enc_conv_1(x)) # 4 x 28 x 28
x = self.enc_pool_1(x) # 4 x 14 x 14
x = self.enc_batch_norm_1(x) # 4 x 14 x 14
x = F.relu(self.enc_conv_2(x)) # 8 x 14 x 14
x = self.enc_pool_2(x) # 8 x 7 x 7
x = self.enc_batch_norm_2(x) # 8 x 7 x 7
x_shape = x.shape
x = self.flatten(x) # 1 x 392
self.embedding = F.relu(self.enc_linear_1(x)) # 1 x hidden_dim
        ## decoder operations ##
x = self.de_linear_1(self.embedding) # 1 x 392
x = x.view(*x_shape) # 8 x 7 x 7
x = F.relu(self.de_conv_1(x)) # 4 x 7 x 7
x = self.de_pool_1(x) # 4 x 14 x 14
x = self.de_batch_norm_1(x) # 4 x 14 x 14
x = F.relu(self.de_conv_2(x)) # 1 x 14 x 14
x = self.de_pool_2(x) # 1 x 28 x 28
x = torch.sigmoid(x) # 1 x 28 x 28
assert original_shape == x.shape, f'{original_shape} != {x.shape}'
return x
model = ConvAutoencoder()
print(model)
model = model.to(device)
###Output
_____no_output_____
###Markdown
Task 2: write a pipeline for data preprocessing and augmentation. `torchvision.transforms` has ready-made implementations of most common techniques; if you want to add something of your own, you can use `torchvision.transforms.Lambda` or build the augmentations into the data-preparation step in the `DS` class. This was written above using `Albumentations`. It always makes sense to look at how the data is preprocessed and how it is processed by the network (when possible). In this homework you are asked to visualize arbitrary examples from the training set, as well as one arbitrary example processed by the freshly created (but not yet trained) model. Task 3: display several arbitrary examples from the training set.
###Code
NUM = 2
fig, ax = plt.subplots(NUM, NUM, figsize=(7, 7))
plt.subplots_adjust(left=NUM*(-0.2), bottom=NUM*(-0.1))
for i in range(NUM**2):
idx = np.random.randint(low=0, high=len(val_dataset))
image = val_dataset[idx]
ax[i // NUM, i % NUM].imshow(image.squeeze(), cmap='gray')
ax[i // NUM, i % NUM].set_xticks([])
ax[i // NUM, i % NUM].set_yticks([])
ax[i // NUM, i % NUM].grid(False);
###Output
_____no_output_____
###Markdown
Task 4: display one arbitrary example from the training set and the result of running the network on that example.
###Code
example_index = int(np.random.randint(0, len(train_dataset), size=1))
example = train_dataset[example_index]
## compute model output for this example;
## Transfer the result to CPU and convrt it from tensor to numpy array
example_transformed = model(example.unsqueeze(0).to(device))
fig, ax = plt.subplots(1, 2, figsize=(7, 6))
for i, img in enumerate((example, example_transformed)):
img = img.cpu().detach().numpy().squeeze()
ax[i].imshow(img, cmap='gray')
ax[i].grid(False)
ax[i].set_xticks([])
ax[i].set_yticks([])
###Output
_____no_output_____
###Markdown
Training the model. Now that you have implemented the model and prepared the data, you can move on to actually training it. The skeleton of the training function is written below; next you will have to implement the key parts of this algorithm.
###Code
def train_model(model: torch.nn.Module,
train_dataset: torch.utils.data.Dataset,
val_dataset: torch.utils.data.Dataset,
loss_function: torch.nn.Module = nn.MSELoss(reduction='mean'),
metrics_function: torch.nn.Module=nn.L1Loss(reduction='mean'),
optimizer_class: Type[torch.optim.Optimizer] = torch.optim.Adam,
optimizer_params: Dict = {},
lr_scheduler_class: Any = torch.optim.lr_scheduler.StepLR,
lr_scheduler_params: Dict = {},
batch_size = 64,
max_epochs = 100,
early_stopping_patience = 10
):
metrics = {'loss': [], 'metrics': []}
optimizer = optimizer_class(model.parameters(), **optimizer_params)
lr_scheduler = lr_scheduler_class(optimizer, **lr_scheduler_params)
train_loader = torch.utils.data.DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size)
best_val_loss = None
best_epoch = None
for epoch in range(max_epochs):
print(f'Epoch {epoch+1} of {max_epochs}')
train_single_epoch(model, optimizer, loss_function, train_loader)
val_metrics = validate_single_epoch(model, loss_function, metrics_function, val_loader)
metrics['loss'].append(val_metrics['loss'])
metrics['metrics'].append(val_metrics['metrics'])
print(f'Validation metrics: \n{val_metrics}')
lr_scheduler.step()
if best_val_loss is None or best_val_loss > val_metrics['loss']:
print(f'Best model yet, saving')
best_val_loss = val_metrics['loss']
best_epoch = epoch
# torch.save(model, './best_model.pth')
if epoch - best_epoch > early_stopping_patience:
print('Early stopping triggered')
break
return metrics
###Output
_____no_output_____
###Markdown
Task 5: implement a function that trains the network for one epoch (a full pass over the whole training set). Its inputs are the model, the optimizer, the loss function, and a `DataLoader` object. > ATTENTION!!! In the autoencoder training task there are no digit labels, only the input examples. When iterating over `data_loader` you will receive only the examples themselves! Think about what should serve as the target variable when you compute the loss.
###Code
def train_single_epoch(model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_function: torch.nn.Module,
data_loader: torch.utils.data.DataLoader):
model.train()
for X in data_loader:
# send data on correct device type
X = X.to(device)
# vanish gradient
optimizer.zero_grad()
# forward-pass
X_pred = model(X)
# calculating loss value
loss = loss_function(X_pred, X)
# backward-pass
loss.backward()
# optimization step
optimizer.step()
###Output
_____no_output_____
###Markdown
Task 6: implement a function that computes the loss on the test set. Its inputs are the model, the loss function, and a DataLoader. The expected output is a dictionary of the form:```{ 'loss': , 'accuracy': }```
###Code
def validate_single_epoch(model: torch.nn.Module,
loss_function: torch.nn.Module,
metric_function: torch.nn.Module,
data_loader: torch.utils.data.DataLoader):
model.eval()
test_loss = 0.0
running_loss = 0.0
running_metrics = 0.0
with torch.no_grad():
for X in data_loader:
# send data on correct device type
X = X.to(device)
# forward-pass
X_pred = model(X)
# accumulating statistics
running_loss += loss_function(X_pred, X).item()
running_metrics += metric_function(X_pred, X).item()
return {
'loss': running_loss / (RESIZE_SHAPE[0]*RESIZE_SHAPE[1]),
'metrics': running_metrics / (RESIZE_SHAPE[0]*RESIZE_SHAPE[1]),
}
###Output
_____no_output_____
###Markdown
If you implemented all the previous steps correctly and your model has enough trainable parameters, training should start in the next cell. Task 7: come up with a loss function. Note that the default loss function written into the provided skeleton code is incorrect; you will most likely not be able to train the autoencoder with it. Think about what the loss function should be, given that it must assess how well the value of every individual pixel of the image is reproduced. Enter the correct loss function in the cell below. Also think about whether the originally suggested loss function could still be used, and what would have to be done to the data to train your model with it. **Answer**: Yes, in my view as well, cross-entropy does not fit this problem, because by design cross-entropy is used when the algorithm's output is the probability of belonging to class `y`. In our case, although a pixel value lies between zero and one, it is not a probability: under that interpretation a one would mean the pixel is white and a zero that it is black, whereas in the original image pixel intensities take not binary but continuous values in the interval [0, 1]. So in essence we are solving a *regression* problem, not a classification one. In that case I propose using `pixel-wise MSE`. That said, cross-entropy could also be used if, for example, the input data were transformed so that it only took values of the form 0.0, 0.1, 0.2, ..., 1.0; then it would indeed be a classification problem.
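As a quick sketch of that choice (illustrative only; the tensors below are random placeholders, not real batches from this notebook), pixel-wise MSE over a batch of reconstructions is just:
```
import torch
import torch.nn as nn

x = torch.rand(4, 1, 28, 28)      # placeholder batch of "originals" with values in [0, 1]
x_hat = torch.rand(4, 1, 28, 28)  # placeholder batch of reconstructions

pixelwise_mse = nn.MSELoss(reduction='mean')  # averages the squared error over every pixel
print(pixelwise_mse(x_hat, x))                # a single scalar loss value
```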
###Code
metrics = train_model(model,
train_dataset=train_dataset,
val_dataset=val_dataset,
optimizer_params={'lr': 1e-2},
lr_scheduler_params={'step_size': 60},
batch_size=BATCH_SIZE,
max_epochs=200)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(14, 4))
ax[0].plot(metrics['loss'])
ax[0].set_title('MSE score (loss)')
ax[1].plot(metrics['metrics'])
ax[1].set_title('MAE score (metric)');
###Output
_____no_output_____
###Markdown
Checking the results. Take a look at how your trained autoencoder transforms input examples. The cell below contains code for displaying an arbitrary example-reconstruction pair.
###Code
index = int(np.random.randint(0, len(train_dataset), size=1))
sample = train_dataset[index][0]
sample_np = np.squeeze(sample.detach().cpu().numpy())
sample_ae = model(sample.view(1,1,28,28).to(device))
sample_ae_np = np.squeeze(sample_ae.detach().cpu().numpy())
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(4, 2), dpi=200)
for i, ax in enumerate(axes):
img = sample_np if i==0 else sample_ae_np
ax.imshow(img, cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
fig.patch.set_facecolor('white')
plt.imshow(img, cmap='gray')
###Output
_____no_output_____
###Markdown
Anomaly identification. The idea behind anomaly identification is to separate "ordinary" examples from "unusual" ones by the value of the autoencoder loss on those examples. The assumption is that an autoencoder trained on ordinary examples will not be able to reproduce unusual examples accurately enough, i.e. the loss on unusual examples will be large. In this homework you are asked to find all the outlier examples present in the test set using only the autoencoder's loss values. To do this, compute the trained autoencoder's loss on every object of the test set and decide which examples are anomalous. The solution to the whole task should be a list of 0/1 values corresponding to the objects of the test set; a `1` means the object is an anomaly, a `0` means the object is ordinary. For example, the list `[1,1,1,0,0,0,0,0,0,0,1,0]` means that in a test set of 12 objects the first three and the second-to-last are considered anomalies, and the rest are considered ordinary. > ATTENTION! During grading, matching is done only by the object indices in the test set, so do not shuffle the set when computing the loss. That is, when creating the `torch.utils.data.DataLoader`, shuffling must be turned off: `shuffle=False` Task 8: apply the trained autoencoder model to the test data. Compute the loss on every object of the test set.
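Once the per-example losses are available, a minimal sketch of turning them into the required 0/1 list with a simple threshold could look like this (the `losses` array and the quantile cut-off here are placeholders; the actual boundary is chosen from the loss distribution later in the notebook):
```
import numpy as np

losses = np.random.rand(10000)             # placeholder: per-example loss values
threshold = np.quantile(losses, 0.998)     # placeholder cut-off, tuned from the histogram
flags = (losses > threshold).astype(int)   # 1 = anomaly, 0 = ordinary
print(''.join(map(str, flags))[:50])       # preview of the 0/1 string
```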
###Code
model.eval()
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)
losses = []
metric = nn.MSELoss()
with torch.no_grad():
with tqdm(total=len(test_loader)) as pbar:
for data in test_loader:
                ## compute the loss values for every element of the test set here.
data = data.to(device)
data_pred = model(data)
curr_loss = metric(data_pred, data).item() / (RESIZE_SHAPE[0] * RESIZE_SHAPE[1])
losses.append(curr_loss)
pbar.update(1)
###Output
0%| | 0/10000 [00:00<?, ?it/s]/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:3458: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
"See the documentation of nn.Upsample for details.".format(mode)
100%|██████████| 10000/10000 [00:12<00:00, 828.74it/s]
###Markdown
Analyzing the loss values. Analyze the distribution of the loss values and find the objects on which it is unusually large. Task 9: - Plot a histogram of the loss values. Draw conclusions (write TEXT) about the values for ordinary objects versus anomalies. - Find the anomalous objects and display them. - Run your trained autoencoder on them and display the anomalous objects side by side with the reconstructions it produces. My proposal: let's look at the approximate density estimated with KDE and judge it visually. We can see a very long tail towards small loss values, which is rather strange. Essentially by trial and error, we take the quantile of order 0.002 as the boundary of a "confidence interval" for the loss and plot the objects whose loss is below this boundary value.
###Code
plt.figure(figsize=(10, 4))
mean = np.mean(losses)
left_border = np.quantile(losses, 0.0019)
right_border = mean + 2 * np.std(losses)
plt.axvline(x=mean, ymin=0, ymax=1, color='royalblue')
plt.axvline(x=left_border, ymin=0, ymax=1, color='aqua')
plt.axvline(x=right_border, ymin=0, ymax=1, color='aqua')
sns.histplot(losses, kde=True);
def tensor2numpy(tensor):
img = tensor.numpy()
img = np.moveaxis(img, source=(0, 1, 2), destination=(2, 0, 1)).squeeze()
return img
losses_np = np.array(losses)
outliers = np.where(losses_np < (left_border))
outliers
fig, ax = plt.subplots(2, int(len(outliers[0])), figsize=(120, 5))
for idx, outlier_idx in enumerate(outliers[0]):
data = test_dataset[outlier_idx]
img = tensor2numpy(data)
data_pred = model(data.unsqueeze(0).to(device))
img_pred = tensor2numpy(data_pred.cpu().detach())
ax[0, idx].imshow(img, cmap='gray')
ax[1, idx].imshow(img_pred, cmap='gray')
ax[0, idx].set_title(f'image index: {outlier_idx}')
ax[0, idx].grid(False)
ax[1, idx].grid(False)
fig.tight_layout()
fig.subplots_adjust(wspace=0.4)
fig.patch.set_facecolor('white')
###Output
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:3458: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
"See the documentation of nn.Upsample for details.".format(mode)
###Markdown
As we can see, on the anomalies the loss is for some reason much SMALLER than on objects from the general population. Interestingly, let's also look at the histogram of anomaly images reconstructed by the autoencoder:
###Code
index = int(np.random.randint(0, len(outliers[0]), size=1))
sample = test_dataset[outliers[0][index]]
sample_np = np.squeeze(sample.detach().cpu().numpy())
sample_ae = model(sample.view(1,1,28,28).to(device))
sample_ae_np = np.squeeze(sample_ae.detach().cpu().numpy())
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6, 4), dpi=200)
axes[0].imshow(sample_np)
axes[0].set_title('outlier')
axes[1].hist(np.concatenate(sample_ae_np), bins=20);
axes[1].set_title('histogram of model(outlier)')
###Output
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:3458: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
"See the documentation of nn.Upsample for details.".format(mode)
###Markdown
As we can see, most values are around 0.5. For now it remains a bit of a mystery to me why the autoencoder reacts to the outliers this way, since large loss values were expected after all. Task 10: create the anomaly-marking file. In this task you need to write to a file the anomaly flags for all objects of the test set, in the order in which those objects appear in the set. It should be a plain text file with no headers and no extra characters - only `0` or `1`. An example of the file contents (for a set of 244 objects, 6 of which ended up marked as anomalies):`0000000000000010000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000001100000000`The final deliverable of this homework is this file. It should be submitted together with the notebook containing your code.
###Code
final_outliers = np.array([311, 1619, 1774, 3194, 3474, 3592, 3603, 4007, 5367, 5455, 6573, 8284, 8337, 8659, 9170, 9783])
final_outliers.shape
answer = np.array([1 if (i in final_outliers) else 0 for i in range(len(test_loader.dataset))])
assert (answer[final_outliers] == np.ones(shape=final_outliers.shape)).all()
answer_str = ''.join([str(i) for i in answer])
answer_str
f = open("answer.txt", "w")
f.write(answer_str)
f.close()
###Output
_____no_output_____ |
Sandbox_Testing_Heart Disease.ipynb | ###Markdown
Predictions
###Code
predictions = model.predict(X)
print(f"True output: {y[0]}")
print(f"Predicted output: {predictions[0]}")
print(f"Prediction Error: {predictions[0]-y[0]}")
#pd.DataFrame({"Predicted": predictions, "Actual": y, "Error": predictions - y})[["Predicted", "Actual", "Error"]]
X_min = X.min()
X_max = X.max()
y_min_actual = y.min()
y_max_actual = y.max()
###Output
_____no_output_____
###Markdown
Output from Min / Max
###Code
y_min = 0.04178734 + 0.12850252 * X_min
y_max = 0.04178734 + 0.12850252 * X_max
print(f"Actual Min Value: {y_min_actual}")
print(f"Calculated Min Value: {y_min}")
print(f"Actual Max Value: {y_max_actual}")
print(f"Calculated Max Value: {y_max}")
###Output
Actual Min Value: 0
Calculated Min Value: 0.04178734
Actual Max Value: 1
Calculated Max Value: 0.17028986000000002
###Markdown
Prediction based on Min / Max
###Code
y_min_predicted = model.predict([[X_min]])
y_max_predicted = model.predict([[X_max]])
print(f"Actual Min Value: {y_min_actual}")
print(f"Predicted Min Value: {y_min_predicted}")
print(f"Actual Max Value: {y_max_actual}")
print(f"Predicted Max Value: {y_max_predicted}")
###Output
Actual Min Value: 0
Predicted Min Value: [[0.04178734]]
Actual Max Value: 1
Predicted Max Value: [[0.17028986]]
###Markdown
Model Fit Illustration
###Code
plt.scatter(X, y, c='blue')
plt.plot([X_min, X_max], [y_min, y_max], c='red')
###Output
_____no_output_____ |
Nomogram.ipynb | ###Markdown
Here are the simple examples for plotting nomogram, ROC curves, Calibration curves, and Decision curves in training and test dataset by using R language.
###Code
# Library and data
library(rms)
library(pROC)
library(rmda)
train <-read.csv("E:/Experiments/YinjunDong/nomogram/EGFR-nomogram.csv")
test <-read.csv("E:/Experiments/YinjunDong/nomogram/EGFR-nomogram-test.csv")
# Nomogram
dd=datadist(train)
options(datadist="dd")
f1 <- lrm(EGFR~ Rad
+Smoking
+Type
,data = train,x = TRUE,y = TRUE)
nom <- nomogram(f1, fun=plogis,fun.at=c(.001, .01, seq(.1,.9, by=.4)), lp=F, funlabel="EGFR Mutations")
plot(nom)
# ROC train
f2 <- glm(EGFR~ Rad
+Smoking
+Type
,data = train,family = "binomial")
pre <- predict(f2, type='response')
plot.roc(train$EGFR, pre,
main="ROC Curve", percent=TRUE,
print.auc=TRUE,
ci=TRUE, ci.type="bars",
of="thresholds",
thresholds="best",
print.thres="best",
col="blue"
#,identity=TRUE
,legacy.axes=TRUE,
print.auc.x=ifelse(50,50),
print.auc.y=ifelse(50,50)
)
# ROC test
pre1 <- predict(f2,newdata = test)
plot.roc(test$EGFR, pre1,
main="ROC Curve", percent=TRUE,
print.auc=TRUE,
ci=TRUE, ci.type="bars",
of="thresholds",
thresholds="best",
print.thres="best",
col="blue",legacy.axes=TRUE,
print.auc.x=ifelse(50,50),
print.auc.y=ifelse(50,50)
)
# Calibration Curve train
rocplot1 <- roc(train$EGFR, pre)
ci.auc(rocplot1)
cal <- calibrate(f1, method = "boot", B = 1000)
plot(cal, xlab = "Nomogram Predicted Mutation", ylab = "Actual Mutation",main = "Calibration Curve")
# Calibration Curve test
rocplot2 <- roc(test$EGFR,pre1)
ci.auc(rocplot2)
f3 <- lrm(test$EGFR ~ pre1,x = TRUE,y = TRUE)
cal2 <- calibrate(f3, method = "boot", B = 1000)
plot(cal2, xlab = "Nomogram Predicted Mutation", ylab = "Actual Mutation",main = "Calibration Curve")
# Decision Curve train
Rad<- decision_curve(EGFR~
Rad, data = train, family = binomial(link ='logit'),
thresholds= seq(0,1, by = 0.01),
confidence.intervals =0.95,study.design = 'case-control',
population.prevalence = 0.3)
Clinical<- decision_curve(EGFR~
Smoking+Type, data = train, family = binomial(link ='logit'),
thresholds= seq(0,1, by = 0.01),
confidence.intervals =0.95,study.design = 'case-control',
population.prevalence = 0.3)
clinical_Rad<- decision_curve(EGFR~ Rad
+Smoking+Type, data = train,
family = binomial(link ='logit'), thresholds = seq(0,1, by = 0.01),
confidence.intervals= 0.95,study.design = 'case-control',
population.prevalence= 0.3)
List<- list(Clinical,Rad,clinical_Rad)
plot_decision_curve(List,curve.names= c('Clinical','Rad-Score','Nomogram'),
cost.benefit.axis =FALSE,col = c('green','red','blue'),
confidence.intervals =FALSE,standardize = FALSE,
#legend.position = "none"
legend.position = "bottomleft"
)
# Decision Curve test
Rad1<- decision_curve(EGFR~
Rad, data = test, family = binomial(link ='logit'),
thresholds= seq(0,1, by = 0.01),
confidence.intervals =0.95,study.design = 'case-control',
population.prevalence = 0.3)
Clinical1<- decision_curve(EGFR~
Smoking+Type, data = test, family = binomial(link ='logit'),
thresholds= seq(0,1, by = 0.01),
confidence.intervals =0.95,study.design = 'case-control',
population.prevalence = 0.3)
clinical_Rad1<- decision_curve(EGFR~ Rad
+Smoking+Type, data = test,
family = binomial(link ='logit'), thresholds = seq(0,1, by = 0.01),
confidence.intervals= 0.95,study.design = 'case-control',
population.prevalence= 0.3)
List1<- list(Clinical1, Rad1, clinical_Rad1)
plot_decision_curve(List1,curve.names= c('Clinical','Rad-Score','Nomogram'),
cost.benefit.axis =FALSE,col = c('green','red','blue'),
confidence.intervals =FALSE,standardize = FALSE,
legend.position = "bottomleft")
###Output
Calculating net benefit curves for case-control data. All calculations are done conditional on the outcome prevalence provided.
Calculating net benefit curves for case-control data. All calculations are done conditional on the outcome prevalence provided.
Note: The data provided is used to both fit a prediction model and to estimate the respective decision curve. This may cause bias in decision curve estimates leading to over-confidence in model performance.
Calculating net benefit curves for case-control data. All calculations are done conditional on the outcome prevalence provided.
Note: The data provided is used to both fit a prediction model and to estimate the respective decision curve. This may cause bias in decision curve estimates leading to over-confidence in model performance.
Note: When multiple decision curves are plotted, decision curves for 'All' are calculated using the prevalence from the first DecisionCurve object in the list provided.
###Markdown
Here are the simple examples for plotting nomogram, ROC curves, Calibration curves, and Decision curves in training and test dataset by using R language.
###Code
# Library and data
library(rms)
library(pROC)
library(rmda)
train <-read.csv("E:/Experiments/YinjunDong/nomogram/EGFR-nomogram.csv")
test <-read.csv("E:/Experiments/YinjunDong/nomogram/EGFR-nomogram-test.csv")
# Nomogram
dd=datadist(train)
options(datadist="dd")
f1 <- lrm(EGFR~ Rad
+Smoking
+Type
,data = train,x = TRUE,y = TRUE)
nom <- nomogram(f1, fun=plogis,fun.at=c(.001, .01, seq(.1,.9, by=.4)), lp=F, funlabel="EGFR Mutations")
plot(nom)
# ROC train
f2 <- glm(EGFR~ Rad
+Smoking
+Type
,data = train,family = "binomial")
pre <- predict(f2, type='response')
plot.roc(train$EGFR, pre,
main="ROC Curve", percent=TRUE,
print.auc=TRUE,
ci=TRUE, ci.type="bars",
of="thresholds",
thresholds="best",
print.thres="best",
col="blue"
#,identity=TRUE
,legacy.axes=TRUE,
print.auc.x=ifelse(50,50),
print.auc.y=ifelse(50,50)
)
# ROC test
pre1 <- predict(f2,newdata = test)
plot.roc(test$EGFR, pre1,
main="ROC Curve", percent=TRUE,
print.auc=TRUE,
ci=TRUE, ci.type="bars",
of="thresholds",
thresholds="best",
print.thres="best",
col="blue",legacy.axes=TRUE,
print.auc.x=ifelse(50,50),
print.auc.y=ifelse(50,50)
)
# Calibration Curve train
rocplot1 <- roc(train$EGFR, pre)
ci.auc(rocplot1)
cal <- calibrate(f1, method = "boot", B = 1000)
plot(cal, xlab = "Nomogram Predicted Survival", ylab = "Actual Survival",main = "Calibration Curve")
# Calibration Curve test
rocplot2 <- roc(test$EGFR,pre1)
ci.auc(rocplot2)
f3 <- lrm(test$EGFR ~ pre1,x = TRUE,y = TRUE)
cal2 <- calibrate(f3, method = "boot", B = 1000)
plot(cal2, xlab = "Nomogram Predicted Survival", ylab = "Actual Survival",main = "Calibration Curve")
# Decision Curve train
Rad<- decision_curve(EGFR~
Rad, data = train, family = binomial(link ='logit'),
thresholds= seq(0,1, by = 0.01),
confidence.intervals =0.95,study.design = 'case-control',
population.prevalence = 0.3)
Clinical<- decision_curve(EGFR~
Smoking+Type, data = train, family = binomial(link ='logit'),
thresholds= seq(0,1, by = 0.01),
confidence.intervals =0.95,study.design = 'case-control',
population.prevalence = 0.3)
clinical_Rad<- decision_curve(EGFR~ Rad
+Smoking+Type, data = train,
family = binomial(link ='logit'), thresholds = seq(0,1, by = 0.01),
confidence.intervals= 0.95,study.design = 'case-control',
population.prevalence= 0.3)
List<- list(Clinical,Rad,clinical_Rad)
plot_decision_curve(List,curve.names= c('Clinical','Rad-Score','Nomogram'),
cost.benefit.axis =FALSE,col = c('green','red','blue'),
confidence.intervals =FALSE,standardize = FALSE,
#legend.position = "none"
legend.position = "bottomleft"
)
# Decision Curve test
Rad1<- decision_curve(EGFR~
Rad, data = test, family = binomial(link ='logit'),
thresholds= seq(0,1, by = 0.01),
confidence.intervals =0.95,study.design = 'case-control',
population.prevalence = 0.3)
Clinical1<- decision_curve(EGFR~
Smoking+Type, data = test, family = binomial(link ='logit'),
thresholds= seq(0,1, by = 0.01),
confidence.intervals =0.95,study.design = 'case-control',
population.prevalence = 0.3)
clinical_Rad1<- decision_curve(EGFR~ Rad
+Smoking+Type, data = test,
family = binomial(link ='logit'), thresholds = seq(0,1, by = 0.01),
confidence.intervals= 0.95,study.design = 'case-control',
population.prevalence= 0.3)
List1<- list(Clinical1, Rad1, clinical_Rad1)
plot_decision_curve(List1,curve.names= c('Clinical','Rad-Score','Nomogram'),
cost.benefit.axis =FALSE,col = c('green','red','blue'),
confidence.intervals =FALSE,standardize = FALSE,
legend.position = "bottomleft")
###Output
Calculating net benefit curves for case-control data. All calculations are done conditional on the outcome prevalence provided.
Calculating net benefit curves for case-control data. All calculations are done conditional on the outcome prevalence provided.
Note: The data provided is used to both fit a prediction model and to estimate the respective decision curve. This may cause bias in decision curve estimates leading to over-confidence in model performance.
Calculating net benefit curves for case-control data. All calculations are done conditional on the outcome prevalence provided.
Note: The data provided is used to both fit a prediction model and to estimate the respective decision curve. This may cause bias in decision curve estimates leading to over-confidence in model performance.
Note: When multiple decision curves are plotted, decision curves for 'All' are calculated using the prevalence from the first DecisionCurve object in the list provided.
|
kaggle_notebook/exo_1_fetch_datea.ipynb | ###Markdown
**[SQL Micro-Course Home Page](https://www.kaggle.com/learn/intro-to-sql)**--- IntroductionThe first test of your new data exploration skills uses data describing crime in the city of Chicago.Before you get started, run the following cell. It sets up the automated feedback system to review your answers.
###Code
# Set up feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.sql.ex1 import *
print("Setup Complete")
###Output
Using Kaggle's public dataset BigQuery integration.
Setup Complete
###Markdown
Use the next code cell to fetch the dataset.
###Code
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# Construct a reference to the "chicago_crime" dataset
dataset_ref = client.dataset("chicago_crime", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
###Output
Using Kaggle's public dataset BigQuery integration.
###Markdown
Exercises 1) Count tables in the datasetHow many tables are in the Chicago Crime dataset?
###Code
# List all the tables in the "Chicago Crime" dataset
tables = list(client.list_tables(dataset))
# print number of tables
print(len(tables))
# print list of tables
for table in tables:
print(table.table_id)
num_tables = len(tables) # Store the answer as num_tables and then run this cell
q_1.check()
###Output
_____no_output_____
###Markdown
For a hint or the solution, uncomment the appropriate line below.
###Code
#q_1.hint()
#q_1.solution()
###Output
_____no_output_____
###Markdown
2) Explore the table schemaHow many columns in the `crime` table have `TIMESTAMP` data?
###Code
# construct a reference to the "crime" table
table_ref = dataset_ref.table("crime")
# API request - fetch the table
table = client.get_table(table_ref)
# Print information on all the columns in the "crime" table in the "Chicago Crime" dataset
#table.schema
# Preview the first five lines of the "crime" table
crime = client.list_rows(table,max_results = 5).to_dataframe()
# Preview the types of crime columns
crime.info()
num_timestamp_fields = 2 # Put your answer here
q_2.check()
###Output
_____no_output_____
###Markdown
For a hint or the solution, uncomment the appropriate line below.
###Code
#q_2.hint()
#q_2.solution()
###Output
_____no_output_____
###Markdown
3) Create a crime mapIf you wanted to create a map with a dot at the location of each crime, what are the names of the two fields you likely need to pull out of the `crime` table to plot the crimes on a map?
###Code
# Write the code here to explore the data so you can find the answer
# Preview the first five lines of the "crime" table
client.list_rows(table, max_results = 5).to_dataframe()
fields_for_plotting = ['latitude', 'longitude'] # Put your answers here
q_3.check()
###Output
_____no_output_____
###Markdown
For a hint or the solution, uncomment the appropriate line below.
###Code
#q_3.hint()
#q_3.solution()
###Output
_____no_output_____
###Markdown
Thinking about the question above, there are a few columns that appear to have geographic data. Look at a few values (with the `list_rows()` command) to see if you can determine their relationship. Two columns will still be hard to interpret. But it should be obvious how the `location` column relates to `latitude` and `longitude`.
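For example, a quick way to compare the geographic fields side by side (a sketch; the column subset below simply mirrors the fields noted in the next cell and in the earlier preview) might be:
```
# Pull a few rows and keep only the columns that look geographic
geo_preview = client.list_rows(table, max_results=5).to_dataframe()
geo_preview[['block', 'x_coordinate', 'y_coordinate', 'latitude', 'longitude', 'location']]
```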
###Code
# block: street
# x_coordinate, y_coordinate (other )
# location: made of (latitude, longitude)
# maybe also check district, community_area, iucr?
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/01_data_collection-checkpoint.ipynb | ###Markdown
Data Collection--- For this project, I'm building a model to identify periods of coastal upwelling off the coast of Oregon using data collected by the Ocean Observatories Initiative (OOI). I intend to use environmental variables, such as seawater temperature, salinity, and dissolved oxygen, as features in a classification model, and I'll be labeling my target variable using the CUTI upwelling index. The OOI has several instrument packages off the Washington and Oregon coasts; for this project, I'll be focusing on the Oregon Offshore site, located off Newport, Oregon. The instrument packages found here include a surface mooring that has a bulk meteorology package, a shallow profiler that collects data in the upper ~200 meters of the water column, a stationary platform located at a depth of 200 meters, and a deep profiler that collects data in the lower portion of the water column.
###Code
# Imports
import numpy as np
import sys, os
import xarray as xr
import pandas as pd
import cmocean.cm as cmo
import requests
import re
import datetime as dt
import seaborn as sns
from netCDF4 import Dataset, num2date, date2num
from datetime import datetime, timedelta
from numpy import datetime64 as dt64, timedelta64 as td64
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
OOI APIIn order to run this notebook, you'll need to set up an account with the OOI and get a username and temporary token to use for data requests. You can do this here: https://ooinet.oceanobservatories.org/.Once you've made an account, copy and paste your username and token into the cell below.
###Code
# enter your OOI API username and token
API_USERNAME = 'OOIAPI-xx' # this will be similar to U6ZIZ5UNB1LIMA
API_TOKEN = 'xx' # this will be similar to VUO6PXYMNLE
###Output
_____no_output_____
###Markdown
Make sure you don't upload your API username and token combination to a public repository! If you accidentally do, you can go to the OOI website and get a new token - do this as soon as possible to prevent your credentials being used without your consent. --- Create output directory Set up an output directory to store the data pulled by this notebook - these files are fairly large, so they won't be saved to this repository. Instead, they'll be stored in a directory called `coastal_upwelling_output` that will be parallel to this repository on your local machine.
###Code
parent_dir = os.path.dirname(os.getcwd())
grandparent_dir = os.path.dirname(parent_dir)
output_dir = os.path.join(grandparent_dir, 'coastal_upwelling_output')
try:
os.mkdir(output_dir)
except OSError as error:
pass
print(f'Data will be stored in {output_dir}.')
###Output
Data will be stored in C:\Users\Derya\Documents\GitHub\coastal_upwelling_output.
###Markdown
---
Pull data
Start by looking at just a small selection of the data available:
* pull data from the Oregon Offshore location (CE04)
* use the surface mooring, 200m platform, and shallow profiler
* was going to start with March-June 2017 but ended up pulling data for all of 2017
* 2017 had poor continuity for the shallow profiler, so I also ended up pulling data for all of 2018 as well
The following two functions were provided by the OOI for requesting and downloading data.
`request_data` takes your API username and temporary token and inputs a request for data from the OOI. This function returns the URL where your requested data is stored, but the URL is not populated right away because these requests take time, especially if you request an entire year's worth of data! If you pass these URLs to the `get_data` function right away, you might get nothing but errors because the data isn't ready yet. When it is ready, you'll get an email notification with the same URL in it as is returned by the `request_data` function. Then you'll know it's time to run the next function!
The URLs don't expire, so you can keep using them if you requested the data but didn't save it locally to your machine, which I highly recommend doing. I've saved all the data requests I've done in the file `data_urls.txt` for use again later.
###Code
def request_data(reference_designator, method, stream, start_date=None, end_date=None):
site = reference_designator[:8]
node = reference_designator[9:14]
instrument = reference_designator[15:]
# Create the request URL
api_base_url = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv'
data_request_url = '/'.join((api_base_url, site, node, instrument, method, stream))
print(data_request_url)
# All of the following are optional, but you should specify a date range
params = {
'format': 'application/netcdf',
'include_provenance': 'true',
'include_annotations': 'true'
}
if start_date:
params['beginDT'] = start_date
if end_date:
params['endDT'] = end_date
# Make the data request
r = requests.get(data_request_url, params=params, auth=(API_USERNAME, API_TOKEN))
data = r.json()
# Return just the THREDDS URL
return data['allURLs'][0]
###Output
_____no_output_____
###Markdown
`get_data` takes the URL provided by the `request_data` function, finds the .nc folders and the data files on the OPeNDAP server, and reads them in. These files are the standard netCDF file type and are initially opened with xarray, but this function returns them to you as a pandas dataframe. Running `get_data` can take a while if you are getting a lot of data at once.
###Code
def get_data(url, variables, deployments=None):
# Function to grab all data from specified directory
tds_url = 'https://opendap.oceanobservatories.org/thredds/dodsC'
dataset = requests.get(url).text
ii = re.findall(r'href=[\'"]?([^\'" >]+)', dataset)
# x = re.findall(r'(ooi/.*?.nc)', dataset)
x = [y for y in ii if y.endswith('.nc')]
for i in x:
if i.endswith('.nc') == False:
x.remove(i)
for i in x:
try:
float(i[-4])
except:
x.remove(i)
# dataset = [os.path.join(tds_url, i) for i in x]
datasets = [os.path.join(tds_url, i.split('=')[-1]).replace("\\","/") for i in x]
# remove deployments not in deployment list, if given
if deployments is not None:
deploy = ['deployment{:04d}'.format(j) for j in deployments]
datasets = [k for k in datasets if k.split('/')[-1].split('_')[0] in deploy]
# remove collocated data files if necessary
catalog_rms = url.split('/')[-2][20:]
selected_datasets = []
for d in datasets:
if catalog_rms == d.split('/')[-1].split('_20')[0][15:]:
selected_datasets.append(d)
# create a dictionary to populate with data from the selected datasets
data_dict = {'time': np.array([], dtype='datetime64[ns]')}
unit_dict = {}
for v in variables:
data_dict.update({v: np.array([])})
unit_dict.update({v: []})
print('Appending data from files')
for sd in selected_datasets:
try:
url_with_fillmismatch = f'{sd}#fillmismatch' # I had to add this line to get the function to work
ds = xr.open_dataset(url_with_fillmismatch, mask_and_scale=False)
data_dict['time'] = np.append(data_dict['time'], ds['time'].values)
for var in variables:
data_dict[var] = np.append(data_dict[var], ds[var].values)
units = ds[var].units
if units not in unit_dict[var]:
unit_dict[var].append(units)
except:
pass
# convert dictionary to a dataframe
df = pd.DataFrame(data_dict)
df.sort_values(by=['time'], inplace=True) # make sure the timestamps are in ascending order
return df, unit_dict
###Output
_____no_output_____
###Markdown
You can uncomment the three cells below and run the requests, but you'll need to have entered your own API credentials near the start of the notebook. You only need to run the requests once, because the resulting URLs don't expire. However, requesting a full year's worth of data takes several minutes! The cells below will output a URL right away, but the `get_data()` function won't work until the request is actually fulfilled - you'll get an email from the OOI when your request is completed, and then you'll be able to continue.
###Code
# Request data from the bulk meteorology package on the surface mooring
# METBK_url = request_data('CE04OSSM-SBD11-06-METBKA000', 'recovered_host',
# 'metbk_a_dcl_instrument_recovered',
# '2017-01-01T00:00:00.000Z', '2017-12-31T12:00:00.000Z')
# print('METBK_url: %s' %METBK_url)
# Request data from the CTD-O on the shallow profiler
# profiler_url = request_data('CE04OSPS-PC01B-4A-CTDPFA107', 'streamed', 'ctdpf_sbe43_sample',
# '2017-01-01T00:00:00.000Z', '2017-12-31T12:00:00.000Z')
# print('profiler_url: %s' %profiler_url)
# Request data from the CTD-O on the 200 meter platform
# platform_url = request_data('CE04OSPS-PC01B-4A-CTDPFA109', 'streamed',
# 'ctdpf_optode_sample',
# '2017-01-01T00:00:00.000Z', '2017-12-31T12:00:00.000Z')
# print('platform_url: %s' %platform_url)
###Output
_____no_output_____
###Markdown
Since I used my own credentials to get these URLs, I'm not sure they'll work for you. You may need to enter your own credentials, run the `request_data()` cells above, and replace the URLs below with the output.Here are three URLs that have data for the year 2017. We can use these to load in data files. Putting these URLs into your browser window will bring you to the OPeNDAP server where you can see variable names and descriptions. There are a lot of folders to navigate through, but [here](https://opendap.oceanobservatories.org/thredds/dodsC/ooi/[email protected]/20210422T030848056Z-CE04OSPS-SF01B-2A-CTDPFA107-streamed-ctdpf_sbe43_sample/deployment0004_CE04OSPS-SF01B-2A-CTDPFA107-streamed-ctdpf_sbe43_sample_20170801T160709.510843-20170916T121340.481090.nc.html) is an example of the CTD data, and [here](https://opendap.oceanobservatories.org/thredds/dodsC/ooi/[email protected]/20210422T030752259Z-CE04OSSM-SBD11-06-METBKA000-recovered_host-metbk_a_dcl_instrument_recovered/deployment0006_CE04OSSM-SBD11-04-VELPTA000-recovered_host-velpt_ab_dcl_instrument_recovered_20180403T183000-20180403T183000.nc.html) is an example of the METBK data. You can navigate to these examples by using the URLs below, selecting a .nc folder, and then clicking on the OPeNDAP link.
###Code
METBK_2017_url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/[email protected]/20210422T030752259Z-CE04OSSM-SBD11-06-METBKA000-recovered_host-metbk_a_dcl_instrument_recovered/catalog.html'
profiler_2017_url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/[email protected]/20210422T030848056Z-CE04OSPS-SF01B-2A-CTDPFA107-streamed-ctdpf_sbe43_sample/catalog.html'
platform_2017_url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/[email protected]/20210428T021551666Z-CE04OSPS-PC01B-4A-CTDPFA109-streamed-ctdpf_optode_sample/catalog.html'
###Output
_____no_output_____
###Markdown
Get 2017 data
Time to actually get the data! The `get_data` function returns a pandas dataframe, so if you'd rather use xarray you can convert the resulting dataframe to an xarray data array or alter the `get_data` function to return one directly.
###Code
# Specify the variable(s) of interest
METBK_2017_var = ['sea_surface_temperature', 'met_windavg_mag_corr_east', 'met_windavg_mag_corr_north']
profiler_2017_var = ['seawater_pressure', 'density', 'practical_salinity', 'seawater_temperature', 'corrected_dissolved_oxygen']
platform_2017_var = ['seawater_pressure', 'density', 'practical_salinity', 'seawater_temperature', 'dissolved_oxygen']
###Output
_____no_output_____
###Markdown
The cell below takes a few minutes to run because the datasets we're getting from the OOI are quite large!
###Code
# Get the data!
METBK_2017_data, METBK_2017_units = get_data(METBK_2017_url, METBK_2017_var)
profiler_2017_data, profiler_2017_units = get_data(profiler_2017_url, profiler_2017_var)
platform_2017_data, platform_2017_units = get_data(platform_2017_url, platform_2017_var)
# Check the variable units
print(METBK_2017_units)
print(profiler_2017_units)
print(platform_2017_units)
# Save the unit dictionaries above in case you accidentally overwrite the output:
METBK_2017_units = {'sea_surface_temperature': ['ºC'], 'met_windavg_mag_corr_east': ['m s-1'], 'met_windavg_mag_corr_north': ['m s-1']}
profiler_2017_units = {'seawater_pressure': ['dbar'], 'density': ['kg m-3'], 'practical_salinity': ['1'], 'seawater_temperature': ['ºC'], 'corrected_dissolved_oxygen': ['µmol kg-1']}
platform_2017_units = {'seawater_pressure': ['dbar'], 'density': ['kg m-3'], 'practical_salinity': ['1'], 'seawater_temperature': ['ºC'], 'dissolved_oxygen': ['µmol kg-1']}
###Output
_____no_output_____
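###Markdown
As noted above, the dataframes returned by `get_data` can be converted to xarray objects if you prefer working with labelled arrays. A minimal, purely illustrative sketch for one of the 2017 dataframes:
###Code
# Convert the profiler dataframe to an xarray Dataset indexed by time
profiler_2017_ds = profiler_2017_data.set_index('time').to_xarray()
profiler_2017_ds
###Output
_____no_output_____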
###Markdown
Save these data files as `.csv`s so we can use them in the rest of the notebooks. This will take a few minutes!
###Code
# Save 2017 dataframes to the output folder parallel to this GitHub repo
METBK_2017_data.to_csv('../../coastal_upwelling_output/metbk_data_2017.csv', index=False)
profiler_2017_data.to_csv('../../coastal_upwelling_output/profiler_data_2017.csv', index=False)
platform_2017_data.to_csv('../../coastal_upwelling_output/platform_data_2017.csv', index=False)
###Output
_____no_output_____
###Markdown
---
Get 2018 data
The data availability in 2017 wasn't very good for the shallow profiler (it spent quite a number of months stuck near 200 meters), so I want to pull in the 2018 data to see if it's any better. The code below is all the same as the code above - the only differences are the dates that I used in the data requests.
###Code
# METBK_url = request_data('CE04OSSM-SBD11-06-METBKA000', 'recovered_host',
# 'metbk_a_dcl_instrument_recovered',
# '2018-01-01T00:00:00.000Z', '2018-12-31T12:00:00.000Z')
# print('METBK_url: %s' %METBK_url)
# profiler_url = request_data('CE04OSPS-SF01B-2A-CTDPFA107', 'streamed', 'ctdpf_sbe43_sample',
# '2018-01-01T00:00:00.000Z', '2018-12-31T12:00:00.000Z')
# print('profiler_url: %s' %profiler_url)
# platform_url = request_data('CE04OSPS-PC01B-4A-CTDPFA109', 'streamed',
# 'ctdpf_optode_sample',
# '2018-01-01T00:00:00.000Z', '2018-12-31T12:00:00.000Z')
# print('platform_url: %s' %platform_url)
###Output
https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample
platform_url: https://opendap.oceanobservatories.org/thredds/catalog/ooi/[email protected]/20210502T005215562Z-CE04OSPS-PC01B-4A-CTDPFA109-streamed-ctdpf_optode_sample/catalog.html
###Markdown
Again, all of these URLs and their associated `request_data` inputs are saved in the `data_urls.txt` file in the repo in case you lose them.
###Code
METBK_2018_url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/[email protected]/20210502T005210982Z-CE04OSSM-SBD11-06-METBKA000-recovered_host-metbk_a_dcl_instrument_recovered/catalog.html'
profiler_2018_url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/[email protected]/20210502T005211652Z-CE04OSPS-SF01B-2A-CTDPFA107-streamed-ctdpf_sbe43_sample/catalog.html'
platform_2018_url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/[email protected]/20210502T005215562Z-CE04OSPS-PC01B-4A-CTDPFA109-streamed-ctdpf_optode_sample/catalog.html'
# Specify the variable(s) of interest
METBK_2018_var = ['sea_surface_temperature', 'met_windavg_mag_corr_east', 'met_windavg_mag_corr_north']
profiler_2018_var = ['seawater_pressure', 'density', 'practical_salinity', 'seawater_temperature', 'corrected_dissolved_oxygen']
platform_2018_var = ['seawater_pressure', 'density', 'practical_salinity', 'seawater_temperature', 'dissolved_oxygen']
###Output
_____no_output_____
###Markdown
For some reason, the platform data was throwing an error in the `get_data()` function for one of the `.nc` files, so I had to go back and add a try/except block to it. This means the platform data collected by this code may not be all of the data available, but I'm not sure what's causing that to happen.
###Code
# get the data!
METBK_data_2018, METBK_2018_units = get_data(METBK_2018_url, METBK_2018_var)
profiler_data_2018, profiler_2018_units = get_data(profiler_2018_url, profiler_2018_var)
platform_data_2018, platform_2018_units = get_data(platform_2018_url, platform_2018_var)
# check the variable units
print(METBK_2018_units)
print(profiler_2018_units)
print(platform_2018_units)
# Save the unit dictionaries above in case you accidentally overwrite the output:
METBK_2018_units = {'sea_surface_temperature': ['ºC'], 'met_windavg_mag_corr_east': ['m s-1'], 'met_windavg_mag_corr_north': ['m s-1']}
profiler_2018_units = {'seawater_pressure': ['dbar'], 'density': ['kg m-3'], 'practical_salinity': ['1'], 'seawater_temperature': ['ºC'], 'corrected_dissolved_oxygen': ['µmol kg-1']}
platform_2018_units = {'seawater_pressure': ['dbar'], 'density': ['kg m-3'], 'practical_salinity': ['1'], 'seawater_temperature': ['ºC'], 'dissolved_oxygen': ['µmol kg-1']}
METBK_data_2018
profiler_data_2018
platform_data_2018
###Output
_____no_output_____
###Markdown
Looking at the start and end dates in the dataframe displays above, it doesn't look like the full year of 2018 was covered by any of these instrument packages. How unfortunate! I think the best bet will be to make a model with the 2017 data first, and then come back to the 2018 data afterwards and see if there's anything I can do with it in addition.
Save these data files as `.csv`s so we can use them in the rest of the notebooks.
###Code
# Save 2018 dataframes to the output folder parallel to this GitHub repo
METBK_data_2018.to_csv('../../coastal_upwelling_output/metbk_data_2018.csv', index=False)
profiler_data_2018.to_csv('../../coastal_upwelling_output/profiler_data_2018.csv', index=False)
platform_data_2018.to_csv('../../coastal_upwelling_output/platform_data_2018.csv', index=False)
###Output
_____no_output_____ |
dmu1/dmu1_ml_CDFS-SWIRE/1.10_CANDELS-GOODS-S.ipynb | ###Markdown
CDFS-SWIRE master catalogue
Preparation of CANDELS-GOODS-S data
The CANDELS-GOODS-S catalogue comes from `dmu0_CANDELS-GOODS-S`.
In the catalogue, we keep:
- The identifier (it's unique in the catalogue);
- The position;
- The stellarity;
- The total magnitude.
We don't know when the maps have been observed. We will use the year of the reference paper.
###Code
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
from collections import OrderedDict
import os
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from herschelhelp_internal.flagging import gaia_flag_column
from herschelhelp_internal.masterlist import nb_astcor_diag_plot, remove_duplicates
from herschelhelp_internal.utils import astrometric_correction, flux_to_mag
OUT_DIR = os.environ.get('TMP_DIR', "./data_tmp")
try:
os.makedirs(OUT_DIR)
except FileExistsError:
pass
RA_COL = "candels_ra"
DEC_COL = "candels_dec"
###Output
_____no_output_____
###Markdown
I - Column selection
###Code
imported_columns = OrderedDict({
'ID': "candels_id",
'RA': "candels_ra",
'DEC': "candels_dec",
'CLASS_STAR': "candels_stellarity",
#HST data
'ACS_F435W_FLUX': "f_acs_f435w",
'ACS_F435W_FLUXERR': "ferr_acs_f435w",
'ACS_F606W_FLUX': "f_acs_f606w",
'ACS_F606W_FLUXERR': "ferr_acs_f606w",
'ACS_F775W_FLUX': "f_acs_f775w",
'ACS_F775W_FLUXERR': "ferr_acs_f775w",
'ACS_F814W_FLUX': "f_acs_f814w",
'ACS_F814W_FLUXERR': "ferr_acs_f814w",
'ACS_F850LP_FLUX': "f_acs_f850lp",
'ACS_F850LP_FLUXERR': "ferr_acs_f850lp",
'WFC3_F098M_FLUX': "f_wfc3_f098m",
'WFC3_F098M_FLUXERR': "ferr_wfc3_f098m",
'WFC3_F105W_FLUX': "f_wfc3_f105w",
'WFC3_F105W_FLUXERR': "ferr_wfc3_f105w",
'WFC3_F125W_FLUX': "f_wfc3_f125w",
'WFC3_F125W_FLUXERR': "ferr_wfc3_f125w",
'WFC3_F160W_FLUX': "f_wfc3_f160w",
'WFC3_F160W_FLUXERR': "ferr_wfc3_f160w",
#ISAAC?
'ISAAC_KS_FLUX':"f_isaac_k",
'ISAAC_KS_FLUXERR':"ferr_isaac_k",
#HAWKI WIRCAM
'HAWKI_KS_FLUX': "f_hawki_k",# 33 WIRCAM_K_FLUX Flux density (in μJy) in the Ks-band (CFHT/WIRCam) (3)
'HAWKI_KS_FLUXERR': "ferr_hawki_k",# 34 WIRCAM_K_FLUXERR
#Spitzer/IRAC
'IRAC_CH1_FLUX': "f_candels-irac_i1",# 47 IRAC_CH1_FLUX Flux density (in μJy) in the 3.6μm-band (Spitzer/IRAC) (3)
'IRAC_CH1_FLUXERR': "ferr_candels-irac_i1",# 48 IRAC_CH1_FLUXERR Flux uncertainty (in μJy) in the 3.6μm-band (Spitzer/IRAC) (3)
'IRAC_CH2_FLUX': "f_candels-irac_i2",# 49 IRAC_CH2_FLUX Flux density (in μJy) in the 4.5μm-band (Spitzer/IRAC) (3)
'IRAC_CH2_FLUXERR': "ferr_candels-irac_i2",# 50 IRAC_CH2_FLUXERR Flux uncertainty (in μJy) in the 4.5μm-band (Spitzer/IRAC) (3)
'IRAC_CH3_FLUX': "f_candels-irac_i3",# 51 IRAC_CH3_FLUX Flux density (in μJy) in the 5.8μm-band (Spitzer/IRAC) (3)
'IRAC_CH3_FLUXERR': "ferr_candels-irac_i3",# 52 IRAC_CH3_FLUXERR Flux uncertainty (in μJy) in the 5.8μm-band (Spitzer/IRAC) (3)
'IRAC_CH4_FLUX': "f_candels-irac_i4",# 53 IRAC_CH4_FLUX Flux density (in μJy) in the 8.0μm-band (Spitzer/IRAC) (3)
'IRAC_CH4_FLUXERR': "ferr_candels-irac_i4"# 54 IRAC_CH4_FLUXERR
})
catalogue = Table.read("../../dmu0/dmu0_CANDELS-GOODS-S/data/hlsp_candels_hst_wfc3_goodss-tot-multiband_f160w_v1_cat.fits")[list(imported_columns)]
for column in imported_columns:
catalogue[column].name = imported_columns[column]
epoch = 2011
# Clean table metadata
catalogue.meta = None
# Adding flux and band-flag columns
for col in catalogue.colnames:
if col.startswith('f_'):
errcol = "ferr{}".format(col[1:])
        # Objects with a flux of -99 are treated as missing values
mask = np.isclose(catalogue[col], -99.)
catalogue[col][mask] = np.nan
catalogue[errcol][mask] = np.nan
mag, error = flux_to_mag(np.array(catalogue[col])*1.e-6, np.array(catalogue[errcol])*1.e-6)
# Fluxes are added in µJy
catalogue.add_column(Column(mag, name="m{}".format(col[1:])))
catalogue.add_column(Column(error, name="m{}".format(errcol[1:])))
# Band-flag column
if "ap" not in col:
catalogue.add_column(Column(np.zeros(len(catalogue), dtype=bool), name="flag{}".format(col[1:])))
catalogue['candels_stellarity'] = catalogue['candels_stellarity'].astype(float)
catalogue[:10].show_in_notebook()
###Output
_____no_output_____
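###Markdown
For reference: assuming `flux_to_mag` expects fluxes in Jy (which would explain the factor $10^{-6}$ applied to the µJy catalogue values above), the conversion is the standard AB relation $m_{AB} = -2.5\,\log_{10}(F_\nu / 3631\,\mathrm{Jy})$, equivalent to $m_{AB} \approx -2.5\,\log_{10}(F_\nu[\mu\mathrm{Jy}]) + 23.9$.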
###Markdown
II - Removal of duplicated sources
We remove duplicated objects from the input catalogues.
###Code
SORT_COLS = ["ferr_acs_f435w","ferr_acs_f606w","ferr_acs_f775w","ferr_acs_f814w","ferr_acs_f850lp",
"ferr_wfc3_f098m","ferr_wfc3_f105w","ferr_wfc3_f125w","ferr_wfc3_f160w",
"ferr_isaac_k","ferr_hawki_k",
"ferr_candels-irac_i1","ferr_candels-irac_i2","ferr_candels-irac_i3","ferr_candels-irac_i4"]
FLAG_NAME = 'candels_flag_cleaned'
nb_orig_sources = len(catalogue)
catalogue = remove_duplicates(catalogue, RA_COL, DEC_COL, sort_col=SORT_COLS,flag_name=FLAG_NAME)
nb_sources = len(catalogue)
print("The initial catalogue had {} sources.".format(nb_orig_sources))
print("The cleaned catalogue has {} sources ({} removed).".format(nb_sources, nb_orig_sources - nb_sources))
print("The cleaned catalogue has {} sources flagged as having been cleaned".format(np.sum(catalogue[FLAG_NAME])))
###Output
The initial catalogue had 34930 sources.
The cleaned catalogue has 34926 sources (4 removed).
The cleaned catalogue has 4 sources flagged as having been cleaned
###Markdown
III - Astrometry correction
We match the astrometry to the Gaia one. We limit the Gaia catalogue to sources with a g band flux between the 30th and the 70th percentile. Some quick tests show that this gives the lowest dispersion in the results.
###Code
gaia = Table.read("../../dmu0/dmu0_GAIA/data/GAIA_CDFS-SWIRE.fits")
gaia_coords = SkyCoord(gaia['ra'], gaia['dec'])
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
gaia_coords.ra, gaia_coords.dec)
delta_ra, delta_dec = astrometric_correction(
SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]),
gaia_coords
)
print("RA correction: {}".format(delta_ra))
print("Dec correction: {}".format(delta_dec))
catalogue[RA_COL].unit = u.deg
catalogue[DEC_COL].unit = u.deg
catalogue[RA_COL] = catalogue[RA_COL] + delta_ra.to(u.deg)
catalogue[DEC_COL] = catalogue[DEC_COL] + delta_dec.to(u.deg)
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
gaia_coords.ra, gaia_coords.dec)
###Output
_____no_output_____
###Markdown
IV - Flagging Gaia objects
###Code
catalogue.add_column(
gaia_flag_column(SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]), epoch, gaia)
)
GAIA_FLAG_NAME = "candels_flag_gaia"
catalogue['flag_gaia'].name = GAIA_FLAG_NAME
print("{} sources flagged.".format(np.sum(catalogue[GAIA_FLAG_NAME] > 0)))
###Output
123 sources flagged.
###Markdown
V - Saving to disk
###Code
catalogue.write("{}/CANDELS.fits".format(OUT_DIR), overwrite=True)
###Output
_____no_output_____ |
win32_outlook_snippets.ipynb | ###Markdown
Send Mail with Local Outlook
###Code
import win32com.client as win32
def sendMail(to, subject, body, attach_path=None):
    """Send an email through the locally installed Outlook client."""
    try:
        outlook = win32.Dispatch('outlook.application')
        mail = outlook.CreateItem(0)  # 0 = olMailItem
        mail.To = to
        mail.Subject = subject
        mail.Body = body
        # mail.HTMLBody = '<h2>HTML Message body</h2>'  # optional HTML body
        # To attach a file to the email (optional):
        if attach_path is not None:
            mail.Attachments.Add(attach_path)
        mail.Send()  # Send() itself returns None, so report success explicitly
        return True
    except Exception:
        return False
###Output
_____no_output_____
###Markdown
Get mails from inbox / a specific folder
###Code
import win32com.client
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
# inbox = outlook.GetDefaultFolder(6).Folders.Item("SubFolderName") ##Read from a particular subfolder
inbox = outlook.GetDefaultFolder(6)#Inbox
messages = inbox.Items
all_mails = []
for mail in messages:
all_mails.append(mail)
all_mails
###Output
_____no_output_____
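###Markdown
Iterating over the whole inbox can be slow for large mailboxes. Outlook's `Items` collection also supports `Sort` and `Restrict`, which reorder and filter messages before iterating; a minimal sketch (the filter string is just an example):
###Code
# Sort newest first, then keep only unread messages (example filter)
messages.Sort("[ReceivedTime]", True)
unread = messages.Restrict("[UnRead] = True")
[m.Subject for m in unread][:10]
###Output
_____no_output_____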
###Markdown
Save mails to local folder in .msg format
###Code
import os
import re

# Save each mail into ./SavedMails, stripping characters Windows does not allow in file names
save_dir = os.path.join(os.getcwd(), 'SavedMails')
os.makedirs(save_dir, exist_ok=True)
for message in all_mails:
    name = re.sub(r'[\\/:*?"<>|]', '_', str(message.subject)) + '.msg'
    message.SaveAs(os.path.join(save_dir, name))
###Output
_____no_output_____ |
notebooks/Melbourne_COVID_cases.ipynb | ###Markdown
Victorian LGA COVID cases timeseries
###Code
# import libraries
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
import datetime
from datetime import datetime
# base url to scarpe
base_url = 'https://covidlive.com.au/vic/'
# read in list of Victorian LGA names
LGAs = pd.read_csv('../data/vic_LGAs.csv')
LGAs = LGAs['LGA'].tolist()
###Output
_____no_output_____
###Markdown
Scrape the daily COVID data from https://covidlive.com.au/vic/
###Code
# clean LGA names - replace space with hyphen to append to base url
LGA_url = []
for l in LGAs:
a = l.replace(" ", "-").lower()
LGA_url.append(a)
# scrape data from https://covidlive.com.au/vic/ for all LGAs
# table structure appears to change frequently
# may need to tweak code to account for changes in table structure
columns = ["Date", "Cumulative_cases", "Daily_cases", "LGA_name", "Active" "Active_cases_change"]
master_df = pd.DataFrame(columns=columns)
# iterate over each LGA
for i in LGA_url:
response = requests.get(base_url + i)
soup = BeautifulSoup(response.text, 'html.parser')
table = soup.find('table', {'class': 'DAILY-CASES-BY-LGA'})
try:
table_rows = table.find_all('tr')
print("Retrieved URL " + i)
except:
print("No data for URL " + i)
l = []
for tr in table_rows:
td = tr.find_all('td')
row = [tr.text for tr in td]
l.append(row)
df = pd.DataFrame(l, columns=["Date", "-1", "Cumulative_cases", "-2", "Daily_cases", "Active_cases", "-3", "Active_cases_change"])
del df['-1']
del df['-2']
del df['-3']
#del df['Active_cases']
df = df.drop(df.index[0])
df['LGA_name'] = i
master_df = pd.concat([master_df, df],ignore_index=True)
# drop the change column (not needed further) and check the master dataframe
del master_df['Active_cases_change']
master_df.head(10)
###Output
_____no_output_____
###Markdown
covidlive.com.au formatted digits using a thousands comma which is nice for presentation, but can be a pain when scraping data. When converting to a dataframe, pandas has inferred the data as a string, not numeric. This will need to be cleaned and converted to numeric.
###Code
# replace all commas with nothing
master_df['Cumulative_cases'] = master_df['Cumulative_cases'].str.replace(",", "")
master_df['Active_cases'] = master_df['Active_cases'].str.replace(",", "")
# convert columns to numeric
master_df['Cumulative_cases'] = pd.to_numeric(master_df['Cumulative_cases'])
master_df['Active_cases'] = pd.to_numeric(master_df['Active_cases'])
# check the data types
master_df.dtypes
###Output
_____no_output_____
###Markdown
Check a couple of different LGAs, one single word and one with a hyphen, to ensure the data has been scraped correctly.
###Code
# inspect a couple of LGAs to ensure data has been scraped correctly
# check Wyndham
master_df[master_df['LGA_name']== "wyndham"].head(5)
# check Mount-Alexander
master_df[master_df['LGA_name']== "mount-alexander"].head(5)
###Output
_____no_output_____
###Markdown
I intend to use the values in the LGA name column as labels. As such, these need to be cleaned (i.e. hyphen removed).
###Code
# update LGA where space was replaced with hypen for visualisation, and covert to proper case
master_df['LGA_name'] = master_df['LGA_name'].str.replace("-", " ").str.title()
# check LGAs updated
master_df[master_df['LGA_name'] == "Mount Alexander"].head(5)
###Output
_____no_output_____
###Markdown
Next, a flag is created to indentify which LGAs are part of greater Melbourne. While this information could be scraped, it was easier to manually pull these LGAs from https://en.wikipedia.org/wiki/Local_government_areas_of_VictoriaFinally, we run a quick count of LGA by region to check we've classified all LGAs.
###Code
# add flag to each LGA indicating if it is metro or reginal
greater_melb = ['Melbourne',
'Port Phillip',
'Stonnington',
'Yarra',
'Banyule',
'Bayside',
'Boroondara',
'Darebin',
'Glen Eira',
'Hobsons Bay',
'Kingston',
'Manningham',
'Maribyrnong',
'Monash',
'Moonee Valley',
'Moreland',
'Whitehorse',
'Brimbank',
'Cardinia',
'Casey',
'Frankston',
'Greater Dandenong',
'Hume',
'Knox',
'Maroondah',
'Melton',
'Mornington Peninsula',
'Nillumbik',
'Whittlesea',
'Wyndham',
'Yarra Ranges']
# check count of LGA by region
master_df["Region"] = np.where(master_df["LGA_name"].isin(greater_melb), "Greater Melbourne", "Regional")
print(master_df.groupby('Region')['LGA_name'].nunique())
# check the flag has been applied to the dataframe
master_df[master_df['Region'] == "Greater Melbourne"]
###Output
_____no_output_____
###Markdown
Next issue is the date. Currently, the date is represented in dd-mmm format - ideally we need this in a longer format so will convert to yyyy-mm-dd.
###Code
# convert date time
master_df['Date'] = pd.to_datetime(master_df['Date'], format='%d %b')
master_df['Date'] = master_df['Date'].apply(lambda dt: dt.replace(year=2020))
master_df.head()
###Output
_____no_output_____
###Markdown
Importing Shapefile
The next section of the notebook brings in the shapefile used to create the base layer of the map. The shapefile was sourced from the ABS: https://www.abs.gov.au/AUSSTATS/[email protected]/DetailsPage/1270.0.55.003July%202016?OpenDocument
* Read in the Victorian LGA shapefile
* Remove administrative LGAs not used for mapping
* Clean LGA names for merge with the master_df
###Code
#import libraries
import matplotlib.pyplot as plt
import geopandas as gpd
import shapefile as shp
import re
# read in the shapefle of all Australian LGAs
sf_aus = gpd.read_file('../data/AUS_LGA_SHP/LGA_2020_AUST.shp')
sf_aus.head(5)
# subset aus shapefile to vic LGAs only
vic_sf = sf_aus[sf_aus['STE_NAME16'] == 'Victoria']
# remove LGA without polygons
vic_sf = vic_sf[vic_sf['LGA_NAME20'] != 'Migratory - Offshore - Shipping (Vic.)']
vic_sf = vic_sf[vic_sf['LGA_NAME20'] != 'No usual address (Vic.)']
vic_sf = vic_sf[vic_sf['LGA_NAME20'] != 'Unincorporated Vic']
# remove text within parentheses
vic_sf['LGA_NAME20'] = vic_sf['LGA_NAME20'].str.replace(r"(\(.+\))", "", regex=True)
# strip remaining whitespace from LGA name
vic_sf['LGA_NAME20'] = vic_sf['LGA_NAME20'].str.rstrip()
# check LGAs have been cleaned
vic_sf[vic_sf['LGA_NAME20'] == "Wyndham"].head()
# check the head of the dataframe
master_df.head()
###Output
_____no_output_____
###Markdown
Merge the COVID data with the shapefile
Finally, we merge the COVID data with the VIC shapefile. This methodology was sourced from the following TDS blog: https://towardsdatascience.com/lets-make-a-map-using-geopandas-pandas-and-matplotlib-to-make-a-chloropleth-map-dddc31c1983d
###Code
# merge the vic_sf and covid data
merged = vic_sf.set_index('LGA_NAME20').join(master_df.set_index('LGA_name'))
# check the (n) of rows and columns
merged.shape
# insepct the first few rows
merged.head()
# clean the data frame
# remove rows with missing dates
df_plot = merged[merged['Date'].notna()]
# subset to only Greater Melbourne LGAs
df_plot = df_plot[df_plot['Region'] == 'Greater Melbourne']
# subset to only included required columns
df_plot = df_plot[['Cumulative_cases', 'Active_cases','geometry', 'Date', 'Region']]
###Output
_____no_output_____
###Markdown
Create a single plot
The next section creates a snapshot plot of a single date to ensure the code works to create the cumulative COVID-19 case count for each LGA. The test plot below uses data from 10 August 2020 to generate a single plot.
The colour scale can easily be changed to one of many matplotlib default scales. For more info see: https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
The value of the 'colour' variable can be edited to change to the desired colour scale.
###Code
# subset the data to a specific day
dfa = df_plot[df_plot['Date'] == '2020-08-10']
# colour palette
colour = 'YlOrRd'
# set a variable that will call whatever column we want to visualise on the map
variable = 'Cumulative_cases'
# set the range for the choropleth
vmin, vmax = 0, 1600
# create figure and axes for Matplotlib
fig, ax = plt.subplots(1, figsize=(14, 8))
dfa.plot(column=variable, cmap=colour, linewidth=0.8, ax=ax, edgecolor='0.8')
# remove the axis
ax.axis('off')
# add a title
ax.set_title('Cumulative COVID-19 cases for Greater Melbourne LGAs', fontdict={'fontsize': '18','fontweight' : '3'})
# create an annotation for the data source
ax.annotate('Data source: https://covidlive.com.au',
xy=(0.1, .08), xycoords='figure fraction',
horizontalalignment='left', verticalalignment='top',
fontsize=10, color='#555555')
# Create colorbar as a legend
sm = plt.cm.ScalarMappable(cmap=colour, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm)
fig.savefig('../plots/single_plot/2020-08-10_cumulative_cases.png', dpi=300)
###Output
_____no_output_____
###Markdown
Loop to create plots for each day
Finally we extract all dates from the master_df dataframe. This will be used to iterate over each date and subset the dataframe to date [i]. We will generate one plot for each date and output the plot to the destination folder. From there, the plots will be stitched together to create a GIF or video. Again, the bulk of the code to create plots for each day was adapted from: https://towardsdatascience.com/lets-make-a-map-using-geopandas-pandas-and-matplotlib-to-make-a-chloropleth-map-dddc31c1983d
###Code
# reformat the dates in the covid data dataframe
master_df['Date_only'] = [d.strftime("%Y-%m-%d") for d in master_df['Date']]
# extract a list of the unique dates - this will be used to iterate over in the for loop below
dates = list(master_df['Date_only'].unique())
# merge the covid data and the shapefile dataframes
#merged = vic_sf.set_index('LGA_NAME20').join(master_df.set_index('LGA_name'))
#merged = merged[merged['Region'] == 'Greater Melbourne']
# remove NAs, subset to metro, remove columns
#df1 = merged[merged['Date'].notna()]
#df1 = df1[df1['Region'] == 'Greater Melbourne']
#df1 = df1[['Cumulative_cases', 'Active_cases','geometry', 'Date', 'Region']]
# reformat dates
#df1['Day'] = df1['Date'].dt.day
#df1['Month'] = df1['Date'].dt.month
#import calendar
#df1['Month'] = df1['Month'].apply(lambda x: calendar.month_abbr[x])
# start the for loop to create one map per day
import os
import warnings
warnings.filterwarnings('ignore')
# set the range for the choropleth
vmin, vmax = 0, 1500
output_path = '../plots/cumulative_cases'
variable = 'Cumulative_cases'
i = 1
colour = 'YlOrRd'
for date in dates:
#subset data to each day
data = df_plot[df_plot['Date'] == date]
data['Cumulative_cases'] = pd.to_numeric(data['Cumulative_cases'])
# create map, UDPATE: added plt.Normalize to keep the legend range the same for all maps
fig = data.plot(column=variable, cmap=colour, figsize=(15,8), linewidth=0.8, edgecolor='0.8', vmin=vmin, vmax=vmax, legend=True, norm=plt.Normalize(vmin=vmin, vmax=vmax))
# remove axis of chart
fig.axis('off')
# add a title
fig.set_title('Cumulative COVID-19 cases\nGreater Melbourne - ' + str(date), fontdict={'fontsize': '20','fontweight' : '3'})
# create an annotation for the data source
fig.annotate('Data source: https://covidlive.com.au',
xy=(0.1, .08), xycoords='figure fraction',
horizontalalignment='left', verticalalignment='top',
fontsize=10, color='#555555')
# this will save the figure as a high-res png in the output path. you can also save as svg if you prefer.
filepath = os.path.join(output_path, str(date) +'_covid_cases.jpg')
chart = fig.get_figure()
chart.savefig(filepath, dpi=350)
print("Saved image: " + output_path + str(date) +'_covid_cases.jpg')
i = i + 1
###Output
_____no_output_____
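###Markdown
To stitch the saved frames into a GIF, as mentioned above, a library such as `imageio` can be used. A minimal sketch, assuming `imageio` is installed and the frames were written to `../plots/cumulative_cases` as in the loop above:
###Code
import glob
import imageio

frame_files = sorted(glob.glob('../plots/cumulative_cases/*_covid_cases.jpg'))
frames = [imageio.imread(f) for f in frame_files]
imageio.mimsave('../plots/cumulative_cases.gif', frames, duration=0.2)
###Output
_____no_output_____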
###Markdown
Create another set of plots using active cases
- We need to create a single plot to make sure we get all the plot features correct before running the loop.
- Before that, we need to find the max and min values to ensure the plot colour scale is set to a sensible range - active cases and cumulative cases will have two very different ranges.
###Code
# get min and max values
active_max = df_plot['Active_cases'].max()
active_min = df_plot['Active_cases'].min()
# subset the data to a specific day
dfa = df_plot[df_plot['Date'] == '2020-08-10']
# colour palette
colour = 'YlOrRd'
# set a variable that will call whatever column we want to visualise on the map
variable = 'Active_cases'
# set the range for the choropleth
vmin, vmax = active_min, active_max
# create figure and axes for Matplotlib
fig, ax = plt.subplots(1, figsize=(14, 8))
dfa.plot(column=variable, cmap=colour, linewidth=0.8, ax=ax, edgecolor='0.8')
# remove the axis
ax.axis('off')
# add a title
ax.set_title('Active COVID-19 cases for Greater Melbourne LGAs', fontdict={'fontsize': '18','fontweight' : '3'})
# create an annotation for the data source
ax.annotate('Data source: https://covidlive.com.au',
xy=(0.1, .08), xycoords='figure fraction',
horizontalalignment='left', verticalalignment='top',
fontsize=10, color='#555555')
# Create colorbar as a legend
sm = plt.cm.ScalarMappable(cmap=colour, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm)
fig.savefig('../plots/single_plot/2020-08-10_active_cases.png', dpi=300)
# start the for loop to create one map per day
import os
import warnings
warnings.filterwarnings('ignore')
# set the range for the choropleth
vmin, vmax = active_min, active_max
output_path = '../plots/active_cases'
variable = 'Active_cases'
i = 1
colour = 'YlOrRd'
for date in dates:
#subset data to each day
data = df_plot[df_plot['Date'] == date]
data['Active_cases'] = pd.to_numeric(data['Active_cases'])
# create map, UDPATE: added plt.Normalize to keep the legend range the same for all maps
fig = data.plot(column=variable, cmap=colour, figsize=(15,8), linewidth=0.8, edgecolor='0.8', vmin=vmin, vmax=vmax, legend=True, norm=plt.Normalize(vmin=vmin, vmax=vmax))
# remove axis of chart
fig.axis('off')
# add a title
fig.set_title('Daily active COVID-19 cases\nGreater Melbourne - ' + str(date), fontdict={'fontsize': '20','fontweight' : '3'})
# create an annotation for the data source
fig.annotate('Data source: https://covidlive.com.au',
xy=(0.1, .08), xycoords='figure fraction',
horizontalalignment='left', verticalalignment='top',
fontsize=10, color='#555555')
# this will save the figure as a high-res png in the output path. you can also save as svg if you prefer.
filepath = os.path.join(output_path, str(date) +'_covid_cases.jpg')
chart = fig.get_figure()
chart.savefig(filepath, dpi=350)
print("Saved image: " + output_path + str(date) +'_covid_cases.jpg')
i = i + 1
###Output
_____no_output_____ |
2020-04-16.solarAnalytics/04.Analysis/2020-05-17.Model_eng.001.MultiLinear.ipynb | ###Markdown
Model engineering 001: MultiLinearRegression
In this part of the project to predict the photovoltaic production of solar cells on a roof we are considering a simple regression model - multiple linear regression. We will treat this as a regression problem, not taking the temporal aspect, i.e. time series forecasting, into account. Data mining and missing value treatment of weather data from the DarkSky API and data from the photovoltaic system were covered in other notebooks:
- Data mining and EDA of weather data: https://kyso.io/heiko/predicting-solar-panel-output-eda-of-photovoltaic-data
- Data mining and EDA of photovoltaic data: https://kyso.io/heiko/predicting-solar-panel-output-eda-of-weather-data
- Missing value treatment: https://kyso.io/heiko/predicting-solar-panel-output-missing-value-treatment-of-weather-data
Methodology
We will apply multilinear regression by following these steps:
1. Load the data into one dataframe.
2. Select the features we will use for the prediction. We can look at the correlation matrix and remove redundant features that are correlated. Multicollinearity undermines the statistical significance of an independent variable. While it should not have a major impact on the model's accuracy, it does affect the variance associated with the prediction, as well as reducing the quality of the interpretation of the independent variables. In other words, the effect your data has on the model isn't trustworthy, and your explanation of how the model takes the inputs to produce the output will not be reliable. (You can read more about this here: https://towardsdatascience.com/multicollinearity-why-is-it-a-problem-398b010b77ac). We will simply drop (one of the) columns that are correlated. We could engineer a new feature that combines the correlated features, but this is not considered here.
3. Consider missing values. This is still part of the feature selection process. We remove features that have lots of missing values that we could not interpolate.
4. Model selection. We will consider different techniques to determine which features to include in our model: backward elimination and forward stepwise selection.
Import libraries
###Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.metrics import r2_score,mean_squared_error
import eli5
sns.set()
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
###Output
_____no_output_____
###Markdown
Import dataset
Let's begin by importing the datasets for weather and photovoltaic data after missing value treatment. Check the links above for some insights on the data streams and treatment processes.
DarkSky - Weather data
###Code
df_weather = pd.read_csv('../02.Prepared_data/DarkSky/data_after_missing_value_treatment.csv', parse_dates=['time', 'sunriseTime', 'sunsetTime'])
df_weather.head()
df_weather.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 929 entries, 0 to 928
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 time 929 non-null datetime64[ns]
1 apparentTemperatureHigh 929 non-null float64
2 apparentTemperatureLow 847 non-null float64
3 cloudCover 744 non-null float64
4 precipProbability 837 non-null float64
5 precipType 839 non-null object
6 sunriseTime 929 non-null datetime64[ns]
7 sunsetTime 929 non-null datetime64[ns]
8 temperatureHigh 929 non-null float64
9 uvIndex 895 non-null float64
10 precipIntensityMax_cm 839 non-null float64
11 sun_uptime 922 non-null float64
dtypes: datetime64[ns](3), float64(8), object(1)
memory usage: 87.2+ KB
###Markdown
With datetime values in the dataframe, it is always a good idea to check if pandas has correctly parsed the dates. It seems here that no datetime was incorrectly stored as an object datatype in the dataframe, so all good.
Solar output data
###Code
df_prod = pd.read_csv('../02.Prepared_data/photovoltaic/integrated_daily.csv', parse_dates=['day'])
df_prod.head()
df_prod.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 624 entries, 0 to 623
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 day 624 non-null datetime64[ns]
1 energy 624 non-null float64
2 season 624 non-null object
dtypes: datetime64[ns](1), float64(1), object(1)
memory usage: 14.8+ KB
###Markdown
Merge datasets
Let's combine all the dataframes.
###Code
df = pd.merge(df_prod, df_weather, left_on='day', right_on='time')
df = df.set_index('day')
# what missing values there are
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_df = pd.DataFrame({'column_name': df.columns,
'percent_missing': percent_missing,
'absolute_missing': df.isnull().sum()})
missing_value_df
df_prod.shape, df.shape
###Output
_____no_output_____
###Markdown
Looks good, we have the two datasets merged. Let us now consider which features we are going to use for our prediction.
Feature selection
This step is non-trivial, but we already know from the correlation matrix that there are some highly correlated values among the temperature columns.
###Code
# %matplotlib inline
fig, ax = plt.subplots(figsize=(6,5))
sns.heatmap(df_weather.corr(), ax=ax, annot=True, cmap='viridis', fmt="0.2f");
plt.xticks(fontsize=7)
plt.yticks(fontsize=7)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
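###Markdown
As a complementary check to the correlation matrix, the variance inflation factor (VIF) quantifies how much of each predictor is explained by the other predictors. A minimal, purely diagnostic sketch using statsmodels (which is also used later in this notebook):
###Code
from statsmodels.stats.outliers_influence import variance_inflation_factor

# VIF for each numeric weather column (rows with missing values dropped first)
num_cols = df_weather.select_dtypes('number').dropna()
vifs = pd.Series([variance_inflation_factor(num_cols.values, i) for i in range(num_cols.shape[1])],
                 index=num_cols.columns)
vifs
###Output
_____no_output_____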
###Markdown
Since the `apparentTemperatureHigh` column has no missing values, we will use this column as the temperature information and drop the others. We can also drop `time`, `sunriseTime`, and `sunsetTime` since they are no longer relevant (we know the difference between sunrise and sunset from the `sun_uptime` column).
###Code
df_cleaned = df.drop(columns=['time', 'apparentTemperatureLow', 'temperatureHigh', 'sunriseTime', 'sunsetTime'])
df_cleaned.head()
# %matplotlib inline
fig, ax = plt.subplots(figsize=(6,5))
sns.heatmap(df_cleaned.corr(), ax=ax, annot=True, cmap='viridis', fmt="0.2f");
plt.xticks(fontsize=7)
plt.yticks(fontsize=7)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
We can see now that the target column, `energy`, is highly positively correlated with the temperature and uptime and moderately negatively correlated with the `cloudCover` column, which is not very surprising and agrees with our intuition - when the temperature and sun hours are high, the energy produced during one day will be high, and if there are a lot of clouds, there will be less energy produced.
Missing values
We already know that there are a bunch of missing values in 2018 and they are clumped, so there is nothing to be done about them; we will drop those rows. We will also drop the `cloudCover` column because it has the most missing values and is correlated with the probability for precipitation, for which we have more datapoints. That way we do not lose 30% of the data, but only around 15%.
###Code
df_cleaned = df_cleaned.drop(columns=['cloudCover'])
size_before = df_cleaned.shape[0]
df_cleaned = df_cleaned.dropna()
size_after = df_cleaned.dropna().shape[0]
print(f"Dropped {size_before-size_after} ({100 * (size_before-size_after)/size_before:.2f}%) entries because of missing values. New size is {size_after} entries.")
###Output
Dropped 93 (14.90%) entries because of missing values. New size is 531 entries.
###Markdown
Export this dataset
Exporting this dataset makes sense because we can save the analysis steps for other models that we consider and make sure that we use the same baseline for the other models.
###Code
df_cleaned.to_csv('../02.Prepared_data/dataset.Model_eng.001.csv', index=False)
###Output
_____no_output_____
###Markdown
Select features
###Code
# X = df_cleaned.iloc[:, 1:].values
X = df_cleaned.iloc[:, 1:]
X = X.reset_index(drop=True)
# y = df_cleaned.iloc[:, 0].values
y = df_cleaned.iloc[:, 0]
y = y.reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Pipeline
Let's build our preprocessing pipeline here. That makes later changes easier.
###Code
X.head()
###Output
_____no_output_____
###Markdown
Numeric features: StandardScaler
Standardize features by removing the mean and scaling to unit variance.
###Code
numeric_features = ['apparentTemperatureHigh', 'precipProbability', 'uvIndex', 'precipIntensityMax_cm', 'sun_uptime']
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
###Output
_____no_output_____
###Markdown
Categorical features: OneHotEncoder
This preprocessing step one-hot encodes the `season` and `precipType` columns, i.e. each category becomes its own binary indicator column. For example, winter is encoded as a vector with a 1 in the winter position and 0 everywhere else, and the other seasons are encoded analogously.
###Code
categorical_features = ['season', 'precipType']
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder())])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
###Output
_____no_output_____
###Markdown
Model
###Code
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LinearRegression())])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
###Output
_____no_output_____
###Markdown
Fit
Let's fit the model to the training data.
###Code
clf.fit(X_train, y_train);
###Output
_____no_output_____
###Markdown
Score (Full model, M0)
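The helper below also reports the adjusted score, $R^2_{\mathrm{adj}} = 1 - (1 - R^2)\,\frac{n-1}{n-p-1}$, where $n$ is the number of samples and $p$ the number of predictors.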
###Code
def adjR2(R2, n, p):
"""
Returns the adjusted R2 score using R2 score, n, and p.
n: size of dataset
p: number of predictors
"""
return 1-(1-R2)*(n-1)/(n-p-1)
y_test_predict = clf.predict(X_test)
y_train_predict = clf.predict(X_train)
M0_R2 = r2_score(y_test, y_test_predict)
M0_R2_train = r2_score(y_train, y_train_predict)
n = X_train.shape[0]
p = 1
M0_adj_R2 = adjR2(M0_R2, n, p)
M0_adj_R2_train = adjR2(M0_R2_train, n, p)
print(f"Train dataset -- R2 score: {M0_R2_train:.2f}, adjusted R2: {M0_adj_R2_train:.2f}.")
print(f"Test dataset -- R2 score: {M0_R2:.2f}, adjusted R2: {M0_adj_R2:.2f}.")
print(f"r2_score: {r2_score(y_test, y_test_predict)}")
print(f"MSE: {mean_squared_error(y_test, y_test_predict)}")
###Output
Train dataset -- R2 score: 0.77, adjusted R2: 0.77.
Test dataset -- R2 score: 0.72, adjusted R2: 0.72.
r2_score: 0.7190894719908503
MSE: 54.70055724462856
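###Markdown
A single train/test split can be sensitive to the particular split. As an optional check (not used for the model selection below), the whole pipeline can be cross-validated; a minimal sketch:
###Code
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(clf, X, y, cv=5, scoring='r2')
print(f"Cross-validated R2: {cv_scores.mean():.2f} +/- {cv_scores.std():.2f}")
###Output
_____no_output_____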
###Markdown
Feature importance
Let's assess the feature importances that the model has found.
###Code
# get importance
importance = clf['classifier'].coef_
# get feature names
onehot_columns = list(clf.named_steps['preprocessor'].named_transformers_['cat'].named_steps['onehot'].get_feature_names(input_features=categorical_features))
numeric_features_list = list(numeric_features)
numeric_features_list.extend(onehot_columns)
# summarize feature importance
for i,v in enumerate(importance):
print('Feature: %0d, Score: %.5f' % (i,v))
# plot feature importance
fig, ax = plt.subplots(figsize=(9,5))
plt.bar([x for x in range(len(importance))], importance, align='center')
plt.xticks(np.arange(0, len(importance)), numeric_features_list, rotation=90)
plt.show()
eli5.explain_weights(clf.named_steps['classifier'], top=20, feature_names=numeric_features_list)
###Output
_____no_output_____
###Markdown
We see that the temperature and sun uptime have high linear coefficients. For model selection, we will now use backward elimination and, afterwards, forward stepwise selection.
Backward elimination
Let's use the p-values to select parameters to stay in the model. For that we will use backward elimination.
1. Select a significance level to stay in the model. We use $\alpha=0.05$.
2. Fit the full model with all predictors.
3. Consider the predictor with the highest p-value. If that p-value $> \alpha$, go to step 4, otherwise end.
4. Remove the predictor with the largest p-value.
5. Fit the model without this variable, then go back to step 3.
Get the full model
###Code
pipe_preprocess = clf.named_steps['preprocessor']
# transform the feature data using the training data
X_train_transformed = pipe_preprocess.fit_transform(X_train)
X_test_transformed = pipe_preprocess.fit_transform(X_test)
# add back the column headers
# get feature names
onehot_columns = list(clf.named_steps['preprocessor'].named_transformers_['cat'].named_steps['onehot'].get_feature_names(input_features=categorical_features))
numeric_features_list = list(numeric_features)
numeric_features_list.extend(onehot_columns)
X_train_transformed = pd.DataFrame(X_train_transformed, columns=numeric_features_list, dtype=np.float)
X_test_transformed = pd.DataFrame(X_test_transformed, columns=numeric_features_list, dtype=np.float)
X_train_transformed_full = X_train_transformed.copy()
X_train_transformed.head()
###Output
_____no_output_____
###Markdown
This is the full model, let's now implement the backward elimination procedure.
###Code
import statsmodels.api as sm
alpha = 0.05 # significance level
pvals_max = 10
while pvals_max >= alpha:
#Adding constant column of ones, mandatory for sm.OLS model
X_train_1 = sm.add_constant(X_train_transformed)
#Fitting sm.OLS model
model = sm.OLS(y_train.values, X_train_1).fit()
pvals = model.pvalues.sort_values()
# largest pval is at the last position of the series
pvals_max = pvals.iloc[-1]
col_to_drop = pvals.index[-1]
X_train_transformed = X_train_transformed.drop(columns=[col_to_drop])
pvals
features = pvals.index.tolist()
features = [f for f in features if f is not 'const']
features
###Output
_____no_output_____
###Markdown
Using backward elimination we found the features to be used in the model to be: `apparentTemperatureHigh`, `sun_uptime`, `precipProbability`, `precipType_snow`, `precipType_rain`, `season_spring`, `season_winter`, `precipIntensityMax_cm`, `precipType_none`.
Let's run the regression model with these features.
###Code
# drop the features we did not want to consider
cols_to_drop = X_train_transformed_full.columns
cols_to_drop = [c for c in cols_to_drop if c not in features]
X_train_transformed_full = X_train_transformed_full.drop(columns=cols_to_drop)
X_test_transformed = X_test_transformed.drop(columns=cols_to_drop)
# run model
pipe_lm = clf.named_steps['classifier']
pipe_lm.fit(X_train_transformed_full, y_train)
y_test_predict = pipe_lm.predict(X_test_transformed)
y_train_predict = pipe_lm.predict(X_train_transformed_full)
M1_R2 = r2_score(y_test, y_test_predict)
M1_R2_train = r2_score(y_train, y_train_predict)
n = X_train.shape[0]
p = 1
M1_adj_R2 = adjR2(M1_R2, n, p)
M1_adj_R2_train = adjR2(M1_R2_train, n, p)
print("Backward eliminated model:")
print(f"Train dataset -- R2 score: {M1_R2_train:.2f}, adjusted R2: {M1_adj_R2_train:.2f}.")
print(f"Test dataset -- R2 score: {M1_R2:.2f}, adjusted R2: {M1_adj_R2:.2f}.")
print("Full model:")
print(f"Train dataset -- R2 score: {M0_R2_train:.2f}, adjusted R2: {M0_adj_R2_train:.2f}.")
print(f"Test dataset -- R2 score: {M0_R2:.2f}, adjusted R2: {M0_adj_R2:.2f}.")
###Output
Full model:
Train dataset -- R2 score: 0.77, adjusted R2: 0.77.
Test dataset -- R2 score: 0.72, adjusted R2: 0.72.
###Markdown
We see from the R2 score that our eliminated model performed worse than the full model.
Forward stepwise selection
The approach for forward stepwise selection is the following:
1. Start with the null model, $M_0$, that contains no predictors.
2. For $k = 0, \dots, p-1$:
   a. Consider all $p-k$ models that augment the predictors in $M_k$ with one additional predictor.
   b. Choose the best among these $p-k$ models, call it $M_{k+1}$. Best is defined as having the smallest RSS or highest $R^2$.
3. Select a single best model from among $M_0, \dots, M_p$ using cross-validated prediction error, $C_p$ (AIC), BIC, or adjusted $R^2$.
###Code
adjR2s = []
CPs = []
BICs = []
features_good = []
###Output
_____no_output_____
###Markdown
Null model
###Code
#Fitting sm.OLS model
X_train_tmp = sm.add_constant(X_train)['const']
model = sm.OLS(y_train.values, X_train_tmp).fit()
adjR2s.append(model.rsquared_adj)
CPs.append(model.aic)
BICs.append(model.bic)
###Output
_____no_output_____
###Markdown
Forward stepwise selection
To implement the forward stepwise selection, we first preprocess all the features.
###Code
pipe_preprocess = clf.named_steps['preprocessor']
# transform the feature data using the training data
X_train_tf = pipe_preprocess.fit_transform(X_train)
X_test_tf = pipe_preprocess.fit_transform(X_test)
# add back the column headers
# get feature names
onehot_columns = list(clf.named_steps['preprocessor'].named_transformers_['cat'].named_steps['onehot'].get_feature_names(input_features=categorical_features))
numeric_features_list = list(numeric_features)
numeric_features_list.extend(onehot_columns)
X_train_tf = pd.DataFrame(X_train_tf, columns=numeric_features_list, dtype=np.float)
X_test_tf = pd.DataFrame(X_test_tf, columns=numeric_features_list, dtype=np.float)
# features that can be added to the model
features = X_train_tf.columns
while len(features) > 0:
r2 = pd.Series() # rsquared values in this iteration
for f in features:
features_select = features_good.copy()
features_select.append(f)
# features to select from the dataframe
X_tmp = sm.add_constant(X_train_tf[features_select])
model = sm.OLS(y_train.values, X_tmp).fit()
# check if that r2 is higher
r2[f] = model.rsquared
# feature with maximum r2 gets added to the selected feature list
f = r2.sort_values().index[-1]
features_good.append(f)
# remove the feature from the features for the next iteration
features = features.drop(f)
# compute metrics from these models
X_tmp = sm.add_constant(X_train_tf[features_good])
model = sm.OLS(y_train.values, X_tmp).fit()
adjR2s.append(model.rsquared_adj)
CPs.append(model.aic)
BICs.append(model.bic)
adjR2s = np.asarray(adjR2s)
CPs = np.asarray(CPs)
BICs = np.asarray(BICs)
fig, axs = plt.subplots(2, 2, sharex=True)
axs[0][0].plot(np.arange(len(adjR2s)), adjR2s, marker='o', color='blue', alpha=0.4)
axs[0][0].set_xlabel('Model')
axs[0][0].set_ylabel('adjusted R2 Score')
adjR2_best = np.argmax(adjR2s)
axs[0][0].scatter([adjR2_best], [np.max(adjR2s)], marker='+', s=100, color='black')
axs[0][1].plot(np.arange(len(CPs)), CPs, marker='o', color='red', alpha=0.4)
axs[0][1].set_xlabel('Model')
axs[0][1].set_ylabel('AIC Score')
CPs_best = np.argmin(CPs)
axs[0][1].scatter([CPs_best], [np.min(CPs)], marker='+', s=100, color='black')
axs[1][0].plot(np.arange(len(BICs)), BICs, marker='o', color='orange', alpha=0.4)
axs[1][0].set_xlabel('Model')
axs[1][0].set_ylabel('BIC Score')
BIC_best = np.argmin(BICs)
axs[1][0].scatter([BIC_best], [np.min(BICs)], marker='+', s=100, color='black')
plt.tight_layout()
plt.savefig('model_selection.png', dpi=900)
plt.show()
adjR2_best, CPs_best, BIC_best
###Output
_____no_output_____
###Markdown
On the training data set, the model with 9 or 7 features (index in the list starts at 0) performs best; let's see how that performs on the test dataset.
###Code
adjR2s_test = []
CPs_test = []
BICs_test = []
M_adjR2 = features_good[0:adjR2_best+1] # best model from adjusted R2
X_design = sm.add_constant(X_test_tf[M_adjR2])
model = sm.OLS(y_test.values, X_design).fit()
adjR2s_test.append(model.rsquared_adj)
CPs_test.append(model.aic)
BICs_test.append(model.bic)
M_CP = features_good[0:CPs_best+1] # best model from CP
X_design = sm.add_constant(X_test_tf[M_CP])
model = sm.OLS(y_test.values, X_design).fit()
adjR2s_test.append(model.rsquared_adj)
CPs_test.append(model.aic)
BICs_test.append(model.bic)
M_BIC = features_good[0:BIC_best+1] # best model from BIC
X_design = sm.add_constant(X_test_tf[M_BIC])
model = sm.OLS(y_test.values, X_design).fit()
adjR2s_test.append(model.rsquared_adj)
CPs_test.append(model.aic)
BICs_test.append(model.bic)
# full model
X_design = sm.add_constant(X_test_tf)
model = sm.OLS(y_test.values, X_design).fit()
adjR2s_test.append(model.rsquared_adj)
CPs_test.append(model.aic)
BICs_test.append(model.bic)
adjR2s_test = np.asarray(adjR2s_test)
CPs_test = np.asarray(CPs_test)
BICs_test = np.asarray(BICs_test)
print("Performance on test set: Best model adjusted r2, CP, BIC, full model")
print("Adjusted r2:")
print(adjR2s_test)
print("CP:")
print(CPs_test)
print("BIC")
print(BICs_test)
###Output
Performance on test set: Best model adjusted r2, CP, BIC, full model
Adjusted r2:
[0.72016406 0.72016406 0.71423215 0.74028067]
CP:
[741.94934462 741.94934462 742.37755474 734.85811558]
BIC
[768.67763297 768.67763297 763.76018541 764.25923276]
|
examples/hypothesis.ipynb | ###Markdown
Using hypothesis to find interesting examplesHypothesis is a powerful and unique library for testing code. It also includes a `find` function for finding examples that satisfy an arbitrary predicate. Here, we will explore some of the neat things that can be found using this function.
###Code
from hypothesis import find
import dit
from dit.abc import *
from dit.pid import *
from dit.utils.testing import distribution_structures
dit.ditParams['repr.print'] = dit.ditParams['print.exact'] = True
###Output
_____no_output_____
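###Markdown
As a small standalone illustration of `find` (independent of `dit`, using only built-in strategies), one could search for a list of integers whose sum exceeds 10; hypothesis reports a minimal such example:
###Code
from hypothesis import find
from hypothesis import strategies as st

# find(strategy, predicate) returns a shrunk example satisfying the predicate
find(st.lists(st.integers()), lambda xs: sum(xs) > 10)
###Output
_____no_output_____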
###Markdown
To illustrate what the distribution source looks like, here we instantiate it with a `size` of 3 and an alphabet of `2`:
###Code
a = distribution_structures(size=3, alphabet=2)
a.example()
###Output
_____no_output_____
###Markdown
Negativity of co-information
###Code
def pred(value):
return lambda d: dit.multivariate.coinformation(d) < value
ce = find(distribution_structures(3, 2), pred(-1e-5))
print(ce)
print("The coinformation is: {}".format(dit.multivariate.coinformation(ce)))
ce = find(distribution_structures(3, 2), pred(-0.5))
print(ce)
print("The coinformation is: {}".format(dit.multivariate.coinformation(ce)))
###Output
Class: Distribution
Alphabet: (0, 1) for all rvs
Base: linear
Outcome Class: tuple
Outcome Length: 3
RV Names: None
x p(x)
(0, 0, 0) 1/4
(0, 1, 1) 1/4
(1, 0, 1) 1/4
(1, 1, 0) 1/4
The coinformation is: -1.0
###Markdown
The Gács-Körner common information is bounded from above by the dual total correlationAs we will see, hypothesis cannot find an example of $K > B$, because one does not exist.
###Code
def b_lt_k(d):
k = dit.multivariate.gk_common_information(d)
b = dit.multivariate.dual_total_correlation(d)
return k > b
find(distribution_structures(size=3, alphabet=3, uniform=True), b_lt_k)
###Output
_____no_output_____
###Markdown
BROJA is not ProjWe know that the BROJA and Proj PID measures are not the same, but the BROJA paper did not provide any simple examples of this. Here, we find one.
###Code
ce = find(distribution_structures(3, 2, True), lambda d: PID_BROJA(d) != PID_Proj(d))
ce
print(PID_BROJA(ce))
print(PID_Proj(ce))
###Output
+---------+--------+--------+
| I_broja | I_r | pi |
+---------+--------+--------+
| {0:1} | 0.5000 | 0.0000 |
| {0} | 0.3113 | 0.1887 |
| {1} | 0.3113 | 0.1887 |
| {0}{1} | 0.1226 | 0.1226 |
+---------+--------+--------+
+--------+--------+--------+
| I_proj | I_r | pi |
+--------+--------+--------+
| {0:1} | 0.5000 | 0.0425 |
| {0} | 0.3113 | 0.1462 |
| {1} | 0.3113 | 0.1462 |
| {0}{1} | 0.1650 | 0.1650 |
+--------+--------+--------+
|
semester2/notebooks/1.5-writing-functions-solutions.ipynb | ###Markdown
Writing Functions--- **EXERCISES** _1. Suppose we have the following String variable:_```pythony = "You just called a function on y"```_Please, write a function `f()` in python code that prints the above `y` string variable_. **SOLUTION**
###Code
# create a variable y
y = "You just called a function on y"
def f(y):
print(y)
# call the function
f(y)
###Output
You just called a function on y
###Markdown
_2. Suppose we have a function that adds 5 to a given integer as following:_```pythondef addFive(x): x = x + 5 return x```_What does this return?_```python5 + addFive(5)``` **SOLUTION**
###Code
def addFive(x):
x = x + 5
return x
5 + addFive(5) #add 5 to our addFive function
###Output
_____no_output_____
###Markdown
_3. Write a function `bmi(height, weight)` that returns the Body Mass Index._ The formula for BMI is weight in kg divided by height in metres squared (kg/m²). **SOLUTION**
###Code
def bmi(height,weight):
bmi = weight/(height**2)
return(bmi)
# test your function
bmi(1.87, 79)  # height in metres, weight in kg
###Output
_____no_output_____ |
training/dlscore03_train.ipynb | ###Markdown
Training workflow for DLScore version 3 Changes: PDB ids of the test files are saved in a pickle file to use later for testing purposes. Networks are sorted by validation performance. The censoring ('sensoring') method wasn't used.
###Code
from __future__ import print_function
import numpy as np
import pandas as pd
import keras
from keras import metrics
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras import backend as K
from keras import regularizers
from keras import initializers
from keras.callbacks import EarlyStopping
from keras.utils.training_utils import multi_gpu_model
from keras.utils import plot_model
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
import random
import os.path
import itertools
import pickle
import json
from tqdm import *
import glob
import re
import csv
import multiprocessing as mp
from tqdm import *
random.seed(12345)
# Sensoring outliers
def sensoring(test_x, train_y, pred):
""" Sensor the predicted data to get rid of outliers
"""
mn = np.min(train_y)
mx = np.max(train_y)
pred = np.minimum(pred, mx)
pred = np.maximum(pred, mn)
return pred
def split_data(x, y, pdb_ids, valid_size=0.1, test_size=0.1):
"""Converts the pandas dataframe into a matrix.
Splits the data into train, test and validations set.
Returns numpy arrays"""
# Load the indices of the non-zero columns.
# The same indices need to be used during the evaluation of test data
#with open("nonzero_column_indices.pickle", "rb") as f:
# non_zero_columns = pickle.load(f)
# Filter the zero columns out
#data = data[:, non_zero_columns]
pdb_ids = np.array(pdb_ids)
# Validation set
val_count = int(x.shape[0]*valid_size) # Number of examples to take
    val_ids = np.random.choice(x.shape[0], val_count, replace=False) # Select rows randomly without replacement
val_x = x[val_ids, :]
val_y = y[val_ids]
# Save the pdb ids of the validation set in disk
with open('val_pdb_ids.pickle', 'wb') as f:
pickle.dump(pdb_ids[val_ids], f)
# Remove validation set from data
mask = np.ones(x.shape[0], dtype=bool)
mask[val_ids] = False
x = x[mask, :]
y = y[mask]
pdb_ids = pdb_ids[mask]
# Test set
test_count = int(x.shape[0]*test_size)
    test_ids = np.random.choice(x.shape[0], test_count, replace=False)  # sample rows without replacement
test_x = x[test_ids, :]
test_y = y[test_ids]
# Save the pdb ids of the test set in disk
with open('test_pdb_ids.pickle', 'wb') as f:
pickle.dump(pdb_ids[test_ids], f)
# Remove test set from data
mask = np.ones(x.shape[0], dtype=bool)
mask[test_ids] = False
x = x[mask, :]
y = y[mask]
return x, y, val_x, val_y, test_x, test_y
def train_test_split(x, y, pdb_ids, test_size=0.1):
"""Converts the pandas dataframe into a matrix.
Splits the data into train, test and validations set.
Returns numpy arrays"""
# Load the indices of the non-zero columns.
# The same indices need to be used during the evaluation of test data
#with open("nonzero_column_indices.pickle", "rb") as f:
# non_zero_columns = pickle.load(f)
# Filter the zero columns out
#data = data[:, non_zero_columns]
pdb_ids = np.array(pdb_ids)
# Test set
test_count = int(x.shape[0]*test_size)
    test_ids = np.random.choice(x.shape[0], test_count, replace=False)  # sample rows without replacement
test_x = x[test_ids, :]
test_y = y[test_ids]
# Save the pdb ids of the test set in disk
with open('test_pdb_ids.pickle', 'wb') as f:
pickle.dump(pdb_ids[test_ids], f)
# Remove test set from data
mask = np.ones(x.shape[0], dtype=bool)
mask[test_ids] = False
x = x[mask, :]
y = y[mask]
return x, y, test_x, test_y
# Build the model
def get_model(x_size, hidden_layers, dr_rate=0.5, l2_lr=0.01):
model = Sequential()
model.add(Dense(hidden_layers[0], activation="relu", kernel_initializer='normal', input_shape=(x_size,)))
model.add(Dropout(0.2))
for i in range(1, len(hidden_layers)):
model.add(Dense(hidden_layers[i],
activation="relu",
kernel_initializer='normal',
kernel_regularizer=regularizers.l2(l2_lr),
bias_regularizer=regularizers.l2(l2_lr)))
model.add(Dropout(dr_rate))
model.add(Dense(1, activation="linear"))
return(model)
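# Optional sanity check of get_model (hypothetical sizes: 348 inputs matches the
# feature columns read from Data_new.csv in run(); the hidden sizes are arbitrary).
_demo_model = get_model(348, hidden_layers=(512, 256))
_demo_model.summary()
del _demo_model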
# def get_hidden_layers():
# x = [128, 256, 512, 768, 1024, 2048]
# hl = []
# for i in range(1, len(x)):
# hl.extend([p for p in itertools.product(x, repeat=i+1)])
# return hl
def run(serial=0):
if serial:
print('Running in parallel')
else:
print('Running standalone')
# Create the output directory
output_dir = "output_0313/"
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Preprocess the data
pdb_ids = []
x = []
y = []
with open('Data_new.csv', 'r') as f:
reader = csv.reader(f)
next(reader, None) # Skip the header
for row in reader:
pdb_ids.append(str(row[0]))
x.append([float(i) for i in row[1:349]])
y.append(float(row[349]))
x = np.array(x, dtype=np.float32)
y = np.array(y, dtype=np.float32)
# Normalize the data
mean = np.mean(x, axis=0)
std = np.std(x, axis=0) + 0.00001
x_n = (x - mean) / std
# Write things down
transform = {}
transform['std'] = std
transform['mean'] = mean
with open(output_dir + 'transform.pickle', 'wb') as f:
pickle.dump(transform, f)
# Read the 'best' hidden layers
with open("best_hidden_layers.pickle", "rb") as f:
hidden_layers = pickle.load(f)
# Determine if running all alone or in parts (if in parts, assuming 8)
if serial:
chunk_size = (len(hidden_layers)//8) + 1
hidden_layers = [hidden_layers[i*chunk_size:i*chunk_size+chunk_size] for i in range(8)][serial-1]
# Network parameters
epochs = 100
batch_size = 128
keras_callbacks = [EarlyStopping(monitor='val_mean_squared_error',
min_delta = 0,
patience=20,
verbose=0)
]
# Split the data into training and test set
train_x, train_y, test_x, test_y = train_test_split(x_n, y, pdb_ids, test_size=0.1)
#train_x, train_y, val_x, val_y, test_x, test_y = split_data(x_n, y, pdb_ids)
pbar = tqdm_notebook(total=len(hidden_layers),
desc='GPU: ' + str(serial))
for i in range(len(hidden_layers)):
if serial:
model_name = 'model_' + str(serial) + '_' + str(i)
else:
model_name = 'model_' + str(i)
# Set dynamic memory allocation in a specific gpu
config = K.tf.ConfigProto()
config.gpu_options.allow_growth = True
if serial:
config.gpu_options.visible_device_list = str(serial-1)
K.set_session(K.tf.Session(config=config))
# Build the model
model = get_model(train_x.shape[1], hidden_layers=hidden_layers[i])
# Save the model
with open(output_dir + model_name + ".json", "w") as json_file:
json_file.write(model.to_json())
if not serial:
# If not running with other instances then use 4 GPUs
model = multi_gpu_model(model, gpus=4)
model.compile(
loss='mean_squared_error',
optimizer=keras.optimizers.Adam(lr=0.001),
metrics=[metrics.mse])
#Save the initial weights
ini_weights = model.get_weights()
# 10 fold cross validation
kf = KFold(n_splits=10)
val_fold_score = 0.0
train_fold_score = 0.0
for _i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):
# Reset the weights
model.set_weights(ini_weights)
# Train the model
train_info = model.fit(train_x[train_index], train_y[train_index],
batch_size=batch_size,
epochs=epochs,
shuffle=True,
verbose=0,
validation_split=0.1,
#validation_data=(train_x[valid_index], train_y[valid_index]),
callbacks=keras_callbacks)
current_val_predict = model.predict(train_x[valid_index]).flatten()
current_val_r2 = pearsonr(current_val_predict, train_y[valid_index])[0]
# If the current validation score is better then save it
if current_val_r2 > val_fold_score:
val_fold_score = current_val_r2
# Save the predicted values for both the training set
train_predict = model.predict(train_x[train_index]).flatten()
train_fold_score = pearsonr(train_predict, train_y[train_index])[0]
# Save the training history
with open(output_dir + 'history_' + model_name + '_' + str(_i) + '.pickle', 'wb') as f:
pickle.dump(train_info.history, f)
# Save the results
dict_r = {}
dict_r['hidden_layers'] = hidden_layers[i]
dict_r['pearsonr_train'] = train_fold_score
dict_r['pearsonr_valid'] = val_fold_score
pred = model.predict(test_x).flatten()
dict_r['pearsonr_test'] = pearsonr(pred, test_y)[0]
#pred = sensoring(test_x, test_y, model.predict(test_x)).flatten()
# Write the result in a file
with open(output_dir + 'result_' + model_name + '.pickle', 'wb') as f:
pickle.dump(dict_r, f)
# Save the model weights
model.save_weights(output_dir + "weights_" + model_name + ".h5")
# Clear the session and the model from the memory
del model
K.clear_session()
pbar.update()
jobs = [mp.Process(target=run, args=(i,)) for i in range(1, 9, 1)]
for j in jobs:
j.start()
###Output
Running in parallel
Running in parallel
Running in parallel
Running in parallel
Running in parallel
Running in parallel
Running in parallel
Running in parallel
###Markdown
Result Analysis
###Code
# Get the network number and Pearson coefficients of the train, test and validation sets in a list (in order)
output_dir = 'output_0313/'
model_files = sorted(glob.glob(output_dir + 'model_*'))
weight_files = sorted(glob.glob(output_dir + 'weights_*'))
result_files = sorted(glob.glob(output_dir + 'result_*'))
models = []
r2 = []
hidden_layers = []
weights = []
# net_layers = []
for mod, res, w in zip(model_files, result_files, weight_files):
models.append(mod)
weights.append(w)
with open(res, 'rb') as f:
r = pickle.load(f)
coeff = [r['pearsonr_train'], r['pearsonr_test'], r['pearsonr_valid']]
r2.append(coeff)
hidden_layers.append(r['hidden_layers'])
###Output
_____no_output_____
###Markdown
Sort the indices according to the validation result
###Code
r2_ar = np.array(r2)
sorted_indices = list((-r2_ar)[:, 2].argsort())
sorted_r2 = [r2[i] for i in sorted_indices]
sorted_r2[:5]
sorted_models = [models[i] for i in sorted_indices]
sorted_models[:5]
sorted_weights = [weights[i] for i in sorted_indices]
sorted_weights[:5]
###Output
_____no_output_____
###Markdown
Save the lists in the disk
###Code
with open(output_dir + 'sorted_models.pickle', 'wb') as f:
pickle.dump(sorted_models, f)
with open(output_dir + 'sorted_r2.pickle', 'wb') as f:
pickle.dump(sorted_r2, f)
with open(output_dir + 'sorted_weights.pickle', 'wb') as f:
pickle.dump(sorted_weights, f)
m_sorted_models = []
m_sorted_weights = []
modified_folder = 'dl_networks_03/'
for m in sorted_models:
m_sorted_models.append(modified_folder+ m[12:])
for w in sorted_weights:
m_sorted_weights.append(modified_folder+w[12:])
with open('sorted_models.pickle', 'wb') as f:
pickle.dump(m_sorted_models, f)
with open('sorted_weights.pickle', 'wb') as f:
pickle.dump(m_sorted_weights, f)
###Output
_____no_output_____ |
KEY_Lab_04.ipynb | ###Markdown
Lab 4: Linear regression using matrix algebra**Data Science for Biologists** &8226; University of Washington &8226; BIOL 419/519 &8226; Winter 2019Course design and lecture material by [Bingni Brunton](https://github.com/bwbrunton) and [Kameron Harris](https://github.com/kharris/). Lab design and materials by [Eleanor Lutz](https://github.com/eleanorlutz/), with helpful comments and suggestions from Bing and Kam. Table of Contents1. Reading in data using the Pandas library2. Review of linear regression 3. Bonus exercises Helpful resources- [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas- [10 minute Pandas tutorial](http://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html)- [Pandas Cheat Sheet](https://datacamp-community-prod.s3.amazonaws.com/9f0f2ae1-8bd8-4302-a67b-e17f3059d9e8) by Python for Data Science- [Importing Data Cheat Sheet](https://datacamp-community-prod.s3.amazonaws.com/50d31142-3de0-4159-89b9-18b718a728ef) by Python for Data Science Data- The data in this lab is from [Tager et al 1983](https://www.nejm.org/doi/full/10.1056/NEJM198309223091204) and was edited for teaching purposes. Lab 4 Part 1: Reading in data using the Pandas libraryThe Pandas library is a powerful tool for working with large datasets. We'll work with Pandas in depth throughout the quarter, so don't worry about understanding every single detail by the end of this lab. Today we'll mainly use Pandas to load in data to use for linear regression practice. A Pandas `dataframe` is a type of object (like a Numpy `array`) that stores information. However, unlike a Numpy `array`, a Pandas `dataframe` can store values of many different types, such as strings or numbers. This can be very useful when working with biology data, which often includes descriptive variables like sex, color, or location. It's conventional to import the Pandas library using the nickname `pd`:
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Also import the other libraries we plan to use today, and set up Matplotlib for inline plotting:
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Importing data in Pandas In today's lab we'll investigate data from [Tager et al 1983](https://www.nejm.org/doi/full/10.1056/NEJM198309223091204) on the effects of smoking on lung function. The dataset includes 654 children aged 3 to 19. Tager's team collected information on each child's age, sex, and smoking status (non-smoker or smoker). Tager also recorded the child's height in inches, and measured the FEV, or Forced Expiratory Volume (a measure of healthy lung function).In the following code block we'll read in this data from `FEV_data.csv`, located in the `Lab_04` folder. We'll load the data in this file as a variable called `df` (short for "dataframe").
###Code
df = pd.read_csv("./data/Lab_04/FEV_data.csv")
###Output
_____no_output_____
###Markdown
Inspecting data in PandasPandas has its own set of useful functions to inspect data. Two examples of these functions are `.head()` and `.tail()`. In each of these functions, we first reference the name of our Pandas dataframe - `df` - and follow this by `.head()` or `.tail()`. `df.head()` prints the first five rows of the `df` dataframe, and `df.tail()` prints the last five rows.**Exercise 1:** Run the code in the block below to look at the output. Then, create a new code block that prints the *last* five rows instead of the first five.
###Code
df.head()
# Your code here
df.tail()
###Output
_____no_output_____
###Markdown
Notice that the Pandas dataframe has bold column names at the top of the table. Unlike in Numpy, we can use Pandas column names to directly reference a specific column. For example, `df["ht"]` refers to all values in just the **ht** (or height) column. We can use Numpy functions we already know to find interesting attributes of these columns, such as the median or mean height:
###Code
np.median( df["ht"] )
np.mean( df["ht"] )
###Output
_____no_output_____
###Markdown
**Exercise 2:** Print the minimum and maximum age of people in this dataset using the built-in functions `min()` and `max()`.
###Code
print( "Minimum age is:", min(df["age"]) ) # df["age"].min() will also work.
print( "Maximum age is:", max(df["age"]) ) # df["age"].max() will also work.
###Output
Minimum age is: 3
Maximum age is: 19
###Markdown
Describing interesting properties of data in PandasWe can use the Pandas function `describe` to calculate interesting attributes of our dataset. In the output below, you should see a new table with the same columns as `df.head()`. However, instead of showing the original data, we see descriptive variables such as `count` (the number of data points), `mean` (the mean), `std` (the standard deviation), etc.
###Code
df.describe(include="all")
###Output
_____no_output_____
###Markdown
Cleaning data in PandasWe'll talk more extensively about data hygiene later on in the course, but for now it's sufficient to know that we can use Pandas to filter out problematic data. For example, we can use a logical statement to remove all rows that say "Equipment malfunction" in the comment column.
###Code
df = df[df["comments"] != 'Equipment malfunction']
###Output
_____no_output_____
###Markdown
Now when we print the head of the dataset, the problem rows at the 0 and 2 index have been removed (try comparing this to the output of Exercise 1).
###Code
df.head()
###Output
_____no_output_____
###Markdown
In this lab we'll use least squares linear regression to describe the relationship between different variables in this dataset. For example, let's try to describe the relationship between child age and FEV using the equation ${y = p_1x+p_2}$, where ${x}$ is age and ${y}$ is FEV. To get a rough idea of the data we're working with, plot the ${x}$ age column against the ${y}$ FEV column in Matplotlib:
###Code
x = df["age"]
y = df["FEV"]
plt.scatter(x, y, alpha=0.25, color="blue")
plt.xlabel("Age (years)")
plt.ylabel("FEV (liters)")
plt.title("Relationship of age and forced exhalation volume")
plt.show()
###Output
_____no_output_____
###Markdown
Lab 4 Part 2 Review of linear regressionIn lecture we used matrix algebra to solve for ${p_1}$ and ${p_2}$ given datasets ${x}$ and ${y}$ and the equation ${y = p_1x + p_2}$. Once we make the matrices ${A}$ and ${C}$ in Python, we can solve for ${B}$ (and therefore ${p_1}$ and ${p_2}$) using the Numpy linear algebra library. So if we want to find the least squares regression between ${x}$ = age and ${y}$ = FEV from our dataset, we want ${A}$ and ${C}$ matrices that look like this: ${A = \begin{bmatrix} x_1 & 1 \\ x_2 & 1 \\ \vdots & \vdots \\ x_n & 1 \end{bmatrix}}$ and ${C = \begin{bmatrix} y_1 \\ y_2 \\ \vdots \\ y_n \end{bmatrix}}$, so that ${AB = C}$ where ${B = \begin{bmatrix} p_1 \\ p_2 \end{bmatrix}}$. **Exercise 3:** Create a matrix called ${A}$ with the first column containing all ${x}$ values from the `df` age column and the second column containing all 1s. Print ${A}$.
###Code
x = df["age"]
ones = np.ones(len(x))
A = np.vstack([x, ones]).T
print(A)
###Output
[[ 8. 1.]
[ 9. 1.]
[ 9. 1.]
...,
[ 18. 1.]
[ 16. 1.]
[ 15. 1.]]
###Markdown
**Exercise 4:** Create a column vector called ${C}$ containing all ${y}$ values from the `df` FEV column.
###Code
y = df["FEV"]
C = np.vstack(y)
###Output
_____no_output_____
###Markdown
Now that we have ${A}$ and ${C}$, we can use Numpy to solve this system of equations. The function `np.linalg.lstsq` solves the least squares problem and returns four values: the solution (our constants), the sums of squared residuals, the rank of ${A}$, and the singular values of ${A}$. The first item returned is an array of the constants in order.
###Code
ps = np.linalg.lstsq(A, C)[0]
print(ps)
###Output
[[ 0.22178472]
[ 0.43570982]]
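###Markdown
For completeness, `np.linalg.lstsq` actually returns four values; a short sketch showing all of them, reusing the ${A}$ and ${C}$ built above:
###Code
solution, residuals, rank, singular_values = np.linalg.lstsq(A, C)
print("solution (p1, p2):", solution.ravel())
print("sum of squared residuals:", residuals)
print("rank of A:", rank)
print("singular values of A:", singular_values)
###Output
_____no_output_____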
###Markdown
For the matrix equation ${y = p_1x + p_2}$ we just solved, ${p_1}$ is the first constant and ${p_2}$ is the second:
###Code
p1 = ps[0]
p2 = ps[1]
###Output
_____no_output_____
###Markdown
Using these constants we can plot our linear regression line and see how it compares to the actual data. To plot this line, we'll create a Numpy array of ${x}$ values spanning the range of our data, and calculate the predicted ${y}$ value for each ${x}$:
###Code
# Create predicted y values for a range of x values
xhat = np.arange(min(x), max(x)+1)
yhat = p1*xhat + p2
# Plot the actual data
plt.scatter(x, y, color="blue", alpha=0.25)
# Plot the predicted y values from our regression
plt.plot(xhat, yhat, color="black")
plt.xlabel("Age (years)")
plt.ylabel("FEV (liters)")
plt.title("Forced exhalation volume increases with age")
plt.show()
###Output
_____no_output_____
###Markdown
Working with subsets of data in PandasSo far we have one equation to describe our entire dataset. However, let's say that we're interested in creating two different models - one for smokers and one for non-smokers. We can select just the smokers in this Pandas dataframe by using a logical statement to pick just the rows where the `smoke` column value is `Yes`. This code creates a new Pandas dataframe containing just data from smokers.
###Code
df_smokers = df[df["smoke"] == "Yes"]
df_smokers.head()
###Output
_____no_output_____
###Markdown
**Exercise 5A:** Construct ${A}$ and ${C}$ for data in `df_smokers`. Use ${A}$, ${C}$, and `np.linalg.lstsq` to calculate ${p_1}$ and ${p_2}$ values for ${y = p_1x + p_2}$. Save the ${p_1}$ value as a variable called `p1_smokers`, and save ${p_2}$ as another variable called `p2_smokers`.
###Code
x_smokers = df_smokers["age"]
y_smokers = df_smokers["FEV"]
ones = np.ones(len(x_smokers))
A = np.vstack([x_smokers, ones]).T
C = np.vstack(y_smokers)
p1_smokers, p2_smokers = np.linalg.lstsq(A, C)[0]
print(p1_smokers, p2_smokers)
###Output
[ 0.07985574] [ 2.19696626]
###Markdown
**Exercise 5B:** Similarly, calculate the least squares regression for data in `df_nonsmokers`. Save ${p_1}$ as `p1_nonsmokers` and ${p_2}$ as `p2_nonsmokers`.
###Code
df_nonsmokers = df[df["smoke"] == "No"]
df_nonsmokers.head()
# Your code here
x_nonsmokers = df_nonsmokers["age"]
y_nonsmokers = df_nonsmokers["FEV"]
ones = np.ones(len(x_nonsmokers))
A = np.vstack([x_nonsmokers, ones]).T
C = np.vstack(y_nonsmokers)
p1_nonsmokers, p2_nonsmokers = np.linalg.lstsq(A, C)[0]
print(p1_nonsmokers, p2_nonsmokers)
###Output
[ 0.24233598] [ 0.25715252]
###Markdown
**Exercise 5C:** Create a scatterplot that shows the `df_smokers` age and FEV data in red and `df_nonsmokers` in blue. Plot the linear regression line for `df_smokers` in red and `df_nonsmokers` in blue.
###Code
# Create a scatterplot of both sets of data
plt.scatter(x_smokers, y_smokers, color="red", alpha=0.25, label="Smokers data")
plt.scatter(x_nonsmokers, y_nonsmokers, color="blue", alpha=0.25, label="Nonsmokers data")
# Plot regression line for smokers only
xhat_smokers = np.arange(min(x_smokers), max(x_smokers)+1)
yhat_smokers = p1_smokers*xhat_smokers + p2_smokers
plt.plot(xhat_smokers, yhat_smokers, color="red", label="Smokers regression")
# Plot regression line for nonsmokers only
xhat_nonsmokers = np.arange(min(x_nonsmokers), max(x_nonsmokers)+1)
yhat_nonsmokers = p1_nonsmokers*xhat_nonsmokers + p2_nonsmokers
plt.plot(xhat_nonsmokers, yhat_nonsmokers, color="blue", label="Nonsmokers regression")
plt.xlabel("Age (years)")
plt.ylabel("FEV (liters)")
plt.title("Forced exhalation volume vs age for smokers and nonsmokers")
plt.legend()
plt.show()
###Output
_____no_output_____
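###Markdown
As an optional cross-check of the matrix-algebra approach, `np.polyfit` fits the same degree-1 least squares line; the coefficients should match `p1_nonsmokers` and `p2_nonsmokers` computed above:
###Code
# Degree-1 polynomial fit returns (slope, intercept)
check_p1, check_p2 = np.polyfit(x_nonsmokers, y_nonsmokers, 1)
print(check_p1, check_p2)
###Output
_____no_output_____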
###Markdown
Lab 4 Bonus exercises**Bonus Exercise 1:** The principles we discussed in *Exercise 3* can be used to solve linear regression equations with more than two unknown constants. For example, to find the least squares regression line for the equation ${y = p_1x^2 + p_2x + p_3}$, we would construct the following ${A}$ and ${C}$ matrices: ${A = \begin{bmatrix} x_1^2 & x_1 & 1 \\ \vdots & \vdots & \vdots \\ x_n^2 & x_n & 1 \end{bmatrix}}$ and ${C = \begin{bmatrix} y_1 \\ \vdots \\ y_n \end{bmatrix}}$. In Python, create ${A}$ and ${C}$ where ${x}$ is age and ${y}$ is FEV. Use `np.linalg.lstsq` to solve for ${p_1}$, ${p_2}$, and ${p_3}$. Plot the resulting equation alongside the data.
###Code
x = df["age"]
y = df["FEV"]
A = np.vstack([x**2, x, np.ones(len(x))]).T
C = np.vstack(y)
p1, p2, p3 = np.linalg.lstsq(A, C)[0]
xhat = np.arange(min(x), max(x)+1)
yhat = p1*xhat**2 + p2*xhat + p3
plt.scatter(x, y, color="blue", alpha=0.25)
plt.plot(xhat, yhat, color="black")
plt.xlabel("Age (years)")
plt.ylabel("FEV (liters)")
plt.title("Forced exhalation volume vs age")
plt.show()
###Output
_____no_output_____
###Markdown
**Bonus Exercise 2:** Create ${A}$ and ${C}$ to solve for ${p_1}$, ${p_2}$, and ${p_3}$ given the equation ${z = p_1x + p_2y + p_3}$ where ${x}$ is age, ${y}$ is height, and ${z}$ is FEV. Make a plot that includes the original data and the fitted regression line. The code to create a 3D matplotlib plot is given to you below.
###Code
# your code here to solve for p
x = df["age"]
y = df["ht"]
z = df["FEV"]
A = np.vstack([x, y, np.ones(len(x))]).T
C = np.vstack(z)
bonus_p1, bonus_p2, bonus_p3 = np.linalg.lstsq(A, C)[0]
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, projection='3d')
# Plot the original data
ax.scatter(df["age"], df["ht"], df["FEV"], color="green", alpha=0.25)
# Your code here to plot your regression line:
xhat = np.linspace(min(x), max(x)+1, 100)
yhat = np.linspace(min(y), max(y)+1, 100)
zhat = bonus_p1*xhat + bonus_p2*yhat + bonus_p3
ax.plot(xhat, yhat, zhat, color="k", lw=2)
ax.set_xlabel("Age (years)")
ax.set_ylabel("Height (inches)")
ax.set_zlabel("FEV (liters)")
plt.show()
###Output
_____no_output_____
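###Markdown
Because the model in Bonus Exercise 2 has two predictors, its fit is a plane rather than a line; a minimal sketch of one way to visualize that plane, reusing the `bonus_p` coefficients computed above:
###Code
# Evaluate the fitted plane z = p1*x + p2*y + p3 on a grid and draw it as a surface
xg, yg = np.meshgrid(np.linspace(min(x), max(x), 30),
                     np.linspace(min(y), max(y), 30))
zg = bonus_p1 * xg + bonus_p2 * yg + bonus_p3

fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df["age"], df["ht"], df["FEV"], color="green", alpha=0.25)
ax.plot_surface(xg, yg, zg, color="grey", alpha=0.4)
ax.set_xlabel("Age (years)")
ax.set_ylabel("Height (inches)")
ax.set_zlabel("FEV (liters)")
plt.show()
###Output
_____no_output_____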
###Markdown
**Bonus Exercise 3:** Create ${A}$ and ${C}$ to solve for each ${p}$ constant given the equation ${z = p_1x^2 + p_2y^2 + p_3x + p_4y + p_5}$ where ${x}$ is age, ${y}$ is height, and ${z}$ is FEV. Make a 3D plot that includes the original data and the fitted regression line.
###Code
x = df["age"]
y = df["ht"]
z = df["FEV"]
A = np.vstack([x**2, y**2, x, y, np.ones(len(x))]).T
C = np.vstack(z)
p1, p2, p3, p4, p5 = np.linalg.lstsq(A, C)[0]
xhat = np.linspace(min(x), max(x)+1, 100)
yhat = np.linspace(min(y), max(y)+1, 100)
zhat = p1*xhat**2 + p2*yhat**2 + p3*xhat + p4*yhat + p5
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z, color="green", alpha=0.25)
ax.plot(xhat, yhat, zhat, color="k", lw=2)
ax.set_xlabel("Age (years)")
ax.set_ylabel("Height (inches)")
ax.set_zlabel("FEV (liters)")
plt.show()
###Output
_____no_output_____ |
tirgulim/tirgul9/tirgul9_3.ipynb | ###Markdown
Tirgul 9 - a sample project analysis Following the EDA (exploratory data analysis) & modeling steps:- Wrangling the data- Understanding the data- Preparing the data- Modeling
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
###Output
_____no_output_____
###Markdown
The DatasetThe dataset contains information on students and their grades in math, reading and writing.[link to the data source](https://www.kaggle.com/spscientist/students-performance-in-exams)We read the data from a github repository
###Code
url = 'https://raw.githubusercontent.com/ShaiYona/Data-Science2021B/main/tirgulim/tirgul9/StudentsPerformance.csv'
data = pd.read_csv(url)
data.tail()
###Output
_____no_output_____
###Markdown
1. Wrangling the data:- Treat missing values (if needed)- Treat column names (if needed)- Treat data types (if needed)- Treat any other weird thing your data might have Treat missing valuesCheck if there are missing values:
###Code
data.isnull().sum().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Apparently there aren't any NA values in the data Fixing data typesCheck if any of the data types need to be fixed:
###Code
data.dtypes
###Output
_____no_output_____
###Markdown
We'll leave them as objects for now, but might change them later, depending on what we will want to do 2. Understanding the dataLet's see a summary in a pivot table (note that the default is 'mean'):
###Code
data.pivot_table(['math score','reading score','writing score' ],'gender')
###Output
_____no_output_____
###Markdown
- Looks like the male students are leading in Math, but are behind on Reading and Writing- How many males and how many females?
###Code
data['gender'].value_counts()
data['gender'].value_counts().plot.pie(autopct='%1.1f%%')
###Output
_____no_output_____
###Markdown
Study the differences between males and females: Separate into two datasets:
###Code
female = data.loc[data.gender == 'female']
male = data.loc[data.gender == 'male']
male.head()
plt.hist(male['math score'], alpha=0.4, label='male')
plt.hist(female['math score'], alpha=0.4, label='female')
plt.legend(loc='upper right')
plt.hist(male['reading score'], alpha=0.4, label='male')
plt.hist(female['reading score'], alpha=0.4, label='female')
plt.legend(loc='upper right')
plt.hist(male['writing score'], alpha=0.4, label='male')
plt.hist(female['writing score'], alpha=0.4, label='female')
plt.legend(loc='upper right')
###Output
_____no_output_____
###Markdown
We can see that male students tend to have a smaller variance than the female students. Let's calculate the standard deviation and the range of scores
###Code
data.groupby('gender').std()
###Output
_____no_output_____
###Markdown
Correlation between scores
###Code
scoreData = data[['math score','reading score','writing score']]
scoreData.tail()
scoreData.corr()
# cmap='jet' refers to table colors
# vmin=0.0, vmax=1 indicate the lower and upper boundaries of the legend
# annot=True display the value of each square
sns.heatmap(scoreData.corr(), vmin=0.0 , vmax = 1,cmap='jet' , annot=True)
###Output
_____no_output_____
###Markdown
Observation: >> The correlation across subjects is quite high; between reading and writing it is near perfect.>
###Code
sns.regplot(x='reading score', y='writing score', data=data);
###Output
_____no_output_____
###Markdown
> Decreased correlation displays a higher spread
###Code
sns.regplot(x='reading score', y='math score', data=data);
#
###Output
_____no_output_____
###Markdown
Looking at parental level of education
###Code
parentEducData = data[["parental level of education"]]
parentEducData.tail()
parentEducData.value_counts() # counts the amount from each categorized value
###Output
_____no_output_____
###Markdown
[pie charts docs](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.plot.pie.html)
###Code
parentEducData.value_counts().plot.pie(autopct='%1.1f%%')
# autopct display percents for each part
sns.countplot(x="parental level of education", data=data)
###Output
_____no_output_____
###Markdown
--- Project tip: When there are more than 2-3 categories, a countplot is ALWAYS BETTER than a pie plotIt's just much easier to read it.The only problem with our countplot is that the labels ovelap. There are many ways to fix it. Google it. [for example](https://stackoverflow.com/questions/42528921/how-to-prevent-overlapping-x-axis-labels-in-sns-countplot)We'll just adjust the figure size: ---
###Code
plt.figure(figsize=(14,6))
sns.countplot(x="parental level of education", data=data)
###Output
_____no_output_____
###Markdown
--- Project tip: make sure your plots are readable (like we have just done).You don't need to show both the unreadable version and the readable version. We know you have worked hard and struggled. Present your best!!--- Searching for more correlations Let's add a column with the mean score across all subjects:
###Code
data['mean score'] = scoreData.mean(axis=1)
data.tail()
###Output
_____no_output_____
###Markdown
Let's check - is there any connection between parental level of education and lunch to grades?
###Code
EducLunchMean_ScoreData = data[['parental level of education','lunch','mean score']].copy(deep=True) # deep copy (not a shallow/reference copy), so later changes to data will not affect EducLunchMean_ScoreData
EducLunchMean_ScoreData.tail()
EducLunchMean_ScoreData.pivot_table('mean score','parental level of education').sort_values("mean score")
###Output
_____no_output_____
###Markdown
Don't present in an incomprehensible way. For example (of what NOT to do):
###Code
EducLunchMean_ScoreData.groupby('parental level of education')['mean score'].hist(alpha=0.5,legend=True,figsize=(10,10))
# We cannot use 'pivot_table' here since we do not wish to aggregate the data
###Output
_____no_output_____
###Markdown
The connection between lunch and the students' mean score:
###Code
EducLunchMean_ScoreData.pivot_table('mean score','lunch')
###Output
_____no_output_____
###Markdown
We can see some connection here
###Code
EducLunchMean_ScoreData.groupby('lunch')['mean score'].hist(alpha=0.5,legend=True)
###Output
_____no_output_____
###Markdown
Observation:> The lunch type tells us more about the student grades.> Students with a standard lunch do better.> This may say more about the students' background than about their real abilities The connection between parent education level and lunch type:Turn the lunch into a category: standard = 1, free/reduced = 0
###Code
EducLunchMean_ScoreData['lunch_cat'] = EducLunchMean_ScoreData['lunch'].astype('category').cat.codes
EducLunchMean_ScoreData
ptLunchEduc = EducLunchMean_ScoreData.pivot_table('lunch_cat','parental level of education').sort_values(by='lunch_cat')
ptLunchEduc
###Output
_____no_output_____
###Markdown
> Observation:> It is interesting to see that the lunch type is spread more or less equally across the parent education levels. > Superficially, if lunch represents the parents' financial level, it was not affected by their education. Project tip:An observation is always better if it is also visual
###Code
# We manually ordered the plot according to the degrees
order = [5,2,4,3,1,0]
plt.figure(figsize=(10,5))
plt.scatter(ptLunchEduc.index[order],ptLunchEduc.values[order])
plt.ylim(0,1)
###Output
_____no_output_____
###Markdown
The connection between parents education level and mean score: The mean score grouped by parent's education:
###Code
mean_parent = EducLunchMean_ScoreData.groupby('parental level of education')['mean score'].mean()
mean_parent
###Output
_____no_output_____
###Markdown
In a scatter plot:
###Code
# We manually ordered the plot according to the degrees
order = [5,2,4,0,1,3]
plt.figure(figsize=(10,5))
plt.scatter(mean_parent.index[order],mean_parent.values[order])
###Output
_____no_output_____
###Markdown
Project tip: Think of which figure will present your data in the best wayIn this case - a boxplot is better than a scatter plot Present a boxplot, with rotated labels on x-axis
###Code
fig, axes = plt.subplots(figsize=(20, 5), ncols=3)
sns.boxplot(ax=axes[0], x='parental level of education', y='reading score', data=data)
sns.boxplot(ax=axes[1], x='parental level of education', y='writing score', data=data)
sns.boxplot(ax=axes[2], x='parental level of education', y='math score', data=data)
for i, ax in enumerate(fig.axes):
    axes[i].tick_params(axis='x', rotation=45) # change to the y axis and -45 and see what happens
#ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Boxplot for mean score
###Code
ax = sns.boxplot(x='parental level of education', y='mean score', data=data)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
The connection between education level and lunch type and mean score:The mean score grouped by parent's education: and student's mean score:
###Code
EducLunchMean_ScoreData.pivot_table('mean score','parental level of education','lunch',margins=True)
###Output
_____no_output_____
###Markdown
- The bottom margin shows the score according to the lunch (free/standard)- The right margin shows the score according to the parents' degree- The mean for students with standard lunch is 8.5 points higher! > Observation:> The parent's education level does not have a direct effect on the lunch type. >> The parent's education level does not have a direct effect on the mean score. >> But - the parent's education level combined with the lunch type has an effect on the mean score. 3. Building a model from the data We will try to predict mean score using a decision tree, based on gender, race and test preparation. Preparing the data for learning
###Code
X = pd.get_dummies(data[['gender','race/ethnicity','lunch','test preparation course']])
y = data[['mean score']]
X.head()
###Output
_____no_output_____
###Markdown
Remove the redundant fields
###Code
X = X.drop(columns=['gender_male','lunch_standard','test preparation course_none'])
X.head()
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=42)
y_test.head()
print("Train STD of {}".format(y_train.std()))
print("Test STD of {}".format(y_test.std()))
###Output
Train STD of mean score 13.876059
dtype: float64
Test STD of mean score 15.039556
dtype: float64
###Markdown
Build the model
###Code
model = DecisionTreeRegressor(random_state=42)
model.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Evaluation
###Code
def eval(x_test,y_test,model):
pred = model.predict(x_test)
    print("MSE: {:.3f}".format(mean_squared_error(pred,y_test,squared=False)))  # note: squared=False means this value is actually the RMSE
eval(X_test,y_test,model)
###Output
MSE: 13.962
###Markdown
Plot the tree[plot_tree docs](https://scikit-learn.org/stable/modules/generated/sklearn.tree.plot_tree.html) Write a function that plots the tree
###Code
import sklearn.tree as tree
def plot_tree(tree_model,feat,size=(15,10)):
fig = plt.figure(figsize=size)
tree.plot_tree(tree_model,
feature_names = feat,
filled=True,
fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
Tree Pruning
###Code
model = DecisionTreeRegressor(max_depth=3,random_state=42)
model.fit(X_train,y_train)
eval(X_test,y_test,model)
plot_tree(model,X_test.columns,size=(30,20))
###Output
MSE: 13.899
###Markdown
Let's look at the mean of the train label and the value in root node
###Code
y_train.mean()
model = DecisionTreeRegressor(min_samples_split=100,random_state=42)
model.fit(X_train,y_train)
eval(X_test,y_test,model)
plot_tree(model,X_test.columns,size=(60,20))
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(n_estimators=10000,max_depth=4,max_samples=100,random_state=42)
# RandomForestRegressor fits an ensemble of decision tree regressors on sub-samples of the data
# n_estimators is the numbers of trees to be used in the forest
model.fit(X_train,y_train.values.ravel())
eval(X_test,y_test,model)
###Output
MSE: 13.752
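###Markdown
A possible follow-up: inspect which of the one-hot encoded inputs the random forest relies on most (a sketch using the fitted model from the previous cell):
###Code
importances = pd.Series(model.feature_importances_, index=X_train.columns).sort_values()
importances.plot.barh(title='Random forest feature importances')
###Output
_____no_output_____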
###Markdown
Let's check the error percentage The fraction of difference a-b from b is: $\frac{|a-b|}{b} $
###Code
pred=model.predict(X_test)
(np.abs(pred-y_test.values.ravel())/y_test.values.ravel()).mean()
###Output
_____no_output_____ |
DAY 1- PYTHON CLASS.ipynb | ###Markdown
DATA TYPE 1: LIST
###Code
lst=["pari", 2, 9.78, [1,2,3]]
lst
lst[1]
lst[-3]
lst.append("saha")
lst
lst.index(9.78)
lst.count(2)
lst.copy()
lst.insert(2,2002)
lst
lst.pop(-2)
lst
lst.extend("loo")
lst
lst.remove(9.78)
lst
lst.reverse()
lst
lst.clear()
lst
###Output
_____no_output_____
###Markdown
DATA TYPE 2: DICT- DICTIONARIES
###Code
dit= {"NAME": "PARIJAT", "AGE": "18", "MOTHER": "SONALI SAHA", "FATHER": "PARTHA PRATIM SAHA"}
dit
dit.get("age")
dit.get("AGE")
dit.items()
dit.keys()
dit.pop("MOTHER")
dit
dit["SCHOOL"]= "DPS"
dit
type(dit)
dit.copy()
dit.fromkeys("poo", 54)
dit.popitem()
dit
dit.setdefault("HOMETOWM", "PDP")
dit
dit.update()
dit
dit.values()
###Output
_____no_output_____
###Markdown
SETS
###Code
s= {"PARI","LETSUPGRADE", 1,2,3,4,4,5,6,5,7}
s
type(s)
a={1}
a.issubset(s)
a.isdisjoint(s)
a.intersection(s)
a.discard(s)
s
a.difference(s)
s.difference(a)
a.intersection_update(s)
s
s.intersection_update(a)
a
a.issuperset(s)
a.pop()
s.pop()
s.union(a)
s
s.copy()
s.clear()
s
###Output
_____no_output_____
###Markdown
TUPLE
###Code
t= ("pari", "letsupgrade", "@")
t
t.count
t.count("@")
t.index("@")
t.count("pari")
t.count(4)
t.index("letsupgrade")
###Output
_____no_output_____
###Markdown
STRING
###Code
c = "parijat"
c.capitalize()
c.count("parijat")
c.endswith("p")
c.isascii()
c.isalnum()
c.isdecimal()
c.isdigit()
c.islower()
c.isspace()
c.isupper()
c.casefold()
c.isidentifier()
c.replace("parijat", "goodness")
c.swapcase()
c.find("JAT")
c.startswith("p")
c.isalpha()
c.index("r")
c.translate("o")
###Output
_____no_output_____ |
Lecture-Notes/2019/PSS-2019-Day2.ipynb | ###Markdown
Python course Day 2
###Code
num = 5
print(num)
num = num + 1 # compute num + 1 and assign its value to num
print(num)
num += 1 # num = num + 1
print(num)
num++  # error: Python has no ++ increment operator (use num += 1)
num -= 1
print(num)
num += 10
print(num)
num *= 5
num /= 4
num //= 5
print(num)
8 / 3
8 // 3
name = "Devesh"
print(name * 3)
number = "5"
print(number * 3)
print (name + 4)
print(name + " Very long")
print(name - "sh")
print("<3" * 80)
pi = 3.14
pi += 1
print(pi)
is_Sunny = True
print(is_Sunny)
5 < 7
5 > 7
5 <= 5
5 >= 5
7 >= 5
5 == 5
5 == 4
x = 3 # assign 3 to x
x == 3 # Check whether x is equal to 3
X = 4
# Variable names are case sensitive. X and x are different variables.
X == x
print (X)
print(x)
"Devesh" == "Unmesh"
name == "Devesh"
print(name)
name != "Unmesh"
num = 4
# .num = 5 # error
num2 = 14
#2num = 45 # error
number_of_students = 29
#num$ = 6 # error
#$num = 7 # error
#num-even = 2 # error
_num = 5
# number of students = 29 # error
###Output
_____no_output_____
###Markdown
* Variable names must start with a letter or an underscore* Variable names cannot contain any special characters (except underscores)* Variable names may contain digits but not at the beginning* Variable names cannot have spaces
###Code
number___OfStUdEnTsIn2Thousand19_Sum_Schhool = 29
print(number___OfStUdEnTsIn2Thousand19_Sum_Schhool)
name = "Nikita"
firstName = "Nikita" # Camel case
first_name = "Nikita" # Snake case
numberOfStudents = 29 # Camel case
number_of_students = 29 # Snake case
number_1 = 1 # Snake case
number_1 = "string"
###Output
_____no_output_____
###Markdown
Conditional statements
###Code
it_is_sunny = False
if it_is_sunny:
print("I will go to a beach")
print("And I will swim")
print("and then I will bask")
else:
print("I will play a board game")
n = 5
if n > 2:
print(n, "is greater than 2")
else:
print(n, " is less than 2")
n = eval(input("Enter a number: "))
if n > 20 and n < 100:
print(n, "is between 20 and 100")
else:
print(n, "is not between 20 and 100")
int("5")
int("five")
num = eval('4')
type(num)
num = eval('3.14')
type(num)
num = eval('True')
type(num)
num = eval('test')
"hello" == "hello"
"hello" == "hell0"
"hello" < "hell0"
"A" < "B"
"B" < "Z"
"A" < "a"
ord('A')
ord('a')
"A" < "a"
"B" < "a"
print('Hello "Carlien"')
print("Hello 'Carlien'")
print("Hello \"Carlien\"")
"Hello" < "Hell0"
ord('o')
ord('0')
"Carlien" < "Devesh"
chr(65)
chr(87)
chr(56)
num_1 = 200
num_2 = 200
num_1 is num_2
num_1 = 2000
num_2 = 2000
num_1 is num_2
num_1 = 200
num_2 = 200
num_1 is num_2
id(num_1)
id(num_2)
num_1 = 256
num_2 = 256
num_1 is num_2
num_1 = 257
num_2 = 257
num_1 is num_2
id(num_1)
id(num_2)
dec_1 = 3.14
dec_2 = 3.14
dec_1 is dec_2 # checks for the memory location
id(dec_1)
id(dec_2)
dec_1 == dec_2 # checks for the value
###Output
_____no_output_____
###Markdown
Functions
###Code
print("Hello") # Call the function with input "Hello"
print("Hello" , "World") # # Input to the function is called argument / parameter
print("Hi",5,"World")
print("Hi"+str(5)+"World")
print("Hi",5,"World",sep='-')
?print # ... is ellipses
print("a" , 5, 3.14, "hello")
print("Hello")
print("World")
print("Hello", end=":")
print("World")
# single line comment
'''
this is a multiple line comment
We are going to define a function
Name of the function is greet
def is the part of the syntax
def is a keyword (reserved word) in the language
After the name of the function you have to write parantheses
followed by a colon (:)
'''
# Function with no input and no output
def greet():
print("Hello")
greet() # call the function
# Function with one argument and no output
def greet_at_home(greeting):
print(greeting)
greet_at_home("Namaste")
greet_at_home("Hoi, Goedemiddag")
greet_at_home("Ni hao")
greet_at_home("Hola")
greet_at_home("Terve")
greet_at_home("Shalom")
greet_at_home("Hey")
# Function with 2 arguments and 1 output / return value
def add(num_1, num_2):
#result = num_1 + num_2
return num_1 + num_2 # this is how you return the output
test = 5 + 6
print(test)
test = add(5,6) # call of the function
print(test)
var = add(345, 56)
print(var)
test = greet_at_home("Hello")
print(test)
type(test)
def square(num):
return num ** 2
square(5)
print(square(21))
print(square(4))
###Output
16
###Markdown
Loops
###Code
def table(n):
print(n * 1)
print(n * 2)
print(n * 3)
print(n * 4)
print(n * 5)
print(n * 6)
print(n * 7)
print(n * 8)
table(7)
for i in range(10):
print(i)
for i in range(10):
print(i+1)
for i in range(1,11):
print(i)
def table(n):
for i in range(1,11):
print(n*i)
table(6)
###Output
6
12
18
24
30
36
42
48
54
60
|
module1-regression-1/Ofer_Baharav_Copy_of_assignment_regression_classification_1.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 1*--- Regression 1 AssignmentYou'll use another **New York City** real estate dataset. But now you'll **predict how much it costs to rent an apartment**, instead of how much it costs to buy a condo.The data comes from renthop.com, an apartment listing website.- [ ] Look at the data. Choose a feature, and plot its relationship with the target.- [ ] Use scikit-learn for linear regression with one feature. You can follow the [5-step process from Jake VanderPlas](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.htmlBasics-of-the-API).- [ ] Define a function to make new predictions and explain the model coefficient.- [ ] Organize and comment your code.> [Do Not Copy-Paste.](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) You must type each of these exercises in, manually. If you copy and paste, you might as well not even do them. The point of these exercises is to train your hands, your brain, and your mind in how to read, write, and see code. If you copy-paste, you are cheating yourself out of the effectiveness of the lessons. Stretch Goals- [ ] Do linear regression with two or more features.- [ ] Read [The Discovery of Statistical Regression](https://priceonomics.com/the-discovery-of-statistical-regression/)- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 2.1: What Is Statistical Learning?
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# Read New York City apartment rental listing data
import pandas as pd
df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv')
assert df.shape == (49352, 34)
# Remove outliers:
# the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
df = df[(df['price'] >= 1375) & (df['price'] <= 15500) &
(df['latitude'] >=40.57) & (df['latitude'] < 40.99) &
(df['longitude'] >= -74.1) & (df['longitude'] <= -73.38)]
df.head(2)
df.shape
df.dtypes
df_test = df
df_test = df_test.assign(bed_elevator=lambda df: df_test.bedrooms * df_test.elevator)
df_test['bed_elevator'].value_counts()
from sklearn.linear_model import LinearRegression
model = LinearRegression()
# Arrange X features matrix and y target vector
features = ['bedrooms']
target = 'price'
x = df[features]
y = df[target]
print(x.shape, y.shape) # makes sure they are equal
# Fit the model
model.fit(x,y)
# Apply model to new data
# Note: the model was fit on the single feature 'bedrooms', so the value passed to
# predict() below is interpreted purely as a bedroom count.
bedrooms = 3
elevators = 3
_test = bedrooms * elevators
x_test = [[_test]]
y_pred = model.predict(x_test)
print(f'Predicted price for (x)bed, (y)elevators NYC apt rent: {y_pred}')
model.coef_
model.intercept_
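# A possible helper for the assignment step "define a function to make new predictions
# and explain the model coefficient" (a sketch; the model above uses only 'bedrooms').
def predict_rent(bedrooms):
    estimate = model.predict([[bedrooms]])[0]
    coefficient = model.coef_[0]
    print(f'Predicted rent for a {bedrooms}-bedroom NYC apartment: ${estimate:,.0f}')
    print(f'Each additional bedroom adds about ${coefficient:,.0f} to the predicted rent.')
    return estimate

predict_rent(3)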
###Output
_____no_output_____ |
pynq_dpu/notebooks/dpu_mnist_classifier.ipynb | ###Markdown
DPU example: MNIST Classifier---- Aim/sThis notebook shows how to deploy a Convolutional Neural Network (CNN) model for hand-written digit recognition. The network was trained on the MNIST dataset, quantized using Vitis AI compiler tools, and deployed on the DPU. Compared to the other notebooks delivered in this folder, this notebook shows how to deploy a **user-trained** DPU model on the PYNQ image; i.e., the model used in this notebook does not come from the model zoo. References* [Train your own DPU models](https://github.com/Xilinx/DPU-PYNQ/tree/master/hosttrain-your-own-dpu-models-from-scratch)* [Vitis AI model zoo](https://github.com/Xilinx/Vitis-AI/tree/master/models/AI-Model-Zoo) Last revised* Mar 8, 2021 * Initial revision---- 1. Prepare the overlayWe will download the overlay onto the board.
###Code
from pynq_dpu import DpuOverlay
overlay = DpuOverlay("dpu.bit")
###Output
_____no_output_____
###Markdown
The `load_model()` method will automatically prepare the `graph` which is used by VART.
###Code
overlay.load_model("dpu_mnist_classifier.xmodel")
###Output
_____no_output_____
###Markdown
Let's import some libraries as well. The `mnist` package requires some additional headers for URL requests.
###Code
from time import time
import numpy as np
import mnist
import matplotlib.pyplot as plt
%matplotlib inline
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
###Output
_____no_output_____
###Markdown
2. Load test dataThe `mnist` package provides the following data for users:* `test_images()`: returns the test images stored as a numpy array. Each image is a grayscale 28x28-pixel picture, representing a digit between 0 and 9.* `test_labels()`: returns a list of the true labels stored as a numpy array.There are 2 pre-processing steps we need to do to the test images before we can use them:1. The raw numpy array delivered by `mnist` has a data type of uint8 (data ranges from 0 to 255); we need to normalize the elements to floating-point numbers ranging from 0 to 1.2. The VART API expects each input sample to have 3 dimensions, so we need to expand the original numpy array.
###Code
raw_data = mnist.test_images()
normalized_data = np.asarray(raw_data/255, dtype=np.float32)
test_data = np.expand_dims(normalized_data, axis=3)
test_label = mnist.test_labels()
print("Total number of test images: {}".format(test_data.shape[0]))
print(" Dimension of each picture: {}x{}".format(test_data.shape[1],
test_data.shape[2]))
plt.imshow(test_data[1,:,:,0], 'gray')
plt.title('Label: {}'.format(test_label[1]))
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
3. Use VARTNow we should be able to use VART API to do the task.
###Code
dpu = overlay.runner
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
shapeIn = tuple(inputTensors[0].dims)
shapeOut = tuple(outputTensors[0].dims)
outputSize = int(outputTensors[0].get_data_size() / shapeIn[0])
softmax = np.empty(outputSize)
###Output
_____no_output_____
###Markdown
We can define a few buffers to store input and output data.They will be reused during multiple runs.
###Code
output_data = [np.empty(shapeOut, dtype=np.float32, order="C")]
input_data = [np.empty(shapeIn, dtype=np.float32, order="C")]
image = input_data[0]
###Output
_____no_output_____
###Markdown
We will also define a few functions to calculate softmax.
###Code
def calculate_softmax(data):
    # Subtract the max for numerical stability, then normalize so the outputs sum to 1
    exponents = np.exp(data - np.max(data))
    return exponents / exponents.sum()
###Output
_____no_output_____
###Markdown
4. Run DPU to make predictionsWe can now classify a couple of digit pictures. For each picture, the classification result (shown as 'Prediction') is displayed on top of the picture.
###Code
num_pics = 10
fix, ax = plt.subplots(1, num_pics, figsize=(12,12))
plt.tight_layout()
for i in range(num_pics):
image[0,...] = test_data[i]
job_id = dpu.execute_async(input_data, output_data)
dpu.wait(job_id)
temp = [j.reshape(1, outputSize) for j in output_data]
softmax = calculate_softmax(temp[0][0])
prediction = softmax.argmax()
ax[i].set_title('Prediction: {}'.format(prediction))
ax[i].axis('off')
ax[i].imshow(test_data[i,:,:,0], 'gray')
###Output
_____no_output_____
###Markdown
We can also evaluate on the entire test dataset.
###Code
total = test_data.shape[0]
predictions = np.empty_like(test_label)
print("Classifying {} digit pictures ...".format(total))
start = time()
for i in range(total):
image[0,...] = test_data[i]
job_id = dpu.execute_async(input_data, output_data)
dpu.wait(job_id)
temp = [j.reshape(1, outputSize) for j in output_data]
softmax = calculate_softmax(temp[0][0])
predictions[i] = softmax.argmax()
stop = time()
correct = np.sum(predictions==test_label)
execution_time = stop-start
print("Overall accuracy: {}".format(correct/total))
print(" Execution time: {:.4f}s".format(execution_time))
print(" Throughput: {:.4f}FPS".format(total/execution_time))
###Output
Classifying 10000 digit pictures ...
Overall accuracy: 0.9871
Execution time: 3.6281s
Throughput: 2756.2394FPS
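###Markdown
Beyond the overall accuracy, the per-digit behaviour can be inspected with a simple confusion matrix; a minimal sketch using only `numpy` and the arrays already computed above (rows are true labels, columns are predictions):
###Code
confusion = np.zeros((10, 10), dtype=int)
for true_label, predicted in zip(test_label, predictions):
    confusion[true_label, predicted] += 1
print(confusion)
###Output
_____no_output_____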
###Markdown
5. Clean upWe will need to remove references to `vart.Runner` and let Python garbage-collect the unused graph objects. This will make sure we can run other notebooks without any issue.
###Code
del overlay
del dpu
###Output
_____no_output_____ |
0-fachliche-komponenten.ipynb | ###Markdown
Expert analysis with Software Analytics Intro to the analysis of the application "Spring Data MongoDB"* programming interface for connecting a document-based MongoDB database to a Java Spring project Reasons for the choice* well-known open-source Java project that belongs to the Spring framework* hosted on GitHub (so the GitHub issues can be analysed)* uses Maven as the build-management tool (instead of Gradle, which many Spring projects use)* neither too large nor too small* several main developers* the Git history goes back to 2010, so the analysis period covers roughly 10 years* roughly 2,800 GitHub issues since 2013* issues can be created by any Git user Question* Into which functional components can the application's source code be sensibly structured for the further analysis? Data source* the project's Java structures were scanned with jQAssistant and can be queried in Neo4j Assumptions* functional components can be derived by splitting along the subpackages of `org.springframework.data.mongodb`.* the main subpackage `core` can be split further into its own subpackages. Validation* a graphical overview of the existing functional components and their shares of the project Implementation
###Code
%%cypher
// All artifacts
MATCH (a:Main:Artifact) RETURN a.name AS ArtefactName, a.group AS GroupName
%%cypher
// Java artifacts
MATCH (a:Java:Main:Artifact) RETURN a.name AS JavaArtefactName, a.group AS GroupName
###Output
1 rows affected.
###Markdown
* `spring-data-mongodb` is the only Java artifact that also contains the application code.* `spring-data-mongodb-parent` is the root directory, which mainly contains configuration files.* `spring-data-mongodb-distribution` contains instructions for building a distribution. Of these three artifacts, only the Java artifact `spring-data-mongodb` is considered in the following.
###Code
%%cypher
// Number of Java types in the artifact
MATCH (a:Java:Main:Artifact)-[:CONTAINS]->(type:Type:Java)
WHERE a.name = "spring-data-mongodb"
RETURN a.name AS Artifact, count(type) AS JavaTypesInArtifact
%%cypher
// Mark all SpringDataMongoDb nodes
// Added 1332 labels
MATCH (artifact:Main:Artifact{name: "spring-data-mongodb"})
SET artifact:SpringDataMongoDb
WITH artifact
MATCH (artifact)-[:CONTAINS]->(c)
SET c:SpringDataMongoDb
###Output
0 rows affected.
###Markdown
Splitting into functional components* The structure follows the subpackages of `org.springframework.data.mongodb` as well as the subpackages of `org.springframework.data.mongodb.core`, where types in the `core` package must not be counted twice.* The assignment of classes (further below) is therefore done separately for the `core` package, on its first level only.* The graph is enriched with an additional node per functional component (`BoundedContext`)* All types in packages carrying the name of a functional component are assigned to that bounded context via `[:CONTAINS]`
###Code
%%cypher
// Packages that are direct children of the mongodb and core packages
MATCH (p:Package:SpringDataMongoDb)-[:CONTAINS]->(bC:Package:SpringDataMongoDb)
WHERE p.fqn = "org.springframework.data.mongodb" OR p.fqn = "org.springframework.data.mongodb.core"
WITH p, collect(DISTINCT bC.name) AS boundedContexts
RETURN p.name AS PackageName, boundedContexts
%%cypher
// Create one node per functional component
// Added 19 labels, created 19 nodes, set 19 properties
MATCH (p:Package:SpringDataMongoDb)-[:CONTAINS]->(bC:Package:SpringDataMongoDb)
WHERE p.fqn = "org.springframework.data.mongodb" OR p.fqn = "org.springframework.data.mongodb.core"
WITH collect(DISTINCT bC.name) AS boundedContexts
UNWIND boundedContexts AS boundedContext
MERGE (bC:BoundedContext {name: boundedContext})
%%cypher
// Assign classes to their bounded contexts (including subpackages, excluding the core package)
// Created 1024 relationships
MATCH (bC:BoundedContext),
(p:Package:SpringDataMongoDb)-[:CONTAINS*]->(t:Type:SpringDataMongoDb)
WHERE p.name = bC.name AND bC.name <> "core"
MERGE (bC)-[:CONTAINS]->(t)
RETURN bC.name AS BoundedContext, count(t) AS Size
ORDER BY Size DESC
%%cypher
// Assign classes to their bounded contexts (root level of the core package only)
// Created 279 relationships
MATCH (bC:BoundedContext),
(p:Package:SpringDataMongoDb)-[:CONTAINS*1..1]->(t:Type:SpringDataMongoDb)
WHERE p.name = bC.name AND bC.name = "core"
MERGE (bC)-[:CONTAINS]->(t)
RETURN bC.name AS BoundedContext, count(t) AS Size
ORDER BY Size DESC
###Output
1 rows affected.
###Markdown
Results
###Code
%%cypher
// Percentage of classes assigned to a bounded context (97%)
MATCH (t:Type:SpringDataMongoDb)
WITH count(DISTINCT t) AS Total
MATCH (:BoundedContext)-[:CONTAINS]->(t:Type:SpringDataMongoDb)
RETURN 100 * count(DISTINCT t) / Total AS Coverage
%%cypher
// Number of SpringDataMongoDb types per package
MATCH (bC:BoundedContext)-[:CONTAINS*]->(t:Type:SpringDataMongoDb)
RETURN bC.name AS BoundedContext, count(DISTINCT t) AS ClassCount
ORDER BY ClassCount DESC
###Output
19 rows affected.
###Markdown
The bounded context `core` contains only the types in its own root package. Types in subpackages of `core` are each listed as a separate bounded context.
###Code
import plotly.express as px  # editor's assumption: plotly was imported in a setup cell not shown in this excerpt
subdomainSize = %cypher MATCH (bC:BoundedContext)-[:CONTAINS*]->(t:Type:SpringDataMongoDb) \
RETURN bC.name AS BoundedContext, count(DISTINCT t) AS TypeCount
df = subdomainSize.get_dataframe()
fig = px.pie(df, values='TypeCount', names='BoundedContext', title='Size of the functional components by number of contained types')
fig.show()
###Output
19 rows affected.
|
SSGAN.ipynb | ###Markdown
Discriminator and Generator architecture should mirror each other
###Code
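# --- Editor's note -----------------------------------------------------------
# The setup cells of this notebook are not shown in this excerpt. The imports
# and constants below are an assumed, minimal sketch of what the code that
# follows needs (TensorFlow 1.x API); the original values may differ.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

height, width, channels = 64, 64, 1   # images are resized to 64x64 grayscale below
latent = 100                          # assumed size of the generator's latent vector
num_classes = 10                      # MNIST digits
labeled_rate = 0.2                    # assumed fraction of labeled samples per batch

def get_data():
    # assumption: MNIST with one-hot labels, images kept as 28x28x1 arrays
    return input_data.read_data_sets('MNIST_data', one_hot=True, reshape=False)

def scale(x):
    # assumption: map images from [0, 1] to [-1, 1] to match the tanh generator output
    return 2.0 * x - 1.0
# -----------------------------------------------------------------------------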
############ Defining Discriminator ############
def discriminator(x, dropout_rate = 0., is_training = True, reuse = False):
# input x -> n+1 classes
with tf.variable_scope('Discriminator', reuse = reuse):
# x = ?*64*64*1
print('Discriminator architecture: ')
#Layer 1
conv1 = tf.layers.conv2d(x, 128, kernel_size = [4,4], strides = [2,2],
padding = 'same', activation = tf.nn.leaky_relu, name = 'conv1') # ?*32*32*128
print(conv1.shape)
#No batch-norm for input layer
dropout1 = tf.nn.dropout(conv1, dropout_rate)
#Layer2
conv2 = tf.layers.conv2d(dropout1, 256, kernel_size = [4,4], strides = [2,2],
padding = 'same', activation = tf.nn.leaky_relu, name = 'conv2') # ?*16*16*256
batch2 = tf.layers.batch_normalization(conv2, training = is_training)
dropout2 = tf.nn.dropout(batch2, dropout_rate)
print(conv2.shape)
#Layer3
conv3 = tf.layers.conv2d(dropout2, 512, kernel_size = [4,4], strides = [4,4],
padding = 'same', activation = tf.nn.leaky_relu, name = 'conv3') # ?*4*4*512
batch3 = tf.layers.batch_normalization(conv3, training = is_training)
dropout3 = tf.nn.dropout(batch3, dropout_rate)
print(conv3.shape)
# Layer 4
conv4 = tf.layers.conv2d(dropout3, 1024, kernel_size=[3,3], strides=[1,1],
padding='valid',activation = tf.nn.leaky_relu, name='conv4') # ?*2*2*1024
# No batch-norm as this layer's op will be used in feature matching loss
# No dropout as feature matching needs to be definite on logits
print(conv4.shape)
# Layer 5
# Note: Applying Global average pooling
flatten = tf.reduce_mean(conv4, axis = [1,2])
logits_D = tf.layers.dense(flatten, (1 + num_classes))
out_D = tf.nn.softmax(logits_D)
return flatten,logits_D,out_D
############ Defining Generator ############
def generator(z, dropout_rate = 0., is_training = True, reuse = False):
# input latent z -> image x
with tf.variable_scope('Generator', reuse = reuse):
print('\n Generator architecture: ')
#Layer 1
deconv1 = tf.layers.conv2d_transpose(z, 512, kernel_size = [4,4],
strides = [1,1], padding = 'valid',
activation = tf.nn.relu, name = 'deconv1') # ?*4*4*512
batch1 = tf.layers.batch_normalization(deconv1, training = is_training)
dropout1 = tf.nn.dropout(batch1, dropout_rate)
print(deconv1.shape)
#Layer 2
deconv2 = tf.layers.conv2d_transpose(dropout1, 256, kernel_size = [4,4],
strides = [4,4], padding = 'same',
activation = tf.nn.relu, name = 'deconv2')# ?*16*16*256
batch2 = tf.layers.batch_normalization(deconv2, training = is_training)
dropout2 = tf.nn.dropout(batch2, dropout_rate)
print(deconv2.shape)
#Layer 3
deconv3 = tf.layers.conv2d_transpose(dropout2, 128, kernel_size = [4,4],
strides = [2,2], padding = 'same',
activation = tf.nn.relu, name = 'deconv3')# ?*32*32*256
batch3 = tf.layers.batch_normalization(deconv3, training = is_training)
dropout3 = tf.nn.dropout(batch3, dropout_rate)
print(deconv3.shape)
#Output layer
deconv4 = tf.layers.conv2d_transpose(dropout3, 1, kernel_size = [4,4],
strides = [2,2], padding = 'same',
activation = None, name = 'deconv4')# ?*64*64*1
out = tf.nn.tanh(deconv4)
print(deconv4.shape)
return out
############ Building model ############
def build_GAN(x_real, z, dropout_rate, is_training):
fake_images = generator(z, dropout_rate, is_training)
D_real_features, D_real_logits, D_real_prob = discriminator(x_real, dropout_rate,
is_training)
D_fake_features, D_fake_logits, D_fake_prob = discriminator(fake_images, dropout_rate,
is_training, reuse = True)
#Setting reuse=True this time for using variables trained in real batch training
return D_real_features, D_real_logits, D_real_prob, D_fake_features, D_fake_logits, D_fake_prob, fake_images
############ Preparing Mask ############
# Preparing a binary label_mask to be multiplied with real labels
def get_labeled_mask(labeled_rate, batch_size):
labeled_mask = np.zeros([batch_size], dtype = np.float32)
    labeled_count = int(batch_size * labeled_rate)
labeled_mask[range(labeled_count)] = 1.0
np.random.shuffle(labeled_mask)
return labeled_mask
############ Preparing Extended label ############
def prepare_extended_label(label):
# add extra label for fake data
extended_label = tf.concat([tf.zeros([tf.shape(label)[0], 1]), label], axis = 1)
return extended_label
############ Defining losses ############
# The total loss inculcates D_L_Unsupervised + D_L_Supervised + G_feature_matching loss + G_R/F loss
def loss_accuracy(D_real_features, D_real_logit, D_real_prob, D_fake_features,
D_fake_logit, D_fake_prob, extended_label, labeled_mask):
### Discriminator loss ###
# Supervised loss -> which class the real data belongs to
temp = tf.nn.softmax_cross_entropy_with_logits_v2(logits = D_real_logit,
labels = extended_label)
# Don't confuse labeled_rate with labeled_mask
# Labeled_mask and temp are of same size = batch_size where temp is softmax
# cross_entropy calculated over whole batch
D_L_Supervised = tf.reduce_sum(tf.multiply(temp,labeled_mask)) / tf.reduce_sum(labeled_mask)
# Multiplying temp with labeled_mask gives supervised loss on labeled_mask
# data only, calculating mean by dividing by no of labeled samples
# Unsupervised loss -> R/F
D_L_RealUnsupervised = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits = D_real_logit[:, 0], labels = tf.zeros_like(D_real_logit[:, 0], dtype=tf.float32)))
D_L_FakeUnsupervised = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits = D_fake_logit[:, 0], labels = tf.ones_like(D_fake_logit[:, 0], dtype=tf.float32)))
D_L = D_L_Supervised + D_L_RealUnsupervised + D_L_FakeUnsupervised
### Generator loss ###
# G_L_1 -> Fake data wanna be real
G_L_1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits = D_fake_logit[:, 0],labels = tf.zeros_like(D_fake_logit[:, 0], dtype=tf.float32)))
# G_L_2 -> Feature matching
data_moments = tf.reduce_mean(D_real_features, axis = 0)
sample_moments = tf.reduce_mean(D_fake_features, axis = 0)
G_L_2 = tf.reduce_mean(tf.square(data_moments-sample_moments))
G_L = G_L_1 + G_L_2
prediction = tf.equal(tf.argmax(D_real_prob[:, 1:], 1),
tf.argmax(extended_label[:, 1:], 1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
return D_L, G_L, accuracy
############ Defining Optimizer ############
def optimizer(D_Loss, G_Loss, learning_rate, beta1):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
all_vars = tf.trainable_variables()
D_vars = [var for var in all_vars if var.name.startswith('Discriminator')]
G_vars = [var for var in all_vars if var.name.startswith('Generator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1,
name = 'd_optimiser').minimize(D_Loss, var_list=D_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1,
name = 'g_optimiser').minimize(G_Loss, var_list=G_vars)
return d_train_opt, g_train_opt
############ Plotting Results ############
def show_result(test_images, num_epoch, show = True, save = False, path = 'result.png'):
size_figure_grid = 5
fig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(5, 5))
for i in range(0, size_figure_grid):
for j in range(0, size_figure_grid):
ax[i, j].get_xaxis().set_visible(False)
ax[i, j].get_yaxis().set_visible(False)
for k in range(size_figure_grid*size_figure_grid):
i = k // size_figure_grid
j = k % size_figure_grid
ax[i, j].cla()
ax[i, j].imshow(np.reshape(test_images[k], (64, 64)), cmap='gray')
label = 'Epoch {0}'.format(num_epoch)
fig.text(0.5, 0.04, label, ha='center')
if save:
plt.savefig(path)
if show:
plt.show()
else:
plt.close()
def show_train_hist(hist, show = False, save = False, path = 'Train_hist.png'):
x = range(len(hist['D_losses']))
y1 = hist['D_losses']
y2 = hist['G_losses']
plt.plot(x, y1, label='D_loss')
plt.plot(x, y2, label='G_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc=4)
plt.grid(True)
plt.tight_layout()
if save:
plt.savefig(path)
if show:
plt.show()
else:
plt.close()
############ TRAINING ############
def train_GAN(batch_size, epochs):
train_hist = {}
train_hist['D_losses'] = []
train_hist['G_losses'] = []
tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape = [None, height ,width, channels], name = 'x')
z = tf.placeholder(tf.float32, shape = [None, 1, 1, latent], name = 'z')
label = tf.placeholder(tf.float32, name = 'label', shape = [None, num_classes])
labeled_mask = tf.placeholder(tf.float32, name = 'labeled_mask', shape = [None])
dropout_rate = tf.placeholder(tf.float32, name = 'dropout_rate')
is_training = tf.placeholder(tf.bool, name = 'is_training')
lr_rate = 2e-4
model = build_GAN(x, z, dropout_rate, is_training)
D_real_features, D_real_logit, D_real_prob, D_fake_features, D_fake_logit, D_fake_prob, fake_data = model
extended_label = prepare_extended_label(label)
# Fake_data of size = batch_size*28*28*1
loss_acc = loss_accuracy(D_real_features, D_real_logit, D_real_prob,
D_fake_features, D_fake_logit, D_fake_prob,
extended_label, labeled_mask)
D_L, G_L, accuracy = loss_acc
D_optimizer, G_optimizer = optimizer(D_L, G_L, lr_rate, beta1 = 0.5)
print ('...Training begins...')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
mnist_data = get_data()
no_of_batches = int (mnist_data.train.images.shape[0]/batch_size) + 1
for epoch in range(epochs):
train_accuracies, train_D_losses, train_G_losses = [], [], []
for it in range(no_of_batches):
batch = mnist_data.train.next_batch(batch_size, shuffle = False)
# batch[0] has shape: batch_size*28*28*1
batch_reshaped = tf.image.resize_images(batch[0], [64, 64]).eval()
# Reshaping the whole batch into batch_size*64*64*1 for disc/gen architecture
batch_z = np.random.normal(0, 1, (batch_size, 1, 1, latent))
mask = get_labeled_mask(labeled_rate, batch_size)
train_feed_dict = {x : scale(batch_reshaped), z : batch_z,
label : batch[1], labeled_mask : mask,
dropout_rate : 0.7,
is_training : True}
#The label provided in dict are one hot encoded in 10 classes
D_optimizer.run(feed_dict = train_feed_dict)
G_optimizer.run(feed_dict = train_feed_dict)
train_D_loss = D_L.eval(feed_dict = train_feed_dict)
train_G_loss = G_L.eval(feed_dict = train_feed_dict)
train_accuracy = accuracy.eval(feed_dict = train_feed_dict)
train_D_losses.append(train_D_loss)
train_G_losses.append(train_G_loss)
train_accuracies.append(train_accuracy)
print('Batch evaluated: ' +str(it+1))
tr_GL = np.mean(train_G_losses)
tr_DL = np.mean(train_D_losses)
tr_acc = np.mean(train_accuracies)
print ('After epoch: '+ str(epoch+1) + ' Generator loss: '
+ str(tr_GL) + ' Discriminator loss: ' + str(tr_DL) + ' Accuracy: ' + str(tr_acc))
gen_samples = fake_data.eval(feed_dict = {z : np.random.normal(0, 1, (25, 1, 1, latent)), dropout_rate : 0.7, is_training : False})
# Dont train batch-norm while plotting => is_training = False
test_images = tf.image.resize_images(gen_samples, [64, 64]).eval()
show_result(test_images, (epoch + 1), show = True, save = False, path = '')
train_hist['D_losses'].append(np.mean(train_D_losses))
train_hist['G_losses'].append(np.mean(train_G_losses))
show_train_hist(train_hist, show=True, save = True, path = 'train_hist.png')
sess.close()
return train_D_losses,train_G_losses
key = train_GAN( 128 , 7)
###Output
_____no_output_____ |
code/AppleWatch Acc Feature Extraction.ipynb | ###Markdown
Extracting bradykinesia features from downloaded Accelerometer dataThe function requires already downloaded (Apple watch) accelerometry (acc) data, saved in csv-files, per day, per patient. Saved with filenames as in Notebook Data Download (e.g. 'RCS02_10Jun2020_userAcc.csv')The function requires all acc-data files to be in one folder, with the patient-code as a name (e.g. RCS02).The function will extract features per day, write csv files with all features per day, for every day in the defined time span in the function input.
###Code
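# --- Editor's note -----------------------------------------------------------
# The import cell of this notebook is not shown in this excerpt; the imports
# below are a reconstruction (an assumption) of what the following cells use.
import os
from os import listdir
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import scipy
from scipy import stats
from scipy.signal import find_peaks
from scipy.stats import pearsonr
from mne.filter import filter_data  # assumption: filter_data() is MNE's band-pass helper (or an equivalent local function)
# -----------------------------------------------------------------------------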
path = os.path.join(os.path.dirname(os.getcwd())) # changed to main folder, instead of results
print(path)
###Output
/Users/roee/Starr_Lab_Folder/Data_Analysis/medStateDetection/results
###Markdown
Load in Accelerometry data, bandpass Filter, and Extract Features
###Code
def load_filterWatchData(pt, y0,m0,d0,y1,m1,d1):
'''
Input:
- pt: patient as string (e.g. 'RCS02')
- y0,m0,d0 : start date of desired timeperiod (year, month, date, e.g. 2020, 5, 1)
- y1,m1,d1 : end date of desired timeperiod (year, month, date, e.g. 2020, 6, 1)
Calculates features per day.
Saves features as .csv
Returns: one DF with raw AW data, and 1 DF with filtered AW data
'''
    sr = 50 # sample rate of Apple Watch accelerometry in Hz, used in the filter function below
bandPassLow = 0 # lower cutoff of bandpass filter
bandPassHigh = 3.5 # higher cutoff of bandpass filter
filteredData = {} # empty dict to store filtered rcs data
# define days in given timespan
def datetime_range(start=None, end=None):
span = end - start
for i in range(span.days + 1):
yield start + timedelta(days=i)
# create list with datetime dates for every day in timespan
datetimeDays = list(datetime_range(start=datetime(y0, m0, d0), end=datetime(y1, m1, d1)))
# extract all file 'userAccel.csv'-filenames from specified patient-folder
patient_dir_name = os.path.join(path,'data',pt)
folderFiles= [s for s in listdir(patient_dir_name) if s[-15:] =='watch_accel.csv']
for fileDay in datetimeDays: # loop over all days in requested timespan
# define name of day-file
day = fileDay.strftime("%d") # generate 2-digit day code
        month = fileDay.strftime("%m") # generate 2-digit month code
year = fileDay.strftime("%Y") # generate 4-digit year code
fileName = '%s_%s%s%s_watch_accel.csv' % (pt,year,month,day) # first pt is for specific pt-folder
# check if acc-data file exist in folder, if not: skip day and continue with next
if fileName in folderFiles:
fileName = fileName # go on
else:
print('no file for %s' %fileName)
continue # skips rest of itiration and takes next iteration
# read csv file
        csv_full_path = os.path.join(patient_dir_name, fileName)  # read from the patient folder listed above
rawFile = pd.read_csv(csv_full_path , header=0)
## DATA LOADING
timeStamps = [] # create empty list for timestamps
timeDelta = [0] # list for time difference per sample vs previous sample (0 for fist) (check for timestamp consistency)
for row in np.arange(len(rawFile['time'])): # loop over every sample
timeStamps.append(datetime.fromtimestamp(rawFile['time'][row])) # add timestamp to list
if row > 0: # add timediff to a list, except for first sample...
timeDelta.append((timeStamps[row] - timeStamps[row-1]).total_seconds())
# select only acc axes
dat = rawFile[['x','y','z']].rename(columns={"x": "X", "y": "Y", "z": "Z"})
# calculate raw SVM before filtering
dat['SVM'] = np.sqrt(dat['X']**2 + dat['Y']**2 + dat['Z']**2 )
dat.insert(loc=0, column='timeStamp', value=timeStamps) # add timestamps as first column
# dat is now ready raw acc file
## DATA FILTERING
dat = dat.sort_values(by=['timeStamp']).reset_index(drop=True) # sort by timestamp and reset indices
# filter raw RCS acc with wrist-feature relevant bandwidths
# make new dataframe for filtered data, with same timestamps
filtered = pd.DataFrame(data = dat['timeStamp'], columns = ['timeStamp'])
for col in ['X' ,'Y', 'Z', 'SVM']: # loop over all acc-data columns to filter
# bandpass filter excluding tremor frequencies > 4hz
filteredCol = filter_data(np.array(dat[col]),sr,bandPassLow,bandPassHigh,method='iir',verbose='WARNING')
# filtered data per column stored in dat, write dat to dataframe column in filtered data DF
filtered[col] = filteredCol
filteredData[day+month] = filtered
return filteredData
###Output
_____no_output_____
###Markdown
Feature Extraction
###Code
## Bradykinesia features extracted from GitHub, Mahadevan 2020
## Source: https://github.com/NikhilMahadevan/analyze-tremor-bradykinesia-PD
def histogram(signal_x):
'''
Calculate histogram of sensor signal.
:param signal_x: 1-D numpy array of sensor signal
:return: Histogram bin values, descriptor
'''
descriptor = np.zeros(3)
ncell = np.ceil(np.sqrt(len(signal_x)))
max_val = np.nanmax(signal_x.values)
min_val = np.nanmin(signal_x.values)
delta = (max_val - min_val) / (len(signal_x) - 1)
descriptor[0] = min_val - delta / 2
descriptor[1] = max_val + delta / 2
descriptor[2] = ncell
h = np.histogram(signal_x, ncell.astype(int), range=(min_val, max_val))
return h[0], descriptor
def dominant_frequency(signal_df, sampling_rate, cutoff ):
'''
Calculate dominant frequency of sensor signals.
:param signal_df: Pandas DataFrame housing desired sensor signals
:param sampling_rate: sampling rate of sensor signal
:param cutoff: desired cutoff for filter
:param channels: channels of signal to measure dominant frequency
:return: Pandas DataFrame of calculated dominant frequency for each signal channel
'''
dominant_freq_df = pd.DataFrame()
signal_x = signal_df
padfactor = 1
dim = signal_x.shape
nfft = 2 ** ((dim[0] * padfactor).bit_length())
freq_hat = np.fft.fftfreq(nfft) * sampling_rate
freq = freq_hat[0: int(nfft / 2)]
idx1 = freq <= cutoff
idx_cutoff = np.argwhere(idx1)
freq = freq[idx_cutoff]
sp_hat = np.fft.fft(signal_x, nfft)
sp = sp_hat[0: int(nfft / 2)] * np.conjugate(sp_hat[0: int(nfft / 2)])
sp = sp[idx_cutoff]
sp_norm = sp / sum(sp)
max_freq = freq[sp_norm.argmax()][0]
max_freq_val = sp_norm.max().real
idx2 = (freq > max_freq - 0.5) * (freq < max_freq + 0.5)
idx_freq_range = np.where(idx2)[0]
dom_freq_ratio = sp_norm[idx_freq_range].real.sum()
# Calculate spectral flatness
spectral_flatness = 10.0*np.log10(stats.mstats.gmean(sp_norm)/np.mean(sp_norm))
# Estimate spectral entropy
spectral_entropy_estimate = 0
for isess in range(len(sp_norm)):
if sp_norm[isess] != 0:
logps = np.log2(sp_norm[isess])
else:
logps = 0
spectral_entropy_estimate = spectral_entropy_estimate - logps * sp_norm[isess]
spectral_entropy_estimate = spectral_entropy_estimate / np.log2(len(sp_norm))
# spectral_entropy_estimate = (spectral_entropy_estimate - 0.5) / (1.5 - spectral_entropy_estimate)
dominant_freq_df['_dom_freq_value'] = [max_freq]
dominant_freq_df['_dom_freq_magnitude'] = [max_freq_val]
dominant_freq_df['_dom_freq_ratio'] = [dom_freq_ratio]
dominant_freq_df['_spectral_flatness'] = [spectral_flatness[0].real]
dominant_freq_df['_spectral_entropy'] = [spectral_entropy_estimate[0].real]
return dominant_freq_df
def signal_entropy(windowData):
data_norm = windowData/np.std(windowData)
h, d = histogram(data_norm)
lowerbound = d[0]
upperbound = d[1]
ncell = int(d[2])
estimate = 0
sigma = 0
count = 0
for n in range(ncell):
if h[n] != 0:
logf = np.log(h[n])
else:
logf = 0
count = count + h[n]
estimate = estimate - h[n] * logf
sigma = sigma + h[n] * logf ** 2
nbias = -(float(ncell) - 1) / (2 * count)
estimate = estimate / count
estimate = estimate + np.log(count) + np.log((upperbound - lowerbound) / ncell) - nbias
# Scale the entropy estimate to stretch the range
estimate = np.exp(estimate ** 2) - np.exp(0) - 1
return estimate
def extractFeatures(pt, filteredData, windowLen=60, sr=50):
'''
Input:
- filteredData = dictionary of filtered acc data, for every day a seperate dataframe
filteredData is automatically result of first function.
- windowLen = desired window length of features in seconds
- sr = sample frequency of recorded accelerometry data, in Hz, AppleWatch accelerometry = 50 Hz.
Writes feature dataframes per day to .csv
Returns: One dictionary with feature dataframes.
'''
tDelta = sr*windowLen # time-delta is factor between filtered data sample rate and desired windowlength
# Define all names of feature-labels which will be calculated over all axes
totalFeatLabels = [] # one list for all feature names (SVM,X,Y and Z)
for axis in ['SVM','X', 'Y', 'Z']: # loop over all axes, calculate features per axis
# add list with features for every axis
featureList = ['_maxAcc','_iqrAcc', '_90prcAcc','_medianAcc','_meanAcc',
'_stddev','_variance','_coefVar','_accRange',
'_lowPeaks','_highPeaks', '_time1gAcc','_accEntropy','_jerkRatio',
'_RMS',
'_specPow_totalu4Hz', '_specPow_low','_specPow_mid', '_specPow_high',
'_domFreq_magnitude', '_domFreq_ratio',
'_spectral_flatness', '_spectral_entropy',] #'_domFreq_value', (left out, no variation)
for feat in featureList:
totalFeatLabels.append(axis+feat)
# features only for XYZ
if np.logical_or(axis == 'X', axis == 'Y'): # ratio RMS only relevant for x y and z
totalFeatLabels.append(axis+'_ratioRMS')
elif axis == 'Z':
totalFeatLabels.append(axis+'_ratioRMS')
# features only once calculated, in SVM
if axis == 'SVM':
for l in ['_spectralVar','_spectralSmoothness1','_spectrallowPeaks',
'_spectralSmoothness2','_spectralhighPeaks']:
totalFeatLabels.append(axis+l)
totalFeatLabels.extend(['crossCor_XY','crossCor_XZ','crossCor_YZ'])
''' TotalFeatLabels is now a list with all feature labels of X,Y,Z,SVM.
One dataframe per session will be calculated and afterwards merged into one total feature-dataframe.
'''
features = {} # empty dict to store feature-dataframes per day
list_days = filteredData.keys() # define days to calculate features for
for day in list_days: # loop over every day in filteredData
# basis for new feature dataframe is timestamps of filteredData
# Add timestamp of beginning of faeture-window to list for feature dataframe timestamps
timeStamps = filteredData[day]['timeStamp'][::tDelta] # take every timestamp at beginning of a feature window
features[day] = pd.DataFrame(data=timeStamps, columns=['timeStamp'])
# create dict with empty lists for all feature-names
totalFeatureLists = {} # empty dict to store features in lists for this session
for label in totalFeatLabels:
totalFeatureLists[label] = []
# CALCULATION OF FEATURES, PER AXIS
for axis in ['SVM','X', 'Y', 'Z']: # loop over all axes, calculate features per axis
for windowStart in np.arange(0,len(filteredData[day][axis]),tDelta): # iterate over windows of 120 hz * 60 s
windowData = filteredData[day][axis][windowStart : windowStart+tDelta] # create windowdata per column and per window
## DISTRIBUTIVE AND DESCRIPTIVE FEATURES FROM TIME DOMAIN
# max acceleration (source: Griffiths 2012)
maxAcc = np.max(np.abs(windowData))
totalFeatureLists[axis+'_maxAcc'].append(maxAcc)
# IQR of acc
iqrAcc = scipy.stats.iqr(windowData)
totalFeatureLists[axis+'_iqrAcc'].append(iqrAcc)
# 90-th percentile acc, (Rispens 2015 https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4296095/?report=classic )
perc90 = np.percentile(np.abs(windowData), 90)
totalFeatureLists[axis+'_90prcAcc'].append(perc90)
# median of acc
medianAcc = np.median(np.abs(windowData))
totalFeatureLists[axis+'_medianAcc'].append(medianAcc)
# mean of acc
meanAcc = np.mean(np.abs(windowData))
totalFeatureLists[axis+'_meanAcc'].append(meanAcc)
# standard deviation
stddev = np.std(np.abs(windowData))
totalFeatureLists[axis+'_stddev'].append(stddev)
# variance (var = mean(abs(x - x.mean())**2))
var = np.var(np.abs(windowData))
totalFeatureLists[axis+'_variance'].append(var)
# Coefficient of variance (stddev / mean)
coefVar = scipy.stats.variation(np.abs(windowData))
totalFeatureLists[axis+'_coefVar'].append(coefVar)
# range in signal value; from Mahadevan-Github
accRange = windowData.max(skipna=True) - windowData.min(skipna=True)
totalFeatureLists[axis+'_accRange'].append( accRange )
# number of acceleration peaks per axes
# low threshold peaks: activity indication
lowPeaks = len(find_peaks(np.abs(windowData),height=1, threshold=None, distance=600)[0]) # height = required value to be a peak, distance is horizontal distance to allow next peak
totalFeatureLists[axis+'_lowPeaks'].append( lowPeaks )
# high threshold peaks: amount of faster activity
highPeaks = len(find_peaks(np.abs(windowData),height=3, threshold=None, distance=600)[0]) # height = required value to be a peak, distance is horizontal distance to allow next peak
totalFeatureLists[axis+'_highPeaks'].append( highPeaks )
# % time spent in above 1g acceleration
time1gAcc = np.sum(np.abs(windowData) > 1)/len(windowData)
totalFeatureLists[axis+'_time1gAcc'].append( time1gAcc )
# entropy in accelerometry (source: Mahadevan-github)
sigEntropy = signal_entropy(windowData)
totalFeatureLists[axis+'_accEntropy'].append(sigEntropy)
## jerk ratio/smoothness; rate of acc-changes (Hogan 2009) acc to Mahadevan, PM aimed for 3-sec windows
ampl = np.max(np.abs(windowData))
jerk = windowData.diff(1) * sr #(divided by 1 / sr => multiply with sr)
jerkSqSum = np.sum(jerk ** 2)
scale = 360 * ampl ** 2 / tDelta / sr
meanSqJerk = jerkSqSum / sr / (tDelta / sr * 2)
jerkRatio = meanSqJerk / scale
totalFeatureLists[axis+'_jerkRatio'].append(jerkRatio)
# RMS according to classical definition
meanSqAcc = np.mean(np.square(windowData))
rmsAcc = np.sqrt(meanSqAcc)
totalFeatureLists[axis+'_RMS'].append(rmsAcc )
# RMS ratio (RMS-axis / RMS-svm) (Sekine '13: https://www.ncbi.nlm.nih.gov/pubmed/24370075)
# rms ratio in mediolateral direction is correlated with walking speed
if np.logical_or(axis == 'X' , axis == 'Y'):
svmRMS = np.sqrt(np.mean(np.square(filteredData[day]['SVM'][windowStart : windowStart+tDelta])))
ratioRMS = rmsAcc / svmRMS
totalFeatureLists[axis+'_ratioRMS'].append(ratioRMS)
elif axis == 'Z':
svmRMS = np.sqrt(np.mean(np.square(filteredData[day]['SVM'][windowStart : windowStart+tDelta])))
ratioRMS = rmsAcc / svmRMS
totalFeatureLists[axis+'_ratioRMS'].append(ratioRMS)
## FEATURES FROM SPECTRAL DOMAIN
# Griffiths: MSP: not described: mean over whole 0.2-4.0? mean per which bin-width?? svm or axis?
freq = np.fft.rfftfreq(len(windowData), d = 1/sr) # define freq's for rfft, resolution (bins/hz) is dependent on windowlength of data
lowFreq = np.logical_and(freq < 3.5, freq > 0.0) # select freq's of interest, total = 0-30hz since sr=60
rfft = np.fft.rfft(windowData) # real fast fourier transform (same as fft[freq > 0])
psd = np.log(np.abs(rfft)**2) # log to normalize (rfft gives same barplot as periodogram and fft)
## ??? is PSD correct as squared value of magnitude, log for normalization?
psdLow = np.sum(psd[lowFreq]) # sum of psd's between selected freq's
totalFeatureLists[axis+'_specPow_totalu4Hz'].append(psdLow)
## Evers (preprint 2020) gait cadence in 0.7 - 1.4 hz and 1.4 - 2.8 hz
freqGaitA = np.logical_and(freq < 1.4, freq > 0.7) # select freq's of interest, total = 0-30hz since sr=60
psdGaitA = np.sum(psd[freqGaitA]) # sum of psd's between selected freq's
totalFeatureLists[axis+'_specPow_low'].append(psdGaitA)
freqGaitB = np.logical_and(freq < 2.8, freq > 1.4) # select freq's of interest, total = 0-30hz since sr=60
psdGaitB = np.sum(psd[freqGaitB]) # sum of psd's between selected freq's
totalFeatureLists[axis+'_specPow_mid'].append(psdGaitB)
freqGaitC = np.logical_and(freq < 3.5, freq > 2.8) # select freq's of interest, total = 0-30hz since sr=60
psdGaitC = np.sum(psd[freqGaitC]) # sum of psd's between selected freq's
totalFeatureLists[axis+'_specPow_high'].append(psdGaitC)
# dom freq + ratio + spectral flatness and entropy (source: Mahadevan-github)
domFreqValues = dominant_frequency(windowData, sr, 3) # 4 (3) = cutoff for spectrum too analyze
# totalFeatureLists[axis+'_domFreq_value'].append( float(domFreqValues['_dom_freq_value']))
totalFeatureLists[axis+'_domFreq_magnitude'].append( float(domFreqValues['_dom_freq_magnitude']))
totalFeatureLists[axis+'_domFreq_ratio'].append(float( domFreqValues['_dom_freq_ratio']))
totalFeatureLists[axis+'_spectral_flatness'].append( float(domFreqValues['_spectral_flatness']))
totalFeatureLists[axis+'_spectral_entropy'].append( float(domFreqValues['_spectral_entropy']))
if axis == 'SVM':
                    # cross-correlation between the X, Y and Z axes
                    crossCorXY = pearsonr(filteredData[day]['X'][windowStart : windowStart+tDelta], filteredData[day]['Y'][windowStart : windowStart+tDelta])
                    crossCorXZ = pearsonr(filteredData[day]['X'][windowStart : windowStart+tDelta], filteredData[day]['Z'][windowStart : windowStart+tDelta])
crossCorYZ = pearsonr(filteredData[day]['Y'][windowStart : windowStart+tDelta], filteredData[day]['Z'][windowStart : windowStart+tDelta])
totalFeatureLists['crossCor_XY'].append(crossCorXY[0])
totalFeatureLists['crossCor_XZ'].append(crossCorXZ[0])
totalFeatureLists['crossCor_YZ'].append(crossCorYZ[0])
# spectral variability and approximation of smoothness and PSD-line-length (importance ref by Beck 2019, Balasubramanian 20120)
normPSD = psd/psd[0] # normalize PSD by first value DC-normalization (Subramaninian '12')
spectralVar = np.var(normPSD[lowFreq])
totalFeatureLists[axis+'_spectralVar'].append(spectralVar)
# approximation of spectral length; find_peaks finds all small peaks,
# thresholds represent the distance from the peak to the neighbouring points
# sum of threshold-values indicates the distances the PSD-line makes to the peaks
ind, treshs = find_peaks(normPSD[lowFreq],height=None, threshold=0.05, distance=1) # first value returns peak-indices, second tresholds
spectralSmoothness = np.sum(treshs['left_thresholds']+treshs['right_thresholds'])
totalFeatureLists[axis+'_spectralSmoothness1'].append(spectralSmoothness)
# number of peaks found
spectralPeaks = len(ind)
totalFeatureLists[axis+'_spectrallowPeaks'].append(spectralPeaks)
# same smoothness and peaks, with higher peak-threshold
ind, treshs = find_peaks(normPSD[lowFreq],height=None, threshold=0.1, distance=2) # first value returns peak-indices, second tresholds
spectralSmoothness = np.sum(treshs['left_thresholds']+treshs['right_thresholds'])
totalFeatureLists[axis+'_spectralSmoothness2'].append(spectralSmoothness)
# number of peaks found
spectralPeaks = len(ind)
totalFeatureLists[axis+'_spectralhighPeaks'].append(spectralPeaks)
''' All features are calculated over all axes;
now writing all lists with features in to a feature dataframe per session in featureDict'''
# fill every column with calculated feature values
for col in totalFeatLabels:
features[day][col] = totalFeatureLists[col]
# save features per patient
        fileName = '%s_%s_%isec_features.csv' % (pt, day, windowLen)  # the 'day' key already encodes day and month
csv_full_file_write = os.path.join(path,'results',pt,fileName)
features[day].to_csv(csv_full_file_write, index=False)
return features
# execute data filtering and feature extraction
filteredData = load_filterWatchData('RCS02', 2020,6,8, 2020,6,11)
features = extractFeatures(pt='RCS02', filteredData=filteredData, )
###Output
no file for RCS02_08Jun2020_userAccel.csv
no file for RCS02_09Jun2020_userAccel.csv
doch
doch
|
Auto_scripts/Avocado/H2O-avocado.ipynb | ###Markdown
H2O Avocado Goal: - Create an ML model using H2O for the Avocado dataset- Get the RMSE over the predictions of this model Imports
###Code
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.automl import H2OAutoML
from time import process_time
###Output
_____no_output_____
###Markdown
Initialization of an internal server used by H2O
###Code
h2o.init()
###Output
Checking whether there is an H2O instance running at http://localhost:54321 ..... not found.
Attempting to start a local H2O server...
Java Version: openjdk version "1.8.0_152-release"; OpenJDK Runtime Environment (build 1.8.0_152-release-1056-b12); OpenJDK 64-Bit Server VM (build 25.152-b12, mixed mode)
Starting server from /opt/conda/lib/python3.7/site-packages/h2o/backend/bin/h2o.jar
Ice root: /tmp/tmp9w15t7cu
JVM stdout: /tmp/tmp9w15t7cu/h2o_unknownUser_started_from_python.out
JVM stderr: /tmp/tmp9w15t7cu/h2o_unknownUser_started_from_python.err
Server is running at http://127.0.0.1:54321
Connecting to H2O server at http://127.0.0.1:54321 ... successful.
###Markdown
Importing our dataset, marking the response column as a factor and collecting the remaining columns as predictors
###Code
df = h2o.upload_file('../../Data/avocado_price/processed/train.csv')
response = "C1"
df[response] = df[response].asfactor()
predictors=[]
for col in df.columns:
if col != response:
predictors.append(col)
###Output
Parse progress: |█████████████████████████████████████████████████████████| 100%
###Markdown
Importing our train and test datasets
###Code
train = h2o.upload_file('../../Data/avocado_price/processed/train.csv')
valid = h2o.upload_file('../../Data/avocado_price/processed/test.csv')
###Output
Parse progress: |█████████████████████████████████████████████████████████| 100%
Parse progress: |█████████████████████████████████████████████████████████| 100%
###Markdown
Creating our model
###Code
avocado_gbm = H2OGradientBoostingEstimator()
t1_start = process_time()
avocado_gbm.train(x = predictors,
y = response,
training_frame = train,
validation_frame = valid)
t1_stop = process_time()
print("Elapsed time in seconds : ",t1_stop-t1_start)
###Output
gbm Model Build progress: |███████████████████████████████████████████████| 100%
Elapsed time in seconds : 0.22595822900000018
###Markdown
Getting information about the model, such as:- Most important features- RMSE
###Code
print(avocado_gbm)
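# Editor's sketch (assumed standard H2O model accessors; not in the original
# notebook): pull the validation RMSE and the variable importances directly
# instead of reading them out of the full printout above.
print("Validation RMSE:", avocado_gbm.rmse(valid=True))
print(avocado_gbm.varimp(use_pandas=True))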
###Output
Model Details
=============
H2OGradientBoostingEstimator : Gradient Boosting Machine
Model Key: GBM_model_python_1592829396011_1
Model Summary:
|
Demo_with_Auto_Keras.ipynb | ###Markdown
###Code
# Connect and mount your Google Drive (click the link that appears, copy the authorization code, paste it here and run)
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# Change to the target directory
SYS_DIR = "/content/drive/My Drive/Colab Notebooks/AutoKerasDemos/"
import os
if os.path.isdir(SYS_DIR) is False:
os.mkdir(SYS_DIR)
os.chdir(SYS_DIR)
# Install AutoKeras
!pip3 install autokeras
# Install TensorFlow
!pip3 install tensorflow
# Import the basic libraries
import autokeras as ak
import tensorflow as tf
from autokeras import ImageClassifier
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
from sklearn.model_selection import train_test_split
from google.colab import files
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import load_model
# We experiment with handwritten digit recognition here, so first download the MNIST dataset bundled with TensorFlow
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Initialize the image classifier.
clf = ak.ImageClassifier(overwrite=True, max_trials=1)
# Search for the model best suited to digit recognition
clf.fit(x_train, y_train, epochs=10)
# Use the best model found to make predictions
predicted_y = clf.predict(x_test)
print(predicted_y)
# Evaluate the result: accuracy is very high and loss is very low
print(clf.evaluate(x_test, y_test))
# Compare the first 20 samples
print('prediction:', ' '.join(predicted_y[0:20].ravel()))
print('actual :', ' '.join(y_test[0:20].astype(str)))
# Export the model so it can be reused later
model = clf.export_model()
print(type(model))
# Save the model
try:
model.save("model_autokeras", save_format="tf")
except Exception:
model.save("model_autokeras.h5")
# Load the saved model and check that it still works
loaded_model = load_model("model_autokeras", custom_objects=ak.CUSTOM_OBJECTS)
predicted_y = loaded_model.predict(tf.expand_dims(x_test, -1))
print(predicted_y)
# Test the model on real images
from skimage import io
from skimage.transform import resize
import numpy as np
import matplotlib.pyplot as plt
X_ALL = np.empty((0, 28, 28))
for i in [7,2,3,5]:
image1 = io.imread(f'./imgs/{i}.jpg', as_gray=True)
plt.imshow(io.imread(f'./imgs/{i}.jpg'))
plt.show()
image_resized = resize(image1, (28, 28), anti_aliasing=True)
X1 = image_resized.reshape(1, 28, 28) #/ 255
X1 = (np.abs(1-X1) * 255).astype(int)
X_ALL = np.concatenate([X_ALL, X1])
predictions = loaded_model.predict(X_ALL)
for prediction in predictions:
print(np.argmax(prediction, axis=0))
###Output
_____no_output_____ |
Outliers_Removing_Technique.ipynb | ###Markdown
An outlier of a dataset is defined as a value that is more than 3 standard deviations away from the mean. Removing outliers from a DataFrame therefore removes any row that contains an outlier. Outlier calculations are performed separately for each column.
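Concretely, each value is converted to a z-score, $z = (x - \mu)/\sigma$, and a row is dropped as soon as any of its columns has $|z| > 3$; this is exactly what the z-score filter applied later in this notebook computes.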
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from datetime import datetime
mpl.rcParams['figure.figsize'] = (12, 7)
df = pd.read_csv("https://raw.githubusercontent.com/abidshafee/autoML-tsModel/main/throughput_metrics.csv", parse_dates=['Time'], index_col='Time')
df.describe(include='all')
df.head()
df[['SiteF','SiteE','SiteD','SiteC','SiteB','SiteA']].plot(subplots=True)
###Output
_____no_output_____
###Markdown
Detecting Outliers
###Code
sns.boxenplot(df['SiteA'])
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
###Markdown
Removing Outliers
###Code
import scipy.stats as sst
z_score = sst.zscore(df)
abs_z_scores = np.abs(z_score)
filter_data = (abs_z_scores<3).all(axis=1)
ndf = df[filter_data]
###Output
_____no_output_____
###Markdown
We keep only rows with `abs_z_scores < 3`, because an outlier is defined as a value more than 3 standard deviations (|z| > 3) away from the mean.
###Code
ndf.describe(include='all')
ndf.info()
sns.boxenplot(ndf['SiteA'])
print(df.shape)
print(ndf.shape)
n_bins = 25
fig, axs = plt.subplots(1, 2, sharey=True, tight_layout=True)
# We can set the number of bins with the *bins* keyword argument.
axs[0].hist(ndf['SiteA'], bins=n_bins)
axs[1].hist(ndf['SiteB'], bins=n_bins)
###Output
_____no_output_____ |
conveyor/examples/ml_example/.ipynb_checkpoints/Exploring Iris-checkpoint.ipynb | ###Markdown
Iris dataset classificationThis is one of a few notebooks designed to showcase how Conveyor can make your work in Jupyter more organized. The objective of this example is to seperate the Iris dataset classification task (covered [here](https://scikit-learn.org/stable/tutorial/statistical_inference/supervised_learning.html)) into smaller subtasks, from exploratory data analysis to evaluating different classification strategies.
###Code
import numpy as np
from sklearn import datasets
iris_X, iris_y = datasets.load_iris(return_X_y=True)
###Output
_____no_output_____
###Markdown
(From scikit-learn's website) "The iris dataset is a classification task consisting in identifying 3 different types of irises (Setosa, Versicolour, and Virginica) from their petal and sepal length and width."
###Code
np.unique(iris_y)
# iris_X appears to contain petal and sepal lengths and widths...
iris_X[0]
# The 0 class is a type of flower
iris_y[0]
###Output
_____no_output_____
###Markdown
How many of each type of flower does the dataset contain?
###Code
class_count = [0]*len(np.unique(iris_y))
for flower_type in iris_y:
class_count[flower_type] += 1
class_count
###Output
_____no_output_____
###Markdown
Are there any obvious identifying characteristics about each flower's petals? What about sepal and petal areas?
###Code
class_data = [iris_X[np.where(iris_y == flower_type)] for flower_type in np.unique(iris_y)]
class_areas_avg = []
class_areas = []
for flower_type in range(len(class_data)):
    flower_avg_dims = np.mean(class_data[flower_type], axis=0)  # mean of each of the four measurements
class_areas_avg.append((flower_avg_dims[0]*flower_avg_dims[1],
flower_avg_dims[2]*flower_avg_dims[3]))
class_areas.append([(x[0]*x[1], x[2]*x[3]) for x in class_data[flower_type]])
# From classes 0 to 1 to 2 the average sizes increase
class_areas_avg
###Output
_____no_output_____
###Markdown
The average areas seem to be markedly different among the three types of flowers. Do the areas vary much across individual flowers, relative to these values? If so, area will not be a useful indicator for classifying our flowers.
###Code
np.var(class_areas, axis=1)
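# Editor's sketch: relative spread (std / mean) of the sepal and petal areas per
# class -- values well below 1 would suggest area is a useful class indicator.
np.sqrt(np.var(class_areas, axis=1)) / np.mean(class_areas, axis=1)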
###Output
_____no_output_____
###Markdown
Let's get the areas for each flower in the order we see them.
###Code
flower_areas = []
for flower_idx in range(len(iris_X)):
flower_data = iris_X[flower_idx]
flower_areas.append([flower_data[0] * flower_data[1], flower_data[2] * flower_data[3]])
flower_areas
###Output
_____no_output_____ |
data-analysis/pandas/sf_salaries.ipynb | ###Markdown
SF Salaries ExerciseWelcome to a quick exercise for you to practice your pandas skills! We will be using the [SF Salaries Dataset](https://www.kaggle.com/kaggle/sf-salaries) from Kaggle! Just follow along and complete the tasks outlined in bold below. The tasks will get harder and harder as you go along. ** Import pandas as pd.**
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
** Read Salaries.csv as a dataframe called sal.**
###Code
sal = pd.read_csv('Salaries.csv')
###Output
_____no_output_____
###Markdown
** Check the head of the DataFrame. **
###Code
sal.head()
###Output
_____no_output_____
###Markdown
** Use the .info() method to find out how many entries there are.**
###Code
sal.info() # 148654 Entries
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 148654 entries, 0 to 148653
Data columns (total 13 columns):
Id 148654 non-null int64
EmployeeName 148654 non-null object
JobTitle 148654 non-null object
BasePay 148045 non-null float64
OvertimePay 148650 non-null float64
OtherPay 148650 non-null float64
Benefits 112491 non-null float64
TotalPay 148654 non-null float64
TotalPayBenefits 148654 non-null float64
Year 148654 non-null int64
Notes 0 non-null float64
Agency 148654 non-null object
Status 0 non-null float64
dtypes: float64(8), int64(2), object(3)
memory usage: 14.7+ MB
###Markdown
**What is the average BasePay ?**
###Code
sal['BasePay'].mean()
###Output
_____no_output_____
###Markdown
** What is the highest amount of OvertimePay in the dataset ? **
###Code
sal['OvertimePay'].max()
###Output
_____no_output_____
###Markdown
** What is the job title of JOSEPH DRISCOLL ? Note: Use all caps, otherwise you may get an answer that doesn't match up (there is also a lowercase Joseph Driscoll). **
###Code
sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['JobTitle']
###Output
_____no_output_____
###Markdown
** How much does JOSEPH DRISCOLL make (including benefits)? **
###Code
sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['TotalPayBenefits']
###Output
_____no_output_____
###Markdown
** What is the name of highest paid person (including benefits)?**
###Code
sal[sal['TotalPayBenefits']== sal['TotalPayBenefits'].max()] #['EmployeeName']
# or
# sal.loc[sal['TotalPayBenefits'].idxmax()]
###Output
_____no_output_____
###Markdown
** What is the name of lowest paid person (including benefits)? Do you notice something strange about how much he or she is paid?**
###Code
sal[sal['TotalPayBenefits']== sal['TotalPayBenefits'].min()] #['EmployeeName']
# or
# sal.loc[sal['TotalPayBenefits'].idxmax()]['EmployeeName']
## ITS NEGATIVE!! VERY STRANGE
###Output
_____no_output_____
###Markdown
** What was the average (mean) BasePay of all employees per year? (2011-2014) ? **
###Code
sal.groupby('Year').mean()['BasePay']
###Output
_____no_output_____
###Markdown
** How many unique job titles are there? **
###Code
sal['JobTitle'].nunique()
###Output
_____no_output_____
###Markdown
** What are the top 5 most common jobs? **
###Code
sal['JobTitle'].value_counts().head(5)
###Output
_____no_output_____
###Markdown
** How many Job Titles were represented by only one person in 2013? (e.g. Job Titles with only one occurence in 2013?) **
###Code
sum(sal[sal['Year']==2013]['JobTitle'].value_counts() == 1) # pretty tricky way to do this...
###Output
_____no_output_____
###Markdown
** How many people have the word Chief in their job title? (This is pretty tricky) **
###Code
def chief_string(title):
if 'chief' in title.lower():
return True
else:
return False
sum(sal['JobTitle'].apply(lambda x: chief_string(x)))
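# Editor's sketch: the same count using pandas' vectorized string matching
# instead of the helper function above.
sal['JobTitle'].str.lower().str.contains('chief').sum()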
###Output
_____no_output_____
###Markdown
** Bonus: Is there a correlation between length of the Job Title string and Salary? **
###Code
sal['title_len'] = sal['JobTitle'].apply(len)
sal[['title_len','TotalPayBenefits']].corr() # No correlation.
###Output
_____no_output_____ |
mine_domain3.ipynb | ###Markdown
Mine domain 3. Extract subgroups with high concentration of PHAs
###Code
import pickle
from copy import deepcopy
import numpy as np
import pandas as pd
from sklearn import neighbors, svm
import matplotlib as mpl
# Import Asterion modules
import read_database as rdb
import learn_data as ld
import asterion_learn as al
import visualize_data as vd
# Matplotlib settings for the current notebook
%matplotlib inline
# font = {'size': 25}
font = {'size': 16}
mpl.rc('font', **font)
###Output
_____no_output_____
###Markdown
** Load NEAs from the 3-rd domain **
###Code
dirpath = './asteroid_data/'
real_datasets = ['haz_real', 'nohaz_real']
gen_datasets = ['haz_gen', 'nohaz_gen']
genu_datasets = ['haz_gen', 'nohaz_gen']
name_sufixes = ['_dom3.p', '_dom3_rest.p']
dumps_real = [dirpath + ds + ns for ns in name_sufixes for ds in real_datasets]
dumps_gen = [dirpath + ds + ns for ns in name_sufixes for ds in gen_datasets]
dumps_genu = [dirpath + ds + ns for ns in name_sufixes for ds in genu_datasets]
haz_real, nohaz_real, haz_real_rest, nohaz_real_rest = map(rdb.loadObject, dumps_real)
haz_gen, nohaz_gen, haz_gen_rest, nohaz_gen_rest = map(rdb.loadObject, dumps_gen)
haz_genu, nohaz_genu, haz_genu_rest, nohaz_genu_rest = map(rdb.loadObject, dumps_genu)
gen_num = sum(map(len, [haz_gen, nohaz_gen]))
real_num = sum(map(len, [haz_real, nohaz_real]))
print "Number of virtual asteroids in the domain:", gen_num
print "Number of real asteroids in the domain:", real_num
###Output
Number of virtual asteroids in the domain: 5210
Number of real asteroids in the domain: 86
###Markdown
** Investigate distributions of NEAs orbital parameters in the 3-rd domain **
###Code
# vd.plot_alldistcombs(haz_gen, nohaz_gen, labels=True)
###Output
_____no_output_____
###Markdown
--- Atiras & Atens
###Code
haz_gen_extracted_aa = []
nohaz_gen_trapped_aa = []
haz_real_extracted_aa = []
nohaz_real_trapped_aa = []
###Output
_____no_output_____
###Markdown
** Atiras **
###Code
haz_gen_atiras, haz_gen_atiras_num = rdb.get_atiras(haz_gen)
nohaz_gen_atiras, nohaz_gen_atiras_num = rdb.get_atiras(nohaz_gen)
atiras_gen_num = haz_gen_atiras_num + nohaz_gen_atiras_num
haz_real_atiras, haz_real_atiras_num = rdb.get_atiras(haz_real)
nohaz_real_atiras, nohaz_real_atiras_num = rdb.get_atiras(nohaz_real)
atiras_real_num = haz_real_atiras_num + nohaz_real_atiras_num
print "Number of virtual Atiras:", atiras_gen_num
print "Number of real Atiras:", atiras_real_num
###Output
Number of virtual Atiras: 17
Number of real Atiras: 1
###Markdown
** Atens **
###Code
haz_gen_atens, haz_gen_atens_num = rdb.get_atens(haz_gen)
nohaz_gen_atens, nohaz_gen_atens_num = rdb.get_atens(nohaz_gen)
atens_gen_num = haz_gen_atens_num + nohaz_gen_atens_num
haz_real_atens, haz_real_atens_num = rdb.get_atens(haz_real)
nohaz_real_atens, nohaz_real_atens_num = rdb.get_atens(nohaz_real)
atens_real_num = haz_real_atens_num + nohaz_real_atens_num
print "Number of virtual Atens:", atens_gen_num
print "Number of real Atens:", atens_real_num
###Output
Number of virtual Atens: 546
Number of real Atens: 18
###Markdown
** Atiras + Atens **
###Code
haz_gen_atiras_atens = pd.concat((haz_gen_atiras, haz_gen_atens))
nohaz_gen_atiras_atens = pd.concat((nohaz_gen_atiras, nohaz_gen_atens))
haz_gen_atiras_atens_num = len(haz_gen_atiras_atens)
nohaz_gen_atiras_atens_num = len(nohaz_gen_atiras_atens)
atiras_atens_gen_num = haz_gen_atiras_atens_num + nohaz_gen_atiras_atens_num
haz_real_atiras_atens = pd.concat((haz_real_atiras, haz_real_atens))
nohaz_real_atiras_atens = pd.concat((nohaz_real_atiras, nohaz_real_atens))
haz_real_atiras_atens_num = len(haz_real_atiras_atens)
nohaz_real_atiras_atens_num = len(nohaz_real_atiras_atens)
atiras_atens_real_num = haz_real_atiras_atens_num + nohaz_real_atiras_atens_num
print "Number of virtual PHAs in the group:", haz_gen_atiras_atens_num
print "Number of virtual NHAs in the group:", nohaz_gen_atiras_atens_num
print "Number of virtual Atiras and Atens:", atiras_atens_gen_num
print "Virtual Atiras and Atens group weight:", float(atiras_atens_gen_num)/gen_num
print "Number of real PHAs in the group:", haz_real_atiras_atens_num
print "Number of real NHAs in the group:", nohaz_real_atiras_atens_num
print "Number of real Atiras and Atens:", atiras_atens_real_num
print "Real Atiras and Atens group weight:", float(atiras_atens_real_num)/real_num
# vd.display_allparams([haz_gen_atiras_atens, nohaz_gen_atiras_atens], vd.combs, vd.colnames)
###Output
_____no_output_____
###Markdown
Split Atiras and Atens by a *w*-*i* surface ** Amplify datasets by their symmetric copies over the 'w' parameter **
###Code
haz_gen_atiras_atens_se = ld.add_doublemirror_column(haz_gen_atiras_atens, 'w', 180.0)
nohaz_gen_atiras_atens_se = ld.add_doublemirror_column(nohaz_gen_atiras_atens, 'w', 180.0)
cutcol = ['w', 'i']
vd.plot_distributions2d(cutcol, haz_gen_atiras_atens_se, nohaz_gen_atiras_atens_se, labels=True)
###Output
/usr/lib/pymodules/python2.7/matplotlib/collections.py:548: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
if self._edgecolors == 'face':
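###Markdown
The helper `ld.add_doublemirror_column` comes from the project's own `learn_data` module and is not shown in this notebook. The sketch below is only an editor's guess at the kind of augmentation it performs (appending copies with `w` reflected about the given center and about 0); the actual implementation may differ.
###Code
# Hypothetical re-implementation (assumption only; see the note above): append
# mirrored copies of the data so the classifier sees the symmetry in `w`.
def add_doublemirror_column_sketch(df, col, center):
    mirrored_center = df.copy()
    mirrored_center[col] = 2.0 * center - df[col]
    mirrored_zero = df.copy()
    mirrored_zero[col] = -df[col]
    return pd.concat([df, mirrored_center, mirrored_zero], ignore_index=True)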
###Markdown
** Cut *w* and *i* columns and normalize datasets **
###Code
cutcol = ['w', 'i']
pairs, atiras_atens_wi_sc = ld.cut_normalize(cutcol, [haz_gen_atiras_atens, nohaz_gen_atiras_atens],
[haz_real_atiras_atens, nohaz_real_atiras_atens],
[haz_gen_atiras_atens_se, nohaz_gen_atiras_atens_se])
haz_gen_cut, nohaz_gen_cut = pairs[0]
haz_real_cut, nohaz_real_cut = pairs[1]
haz_gen_se_cut, nohaz_gen_se_cut = pairs[2]
###Output
_____no_output_____
###Markdown
** Find decision surface with SVM **
###Code
clf_aa = svm.SVC(gamma=80.0, C=0.4, class_weight={0: 1.1})
xtrain, ytrain = ld.mix_up(haz_gen_se_cut, nohaz_gen_se_cut)
clf_aa = clf_aa.fit(xtrain, ytrain)
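# Editor's sketch (not in the original notebook): a quick sanity check on the
# fitted boundary -- the number of support vectors per class; very large counts
# at gamma=80 would hint at over-fitting.
print("support vectors per class:", clf_aa.n_support_)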
# cutcol = ['w', 'i']
# clf_aa = svm.SVC(gamma=80.0, C=0.4, class_weight={0: 1.1}) #class_weight={0: 1.5}
# #(20 0.5), (30 0.1) (200 0.1)
# splitres = al.split_by_clf(clf_aa, cutcol, haz_gen_atiras_atens_se,
# nohaz_gen_atiras_atens_se,
# haz_gen_atiras_atens,
# nohaz_gen_atiras_atens)
# haz_gen_atiras_atens_wi, nohaz_gen_atiras_atens_wi = splitres[0]
# haz_gen_atiras_atens_wi__, nohaz_gen_atiras_atens_wi__ = splitres[1]
# haz_gen_aa_wi_sc = splitres[2]
###Output
_____no_output_____
###Markdown
** Estimate split quality for virtual Atiras & Atens **
###Code
predicted_gen = al.clf_split_quality(clf_aa, haz_gen_cut, nohaz_gen_cut)
haz_gen_atiras_atens_wi = haz_gen_atiras_atens.iloc[predicted_gen[0]]
nohaz_gen_atiras_atens_wi = nohaz_gen_atiras_atens.iloc[predicted_gen[1]]
haz_gen_atiras_atens_wi__ = haz_gen_atiras_atens.iloc[predicted_gen[2]]
nohaz_gen_atiras_atens_wi__ = nohaz_gen_atiras_atens.iloc[predicted_gen[3]]
###Output
purity of PHA region: 0.902097902098
number of PHAs in the PHA region: 129
number of NHAs in the PHA region: 14
purity of NHA region: 0.954761904762
number of PHAs in the NHA region: 19
number of NHAs in the NHA region: 401
fraction of correctly classified PHAs: 0.871621621622
###Markdown
** Estimate split quality for real Atiras & Atens **
###Code
predicted_real = al.clf_split_quality(clf_aa, haz_real_cut, nohaz_real_cut)
haz_real_atiras_atens_wi = haz_real_atiras_atens.iloc[predicted_real[0]]
nohaz_real_atiras_atens_wi = nohaz_real_atiras_atens.iloc[predicted_real[1]]
haz_real_atiras_atens_wi__ = haz_real_atiras_atens.iloc[predicted_real[2]]
nohaz_real_atiras_atens_wi__ = nohaz_real_atiras_atens.iloc[predicted_real[3]]
###Output
purity of PHA region: 1.0
number of PHAs in the PHA region: 1
number of NHAs in the PHA region: 0
purity of NHA region: 1.0
number of PHAs in the NHA region: 0
number of NHAs in the NHA region: 18
fraction of correctly classified PHAs: 1.0
###Markdown
** Plot decision surface **
###Code
vd.plot_clf2d(clf_aa, cutcol, haz_cut=haz_gen_cut, nohaz_cut=nohaz_gen_cut, s=6,
num=500, scales=atiras_atens_wi_sc, labels=True, cmap='winter', figsize=(8, 8) )
haz_gen_extracted_aa.append(haz_gen_atiras_atens_wi)
nohaz_gen_trapped_aa.append(nohaz_gen_atiras_atens_wi)
haz_real_extracted_aa.append(haz_real_atiras_atens_wi)
nohaz_real_trapped_aa.append(nohaz_real_atiras_atens_wi)
###Output
_____no_output_____
###Markdown
Atiras & Atens divisions quality ** Divisions quality for virtual Atiras & Atens **
###Code
vd.print_summary(haz_gen_extracted_aa, nohaz_gen_trapped_aa,
haz_gen_atiras_atens, nohaz_gen_atiras_atens, 'virtual')
###Output
Number of correctly classified virtual PHAs 129
Number of trapped virtual NHAs: 14
Mass fraction of correctly classified virtual PHAs: 0.871621621622
Mass fraction of trapped virtual NHAs: 0.033734939759
Cummulative purity of the outlined PHA regions: 0.902097902098
###Markdown
** Divisions quality for real Atiras & Atens **
###Code
vd.print_summary(haz_real_extracted_aa, nohaz_real_trapped_aa,
haz_real_atiras_atens, nohaz_real_atiras_atens, 'real')
###Output
Number of correctly classified real PHAs 1
Number of trapped real NHAs: 0
Mass fraction of correctly classified real PHAs: 1.0
Mass fraction of trapped real NHAs: 0.0
Cummulative purity of the outlined PHA regions: 1.0
###Markdown
--- Apollos
###Code
haz_gen_extracted_ap = []
nohaz_gen_trapped_ap = []
haz_real_extracted_ap = []
nohaz_real_trapped_ap = []
haz_gen_apollo, haz_gen_apollo_num = rdb.get_apollos(haz_gen)
nohaz_gen_apollo, nohaz_gen_apollo_num = rdb.get_apollos(nohaz_gen)
apollo_gen_num = haz_gen_apollo_num + nohaz_gen_apollo_num
haz_real_apollo, haz_real_apollo_num = rdb.get_apollos(haz_real)
nohaz_real_apollo, nohaz_real_apollo_num = rdb.get_apollos(nohaz_real)
apollo_real_num = haz_real_apollo_num + nohaz_real_apollo_num
###Output
_____no_output_____
###Markdown
** Virtual Apollos **
###Code
print "Number of virtual PHAs in the group:", haz_gen_apollo_num
print "Number of virtual NHAs in the group:", nohaz_gen_apollo_num
print "Number of virtual Apollo:", apollo_gen_num
print "Apollo group weight:", float(apollo_gen_num)/gen_num
###Output
Number of virtual PHAs in the group: 2177
Number of virtual NHAs in the group: 2470
Number of virtual Apollo: 4647
Apollo group weight: 0.891938579655
###Markdown
** Real Apollos **
###Code
print "Number of real PHAs in the group:", haz_real_apollo_num
print "Number of real NHAs in the group:", nohaz_real_apollo_num
print "Number of real Apollo:", apollo_real_num
print "Apollo group weight:", float(apollo_real_num)/real_num
# vd.display_allparams([haz_gen_apollo, nohaz_gen_apollo], vd.combs, vd.colnames)
###Output
_____no_output_____
###Markdown
Split Apollos by a *w*-*q*-*i* surface ** Amplify Apollos by their symmetric copies over the *w* parameter **
###Code
haz_gen_apollo_se = ld.add_doublemirror_column(haz_gen_apollo, 'w', 180.0)
nohaz_gen_apollo_se = ld.add_doublemirror_column(nohaz_gen_apollo, 'w', 180.0)
cutcol = ['w', 'q']
vd.plot_distributions2d(cutcol, haz_gen_apollo_se, nohaz_gen_apollo_se, labels=True, invertaxes=[0,1])
###Output
_____no_output_____
###Markdown
** Cut *w*, *q* and *i* columns and normalize datasets **
###Code
cutcol = ['w', 'q', 'i']
pairs, apollo_wqi_sc = ld.cut_normalize(cutcol, [haz_gen_apollo, nohaz_gen_apollo],
[haz_real_apollo, nohaz_real_apollo],
[haz_gen_apollo_se, nohaz_gen_apollo_se])
haz_gen_cut, nohaz_gen_cut = pairs[0]
haz_real_cut, nohaz_real_cut = pairs[1]
haz_gen_se_cut, nohaz_gen_se_cut = pairs[2]
###Output
_____no_output_____
###Markdown
** Prepare *w*-*q* domain mask to exclude out-of-domain points from the plot **
###Code
# genu = pd.concat((haz_genu, nohaz_genu, haz_gen, nohaz_gen))
# genu_rest = pd.concat((haz_genu_rest, nohaz_genu_rest, haz_gen_rest, nohaz_gen_rest))
genu = pd.concat((haz_genu, nohaz_genu))
genu_rest = pd.concat((haz_genu_rest, nohaz_genu_rest))
genu_se = ld.add_doublemirror_column(genu, 'w', 180.0)
genu_rest_se = ld.add_doublemirror_column(genu_rest, 'w', 180.0)
apollo_wq_sc = apollo_wqi_sc[:2]
cutcol_ = ['w', 'q']
clfmask = svm.SVC(gamma=10.0, C=500.0, class_weight={1: 10})
clfmask = al.sgmask_clf2d_fit(clfmask, cutcol_, genu_se, genu_rest_se, apollo_wq_sc)
vd.plot_clf2d(clfmask, cutcol_, num=200, figsize=(6,6), scales=apollo_wq_sc,
labels=True, cmap='Blues', invertaxes=[0, 1])
###Output
_____no_output_____
###Markdown
** Train SVM **
###Code
clf_apollo = svm.SVC(gamma=20.0, C=0.5)
xtrain, ytrain = ld.mix_up(haz_gen_se_cut, nohaz_gen_se_cut)
clf_apollo = clf_apollo.fit(xtrain, ytrain)
# cutcol = ['w', 'q', 'i']
# clf_apollo_wqi = svm.SVC(gamma=20.0, C=0.5)
# splitres = al.split_by_clf(clf_apollo_wqi, cutcol, haz_gen_apollo_se,
# nohaz_gen_apollo_se,
# haz_gen_apollo,
# nohaz_gen_apollo)
# haz_gen_apollo_wqi, nohaz_gen_apollo_wqi = splitres[0]
# haz_gen_apollo_wqi__, nohaz_gen_apollo_wqi__ = splitres[1]
# haz_gen_apollo_wqi_sc = splitres[2]
###Output
_____no_output_____
###Markdown
** Estimate split quality for virtual Apollos **
###Code
predicted_gen = al.clf_split_quality(clf_apollo, haz_gen_cut, nohaz_gen_cut)
haz_gen_apollo_wqi = haz_gen_apollo.iloc[predicted_gen[0]]
nohaz_gen_apollo_wqi = nohaz_gen_apollo.iloc[predicted_gen[1]]
haz_gen_apollo_wqi__ = haz_gen_apollo.iloc[predicted_gen[2]]
nohaz_gen_apollo_wqi__ = nohaz_gen_apollo.iloc[predicted_gen[3]]
###Output
purity of PHA region: 0.939252336449
number of PHAs in the PHA region: 2010
number of NHAs in the PHA region: 130
purity of NHA region: 0.93338651775
number of PHAs in the NHA region: 167
number of NHAs in the NHA region: 2340
fraction of correctly classified PHAs: 0.92328892972
###Markdown
** Estimate split quality for real Apollos **
###Code
predicted_real = al.clf_split_quality(clf_apollo, haz_real_cut, nohaz_real_cut)
haz_real_apollo_wqi = haz_real_apollo.iloc[predicted_real[0]]
nohaz_real_apollo_wqi = nohaz_real_apollo.iloc[predicted_real[1]]
haz_real_apollo_wqi__ = haz_real_apollo.iloc[predicted_real[2]]
nohaz_real_apollo_wqi__ = nohaz_real_apollo.iloc[predicted_real[3]]
print "Mass fraction of correctly classified PHAs:", float(len(haz_gen_apollo_wqi))/haz_gen_apollo_num
print "Mass fraction of trapped NHAs:", float(len(nohaz_gen_apollo_wqi))/nohaz_gen_apollo_num
# cutcol = ['w', 'q', 'i']
# clf_masks = [(clfmask, 0)]
# labels = [vd.colnames[nm] for nm in cutcol]
vd.plot_clf3d(clf_apollo, cutcol, num=250, labels=True, figsize=(9,8), mode='2d',
scales=apollo_wqi_sc, clf_masks=[(clfmask, 0)], invertaxes=[0, 1])
haz_gen_extracted_ap.append(haz_gen_apollo_wqi)
nohaz_gen_trapped_ap.append(nohaz_gen_apollo_wqi)
haz_real_extracted_ap.append(haz_real_apollo_wqi)
nohaz_real_trapped_ap.append(nohaz_real_apollo_wqi)
###Output
_____no_output_____
###Markdown
Apollo divisions quality ** Divisions quality for virtual Apollos **
###Code
vd.print_summary(haz_gen_extracted_ap, nohaz_gen_trapped_ap, haz_gen_apollo, nohaz_gen_apollo, 'virtual')
###Output
Number of correctly classified virtual PHAs 2010
Number of trapped virtual NHAs: 130
Mass fraction of correctly classified virtual PHAs: 0.92328892972
Mass fraction of trapped virtual NHAs: 0.0526315789474
Cummulative purity of the outlined PHA regions: 0.939252336449
###Markdown
** Divisions quality for real Apollos **
###Code
vd.print_summary(haz_real_extracted_ap, nohaz_real_trapped_ap, haz_real_apollo, nohaz_real_apollo, 'real')
###Output
Number of correctly classified real PHAs 34
Number of trapped real NHAs: 1
Mass fraction of correctly classified real PHAs: 0.918918918919
Mass fraction of trapped real NHAs: 0.0333333333333
Cummulative purity of the outlined PHA regions: 0.971428571429
###Markdown
Cumulative split quality ** Virtual asteroids **
###Code
haz_gen_extracted = haz_gen_extracted_aa + haz_gen_extracted_ap
nohaz_gen_trapped = nohaz_gen_trapped_aa + nohaz_gen_trapped_ap
vd.print_summary(haz_gen_extracted, nohaz_gen_trapped, haz_gen, nohaz_gen, 'virtual')
###Output
Number of correctly classified virtual PHAs 2139
Number of trapped virtual NHAs: 144
Mass fraction of correctly classified virtual PHAs: 0.92
Mass fraction of trapped virtual NHAs: 0.0499133448873
Cummulative purity of the outlined PHA regions: 0.936925098555
###Markdown
** Real asteroids **
###Code
haz_real_extracted = haz_real_extracted_aa + haz_real_extracted_ap
nohaz_real_trapped = nohaz_real_trapped_aa + nohaz_real_trapped_ap
vd.print_summary(haz_real_extracted, nohaz_real_trapped, haz_real, nohaz_real, 'real')
###Output
Number of correctly classified real PHAs 35
Number of trapped real NHAs: 1
Mass fraction of correctly classified real PHAs: 0.921052631579
Mass fraction of trapped real NHAs: 0.0208333333333
Cummulative purity of the outlined PHA regions: 0.972222222222
|
log-prog-python/Exercicios_Logging.ipynb | ###Markdown
LoggingLogging is the process of recording events that occur throughout the execution of your code. It lets you gain insight into how your code works, locate bugs, and optimize your script.
###Code
import logging
###Output
_____no_output_____
###Markdown
To use it, we first instantiate an object responsible for handling these records via the logging.getLogger method.
###Code
log = logging.getLogger("meu-logger")
log.info("Hello, world")
###Output
_____no_output_____
###Markdown
Logging levelsSeveral kinds of events can occur, and we can specify which ones we want to handle. If you set the log level to INFO, it will include INFO, WARNING, ERROR, and CRITICAL messages. ```* CRITICAL..50* ERROR.....40* WARNING...30* INFO......20* DEBUG.....10* NOTSET.....0```
###Code
log.critical("Registra um log do nível critical")
log.error("Registra um log do nível error")
log.warning("Registra um log do nível warning")
log.info("Registra um log do nível info")
log.debug("Registra um log do nível debug")
###Output
Registra um log do nível critical
Registra um log do nível error
Registra um log do nível warning
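Only three of the five messages above were emitted: no level or handler has been configured yet, so the logger falls back to the root logger's default threshold, WARNING, and the INFO and DEBUG calls are filtered out. A minimal, hedged sketch of raising the verbosity (the logger name is illustrative):
```
import logging

logging.basicConfig(level=logging.DEBUG)   # set the root logger's threshold once
log = logging.getLogger("levels-demo")     # illustrative name; inherits the root level

log.debug("now emitted: DEBUG >= DEBUG")
log.info("now emitted: INFO >= DEBUG")
```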
###Markdown
Basic configurationWe use the basicConfig() method to configure logging. Common parameters:* level* filename: specifies the log file name.* filemode: if a file name is given, the file is opened in this mode. The default is **a**, which means append.* format: the format of the log message. A combined example is sketched below.
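A hedged sketch combining the parameters listed above into a single call, assuming a fresh interpreter (basicConfig only takes effect once, as discussed further below); the file name is illustrative:
```
import logging

logging.basicConfig(
    filename='example.log',   # illustrative path; records are written to this file
    filemode='w',             # overwrite on each run (the default 'a' appends)
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG,      # emit everything from DEBUG upward
)
logging.debug('Written to example.log using the format above')
```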
###Code
import logging
logging.basicConfig(level=logging.DEBUG)
logging.debug('This will get logged')
import logging
logging.basicConfig(filename='/content/drive/MyDrive/app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
logging.warning('This will get logged to a file')
###Output
WARNING:root:This will get logged to a file
###Markdown
More information about the method is available at https://docs.python.org/3/library/logging.html#logging.basicConfig. Essentially, basicConfig() can only be called once per run; otherwise, we need to reset the root logger's handlers before adjusting the parameters.
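The next cell defines a small helper that strips the root logger's handlers. As a hedged note, on Python 3.8+ the same effect is available through basicConfig's force parameter, which removes and closes any existing root handlers before reconfiguring:
```
import logging

# Python 3.8+ only: reconfigure the root logger even if it was configured before.
logging.basicConfig(level=logging.INFO, format='%(levelname)s | %(message)s', force=True)
```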
###Code
def reset_log():
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
reset_log()
###Output
_____no_output_____
###Markdown
Output formatting
###Code
import logging
logging.basicConfig(format='%(process)d-%(levelname)s-%(message)s')
logging.warning('This is a Warning')
reset_log()
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
logging.info('Admin logged in')
###Output
2021-11-12 00:43:34,343 - Admin logged in
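For reference, the format string can combine any of the standard LogRecord attributes (attribute names taken from the logging documentation); the combination below is only an illustrative sketch, run in a fresh interpreter since an already-configured root logger would need to be reset first:
```
import logging

# %(asctime)s   human-readable time        %(name)s     logger name
# %(levelname)s level as text              %(module)s   module name
# %(funcName)s  calling function           %(lineno)d   source line number
# %(process)d   process id                 %(message)s  the logged message
fmt = '%(asctime)s | %(name)s | %(levelname)s | %(module)s:%(funcName)s:%(lineno)d | %(message)s'
logging.basicConfig(format=fmt, level=logging.INFO)
logging.info('record formatted with the attributes above')
```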
###Markdown
Recording and capturing events
###Code
reset_log()
name = 'John'
logging.error('%s raised an error', name)
reset_log()
a = 5
b = 0
try:
c = a / b
except Exception as e:
logging.error("Exception occurred", exc_info=True)
###Output
_____no_output_____ |
specs/ipyplotly_integration/Overview.ipynb | ###Markdown
OverviewThis notebook introduces the ipyplotly enhancements to the plotly.py visualization library and demonstrates some of its features. New Features - Traces can be added and updated interactively by simply assigning to properties - The full Traces and Layout API is generated from the plotly schema to provide a great experience for interactive use in the notebook - Data validation covering the full API with clear, informative error messages - Jupyter friendly docstrings on constructor params and properties - Support for setting array properties as numpy arrays. When numpy arrays are used, ipywidgets binary serialization protocol is used to avoid converting these to JSON strings. - Context manager API for animation - Programmatic export of figures to static SVG images (and PNG and PDF with cairosvg installed). Imports
###Code
# ipyplotly
from plotly.graph_objs import FigureWidget
from plotly.callbacks import Points, InputDeviceState
# pandas
import pandas as pd
# numpy
import numpy as np
# scikit learn
from sklearn import datasets
# ipywidgets
from ipywidgets import HBox, VBox, Button
# functools
from functools import partial
# Load iris dataset
iris_data = datasets.load_iris()
feature_names = [name.replace(' (cm)', '').replace(' ', '_') for name in iris_data.feature_names]
iris_df = pd.DataFrame(iris_data.data, columns=feature_names)
iris_class = iris_data.target + 1
iris_df.head()
###Output
_____no_output_____
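Before building any figures, a small hedged illustration of the "data validation" feature listed in the overview (the trace, the invalid value, and the exact error wording are assumptions; the message text varies by version):
```
from plotly.graph_objs import FigureWidget

fig = FigureWidget()
scatt = fig.add_scatter(y=[1, 3, 2])

try:
    scatt.mode = 'marker'          # invalid: the accepted flag is 'markers'
except Exception as e:             # the library raises a descriptive validation error
    print(type(e).__name__, e)     # the message lists the valid options for 'mode'
```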
###Markdown
Create and display an empty FigureWidgetA FigureWidget behaves almost identically to a Figure but it is also an ipywidget that can be displayed directly in the notebook without calling `iplot`
###Code
f1 = FigureWidget()
f1
###Output
_____no_output_____
###Markdown
Tab completion Entering ``f1.add_`` displays add methods for all of the supported trace types
###Code
# f1.add_
###Output
_____no_output_____
###Markdown
Entering ``f1.add_scatter()`` displays the names of all of the top-level properties for the scatter trace typeEntering ``f1.add_scatter()`` displays the signature pop-up. Expanding this pop-up reveals the method doc string which contains the descriptions of all of the top level properties
###Code
# f1.add_scatter(
###Output
_____no_output_____
###Markdown
Add scatter trace
###Code
scatt1 = f1.add_scatter(x=iris_df.sepal_length, y=iris_df.petal_width)
f1
scatt1.mode?
# That's not what we wanted, change the mode to 'markers'
scatt1.mode = 'markers'
# Set size to 8
scatt1.marker.size = 8
# Color markers by iris class
scatt1.marker.color = iris_class
# Change colorscale
scatt1.marker.cmin = 0.5
scatt1.marker.cmax = 3.5
scatt1.marker.colorscale = [[0, 'red'], [0.33, 'red'],
[0.33, 'green'], [0.67, 'green'],
[0.67, 'blue'], [1.0, 'blue']]
scatt1.marker.showscale = True
# Fix up colorscale ticks
scatt1.marker.colorbar.ticks = 'outside'
scatt1.marker.colorbar.tickvals = [1, 2, 3]
scatt1.marker.colorbar.ticktext = iris_data.target_names.tolist()
# Set colorscale title
scatt1.marker.colorbar.title = 'Species'
scatt1.marker.colorbar.titlefont.size = 16
scatt1.marker.colorbar.titlefont.family = 'Rockwell'
# Add axis labels
f1.layout.xaxis.title = 'sepal_length'
f1.layout.yaxis.title = 'petal_width'
f1
# Hover info
scatt1.text = iris_data.target_names[iris_data.target]
scatt1.hoverinfo = 'text+x+y'
f1.layout.hovermode = 'closest'
f1
###Output
_____no_output_____
###Markdown
Animate marker size change
###Code
# Set marker size based on petal_length
with f1.batch_animate(duration=1000):
scatt1.marker.size = np.sqrt(iris_df.petal_length.values * 50)
# Restore constant marker size
with f1.batch_animate(duration=1000):
scatt1.marker.size = 8
###Output
_____no_output_____
###Markdown
Set drag mode property callbackMake points more transparent when `dragmode` is `zoom`
###Code
def set_opacity(marker, layout, dragmode):
if dragmode == 'zoom':
marker.opacity = 0.5
else:
marker.opacity = 1.0
f1.layout.on_change(partial(set_opacity, scatt1.marker), 'dragmode')
###Output
_____no_output_____
###Markdown
Configure colorscale for brushing
###Code
scatt1.marker.colorbar = None
scatt1.marker.colorscale = [[0, 'lightgray'], [0.5, 'lightgray'], [0.5, 'red'], [1, 'red']]
scatt1.marker.cmin = -0.5
scatt1.marker.cmax = 1.5
scatt1.marker.colorbar.ticks = 'outside'
scatt1.marker.colorbar.tickvals = [0, 1]
scatt1.marker.colorbar.ticktext = ['unselected', 'selected']
# Reset colors to zeros (unselected)
scatt1.marker.color = np.zeros(iris_class.size)
selected = np.zeros(iris_class.size)
f1
###Output
_____no_output_____
###Markdown
Configure brushing callback
###Code
# Assigning these variables here is not required. But doing so tricks Jupyter into
# providing property tab completion on the parameters to the brush function below
trace, points, state = scatt1, Points(), InputDeviceState()
def brush(trace, points, state):
inds = np.array(points.point_inds)
if inds.size:
selected[inds] = 1
trace.marker.color = selected
scatt1.on_selected(brush)
###Output
_____no_output_____
###Markdown
Now box or lasso select points on the figure and see them turn red
###Code
# Reset brush
selected = np.zeros(iris_class.size)
scatt1.marker.color = selected
###Output
_____no_output_____
###Markdown
Create second plot with different features
###Code
f2 = FigureWidget(data=[{'type': 'scatter',
'x': iris_df.petal_length,
'y': iris_df.sepal_width,
'mode': 'markers'}])
f2
# Set axis titles
f2.layout.xaxis.title = 'petal_length'
f2.layout.yaxis.title = 'sepal_width'
# Grab trace reference
scatt2 = f2.data[0]
# Set marker styles / colorbars to match between figures
scatt2.marker = scatt1.marker
# Configure brush on both plots to update both plots
def brush(trace, points, state):
inds = np.array(points.point_inds)
if inds.size:
selected = scatt1.marker.color.copy()
selected[inds] = 1
scatt1.marker.color = selected
scatt2.marker.color = selected
scatt1.on_selected(brush)
scatt2.on_selected(brush)
f2.layout.on_change(partial(set_opacity, scatt2.marker), 'dragmode')
# Reset brush
def reset_brush(btn):
selected = np.zeros(iris_class.size)
scatt1.marker.color = selected
scatt2.marker.color = selected
# Create reset button
button = Button(description="clear")
button.on_click(reset_brush)
# Hide colorbar for figure 1
scatt1.marker.showscale = False
# Set dragmode to lasso for both plots
f1.layout.dragmode = 'lasso'
f2.layout.dragmode = 'lasso'
# Display two figures and the reset button
f1.layout.width = 500
f2.layout.width = 500
VBox([HBox([f1, f2]), button])
# Save figure 2 to a svg image in the exports directory
f2.save_image('exports/f2.svg')
# Save figure 1 to a pdf in the exports directory (requires cairosvg be installed)
# f1.save_image('exports/f1.pdf')
###Output
_____no_output_____ |